id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable) |
|---|---|---|
6,631 | import sys
import os
import sphinx_rtd_theme
from github_link import make_linkcode_resolve
import sphinx
from packaging.version import Version, parse
def setup(app):
# a copy button to copy snippet of code from the documentation
app.add_js_file("js/copybutton.js")
app.add_css_file("basic.css") | null |
6,632 | import os
import sys
import argparse
import json
import ast
import gc
import psutil
import signal
import pickle
import numpy as np
import warnings
from pathlib import Path
from scipy import stats
from memory_profiler import memory_usage
import benchmarks.trees.train as train
import benchmarks.trees.score as score
from benchmarks.trees.metrics import get_metrics
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import (
xgboost_installed,
lightgbm_installed,
sklearn_installed,
onnx_ml_tools_installed,
onnx_runtime_installed,
tvm_installed,
)
def get_number_processors(args):
    """Resolve the CPU count to benchmark with: 0 means "all physical cores"."""
    return psutil.cpu_count(logical=False) if args.cpus == 0 else args.cpus
6,633 | import os
import sys
import argparse
import json
import ast
import gc
import psutil
import signal
import pickle
import numpy as np
import warnings
from pathlib import Path
from scipy import stats
from memory_profiler import memory_usage
import benchmarks.trees.train as train
import benchmarks.trees.score as score
from benchmarks.trees.metrics import get_metrics
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import (
xgboost_installed,
lightgbm_installed,
sklearn_installed,
onnx_ml_tools_installed,
onnx_runtime_installed,
tvm_installed,
)
def print_sys_info(args):
    """Print interpreter, OS and ML-framework versions for reproducibility."""
    import xgboost
    import lightgbm
    import sklearn
    import torch

    print("System : %s" % sys.version)
    print("OS : %s" % sys.platform)
    print("Xgboost : %s" % xgboost.__version__)
    print("LightGBM: %s" % lightgbm.__version__)
    print("Sklearn : %s" % sklearn.__version__)
    print("PyTorch : %s" % torch.__version__)
    # onnxruntime and tvm are optional; report them only when importable.
    for label, module_name in (("ORT : %s", "onnxruntime"), ("TVM : %s", "tvm")):
        try:
            print(label % __import__(module_name).__version__)
        except ImportError:
            pass
    if args.gpu:
        print("Running on GPU")
    else:
        print("#CPU : %d" % args.cpus)
6,634 | import os
import sys
import argparse
import json
import ast
import gc
import psutil
import signal
import pickle
import numpy as np
import warnings
from pathlib import Path
from scipy import stats
from memory_profiler import memory_usage
import benchmarks.trees.train as train
import benchmarks.trees.score as score
from benchmarks.trees.metrics import get_metrics
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import (
xgboost_installed,
lightgbm_installed,
sklearn_installed,
onnx_ml_tools_installed,
onnx_runtime_installed,
tvm_installed,
)
def signal_handler(signum, frame):
    """SIGALRM handler: abort the current experiment after the 1-hour budget."""
    print("1 hour timeout triggered.")
    raise Exception("Timeout")


def set_signal():
    """Install the timeout handler; SIGALRM only exists on POSIX, gate on Linux."""
    if sys.platform != "linux":
        return
    signal.signal(signal.SIGALRM, signal_handler)
6,635 | import os
import sys
import argparse
import json
import ast
import gc
import psutil
import signal
import pickle
import numpy as np
import warnings
from pathlib import Path
from scipy import stats
from memory_profiler import memory_usage
import benchmarks.trees.train as train
import benchmarks.trees.score as score
from benchmarks.trees.metrics import get_metrics
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import (
xgboost_installed,
lightgbm_installed,
sklearn_installed,
onnx_ml_tools_installed,
onnx_runtime_installed,
tvm_installed,
)
# Repository root (three directories above this file); anchors the default
# dataset and model folders.
ROOT_PATH = Path(__file__).absolute().parent.parent.parent


def parse_args():
    """Parse command-line options for the tree-ensemble benchmark.

    Returns:
        argparse.Namespace: parsed options. ``output`` is defaulted to a
        file name derived from device/ntrees/max_depth/batch_size when it
        is not supplied on the command line.
    """
    parser = argparse.ArgumentParser(description="Benchmark xgboost/lightgbm/random forest on real datasets")
    parser.add_argument(
        "-dataset",
        default="all",
        type=str,
        help="The dataset to be used for benchmarking. 'all' for all datasets: "
        "fraud, epsilon, year, covtype, higgs, airline",
    )
    parser.add_argument(
        "-datadir", default=os.path.join(ROOT_PATH, "benchmarks/trees/datasets/"), type=str, help="The root datasets folder"
    )
    parser.add_argument(
        "-modeldir", default=os.path.join(ROOT_PATH, "benchmarks/trees/models/"), type=str, help="The root models folder"
    )
    parser.add_argument(
        "-operator", default="all", type=str, help=("Comma-separated list of operators to run; 'all' run rf, xgb and lgbm")
    )
    parser.add_argument(
        "-backend",
        default="all",
        type=str,
        help=(
            "Comma-separated list of frameworks to run against the baselines;" "'all' run onnx-ml, hb-pytorch, hb-torchscript"
        ),
    )
    parser.add_argument(
        "-cpus", default=6, type=int, help=("#CPUs to use for the benchmarks; 0 means psutil.cpu_count(logical=False)")
    )
    parser.add_argument(
        "-batch_size", default=10000, type=int, help=("Supported batch size. By default we score one record at a time.")
    )
    parser.add_argument(
        "-gpu",
        default=False,
        action="store_true",
        help=("Whether to run scoring on GPU (for the supported frameworks) or not"),
    )
    parser.add_argument("-output", default=None, type=str, help="Output json file with runtime/accuracy stats")
    parser.add_argument(
        "-ntrees",
        default=500,
        type=int,
        help=("Number of trees. Default is as specified in " "the respective dataset configuration"),
    )
    parser.add_argument(
        "-nrows",
        default=None,
        type=int,
        help=(
            "Subset of rows in the datasets to use. Useful for test running "
            "benchmarks on small amounts of data. WARNING: Some datasets will "
            "give incorrect accuracy results if nrows is specified as they have "
            "predefined train/test splits."
        ),
    )
    parser.add_argument("-niters", default=5, type=int, help=("Number of iterations for each experiment"))
    parser.add_argument(
        "-batch_benchmark",
        default=False,
        action="store_true",
        help=("Whether to do a single batch benchmark with specified batch_size and niters (not on the whole data)"),
    )
    parser.add_argument("-max_depth", default=8, type=int, help=("Maxmimum number of levels in the trees"))
    parser.add_argument(
        "-validate", default=False, action="store_true", help="Validate prediction output and fails accordingly."
    )
    parser.add_argument("-extra", default="{}", help="Extra arguments as a python dictionary")
    args = parser.parse_args()
    # Default value for output json file.
    if not args.output:
        args.output = "result-{}-{}-{}-{}.json".format("gpu" if args.gpu else args.cpus, args.ntrees, args.max_depth, args.batch_size)
    return args
6,636 | import os
import sys
import argparse
import json
import ast
import gc
import psutil
import signal
import pickle
import numpy as np
import warnings
from pathlib import Path
from scipy import stats
from memory_profiler import memory_usage
import benchmarks.trees.train as train
import benchmarks.trees.score as score
from benchmarks.trees.metrics import get_metrics
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import (
xgboost_installed,
lightgbm_installed,
sklearn_installed,
onnx_ml_tools_installed,
onnx_runtime_installed,
tvm_installed,
)
def set_alarm(timeout=0):
    """Arm a SIGALRM in `timeout` seconds (0 cancels any pending alarm); Linux only."""
    if sys.platform != "linux":
        return
    signal.alarm(timeout)
def get_data(data, size=-1):
    """Return `data` as a NumPy array, optionally truncated to the first `size` rows.

    Args:
        data: A NumPy array or any object exposing ``.to_numpy()`` (e.g. a DataFrame).
        size: Number of leading rows to keep; -1 (default) keeps everything.

    Raises:
        ValueError: If `size` exceeds the number of available rows.
    """
    np_data = data if isinstance(data, np.ndarray) else data.to_numpy()
    if size != -1:
        # Explicit raise instead of `assert`: asserts are stripped under
        # `python -O`, and the message was computed even on the happy path.
        if size > np_data.shape[0]:
            raise ValueError("Requested size bigger than the data size (%d vs %d)" % (size, np_data.shape[0]))
        np_data = np_data[0:size]
    return np_data
def get_metrics(y_test, pred, learning_task):
    """Compute the evaluation metrics appropriate for `learning_task`."""
    if learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
        return classification_metrics_multilabel(y_test, pred)
    if learning_task == LearningTask.CLASSIFICATION:
        return classification_metrics(y_test, pred)
    if learning_task == LearningTask.REGRESSION:
        return regression_metrics(y_test, pred)
    raise ValueError("No metrics defined for learning task: " + str(learning_task))
def prepare_dataset(dataset_folder, dataset, nrows):
    """Ensure `dataset_folder` exists, then delegate to the matching `prepare_<dataset>` loader."""
    os.makedirs(dataset_folder, exist_ok=True)
    # Loaders follow the naming convention prepare_<dataset> in this module.
    return globals()["prepare_" + dataset](dataset_folder, nrows)
def onnx_ml_tools_installed():
    """Return True when the optional *ONNXMLTools* package can be imported."""
    try:
        import onnxmltools  # noqa: F401
    except ImportError:
        print("ONNXMLTOOLS not installed. Please check https://github.com/onnx/onnxmltools for instructions.")
        return False
    return True
def onnx_runtime_installed():
    """Return True when the optional *ONNX Runtime* package can be imported."""
    try:
        import onnxruntime  # noqa: F401
    except ImportError:
        return False
    return True
def tvm_installed():
    """Return True when the optional *TVM* package can be imported."""
    try:
        import tvm  # noqa: F401
        return True
    except ImportError:
        return False
def benchmark(args, dataset_folder, model_folder, dataset):
    """Train (or load) each requested tree ensemble on `dataset`, then time the
    baseline predictor and every requested conversion backend.

    Args:
        args: Parsed CLI namespace (operator, backend, niters, batch_size, ...).
        dataset_folder: Folder holding (or receiving) the dataset files.
        model_folder: Folder holding (or receiving) pickled trained models.
        dataset: Dataset name, e.g. "higgs".

    Returns:
        dict: per-operator (and per-backend) timing / accuracy / memory stats.
    """
    warnings.filterwarnings("ignore")
    data = prepare_dataset(dataset_folder, dataset, args.nrows)
    results = {}
    args.dataset = dataset
    operators = args.operator
    if operators == "all":
        operators = "rf,lgbm,xgb"
    for op in operators.split(","):
        print("Running '%s' ..." % op)
        results[op] = {}
        # Model cache key encodes everything that influences training.
        model_name = op + "-" + str(args.ntrees) + "-" + str(args.max_depth) + "-" + str(args.cpus)
        model_full_name = os.path.join(model_folder, model_name + ".pkl")
        trainer = train.TrainEnsembleAlgorithm.create(op, data.learning_task)
        # With -batch_benchmark we only score a single batch, not the full test set.
        if args.batch_benchmark:
            test_size = args.batch_size
        else:
            test_size = data.X_test.shape[0]
        X_test = get_data(data.X_test, size=test_size)
        y_test = get_data(data.y_test, size=test_size)
        with trainer:
            if not os.path.exists(model_full_name):
                train_time = trainer.fit(data, args)
                pred = trainer.test(X_test)
                results[op] = {
                    "train_time": str(train_time),
                    "train_accuracy": str(get_metrics(y_test, pred, data.learning_task)),
                }
                model = trainer.model
                if not os.path.exists(model_folder):
                    os.makedirs(model_folder)
                # Context managers close the pickle files even on failure
                # (previously the file objects were leaked).
                with open(model_full_name, "wb") as model_file:
                    pickle.dump(model, model_file, protocol=4)
            else:
                with open(model_full_name, "rb") as model_file:
                    model = pickle.load(model_file)
            times = []
            mean = 0
            mem = 0
            try:
                for i in range(args.niters):
                    set_alarm(3600)  # 1-hour budget per prediction run
                    times.append(trainer.predict(model, X_test, args))
                    set_alarm(0)
                mean = stats.trim_mean(times, 1 / len(times)) if args.niters > 1 else times[0]
                gc.collect()
                mem = max(memory_usage((trainer.predict, (model, X_test, args))))
            except Exception as e:
                # Best effort: a timeout/failure leaves mean/mem at their defaults.
                print(e)
            results[op].update({"prediction_time": mean})
            results[op].update({"peak_mem": mem})
            outer_ops = args.operator
            args.operator = op
            if args.backend == "all":
                args.backend = "onnx-ml,hb-pytorch,hb-torchscript,hb-onnx,hb-tvm"
            # BUG FIX: these checks previously asserted the *function objects*
            # (always truthy) instead of calling them, so missing optional
            # dependencies were never detected here.
            if "hb-tvm" in args.backend:
                assert (
                    tvm_installed()
                ), "To run benchmark with TVM you need to have TVM installed. Either install TVM or remove it from the backends."
            if "hb-onnx" in args.backend:
                assert (
                    onnx_runtime_installed()
                ), "To run benchmark with ONNX you need to have ONNX runtime installed. Either install ONNX runtime or remove ONNX from the backends."
            if "onnx-ml" in args.backend:
                assert (
                    onnx_runtime_installed() and onnx_ml_tools_installed()
                ), "To run benchmark with ONNX-ML you need to have ONNX runtime and ONNXMLTOOLS installed. Either install ONNX runtime and ONNXMLTOOLS or remove ONNX-ML from the backends."
            for backend in args.backend.split(","):
                print("Running '%s' ..." % backend)
                scorer = score.ScoreBackend.create(backend)
                with scorer:
                    conversion_time = scorer.convert(model, data, X_test, args, os.path.join(model_folder, model_name))
                    times = []
                    prediction_time = 0
                    # BUG FIX: reset per backend, otherwise a failing backend
                    # reported the previous measurement's peak memory.
                    mem = 0
                    try:
                        for i in range(args.niters):
                            set_alarm(3600)
                            times.append(scorer.predict(X_test))
                            set_alarm(0)
                        prediction_time = times[0] if args.niters == 1 else stats.trim_mean(times, 1 / len(times))
                        gc.collect()
                        mem = max(memory_usage((scorer.predict, (X_test,))))
                    except Exception as e:
                        print(e)
                    results[op][backend] = {
                        "conversion_time": str(conversion_time),
                        "prediction_time": str(prediction_time),
                        "peak_mem": str(mem),
                        "is_same_output": "None"
                        if len(trainer.predictions) == 0 or scorer is None or scorer.predictions is None
                        else np.allclose(trainer.predictions, scorer.predictions, atol=1e-6),
                    }
                    print(results[op][backend])
                    if args.validate:
                        np.testing.assert_allclose(scorer.predictions, trainer.predictions, rtol=1e-5, atol=1e-6)
            args.operator = outer_ops
    return results
6,637 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
    """Type of supervised learning problem a dataset represents."""

    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:
    """Bundle of train/test splits plus task metadata for one dataset."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
        """Store the splits; `qid_*` carry query ids and are used for ranking tasks only."""
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def prepare_airline(dataset_folder, nrows):  # pylint: disable=too-many-locals
    """Download and preprocess the airline on-time dataset as a binary task.

    The prepared `Data` object is cached as a pickle inside `dataset_folder`
    so subsequent runs skip the expensive download/parsing step.
    """
    url = "http://kt.ijs.si/elena_ikonomovska/datasets/airline/airline_14col.data.bz2"
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder, "airline" + ("" if nrows is None else "-" + str(nrows)) + "-pickle.dat")
    if os.path.exists(pickle_url):
        # Fast path: a previously prepared split is cached on disk.
        # `with` closes the handle even on unpickling errors (was leaked before).
        with open(pickle_url, "rb") as cache_file:
            return pickle.load(cache_file)
    print("Preparing dataset ...")
    if not os.path.isfile(local_url):
        urlretrieve(url, local_url)
    cols = [
        "Year",
        "Month",
        "DayofMonth",
        "DayofWeek",
        "CRSDepTime",
        "CRSArrTime",
        "UniqueCarrier",
        "FlightNum",
        "ActualElapsedTime",
        "Origin",
        "Dest",
        "Distance",
        "Diverted",
        "ArrDelay",
    ]
    # load the data as int16
    dtype = np.int16
    dtype_columns = {
        "Year": dtype,
        "Month": dtype,
        "DayofMonth": dtype,
        "DayofWeek": dtype,
        "CRSDepTime": dtype,
        "CRSArrTime": dtype,
        "FlightNum": dtype,
        "ActualElapsedTime": dtype,
        "Distance": dtype,
        "Diverted": dtype,
        "ArrDelay": dtype,
    }
    df = pd.read_csv(local_url, names=cols, dtype=dtype_columns, nrows=nrows)
    # Encode categoricals as numeric
    for col in df.select_dtypes(["object"]).columns:
        df[col] = df[col].astype("category").cat.codes
    # Turn into binary classification problem
    df["ArrDelayBinary"] = 1 * (df["ArrDelay"] > 0)
    X = df[df.columns.difference(["ArrDelay", "ArrDelayBinary"])]
    y = df["ArrDelayBinary"]
    del df
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77, test_size=0.2,)
    data = Data(
        X_train.astype("|f4").to_numpy(), X_test.astype("|f4").to_numpy(), y_train, y_test, LearningTask.CLASSIFICATION
    )
    with open(pickle_url, "wb") as cache_file:
        pickle.dump(data, cache_file, protocol=4)
    return data
6,638 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
class Data:
def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
def prepare_fraud(dataset_folder, nrows):
    """Download and preprocess the credit-card fraud dataset (binary task).

    Results are cached as a pickle in `dataset_folder` keyed on `nrows`.
    """
    url = "https://datahub.io/machine-learning/creditcard/r/creditcard.csv"
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder, "fraud" + ("" if nrows is None else "-" + str(nrows)) + "-pickle.dat")
    if os.path.exists(pickle_url):
        # `with` ensures the cache file is closed (previously leaked).
        with open(pickle_url, "rb") as cache_file:
            return pickle.load(cache_file)
    print("Preparing dataset ...")
    if not os.path.isfile(local_url):
        urlretrieve(url, local_url)
    df = pd.read_csv(local_url, nrows=nrows)
    # Features are the anonymized PCA components V1..V28.
    X = df[[col for col in df.columns if col.startswith("V")]]
    y = df["Class"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77, test_size=0.2,)
    data = Data(
        X_train.astype("|f4").to_numpy(), X_test.astype("|f4").to_numpy(), y_train, y_test, LearningTask.CLASSIFICATION
    )
    with open(pickle_url, "wb") as cache_file:
        pickle.dump(data, cache_file, protocol=4)
    return data
6,639 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
    """Type of supervised learning problem a dataset represents."""

    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:
    """Bundle of train/test splits plus task metadata for one dataset."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
        """Store the splits; `qid_*` carry query ids and are used for ranking tasks only."""
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def prepare_higgs(dataset_folder, nrows):
    """Download and preprocess the HIGGS dataset (binary task), with pickle caching."""
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder, "higgs" + ("" if nrows is None else "-" + str(nrows)) + "-pickle.dat")
    if os.path.exists(pickle_url):
        # `with` ensures the cache file handle is closed (previously leaked).
        with open(pickle_url, "rb") as cache_file:
            return pickle.load(cache_file)
    print("Preparing dataset ...")
    if not os.path.isfile(local_url):
        urlretrieve(url, local_url)
    # NOTE(review): `error_bad_lines` is deprecated since pandas 1.3 and was
    # removed in 2.0; migrate to `on_bad_lines="skip"` once the minimum
    # supported pandas version allows it.
    higgs = pd.read_csv(local_url, nrows=nrows, error_bad_lines=False)
    # Column 0 is the label; the remaining columns are the features.
    X = higgs.iloc[:, 1:]
    y = higgs.iloc[:, 0]
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77, test_size=0.2,)
    data = Data(
        X_train.astype("|f4").to_numpy(), X_test.astype("|f4").to_numpy(), y_train, y_test, LearningTask.CLASSIFICATION
    )
    with open(pickle_url, "wb") as cache_file:
        pickle.dump(data, cache_file, protocol=4)
    return data
6,640 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
class Data:
def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
def prepare_year(dataset_folder, nrows):
    """Download and preprocess the YearPredictionMSD regression dataset, with pickle caching."""
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt" ".zip"
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder, "year" + ("" if nrows is None else "-" + str(nrows)) + "-pickle.dat")
    if os.path.exists(pickle_url):
        # `with` ensures the cache file handle is closed (previously leaked).
        with open(pickle_url, "rb") as cache_file:
            return pickle.load(cache_file)
    print("Preparing dataset ...")
    if not os.path.isfile(local_url):
        urlretrieve(url, local_url)
    year = pd.read_csv(local_url, nrows=nrows, header=None)
    # Column 0 is the release year (target); the rest are audio features.
    X = year.iloc[:, 1:]
    y = year.iloc[:, 0]
    if nrows is None:
        # this dataset requires a specific train/test split,
        # with the specified number of rows at the start belonging to the train set,
        # and the rest being the test set
        X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False, train_size=463715, test_size=51630)
    else:
        print("Warning: nrows is specified, not using predefined test/train split for " "YearPredictionMSD.")
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77, test_size=0.2,)
    data = Data(X_train.astype("|f4").to_numpy(), X_test.astype("|f4").to_numpy(), y_train, y_test, LearningTask.REGRESSION)
    with open(pickle_url, "wb") as cache_file:
        pickle.dump(data, cache_file, protocol=4)
    return data
6,641 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
    """Type of supervised learning problem a dataset represents."""

    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:
    """Bundle of train/test splits plus task metadata for one dataset."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
        """Store the splits; `qid_*` carry query ids and are used for ranking tasks only."""
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def prepare_epsilon(dataset_folder, nrows):
    """Download and preprocess the epsilon dataset (libsvm, binary task), with pickle caching."""
    url_train = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary" "/epsilon_normalized.bz2"
    url_test = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary" "/epsilon_normalized.t.bz2"
    pickle_url = os.path.join(dataset_folder, "epsilon" + ("" if nrows is None else "-" + str(nrows)) + "-pickle.dat")
    local_url_train = os.path.join(dataset_folder, os.path.basename(url_train))
    local_url_test = os.path.join(dataset_folder, os.path.basename(url_test))
    if os.path.exists(pickle_url):
        # `with` ensures the cache file handle is closed (previously leaked).
        with open(pickle_url, "rb") as cache_file:
            return pickle.load(cache_file)
    print("Preparing dataset ...")
    if not os.path.isfile(local_url_train):
        urlretrieve(url_train, local_url_train)
    if not os.path.isfile(local_url_test):
        urlretrieve(url_test, local_url_test)
    X_train, y_train = load_svmlight_file(local_url_train, dtype=np.float32)
    X_test, y_test = load_svmlight_file(local_url_test, dtype=np.float32)
    X_train = X_train.toarray()
    X_test = X_test.toarray()
    # Labels come as {-1, +1}; remap the negative class to 0.
    y_train[y_train <= 0] = 0
    y_test[y_test <= 0] = 0
    if nrows is not None:
        print("Warning: nrows is specified, not using predefined test/train split for epsilon.")
        # Merge the predefined splits, truncate, then re-split randomly.
        X_train = np.vstack((X_train, X_test))
        y_train = np.append(y_train, y_test)
        X_train = X_train[:nrows]
        y_train = y_train[:nrows]
        X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, random_state=77, test_size=0.2,)
    data = Data(X_train.astype("|f4"), X_test.astype("|f4"), y_train, y_test, LearningTask.CLASSIFICATION)
    with open(pickle_url, "wb") as cache_file:
        pickle.dump(data, cache_file, protocol=4)
    return data
6,642 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
    """Type of supervised learning problem a dataset represents."""

    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:
    """Bundle of train/test splits plus task metadata for one dataset."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
        """Store the splits; `qid_*` carry query ids and are used for ranking tasks only."""
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def prepare_covtype(dataset_folder, nrows=None):
    """Load the scikit-learn covertype dataset as a multiclass task.

    `dataset_folder` is unused; the parameter is kept for API parity with
    the other `prepare_*` loaders.
    """
    from sklearn.datasets import fetch_covtype

    print("Preparing dataset ...")
    X, y = fetch_covtype(return_X_y=True)  # pylint: disable=unexpected-keyword-arg
    if nrows is not None:
        X = X[0:nrows]
        y = y[0:nrows]
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77, test_size=0.2,)
    return Data(X_train.astype("|f4"), X_test.astype("|f4"), y_train, y_test, LearningTask.MULTICLASS_CLASSIFICATION)
6,643 | import os
from enum import Enum
import pandas as pd
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_svmlight_file
from urllib.request import urlretrieve
class LearningTask(Enum):
    """Type of supervised learning problem a dataset represents."""

    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:
    """Bundle of train/test splits plus task metadata for one dataset."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None, qid_test=None):
        """Store the splits; `qid_*` carry query ids and are used for ranking tasks only."""
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def _modify_dimension(X, n_features):
    """
    Modifies the number of features to increase
    or reduce the number of features.

    Extra columns are synthesized from existing ones — noisy copies for
    float dtypes, partially shuffled copies for int dtypes. Uses the
    *global* NumPy RNG, so the output is non-deterministic unless the
    caller seeds it.
    """
    if n_features is None or n_features == X.shape[1]:
        return X
    if n_features < X.shape[1]:
        # Shrinking: keep the leading columns only.
        return X[:, :n_features]
    res = np.empty((X.shape[0], n_features), dtype=X.dtype)
    res[:, : X.shape[1]] = X[:, :]
    # div grows with the expansion factor and scales down the injected noise.
    div = max((n_features // X.shape[1]) + 1, 2)
    for i in range(X.shape[1], res.shape[1]):
        # Each synthetic column i is derived from source column j (cyclic).
        j = i % X.shape[1]
        col = X[:, j]
        if X.dtype in (np.float32, np.float64):
            sigma = np.var(col) ** 0.5
            rnd = np.random.randn(len(col)) * sigma / div
            col2 = col + rnd
            # NOTE(review): this also perturbs the already-copied column j of
            # the output in place — presumably intentional to decorrelate the
            # duplicate features, but confirm against the upstream source.
            res[:, j] -= col2 / div
            res[:, i] = col2
        elif X.dtype in (np.int32, np.int64):
            # Replace every div-th entry (starting at a random offset) with a
            # permuted value from the same column.
            perm = np.random.permutation(col)
            h = np.random.randint(0, div) % X.shape[0]
            col2 = col.copy()
            col2[h::div] = perm[h::div]  # pylint: disable=E1136
            res[:, i] = col2
            h = (h + 1) % X.shape[0]
            res[h, j] = perm[h]  # pylint: disable=E1136
        else:
            raise NotImplementedError("Unable to add noise to a feature for this type {}".format(X.dtype))
    return res
def _make_n_rows(x, n, y=None):
"""
Multiplies or reduces the rows of x to get
exactly *n* rows.
"""
if n < x.shape[0]:
if y is None:
return x[:n].copy()
return x[:n].copy(), y[:n].copy()
if len(x.shape) < 2:
r = np.empty((n,), dtype=x.dtype)
if y is not None:
ry = np.empty((n,), dtype=y.dtype)
for i in range(0, n, x.shape[0]):
end = min(i + x.shape[0], n)
r[i:end] = x[0 : end - i]
if y is not None:
ry[i:end] = y[0 : end - i]
else:
r = np.empty((n, x.shape[1]), dtype=x.dtype)
if y is not None:
if len(y.shape) < 2:
ry = np.empty((n,), dtype=y.dtype)
else:
ry = np.empty((n, y.shape[1]), dtype=y.dtype)
for i in range(0, n, x.shape[0]):
end = min(i + x.shape[0], n)
r[i:end, :] = x[0 : end - i, :]
if y is not None:
if len(y.shape) < 2:
ry[i:end] = y[0 : end - i]
else:
ry[i:end, :] = y[0 : end - i, :]
if y is None:
return r
return r, ry
def prepare_iris(dataset_folder, nrows):
    """Build an iris-based benchmark dataset widened to 20 synthetic features.

    `dataset_folder` is unused; the parameter is kept for API parity with the
    other `prepare_*` loaders.

    NOTE(review): the returned test split is regenerated from the *full* X/y
    (not from X_test/y_test), tiled to `nrows` rows, so train and "test" rows
    overlap — presumably acceptable for throughput benchmarking; confirm.
    """
    from sklearn.datasets import load_iris

    data = load_iris()
    X, y = data.data, data.target
    X = _modify_dimension(X, 20)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    X_test, y_test = _make_n_rows(X, nrows, y)
    return Data(
        X_train.astype("|f4"),
        X_test.astype("|f4"),
        y_train.astype("|i4"),
        y_test.astype("|i4"),
        LearningTask.MULTICLASS_CLASSIFICATION,
    )
6,644 | import os
import sys
import argparse
import json
import ast
import psutil
from pathlib import Path
import signal
import time
import numpy as np
import sklearn
import joblib
import torch
from scipy import stats
import gc
import benchmarks.pipelines.score as score
from benchmarks.timer import Timer
def print_sys_info(args):
    """Report interpreter/OS/framework versions for this pipeline benchmark run."""
    print("System : %s" % sys.version)
    print("OS : %s" % sys.platform)
    print("Sklearn: %s" % sklearn.__version__)
    print("Torch: %s" % torch.__version__)
    # Optional runtimes: only reported when importable.
    for label, module_name in (("ORT : %s", "onnxruntime"), ("TVM : %s", "tvm")):
        try:
            print(label % __import__(module_name).__version__)
        except ImportError:
            pass
    if args.gpu:
        print("Running on GPU")
    else:
        print("#CPU {}".format(psutil.cpu_count(logical=False)))
6,645 | import os
import sys
import argparse
import json
import ast
import psutil
from pathlib import Path
import signal
import time
import numpy as np
import sklearn
import joblib
import torch
from scipy import stats
import gc
import benchmarks.pipelines.score as score
from benchmarks.timer import Timer
def set_alarm(timeout=0):
    """Schedule a SIGALRM after `timeout` seconds (0 cancels); no-op off Linux."""
    if sys.platform != "linux":
        return
    signal.alarm(timeout)
6,646 | import os
import sys
import argparse
import json
import ast
import psutil
from pathlib import Path
import signal
import time
import numpy as np
import sklearn
import joblib
import torch
from scipy import stats
import gc
import benchmarks.pipelines.score as score
from benchmarks.timer import Timer
def signal_handler(signum, frame):
    """SIGALRM handler: abort the running experiment when the 1-hour budget expires."""
    print("1 hour timeout triggered.")
    raise Exception("Timeout")


def set_signal():
    """Install `signal_handler` for SIGALRM; SIGALRM is POSIX-only, so gate on Linux."""
    if sys.platform != "linux":
        return
    signal.signal(signal.SIGALRM, signal_handler)
6,647 | import os
import sys
import argparse
import json
import ast
import psutil
from pathlib import Path
import signal
import time
import numpy as np
import sklearn
import joblib
import torch
from scipy import stats
import gc
import benchmarks.pipelines.score as score
from benchmarks.timer import Timer
# Repository root (three directories above this file); anchors the default
# pipelines folder.
ROOT_PATH = Path(__file__).absolute().parent.parent.parent


def parse_args():
    """Parse command-line options for the OpenML pipelines benchmark.

    Returns:
        argparse.Namespace: parsed options; `output` is defaulted to a file
        name derived from the scoring device when not given explicitly.
    """
    parser = argparse.ArgumentParser(description="Benchmark for OpenML pipelines")
    parser.add_argument(
        "-pipedir",
        default=os.path.join(ROOT_PATH, "benchmarks/pipelines/openml-cc18/"),
        type=str,
        help=("The root folder containing all pipelines"),
    )
    parser.add_argument("-backend", default="torch", type=str, help=("Comma-separated list of Hummingbird's backends to run"))
    parser.add_argument("-gpu", default=False, action="store_true", help=("Whether to run scoring on GPU or not"))
    parser.add_argument("-output", default=None, type=str, help="Output json file with runtime stats")
    parser.add_argument("-niters", default=5, type=int, help=("Number of iterations for each experiment"))
    # BUG FIX: `-validate` is a boolean switch; without action="store_true" it
    # consumed a positional string value, unlike the identical flag in the
    # trees benchmark. Also fixed the "accordigly" typo in the help text.
    parser.add_argument(
        "-validate", default=False, action="store_true", help="Validate prediction output and fails accordingly."
    )
    args = parser.parse_args()
    # default value for output json file
    if not args.output:
        args.output = "result-{}.json".format("gpu" if args.gpu else "cpu")
    return args
6,648 | from packaging.version import Version, parse
import openml
import sklearn
import operator
import keyword
import re
from pathlib import Path
import numpy as np
import random
import os
import joblib
import warnings
from sklearn.datasets import fetch_openml
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA, TruncatedSVD, FastICA, KernelPCA
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_selection import SelectKBest, VarianceThreshold, SelectPercentile
from sklearn.linear_model import LogisticRegression, SGDClassifier, LogisticRegressionCV
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import (
OneHotEncoder,
RobustScaler,
MaxAbsScaler,
MinMaxScaler,
StandardScaler,
Normalizer,
Binarizer,
KBinsDiscretizer,
PolynomialFeatures,
LabelEncoder,
)
from sklearn.impute import SimpleImputer as Imputer
from sklearn.svm import LinearSVC, SVC, NuSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.ensemble import ExtraTreesClassifier, AdaBoostClassifier
from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import HistGradientBoostingClassifier
from sklearn.pipeline import FeatureUnion
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from hummingbird.ml.supported import sklearn_operator_list
def init_parameters(p, run_id, component_map):
    """Hydrate the hyper-parameters of an OpenML run's pipeline components.

    Fetches the run `run_id` from OpenML and applies each recorded parameter
    setting to the matching component in `component_map` (keyed by the run's
    component id).

    NOTE(review): the loop below rebinds `p`, shadowing the `p` parameter,
    which is never read beforehand — presumably vestigial; confirm with callers.

    SECURITY: parameter values downloaded from OpenML are applied via
    eval/exec — only use against trusted runs.
    """
    run = openml.runs.get_run(run_id)
    for p in run.parameter_settings:
        name = p["oml:name"]
        value = p["oml:value"].strip()
        comp_id = int(p["oml:component"])
        comp = component_map[comp_id]
        # Use all cores on components that support parallelism.
        if hasattr(comp, "n_jobs") and comp.n_jobs != -1:
            comp.n_jobs = -1
        # Normalize JSON-style booleans/nulls to Python literals before eval.
        if value == "true":
            value = "True"
        elif value == "false":
            value = "False"
        elif value in ["null", "Null", "NULL", "none", "NONE"]:
            value = "None"
        # Skip parameters that are deprecated or must keep their defaults.
        if (
            name == "validation_fraction"
            or name == "dtype"
            or value in ["deprecated", '"deprecated"']
            or isinstance(comp, sklearn.pipeline.Pipeline) and name == "steps"
            or isinstance(comp, sklearn.linear_model.SGDClassifier) and name == "max_iter" and value == "None"
        ):
            continue
        # ColumnTransformer: rebuild each (name, transformer, columns) triple
        # with the column ids recorded in the run.
        if str(type(comp).__name__) == "ColumnTransformer" and name == "transformers":
            value = value.replace("true", "True").replace("false", "False")
            for p in eval(value):
                key = p["value"]["key"]
                ids = eval(str(p["value"]["argument_1"]))
                for i in range(len(comp.transformers)):
                    if comp.transformers[i][0] == key:
                        comp.transformers[i] = (key, comp.transformers[i][1], ids)
                        break
            continue
        # Legacy OneHotEncoder: restore the recorded categorical column indices.
        if isinstance(comp, sklearn.preprocessing.OneHotEncoder) and name == "categorical_features":
            idx = eval(value)
            if idx is not None and len(idx) > 0:
                comp.categorical_features = idx
            continue
        if value in ["NaN", '"NaN"']:
            exec("comp.{} = np.nan".format(name))
        else:
            try:
                exec("comp.{} = {}".format(name, value))
            except Exception:
                # Fall back to assigning the raw (quote-stripped) string value.
                exec("comp.{} = '{}'".format(name, value.replace("'", "").replace('"', "")))
6,649 | import os
import sys
import argparse
import json
import ast
from pathlib import Path
import psutil
import signal
import numpy as np
import warnings
import gc
from scipy import stats
from memory_profiler import memory_usage
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing.data import (
Binarizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
import benchmarks.operators.train as train
import benchmarks.operators.score as score
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import sklearn_installed, onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
def get_number_processors(args):
    """Resolve the worker count: ``args.cpus`` when non-zero, else physical cores."""
    requested = args.cpus
    if requested != 0:
        return requested
    # 0 means "auto": use the number of physical (non-hyperthreaded) cores.
    return psutil.cpu_count(logical=False)
6,650 | import os
import sys
import argparse
import json
import ast
from pathlib import Path
import psutil
import signal
import numpy as np
import warnings
import gc
from scipy import stats
from memory_profiler import memory_usage
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing.data import (
Binarizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
import benchmarks.operators.train as train
import benchmarks.operators.score as score
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import sklearn_installed, onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
def print_sys_info(args):
    """Print interpreter, OS and library versions plus the CPU/GPU setup in use."""
    import sklearn
    import torch

    # Mandatory components: always reported, in a fixed order.
    for fmt, value in (
        ("System : %s", sys.version),
        ("OS : %s", sys.platform),
        ("Sklearn : %s", sklearn.__version__),
        ("PyTorch : %s", torch.__version__),
    ):
        print(fmt % value)

    # Optional backends: only reported when importable.
    try:
        import onnxruntime

        print("ORT : %s" % onnxruntime.__version__)
    except ImportError:
        pass
    try:
        import tvm

        print("TVM : %s" % tvm.__version__)
    except ImportError:
        pass

    if not args.gpu:
        print("#CPU : %d" % args.cpus)
    else:
        print("Running on GPU")
6,651 | import os
import sys
import argparse
import json
import ast
from pathlib import Path
import psutil
import signal
import numpy as np
import warnings
import gc
from scipy import stats
from memory_profiler import memory_usage
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing.data import (
Binarizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
import benchmarks.operators.train as train
import benchmarks.operators.score as score
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import sklearn_installed, onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
def signal_handler(signum, frame):
    """SIGALRM handler: announce the one-hour timeout and abort via an exception."""
    message = "1 hour timeout triggered."
    print(message)
    raise Exception("Timeout")
def set_signal():
    """Install the SIGALRM timeout handler (Linux only; no-op elsewhere)."""
    if sys.platform != "linux":
        return
    signal.signal(signal.SIGALRM, signal_handler)
6,652 | import os
import sys
import argparse
import json
import ast
from pathlib import Path
import psutil
import signal
import numpy as np
import warnings
import gc
from scipy import stats
from memory_profiler import memory_usage
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing.data import (
Binarizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
import benchmarks.operators.train as train
import benchmarks.operators.score as score
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import sklearn_installed, onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
# Repository root: three levels up from this file (benchmarks/operators/<file>).
ROOT_PATH = Path(__file__).absolute().parent.parent.parent


def parse_args():
    """Parse the command-line arguments for the operators benchmark.

    :return: an ``argparse.Namespace``; when ``-output`` is not supplied it is
        derived from the device (cpu/gpu) and the number of rows.
    """
    parser = argparse.ArgumentParser(description="Benchmark sklearn/HB on iris")
    parser.add_argument("-dataset", default="iris", type=str, help="The dataset to be used for benchmarking.")
    parser.add_argument(
        "-datadir",
        default=os.path.join(ROOT_PATH, "benchmarks/operators/datasets/"),
        type=str,
        help="The root datasets folder",
    )
    parser.add_argument(
        "-modeldir", default=os.path.join(ROOT_PATH, "benchmarks/operators/models/"), type=str, help="The root models folder"
    )
    parser.add_argument(
        "-operator", default="all", type=str, help=("Comma-separated list of operators to run; " "'all' run all")
    )
    parser.add_argument(
        "-backend",
        default="all",
        type=str,
        help=("Comma-separated list of train algorithms to run; " "'all' run onnx-ml, hb-torchscript, hb-tvm"),
    )
    parser.add_argument(
        "-cpus", default=1, type=int, help=("#CPUs to use for the benchmarks; " "0 means psutil.cpu_count(logical=False)")
    )
    parser.add_argument(
        "-batch_size", default=1000000, type=int, help=("Supported batch size. By default we score one record at a time.")
    )
    # BUG FIX: help text used to read "scoring on SPU".
    parser.add_argument(
        "-gpu", default=False, action="store_true", help=("Whether to run scoring on GPU or not. Adding this flag uses gpu")
    )
    parser.add_argument("-output", default=None, type=str, help="Output json file with runtime/accuracy stats")
    parser.add_argument(
        "-nrows",
        default=1000000,
        type=int,
        help=(
            "Subset of rows in the datasets to use. Useful for test running "
            "benchmarks on small amounts of data. WARNING: Some datasets will "
            "give incorrect accuracy results if nrows is specified as they have "
            "predefined train/test splits."
        ),
    )
    parser.add_argument("-niters", default=5, type=int, help=("Number of iterations for each experiment"))
    # NOTE(review): without action="store_true" any supplied value — even the
    # string "False" — parses as truthy. Kept as-is for CLI compatibility;
    # confirm whether callers rely on passing an explicit value here.
    parser.add_argument("-validate", default=False, help="Validate prediction output and fails accordingly.")
    parser.add_argument("-extra", default="{}", help="Extra arguments as a python dictionary")
    args = parser.parse_args()
    # default value for output json file
    if not args.output:
        args.output = "result-{}-{}.json".format("gpu" if args.gpu else "cpu", args.nrows)
    return args
6,653 | import os
import sys
import argparse
import json
import ast
from pathlib import Path
import psutil
import signal
import numpy as np
import warnings
import gc
from scipy import stats
from memory_profiler import memory_usage
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing.data import (
Binarizer,
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PolynomialFeatures,
RobustScaler,
StandardScaler,
)
from sklearn.svm.classes import LinearSVC, NuSVC, SVC
import benchmarks.operators.train as train
import benchmarks.operators.score as score
from benchmarks.datasets import prepare_dataset, LearningTask
from hummingbird.ml._utils import sklearn_installed, onnx_ml_tools_installed, onnx_runtime_installed, tvm_installed
# Operator classes benchmarked when "-operator all" is requested; each class
# name doubles as the operator identifier on the command line.
# NOTE(review): MaxAbsScaler and RobustScaler are imported at the top of the
# file but absent from this list — confirm whether that is deliberate.
ALL_OPS = [
    # Linear models
    LogisticRegression,
    SGDClassifier,
    LinearSVC,
    NuSVC,
    SVC,
    # Classifiers: Other
    BernoulliNB,
    MLPClassifier,
    # Trees
    DecisionTreeClassifier,
    # Feature Pre-processing
    Binarizer,
    MinMaxScaler,
    Normalizer,
    PolynomialFeatures,
    StandardScaler,
]
def set_alarm(timeout=0):
    """Arm a SIGALRM in ``timeout`` seconds on Linux (0 cancels); no-op elsewhere."""
    if sys.platform != "linux":
        return
    signal.alarm(timeout)
def prepare_dataset(dataset_folder, dataset, nrows):
    """Ensure ``dataset_folder`` exists, then dispatch to ``prepare_<dataset>``."""
    if not os.path.exists(dataset_folder):
        os.makedirs(dataset_folder)
    # Dispatch by name: each dataset ships a module-level prepare_<name> loader.
    loader = globals()["prepare_" + dataset]
    return loader(dataset_folder, nrows)
def onnx_ml_tools_installed():
    """Return True when *ONNXMLTools* can be imported, False otherwise (with a hint)."""
    try:
        import onnxmltools  # noqa: F401
    except ImportError:
        print("ONNXMLTOOLS not installed. Please check https://github.com/onnx/onnxmltools for instructions.")
        return False
    return True
def onnx_runtime_installed():
    """Return True when *ONNX Runtime* can be imported, False otherwise."""
    try:
        import onnxruntime  # noqa: F401
    except ImportError:
        return False
    return True
def tvm_installed():
    """Return True when *TVM* can be imported, False otherwise."""
    try:
        import tvm  # noqa: F401

        return True
    except ImportError:
        return False
def benchmark(args, dataset_folder, model_folder, dataset):
    """Run the operators benchmark for one dataset.

    Trains every requested operator, measures scikit-learn prediction time and
    peak memory, then converts the trained model with every requested backend
    and records conversion time, prediction time, peak memory and whether the
    backend's output matches scikit-learn's.

    :param args: parsed command-line namespace (``args.dataset`` is set here;
        ``args.backend`` may be expanded from "all")
    :param dataset_folder: folder holding/receiving the dataset files
    :param model_folder: folder where converted models are stored
    :param dataset: name of the dataset to benchmark
    :return: nested dict ``{operator: {stat: value, backend: {stat: value}}}``
    """
    warnings.filterwarnings("ignore")
    data = prepare_dataset(dataset_folder, dataset, args.nrows)
    results = {}
    # "all" runs all operators
    args.dataset = dataset
    operators = args.operator
    if operators == "all":
        operators = {k.__name__: k for k in ALL_OPS}
        operators = ",".join(operators.keys())
    for op in operators.split(","):
        print("Running '%s' ..." % op)
        results[op] = {}
        model_name = op + "-" + str(args.cpus)
        trainer = train.CreateModel.create(op)
        with trainer:
            train_time = trainer.fit(data, args)
            results[op] = {"train_time": str(train_time)}

            model = trainer.model

            # Baseline (scikit-learn) prediction time, trimmed-mean over niters.
            times = []
            mean = 0
            mem = 0
            try:
                for i in range(args.niters):
                    set_alarm(3600)
                    times.append(trainer.predict(data))
                    set_alarm(0)
                    mean = stats.trim_mean(times, 1 / len(times)) if args.niters > 1 else times[0]
            except Exception as e:
                # Timeouts/failures keep the partial mean computed so far.
                print(e)
            results[op].update({"prediction_time": mean})

            gc.collect()
            mem = max(memory_usage((trainer.predict, (data,))))
            results[op].update({"peak_mem": mem})

            outer_ops = args.operator
            args.operator = op
            if args.backend == "all":
                args.backend = "onnx-ml,hb-pytorch,hb-torchscript,hb-onnx"
            # BUG FIX: these checks used to assert the *function objects*
            # (always truthy) instead of calling them, so missing backends
            # were never detected. The installation checks are now invoked.
            if "hb-tvm" in args.backend:
                assert (
                    tvm_installed()
                ), "To run benchmark with TVM you need to have TVM installed. Either install TVM or remove it from the backends."
            if "hb-onnx" in args.backend:
                assert (
                    onnx_runtime_installed()
                ), "To run benchmark with ONNX you need to have ONNX runtime installed. Either install ONNX runtime or remove ONNX from the backends."
            if "onnx-ml" in args.backend:
                assert (
                    onnx_runtime_installed() and onnx_ml_tools_installed()
                ), "To run benchmark with ONNX-ML you need to have ONNX runtime and ONNXMLTOOLS installed. Either install ONNX runtime and ONNXMLTOOLS or remove ONNX-ML from the backends."
            for backend in args.backend.split(","):
                print("Running '%s' ..." % backend)
                scorer = score.ScoreBackend.create(backend)
                with scorer:
                    try:
                        conversion_time = scorer.convert(model, data, args, os.path.join(model_folder, model_name))
                    except Exception as e:
                        # Conversion failure: skip this backend entirely.
                        print(e)
                        continue

                    times = []
                    prediction_time = 0
                    mem = 0
                    try:
                        for i in range(args.niters):
                            set_alarm(3600)
                            times.append(scorer.predict(data))
                            set_alarm(0)
                        prediction_time = times[0] if args.niters == 1 else stats.trim_mean(times, 1 / len(times))
                        gc.collect()
                        mem = max(memory_usage((scorer.predict, (data,))))
                    except Exception as e:
                        print(e)
                    results[op][backend] = {
                        "conversion_time": str(conversion_time),
                        "prediction_time": str(prediction_time),
                        "peak_mem": str(mem),
                        "is_same_output": "None"
                        if len(trainer.predictions) == 0 or scorer is None or scorer.predictions is None
                        else np.allclose(trainer.predictions, scorer.predictions, atol=1e-6),
                    }
                    print(results[op][backend])

                    if args.validate:
                        np.testing.assert_allclose(
                            scorer.predictions, trainer.predictions, equal_nan=False, rtol=1e-5, atol=1e-6
                        )
            args.operator = outer_ops
    return results
6,654 | import logging
from mcu import MCU_endstop
class ZCalibrationHelper:
    """Klipper helper that automates Z offset calibration.

    Probes three references — the nozzle on the Z endstop switch, the probe
    on the same switch, and the probe on the print bed — and combines the
    measurements with the configured ``switch_offset`` to derive the G-Code
    Z offset. Registers the CALIBRATE_Z, PROBE_Z_ACCURACY and
    CALCULATE_SWITCH_OFFSET commands.

    NOTE(review): relies on ``EndstopWrapper`` and ``CalibrationState``,
    which are defined elsewhere in this module.
    """
    def __init__(self, config):
        # Read all configuration options and register commands/event handlers.
        self.state = None
        self.z_endstop = None
        self.z_homing = None
        self.last_state = False
        self.last_z_offset = 0.
        self.position_z_endstop = None
        self.config = config
        self.printer = config.get_printer()
        # NOTE(review): default 0.0 conflicts with above=0. — confirm klipper
        # accepts an unsupplied value here.
        self.switch_offset = config.getfloat('switch_offset', 0.0, above=0.)
        # max_deviation is deprecated
        self.max_deviation = config.getfloat('max_deviation', None, above=0.)
        config.deprecate('max_deviation')
        self.offset_margins = self._get_offset_margins('offset_margins',
                                                       '-1.0,1.0')
        self.speed = config.getfloat('speed', 50.0, above=0.)
        # clearance is deprecated
        self.clearance = config.getfloat('clearance', None, above=0.)
        config.deprecate('clearance')
        self.safe_z_height = config.getfloat('safe_z_height', None, above=0.)
        # Probing options default to None: unset values are filled in later
        # from the probe/rail objects (see handle_connect and
        # handle_home_rails_end).
        self.samples = config.getint('samples', None, minval=1)
        self.tolerance = config.getfloat('samples_tolerance', None, above=0.)
        self.retries = config.getint('samples_tolerance_retries',
                                     None, minval=0)
        atypes = {'none': None, 'median': 'median', 'average': 'average'}
        self.samples_result = config.getchoice('samples_result', atypes,
                                               'none')
        self.lift_speed = config.getfloat('lift_speed', None, above=0.)
        self.probing_speed = config.getfloat('probing_speed', None, above=0.)
        self.second_speed = config.getfloat('probing_second_speed',
                                            None, above=0.)
        self.retract_dist = config.getfloat('probing_retract_dist',
                                            None, above=0.)
        self.position_min = config.getfloat('position_min', None)
        self.first_fast = config.getboolean('probing_first_fast', False)
        # Probing sites as [x, y, None] triples (z filled per move).
        self.nozzle_site = self._get_xy("nozzle_xy_position", True)
        self.switch_site = self._get_xy("switch_xy_position", True)
        self.switch_xy_offsets = self._get_xy("switch_xy_offsets", True)
        self.bed_site = self._get_xy("bed_xy_position", True)
        self.wiggle_offsets = self._get_xy("wiggle_xy_offsets", True)
        # Optional user G-Code hooks run around/within the calibration.
        gcode_macro = self.printer.load_object(config, 'gcode_macro')
        self.start_gcode = gcode_macro.load_template(config, 'start_gcode', '')
        self.switch_gcode = gcode_macro.load_template(config,
                                                      'before_switch_gcode',
                                                      '')
        self.end_gcode = gcode_macro.load_template(config, 'end_gcode', '')
        self.query_endstops = self.printer.load_object(config,
                                                       'query_endstops')
        self.printer.register_event_handler("klippy:connect",
                                            self.handle_connect)
        self.printer.register_event_handler("homing:home_rails_end",
                                            self.handle_home_rails_end)
        self.gcode = self.printer.lookup_object('gcode')
        self.gcode.register_command('CALIBRATE_Z', self.cmd_CALIBRATE_Z,
                                    desc=self.cmd_CALIBRATE_Z_help)
        self.gcode.register_command('PROBE_Z_ACCURACY',
                                    self.cmd_PROBE_Z_ACCURACY,
                                    desc=self.cmd_PROBE_Z_ACCURACY_help)
        self.gcode.register_command('CALCULATE_SWITCH_OFFSET',
                                    self.cmd_CALCULATE_SWITCH_OFFSET,
                                    desc=self.cmd_CALCULATE_SWITCH_OFFSET_help)
    def get_status(self, eventtime):
        """Return calibration status for Klipper's status/API queries."""
        return {'last_query': self.last_state,
                'last_z_offset': self.last_z_offset}
    def handle_connect(self):
        """Resolve the Z endstop, nozzle/switch sites and probe defaults.

        Runs on klippy:connect, once all printer objects are available.
        """
        # get z-endstop object
        for endstop, name in self.query_endstops.endstops:
            if name == 'z':
                # check for virtual endstops..
                if not isinstance(endstop, MCU_endstop):
                    raise self.printer.config_error("A virtual endstop for z"
                                                    " is not supported for %s"
                                                    % (self.config.get_name()))
                self.z_endstop = EndstopWrapper(self.config, endstop)
        # get z-endstop position from safe_z_home
        if self.nozzle_site is None:
            safe_z_home = self.printer.lookup_object('safe_z_home',
                                                     default=None)
            if safe_z_home is None:
                raise self.printer.config_error("No nozzle position"
                                                " configured for %s"
                                                % (self.config.get_name()))
            self.nozzle_site = [safe_z_home.home_x_pos,
                                safe_z_home.home_y_pos,
                                None]
        # check/calculate switch position by offsets
        if self.switch_site is None:
            if self.switch_xy_offsets is None:
                raise self.printer.config_error("No switch position"
                                                " configured for %s"
                                                % (self.config.get_name()))
            self.switch_site = [self.nozzle_site[0] + self.switch_xy_offsets[0],
                                self.nozzle_site[1] + self.switch_xy_offsets[1],
                                None]
        # get probing settings
        probe = self.printer.lookup_object('probe', default=None)
        if probe is None:
            raise self.printer.config_error("A probe is needed for %s"
                                            % (self.config.get_name()))
        # Fill unset probing options from the probe object's configuration.
        if self.samples is None:
            self.samples = probe.sample_count
        if self.tolerance is None:
            self.tolerance = probe.samples_tolerance
        if self.retries is None:
            self.retries = probe.samples_retries
        if self.lift_speed is None:
            self.lift_speed = probe.lift_speed
        # clearance is deprecated
        if self.clearance is not None and self.clearance == 0:
            self.clearance = 20 # defaults to 20mm
        if self.safe_z_height is None:
            self.safe_z_height = probe.z_offset * 2
        if self.safe_z_height < 3:
            self.safe_z_height = 20 # defaults to 20mm
        if self.samples_result is None:
            self.samples_result = probe.samples_result
    def handle_home_rails_end(self, homing_state, rails):
        """Capture the Z rail's homing settings after the rails are homed."""
        # get z homing position
        for rail in rails:
            if rail.get_steppers()[0].is_active_axis('z'):
                # get homing settings from z rail
                self.z_homing = rail.position_endstop
                if self.probing_speed is None:
                    self.probing_speed = rail.homing_speed
                if self.second_speed is None:
                    self.second_speed = rail.second_homing_speed
                if self.retract_dist is None:
                    self.retract_dist = rail.homing_retract_dist
                if self.position_min is None:
                    self.position_min = rail.position_min
                self.position_z_endstop = rail.position_endstop
    def _build_config(self):
        """Placeholder; nothing to build for this helper."""
        pass
    cmd_CALIBRATE_Z_help = ("Automatically calibrates the nozzle offset"
                            " to the print surface")
    def cmd_CALIBRATE_Z(self, gcmd):
        """G-Code command: run the full Z offset calibration sequence."""
        if self.z_homing is None:
            raise gcmd.error("Must home axes first")
        site_attr = gcmd.get("BED_POSITION", None)
        if site_attr is not None:
            # set bed site from BED_POSITION parameter
            self.bed_site = self._parse_xy("BED_POSITION", site_attr)
        elif self._get_xy("bed_xy_position", True) is not None:
            # set bed site from configuration
            self.bed_site = self._get_xy("bed_xy_position", False)
        else:
            # else get the mesh's zero reference position
            try:
                mesh = self.printer.lookup_object('bed_mesh', default=None)
                if (hasattr(mesh.bmc, 'zero_ref_pos')
                        and mesh.bmc.zero_ref_pos is not None):
                    self.bed_site = mesh.bmc.zero_ref_pos
                else:
                    # trying to read the deprecated rri
                    rri = mesh.bmc.relative_reference_index
                    self.bed_site = mesh.bmc.points[rri]
                logging.debug("Z-CALIBRATION probe bed_x=%.3f bed_y=%.3f"
                              % (self.bed_site[0], self.bed_site[1]))
            except:
                # NOTE(review): bare except also hides attribute errors from
                # an absent mesh object; the user-facing message covers the
                # expected failure modes.
                raise gcmd.error("Either use the BED_POSITION parameter,"
                                 " configure a bed_xy_position or define"
                                 " a mesh with a zero_reference_position"
                                 " for %s" % (self.config.get_name()))
        self._log_config()
        state = CalibrationState(self, gcmd)
        state.calibrate_z()
    cmd_PROBE_Z_ACCURACY_help = ("Probe Z-Endstop accuracy at"
                                 " Nozzle-Endstop position")
    def cmd_PROBE_Z_ACCURACY(self, gcmd):
        """G-Code command: sample the Z endstop repeatedly and report stats."""
        if self.z_homing is None:
            raise gcmd.error("Must home axes first")
        speed = gcmd.get_float("PROBE_SPEED", self.second_speed, above=0.)
        lift_speed = gcmd.get_float("LIFT_SPEED", self.lift_speed, above=0.)
        sample_count = gcmd.get_int("SAMPLES", self.samples, minval=1)
        sample_retract_dist = gcmd.get_float("SAMPLE_RETRACT_DIST",
                                             self.retract_dist, above=0.)
        toolhead = self.printer.lookup_object('toolhead')
        pos = toolhead.get_position()
        self._move_safe_z(pos, lift_speed)
        # move to z-endstop position
        self._move(list(self.nozzle_site), self.speed)
        pos = toolhead.get_position()
        gcmd.respond_info("PROBE_ACCURACY at X:%.3f Y:%.3f Z:%.3f"
                          " (samples=%d retract=%.3f"
                          " speed=%.1f lift_speed=%.1f)\n"
                          % (pos[0], pos[1], pos[2],
                             sample_count, sample_retract_dist,
                             speed, lift_speed))
        # Probe bed sample_count times
        positions = []
        while len(positions) < sample_count:
            # Probe position
            pos = self._probe(self.z_endstop, self.position_min, speed)
            positions.append(pos)
            # Retract
            liftpos = [None, None, pos[2] + sample_retract_dist]
            self._move(liftpos, lift_speed)
        # Calculate maximum, minimum and average values
        max_value = max([p[2] for p in positions])
        min_value = min([p[2] for p in positions])
        range_value = max_value - min_value
        avg_value = self._calc_mean(positions)[2]
        median = self._calc_median(positions)[2]
        # calculate the standard deviation
        deviation_sum = 0
        for i in range(len(positions)):
            deviation_sum += pow(positions[i][2] - avg_value, 2.)
        sigma = (deviation_sum / len(positions)) ** 0.5
        # Show information
        gcmd.respond_info(
            "probe accuracy results: maximum %.6f, minimum %.6f, range %.6f,"
            " average %.6f, median %.6f, standard deviation %.6f" % (
                max_value, min_value, range_value, avg_value, median, sigma))
    cmd_CALCULATE_SWITCH_OFFSET_help = ("Calculates a switch_offset based on"
                                        " the current z position")
    def cmd_CALCULATE_SWITCH_OFFSET(self, gcmd):
        """G-Code command: derive a new switch_offset from the current Z."""
        if self.last_z_offset is None:
            raise gcmd.error("Must run CALIBRATE_Z first")
        toolhead = self.printer.lookup_object('toolhead')
        pos = toolhead.get_position()
        new_switch_offset = self.switch_offset - (pos[2] - self.last_z_offset)
        if new_switch_offset > 0.0:
            gcmd.respond_info("switch_offset=%.3f - (current_z=%.3f - z_offset=%.3f"
                              ") --> new switch_offset=%.3f"
                              % (self.switch_offset, pos[2],
                                 self.last_z_offset, new_switch_offset))
        else:
            gcmd.respond_info("The resulting switch offset is negative! Either"
                              " the nozzle is still too far away or something"
                              " else is wrong...")
    def _get_xy(self, name, optional=False):
        """Read config option `name` as an [x, y, None] site; None if optional and unset."""
        if optional and self.config.get(name, None) is None:
            return None
        else:
            return self._parse_xy(name, self.config.get(name))
    def _parse_xy(self, name, site):
        """Parse an "x,y" string into [x, y, None]; raise a config error on failure."""
        try:
            x_pos, y_pos = site.split(',')
            return [float(x_pos), float(y_pos), None]
        except:
            raise self.config.error("Unable to parse %s in %s"
                                    % (name, self.config.get_name()))
    def _get_offset_margins(self, name, default):
        """Parse the offset margins option into a [min, max] float pair.

        A single value "v" is expanded symmetrically to [-|v|, |v|].
        """
        try:
            margins = self.config.get(name, default).split(',')
            for i, val in enumerate(margins):
                margins[i] = float(val)
            if len(margins) == 1:
                val = abs(margins[0])
                margins[0] = -val
                margins.append(val)
            return margins
        except:
            raise self.config.error("Unable to parse %s in %s"
                                    % (name, self.config.get_name()))
    def _probe(self, mcu_endstop, z_position, speed, wiggle=False):
        """Probe down to `z_position`, retract, optionally wiggle, and return the triggered position."""
        toolhead = self.printer.lookup_object('toolhead')
        pos = toolhead.get_position()
        pos[2] = z_position
        # probe
        phoming = self.printer.lookup_object('homing')
        curpos = phoming.probing_move(mcu_endstop, pos, speed)
        # retract
        self._move([None, None, curpos[2] + self.retract_dist],
                   self.lift_speed)
        if wiggle and self.wiggle_offsets is not None:
            # Side-step and return to release potential mechanical tension.
            self._move([curpos[0] + self.wiggle_offsets[0],
                        curpos[1] + self.wiggle_offsets[1],
                        None],
                       self.speed)
            self._move([curpos[0], curpos[1], None], self.speed)
        self.gcode.respond_info("probe at %.3f,%.3f is z=%.6f"
                                % (curpos[0], curpos[1], curpos[2]))
        return curpos
    def _move(self, coord, speed):
        """Move the toolhead to `coord` (None entries keep the current axis value)."""
        self.printer.lookup_object('toolhead').manual_move(coord, speed)
    def _move_safe_z(self, pos, lift_speed):
        """Lift Z to a safe height before any XY travel."""
        # clearance is deprecated
        if self.clearance is not None:
            if pos[2] < self.clearance:
                # no clearance, better to move up (relative)
                self._move([None, None, pos[2] + self.clearance], lift_speed)
        else:
            if pos[2] < self.safe_z_height:
                # no safe z position, better to move up (absolute)
                self._move([None, None, self.safe_z_height], lift_speed)
    def _calc_mean(self, positions):
        """Return the coordinate-wise mean of a list of [x, y, z] positions."""
        count = float(len(positions))
        return [sum([pos[i] for pos in positions]) / count
                for i in range(3)]
    def _calc_median(self, positions):
        """Return the position with the median Z (mean of middle two when even)."""
        z_sorted = sorted(positions, key=(lambda p: p[2]))
        middle = len(positions) // 2
        if (len(positions) & 1) == 1:
            # odd number of samples
            return z_sorted[middle]
        # even number of samples
        return self._calc_mean(z_sorted[middle-1:middle+1])
    def _log_config(self):
        """Dump the effective calibration configuration to the debug log."""
        logging.debug("Z-CALIBRATION: switch_offset=%.3f,"
                      " offset_margins=%.3f,%.3f, speed=%.3f,"
                      " samples=%i, tolerance=%.3f, retries=%i,"
                      " samples_result=%s, lift_speed=%.3f,"
                      " safe_z_height=%.3f, probing_speed=%.3f,"
                      " second_speed=%.3f, retract_dist=%.3f,"
                      " position_min=%.3f, probe_nozzle_x=%.3f,"
                      " probe_nozzle_y=%.3f, probe_switch_x=%.3f,"
                      " probe_switch_y=%.3f, probe_bed_x=%.3f,"
                      " probe_bed_y=%.3f"
                      % (self.switch_offset, self.offset_margins[0],
                         self.offset_margins[1], self.speed,
                         self.samples, self.tolerance, self.retries,
                         self.samples_result, self.lift_speed,
                         self.safe_z_height, self.probing_speed,
                         self.second_speed, self.retract_dist,
                         self.position_min, self.nozzle_site[0],
                         self.nozzle_site[1], self.switch_site[0],
                         self.switch_site[1], self.bed_site[0],
                         self.bed_site[1]))
def load_config(config):
    """Klipper entry point: build the helper for a [z_calibration] section."""
    return ZCalibrationHelper(config)
6,655 | from typing import Callable, Dict, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
_PROTO_TYPE_NAME_TO_CLASS: Dict[str, Type[AbstractType]] = {}
T = TypeVar('T', bound='AbstractType')
The provided code snippet includes necessary dependencies for implementing the `_register_proto` function. Write a Python function `def _register_proto( proto_type_name: str, ) -> Callable[[Type[T]], Type[T]]` to solve the following problem:
Register a new type to be used in the protobuf serialization. This will add the type key to the global registry of types key used in the proto serialization and deserialization. This is for internal usage only. --- ```python from docarray.typing.proto_register import register_proto from docarray.typing.abstract_type import AbstractType @register_proto(proto_type_name='my_type') class MyType(AbstractType): ... ``` --- :param cls: the class to register :return: the class
Here is the function:
def _register_proto(
proto_type_name: str,
) -> Callable[[Type[T]], Type[T]]:
"""Register a new type to be used in the protobuf serialization.
This will add the type key to the global registry of types key used in the proto
serialization and deserialization. This is for internal usage only.
---
```python
from docarray.typing.proto_register import register_proto
from docarray.typing.abstract_type import AbstractType
@register_proto(proto_type_name='my_type')
class MyType(AbstractType):
...
```
---
:param cls: the class to register
:return: the class
"""
if proto_type_name in _PROTO_TYPE_NAME_TO_CLASS.keys():
raise ValueError(
f'the key {proto_type_name} is already registered in the global registry'
)
def _register(cls: Type[T]) -> Type[T]:
cls._proto_type_name = proto_type_name
_PROTO_TYPE_NAME_TO_CLASS[proto_type_name] = cls
return cls
return _register | Register a new type to be used in the protobuf serialization. This will add the type key to the global registry of types key used in the proto serialization and deserialization. This is for internal usage only. --- ```python from docarray.typing.proto_register import register_proto from docarray.typing.abstract_type import AbstractType @register_proto(proto_type_name='my_type') class MyType(AbstractType): ... ``` --- :param cls: the class to register :return: the class |
6,656 | import copy
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field, replace
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
from pydantic.error_wrappers import ValidationError
from typing_inspect import get_args, is_optional_type, is_union_type
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.typing import ID, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import is_tensor_union, safe_issubclass
from docarray.utils._internal.misc import import_library
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils.find import (
FindResult,
FindResultBatched,
SubindexFindResult,
_FindResult,
_FindResultBatched,
)
def _raise_not_composable(name):
def _inner(self, *args, **kwargs):
raise NotImplementedError(
f'`{name}` is not usable through the query builder of this Document index ({type(self)}). '
f'But you can call `{type(self)}.{name}()` directly.'
)
return _inner | null |
6,657 | import copy
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field, replace
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
from pydantic.error_wrappers import ValidationError
from typing_inspect import get_args, is_optional_type, is_union_type
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.typing import ID, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import is_tensor_union, safe_issubclass
from docarray.utils._internal.misc import import_library
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils.find import (
FindResult,
FindResultBatched,
SubindexFindResult,
_FindResult,
_FindResultBatched,
)
def _raise_not_supported(name):
def _inner(self, *args, **kwargs):
raise NotImplementedError(
f'`{name}` is not usable through the query builder of this Document index ({type(self)}). '
)
return _inner | null |
6,658 | from typing import Any, Dict, List, Tuple, Type, cast
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
def _collect_query_args(method_name: str): # TODO: use partialmethod instead
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f'Positional arguments are not supported for '
f'`{type(self)}.{method_name}`.'
f' Use keyword arguments instead.'
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner | null |
6,659 | from typing import Any, Dict, List, Tuple, Type, cast
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
class BaseDocIndex(ABC, Generic[TSchema]):
"""Abstract class for all Document Stores"""
# the BaseDoc that defines the schema of the store
# for subclasses this is filled automatically
_schema: Optional[Type[BaseDoc]] = None
def __init__(self, db_config=None, subindex: bool = False, **kwargs):
if self._schema is None:
raise ValueError(
'A DocumentIndex must be typed with a Document type.'
'To do so, use the syntax: DocumentIndex[DocumentType]'
)
if subindex:
class _NewSchema(self._schema): # type: ignore
parent_id: Optional[ID] = None
self._ori_schema = self._schema
self._schema = cast(Type[BaseDoc], _NewSchema)
self._logger = logging.getLogger('docarray')
self._db_config = db_config or self.DBConfig(**kwargs)
if not isinstance(self._db_config, self.DBConfig):
raise ValueError(f'db_config must be of type {self.DBConfig}')
self._logger.info('DB config created')
self._runtime_config = self.RuntimeConfig()
self._logger.info('Runtime config created')
self._column_infos: Dict[str, _ColumnInfo] = self._create_column_infos(
self._schema
)
self._is_subindex = subindex
self._subindices: Dict[str, BaseDocIndex] = {}
self._init_subindex()
###############################################
# Inner classes for query builder and configs #
# Subclasses must subclass & implement these #
###############################################
class QueryBuilder(ABC):
def build(self, *args, **kwargs) -> Any:
"""Build the DB specific query object.
The DB specific implementation can leverage self._queries to do so.
The output of this should be able to be passed to execute_query().
"""
...
# TODO support subindex in QueryBuilder
# the methods below need to be implemented by subclasses
# If, in your subclass, one of these is not usable in a query builder, but
# can be called directly on the DocumentIndex, use `_raise_not_composable`.
# If the method is not supported _at all_, use `_raise_not_supported`.
find = abstractmethod(lambda *args, **kwargs: ...)
filter = abstractmethod(lambda *args, **kwargs: ...)
text_search = abstractmethod(lambda *args, **kwargs: ...)
find_batched = abstractmethod(lambda *args, **kwargs: ...)
filter_batched = abstractmethod(lambda *args, **kwargs: ...)
text_search_batched = abstractmethod(lambda *args, **kwargs: ...)
class DBConfig(ABC):
index_name: Optional[str] = None
# default configurations for every column type
# a dictionary from a column type (DB specific) to a dictionary
# of default configurations for that type
# These configs are used if no configs are specified in the `Field(...)`
# of a field in the Document schema (`cls._schema`)
# Example: `default_column_config['VARCHAR'] = {'length': 255}`
default_column_config: Dict[Type, Dict[str, Any]] = field(default_factory=dict)
class RuntimeConfig(ABC):
pass
def index_name(self):
"""Return the name of the index in the database."""
...
#####################################
# Abstract methods #
# Subclasses must implement these #
#####################################
def python_type_to_db_type(self, python_type: Type) -> Any:
"""Map python type to database type.
Takes any python type and returns the corresponding database column type.
:param python_type: a python type.
:return: the corresponding database column type,
or None if ``python_type`` is not supported.
"""
...
def _index(self, column_to_data: Dict[str, Generator[Any, None, None]]):
    """Index a document into the store.

    Abstract — implemented by concrete backends.

    :param column_to_data: a dictionary from column name to a generator
        that yields the data for that column.
    """
    # If you want to work directly on documents, you can implement index() instead
    # If you implement index(), _index() only needs a dummy implementation.
    ...
def num_docs(self) -> int:
    """Return the number of indexed documents.

    Abstract — implemented by concrete backends.
    """
    ...
def _is_index_empty(self) -> bool:
"""
Check if index is empty by comparing the number of documents to zero.
:return: True if the index is empty, False otherwise.
"""
return self.num_docs() == 0
def _del_items(self, doc_ids: Sequence[str]):
    """Delete Documents from the index.

    Abstract — implemented by concrete backends.

    :param doc_ids: ids to delete from the Document Store
    """
    ...
def _get_items(
    self, doc_ids: Sequence[str]
) -> Union[Sequence[TSchema], Sequence[Dict[str, Any]]]:
    """Get Documents from the index, by `id`.
    If no document is found, a KeyError is raised.

    Abstract — implemented by concrete backends; may return either schema
    instances or raw dict rows (callers normalize both).

    :param doc_ids: ids to get from the Document index
    :return: Sequence of Documents, sorted corresponding to the order of `doc_ids`. Duplicate `doc_ids` can be omitted in the output.
    """
    ...
def execute_query(self, query: Any, *args, **kwargs) -> Any:
    """
    Execute a query on the database.

    Can take two kinds of inputs:

    1. A native query of the underlying database. This is meant as a passthrough so that you
    can enjoy any functionality that is not available through the Document index API.
    2. The output of this Document index' `QueryBuilder.build()` method.

    Abstract — implemented by concrete backends.

    :param query: the query to execute
    :param args: positional arguments to pass to the query
    :param kwargs: keyword arguments to pass to the query
    :return: the result of the query
    """
    ...
def _doc_exists(self, doc_id: str) -> bool:
    """
    Checks if a given document exists in the index.

    Abstract — implemented by concrete backends.

    :param doc_id: The id of a document to check.
    :return: True if the document exists in the index, False otherwise.
    """
    ...
def _find(
    self,
    query: np.ndarray,
    limit: int,
    search_field: str = '',
) -> _FindResult:
    """Find documents in the index.

    Abstract — implemented by concrete backends.

    :param query: query vector for KNN/ANN search. Has single axis.
    :param limit: maximum number of documents to return per query
    :param search_field: name of the field to search on
    :return: a named tuple containing `documents` and `scores`
    """
    # NOTE: in standard implementations,
    # `search_field` is equal to the column name to search on
    ...
def _find_batched(
    self,
    queries: np.ndarray,
    limit: int,
    search_field: str = '',
) -> _FindResultBatched:
    """Find documents in the index, one search per query row.

    Abstract — implemented by concrete backends.

    :param queries: query vectors for KNN/ANN search.
        Has shape (batch_size, vector_dim)
    :param limit: maximum number of documents to return
    :param search_field: name of the field to search on
    :return: a named tuple containing `documents` and `scores`
    """
    # NOTE: in standard implementations,
    # `search_field` is equal to the column name to search on
    ...
def _filter(
    self,
    filter_query: Any,
    limit: int,
) -> Union[DocList, List[Dict]]:
    """Find documents in the index based on a filter query.

    Abstract — implemented by concrete backends.

    :param filter_query: the DB specific filter query to execute
    :param limit: maximum number of documents to return
    :return: a DocList containing the documents that match the filter query
    """
    ...
def _filter_batched(
    self,
    filter_queries: Any,
    limit: int,
) -> Union[List[DocList], List[List[Dict]]]:
    """Find documents in the index based on multiple filter queries.
    Each query is considered individually, and results are returned per query.

    Abstract — implemented by concrete backends.

    :param filter_queries: the DB specific filter queries to execute
    :param limit: maximum number of documents to return per query
    :return: List of DocLists containing the documents that match the filter
        queries
    """
    ...
def _text_search(
    self,
    query: str,
    limit: int,
    search_field: str = '',
) -> _FindResult:
    """Find documents in the index based on a text search query.

    Abstract — implemented by concrete backends.

    :param query: The text to search for
    :param limit: maximum number of documents to return
    :param search_field: name of the field to search on
    :return: a named tuple containing `documents` and `scores`
    """
    # NOTE: in standard implementations,
    # `search_field` is equal to the column name to search on
    ...
def _text_search_batched(
    self,
    queries: Sequence[str],
    limit: int,
    search_field: str = '',
) -> _FindResultBatched:
    """Find documents in the index based on a text search query, one search
    per entry in `queries`.

    Abstract — implemented by concrete backends.

    :param queries: The texts to search for
    :param limit: maximum number of documents to return per query
    :param search_field: name of the field to search on
    :return: a named tuple containing `documents` and `scores`
    """
    # NOTE: in standard implementations,
    # `search_field` is equal to the column name to search on
    ...
####################################################
# Optional overrides #
# Subclasses may or may not need to change these #
####################################################
def __getitem__(
    self, key: Union[str, Sequence[str]]
) -> Union[TSchema, DocList[TSchema]]:
    """Get one or multiple Documents into the index, by `id`.
    If no document is found, a KeyError is raised.

    :param key: id or ids to get from the Document index
    :return: a single Document when `key` was a single id, else a DocList
    :raises KeyError: if no documents were found for the given ids
    """
    # normalize input to a list of ids; remember whether to unwrap at the end
    if isinstance(key, str):
        return_singleton = True
        key = [key]
    else:
        return_singleton = False

    # retrieve data
    doc_sequence = self._get_items(key)

    # check data
    if len(doc_sequence) == 0:
        raise KeyError(f'No document with id {key} found')

    # retrieve nested data: AnyDocArray fields live in a subindex and must be
    # fetched separately (only needed when the backend returned dict rows)
    for field_name, type_, _ in self._flatten_schema(
        cast(Type[BaseDoc], self._schema)
    ):
        if safe_issubclass(type_, AnyDocArray) and isinstance(
            doc_sequence[0], Dict
        ):
            for doc in doc_sequence:
                self._get_subindex_doclist(doc, field_name)  # type: ignore

    # cast output: the backend may return a DocList, dict rows, or a plain
    # sequence of schema instances
    if isinstance(doc_sequence, DocList):
        out_docs: DocList[TSchema] = doc_sequence
    elif isinstance(doc_sequence[0], Dict):
        out_docs = self._dict_list_to_docarray(doc_sequence)  # type: ignore
    else:
        docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], self._schema))
        out_docs = docs_cls(doc_sequence)

    return out_docs[0] if return_singleton else out_docs
def __delitem__(self, key: Union[str, Sequence[str]]):
    """Delete one or multiple Documents from the index, by `id`.
    If no document is found, a KeyError is raised.

    :param key: id or ids to delete from the Document index
    """
    self._logger.info(f'Deleting documents with id(s) {key} from the index')
    doc_ids = [key] if isinstance(key, str) else key

    # first remove nested documents from the subindices, then the docs
    # themselves
    schema_cls = cast(Type[BaseDoc], self._schema)
    for col_name, col_type, _ in self._flatten_schema(schema_cls):
        if not safe_issubclass(col_type, AnyDocArray):
            continue
        subindex = self._subindices[col_name]
        for parent_id in doc_ids:
            child_ids = subindex._filter_by_parent_id(parent_id)
            if child_ids:
                del subindex[child_ids]

    self._del_items(doc_ids)
def __contains__(self, item: BaseDoc) -> bool:
    """
    Checks if a given document exists in the index.

    :param item: The document to check.
        It must be an instance of BaseDoc or its subclass.
    :return: True if the document exists in the index, False otherwise.
    :raises TypeError: if `item` is not a BaseDoc (subclass) instance
    """
    # guard clause: reject anything that is not a Document
    if not safe_issubclass(type(item), BaseDoc):
        raise TypeError(
            f"item must be an instance of BaseDoc or its subclass, not '{type(item).__name__}'"
        )
    return self._doc_exists(str(item.id))
def configure(self, runtime_config=None, **kwargs):
    """
    Configure the DocumentIndex.
    You can either pass a config object to `config` or pass individual config
    parameters as keyword arguments.
    If a configuration object is passed, it will replace the current configuration.
    If keyword arguments are passed, they will update the current configuration.

    :param runtime_config: the configuration to apply
    :param kwargs: individual configuration parameters
    :raises ValueError: if `runtime_config` is not a `RuntimeConfig` instance
    """
    if runtime_config is not None:
        # full replacement: must be of the backend's RuntimeConfig type
        if not isinstance(runtime_config, self.RuntimeConfig):
            raise ValueError(f'runtime_config must be of type {self.RuntimeConfig}')
        self._runtime_config = runtime_config
    else:
        # partial update: copy the current config with the given overrides
        self._runtime_config = replace(self._runtime_config, **kwargs)
def index(self, docs: Union[BaseDoc, Sequence[BaseDoc]], **kwargs):
    """index Documents into the index.

    !!! note
        Passing a sequence of Documents that is not a DocList
        (such as a List of Docs) comes at a performance penalty.
        This is because the Index needs to check compatibility between itself and
        the data. With a DocList as input this is a single check; for other inputs
        compatibility needs to be checked for every Document individually.

    :param docs: Documents to index.
    """
    n_docs = len(docs) if not isinstance(docs, BaseDoc) else 1
    self._logger.debug(f'Indexing {n_docs} documents')

    validated = self._validate_docs(docs)
    # stamp `parent_id` onto nested docs before splitting into columns
    self._update_subindex_data(validated)
    self._index(self._get_col_value_dict(validated), **kwargs)
def find(
    self,
    query: Union[AnyTensor, BaseDoc],
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> FindResult:
    """Find documents in the index using nearest neighbor search.

    :param query: query vector for KNN/ANN search.
        Can be either a tensor-like (np.array, torch.Tensor, etc.)
        with a single axis, or a Document
    :param search_field: name of the field to search on.
        Documents in the index are retrieved based on this similarity
        of this field to the query.
    :param limit: maximum number of documents to return
    :return: a named tuple containing `documents` and `scores`
    """
    self._logger.debug(f'Executing `find` for search field {search_field}')
    self._validate_search_field(search_field)

    # a Document query carries the vector in the column being searched
    if isinstance(query, BaseDoc):
        vector = self._get_values_by_column([query], search_field)[0]
    else:
        vector = query

    hits, scores = self._find(
        self._to_numpy(vector), search_field=search_field, limit=limit, **kwargs
    )
    # normalize raw dict rows from the backend into a DocList
    if isinstance(hits, List) and not isinstance(hits, DocList):
        hits = self._dict_list_to_docarray(hits)
    return FindResult(documents=hits, scores=scores)
def find_subindex(
    self,
    query: Union[AnyTensor, BaseDoc],
    subindex: str = '',
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> SubindexFindResult:
    """Find documents in subindex level.

    :param query: query vector for KNN/ANN search.
        Can be either a tensor-like (np.array, torch.Tensor, etc.)
        with a single axis, or a Document
    :param subindex: name of the subindex to search on
    :param search_field: name of the field to search on
    :param limit: maximum number of documents to return
    :return: a named tuple containing root docs, subindex docs and scores
    """
    self._logger.debug(f'Executing `find_subindex` for search field {search_field}')

    sub_docs, scores = self._find_subdocs(
        query, subindex=subindex, search_field=search_field, limit=limit, **kwargs
    )

    # map every matched sub-document back to its root document
    path = subindex.split('__')
    tail = '__'.join(path[1:])
    root_ids = [self._get_root_doc_id(d.id, path[0], tail) for d in sub_docs]

    root_docs = DocList[self._schema]()  # type: ignore
    for root_id in root_ids:
        root_docs.append(self[root_id])

    return SubindexFindResult(
        root_documents=root_docs, sub_documents=sub_docs, scores=scores  # type: ignore
    )
def find_batched(
    self,
    queries: Union[AnyTensor, DocList],
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> FindResultBatched:
    """Find documents in the index using nearest neighbor search.

    :param queries: query vectors for KNN/ANN search.
        Can be either a tensor-like (np.array, torch.Tensor, etc.) with
        shape (batch_size, vector_dim), or a DocList.
    :param search_field: name of the field to search on.
        Documents in the index are retrieved based on this similarity
        of this field to the query.
    :param limit: maximum number of documents to return per query
    :return: a named tuple containing `documents` and `scores`
    """
    self._logger.debug(f'Executing `find_batched` for search field {search_field}')

    if search_field:
        if '__' in search_field:
            fields = search_field.split('__')
            # a field like 'chunks__embedding' targets a subindex:
            # delegate to the subindex of the first path segment
            if safe_issubclass(self._schema._get_field_annotation(fields[0]), AnyDocArray):  # type: ignore
                return self._subindices[fields[0]].find_batched(
                    queries,
                    search_field='__'.join(fields[1:]),
                    limit=limit,
                    **kwargs,
                )

    self._validate_search_field(search_field)
    if isinstance(queries, Sequence):
        # a DocList (or other doc sequence): extract the vectors column-wise
        # and stack them into one (batch_size, vector_dim) array
        query_vec_list = self._get_values_by_column(queries, search_field)
        query_vec_np = np.stack(
            tuple(self._to_numpy(query_vec) for query_vec in query_vec_list)
        )
    else:
        query_vec_np = self._to_numpy(queries)

    da_list, scores = self._find_batched(
        query_vec_np, search_field=search_field, limit=limit, **kwargs
    )
    if (
        len(da_list) > 0
        and isinstance(da_list[0], List)
        and not isinstance(da_list[0], DocList)
    ):
        # backend returned lists of dict rows: convert each batch to a DocList
        da_list = [self._dict_list_to_docarray(docs) for docs in da_list]

    return FindResultBatched(documents=da_list, scores=scores)  # type: ignore
def filter(
    self,
    filter_query: Any,
    limit: int = 10,
    **kwargs,
) -> DocList:
    """Find documents in the index based on a filter query

    :param filter_query: the DB specific filter query to execute
    :param limit: maximum number of documents to return
    :return: a DocList containing the documents that match the filter query
    """
    self._logger.debug(f'Executing `filter` for the query {filter_query}')

    matches = self._filter(filter_query, limit=limit, **kwargs)
    # normalize raw dict rows from the backend into a DocList
    if not isinstance(matches, DocList) and isinstance(matches, List):
        matches = self._dict_list_to_docarray(matches)
    return matches
def filter_subindex(
    self,
    filter_query: Any,
    subindex: str,
    limit: int = 10,
    **kwargs,
) -> DocList:
    """Find documents in subindex level based on a filter query

    :param filter_query: the DB specific filter query to execute
    :param subindex: name of the subindex to search on
    :param limit: maximum number of documents to return
    :return: a DocList containing the subindex level documents that match the filter query
    """
    self._logger.debug(
        f'Executing `filter` for the query {filter_query} in subindex {subindex}'
    )
    # a name without '__' addresses a direct child; otherwise recurse one
    # nesting level at a time
    if '__' not in subindex:
        return self._subindices[subindex].filter(filter_query, limit=limit, **kwargs)
    head, _, tail = subindex.partition('__')
    return self._subindices[head].filter_subindex(
        filter_query, tail, limit=limit, **kwargs
    )
def filter_batched(
    self,
    filter_queries: Any,
    limit: int = 10,
    **kwargs,
) -> List[DocList]:
    """Find documents in the index based on multiple filter queries.

    :param filter_queries: the DB specific filter queries to execute
    :param limit: maximum number of documents to return per query
    :return: a list of DocLists, one per filter query
    """
    self._logger.debug(
        f'Executing `filter_batched` for the queries {filter_queries}'
    )
    batches = self._filter_batched(filter_queries, limit=limit, **kwargs)
    # normalize raw dict rows from the backend into DocLists
    if len(batches) > 0 and isinstance(batches[0], List):
        batches = [self._dict_list_to_docarray(batch) for batch in batches]
    return batches  # type: ignore
def text_search(
    self,
    query: Union[str, BaseDoc],
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> FindResult:
    """Find documents in the index based on a text search query.

    :param query: The text to search for
    :param search_field: name of the field to search on
    :param limit: maximum number of documents to return
    :return: a named tuple containing `documents` and `scores`
    """
    self._logger.debug(f'Executing `text_search` for search field {search_field}')
    self._validate_search_field(search_field)

    # a Document query carries the text in the column being searched
    if isinstance(query, BaseDoc):
        text = self._get_values_by_column([query], search_field)[0]
    else:
        text = query

    hits, scores = self._text_search(
        text, search_field=search_field, limit=limit, **kwargs
    )
    # normalize raw dict rows from the backend into a DocList
    if isinstance(hits, List) and not isinstance(hits, DocList):
        hits = self._dict_list_to_docarray(hits)
    return FindResult(documents=hits, scores=scores)
def text_search_batched(
    self,
    queries: Union[Sequence[str], Sequence[BaseDoc]],
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> FindResultBatched:
    """Find documents in the index based on a text search query.

    :param queries: The texts to search for
    :param search_field: name of the field to search on
    :param limit: maximum number of documents to return per query
    :return: a named tuple containing `documents` and `scores`
    """
    self._logger.debug(
        f'Executing `text_search_batched` for search field {search_field}'
    )
    self._validate_search_field(search_field)

    # Document queries carry the text in the column being searched
    if isinstance(queries[0], BaseDoc):
        texts: Sequence[str] = self._get_values_by_column(
            cast(Sequence[BaseDoc], queries), search_field
        )
    else:
        texts = cast(Sequence[str], queries)

    hits, scores = self._text_search_batched(
        texts, search_field=search_field, limit=limit, **kwargs
    )
    # normalize raw dict rows from the backend into DocLists
    if len(hits) > 0 and isinstance(hits[0], List):
        converted = [self._dict_list_to_docarray(batch) for batch in hits]
        return FindResultBatched(documents=converted, scores=scores)
    return FindResultBatched(documents=cast(List[DocList], hits), scores=scores)
def _filter_by_parent_id(self, id: str) -> Optional[List[str]]:
"""Filter the ids of the subindex documents given id of root document.
:param id: the root document id to filter by
:return: a list of ids of the subindex documents
"""
return None
##########################################################
# Helper methods #
# These might be useful in your subclass implementation #
##########################################################
def _get_values_by_column(docs: Sequence[BaseDoc], col_name: str) -> List[Any]:
    """Get the value of a column of a document.

    Nested columns are addressed with `'__'`, e.g. 'image__tensor' reads
    `doc.image.tensor`.

    :param docs: The docs to get the values from
    :param col_name: The name of the column, e.g. 'text' or 'image__tensor'
    :return: one value per doc, in order
    """
    path = col_name.split('__')
    values = []
    for doc in docs:
        target = doc
        # walk down the nested attributes, then read the leaf
        for attr in path[:-1]:
            target = getattr(target, attr)
        values.append(getattr(target, path[-1]))
    return values
def _transpose_col_value_dict(
col_value_dict: Mapping[str, Iterable[Any]]
) -> Generator[Dict[str, Any], None, None]:
"""'Transpose' the output of `_get_col_value_dict()`: Yield rows of columns, where each row represent one Document.
Since a generator is returned, this process comes at negligible cost.
:param docs: The DocList to get the values from
:return: The `docs` flattened out as rows. Each row is a dictionary mapping from column name to value
"""
return (dict(zip(col_value_dict, row)) for row in zip(*col_value_dict.values()))
def _get_col_value_dict(
    self, docs: Union[BaseDoc, Sequence[BaseDoc]]
) -> Dict[str, Generator[Any, None, None]]:
    """
    Get all data from a (sequence of) document(s), flattened out by column.
    This can be seen as the transposed representation of `_get_rows()`.

    :param docs: The document(s) to get the data from
    :return: A dictionary mapping column names to a generator of values
    """
    docs_seq: Sequence[BaseDoc] = [docs] if isinstance(docs, BaseDoc) else docs

    def _iter_column(col_name: str) -> Generator[Any, None, None]:
        # lazily extract (and, where possible, numpy-convert) one column
        for doc in docs_seq:
            value = self._get_values_by_column([doc], col_name)[0]
            yield self._to_numpy(value, allow_passthrough=True)

    return {name: _iter_column(name) for name in self._column_infos}
def _update_subindex_data(
    self,
    docs: DocList[BaseDoc],
):
    """
    Add `parent_id` to all sublevel documents.

    :param docs: The document(s) to update the `parent_id` for
    """
    schema_cls = cast(Type[BaseDoc], self._schema)
    for field_name, type_, _ in self._flatten_schema(schema_cls):
        if not safe_issubclass(type_, AnyDocArray):
            continue
        sub_schema = self._subindices[field_name]._schema  # type: ignore
        for doc in docs:
            children = getattr(doc, field_name)
            for i, child in enumerate(children):
                # re-instantiate with the subindex schema so the doc carries
                # a `parent_id` field, then link it to its root document
                relinked = sub_schema(**child.__dict__)
                relinked.parent_id = doc.id
                children[i] = relinked
##################################################
# Behind-the-scenes magic #
# Subclasses should not need to implement these #
##################################################
def __class_getitem__(cls, item: Type[TSchema]):
    """Parametrize the index with a Document schema, e.g. ``MyIndex[MyDoc]``.

    Returns a dynamically created subclass of `cls` whose `_schema` is fixed
    to `item`.
    """
    if not isinstance(item, type):
        # do nothing
        # enables use in static contexts with type vars, e.g. as type annotation
        return Generic.__class_getitem__.__func__(cls, item)
    if not safe_issubclass(item, BaseDoc):
        raise ValueError(
            f'{cls.__name__}[item] `item` should be a Document not a {item} '
        )

    class _DocumentIndexTyped(cls):  # type: ignore
        _schema: Type[TSchema] = item

    # make the dynamic class present itself as e.g. `MyIndex[MyDoc]`
    _DocumentIndexTyped.__name__ = f'{cls.__name__}[{item.__name__}]'
    _DocumentIndexTyped.__qualname__ = f'{cls.__qualname__}[{item.__name__}]'

    return _DocumentIndexTyped
def build_query(self) -> QueryBuilder:
    """
    Build a query for this DocumentIndex.

    :return: a new `QueryBuilder` object for this DocumentIndex
    """
    builder_cls = self.QueryBuilder  # type: ignore
    return builder_cls()
def _flatten_schema(
    cls, schema: Type[BaseDoc], name_prefix: str = ''
) -> List[Tuple[str, Type, 'ModelField']]:
    """Flatten the schema of a Document into a list of column names and types.
    Nested Documents are handled in a recursive manner by adding `'__'` as a prefix to the column name.

    :param schema: The schema to flatten
    :param name_prefix: prefix to append to the column names. Used for recursive calls to handle nesting.
    :return: A list of column names, types, and fields
    :raises ValueError: for Union types other than tensor unions or Optional
    """
    names_types_fields: List[Tuple[str, Type, 'ModelField']] = []
    for field_name, field_ in schema._docarray_fields().items():
        t_ = schema._get_field_annotation(field_name)
        inner_prefix = name_prefix + field_name + '__'

        if is_union_type(t_):
            union_args = get_args(t_)
            if is_tensor_union(t_):
                # a union of tensor types collapses to one AbstractTensor column
                names_types_fields.append(
                    (name_prefix + field_name, AbstractTensor, field_)
                )
            elif len(union_args) == 2 and type(None) in union_args:
                # simple "Optional" type, treat as special case:
                # treat as if it was a single non-optional type
                for t_arg in union_args:
                    if t_arg is not type(None):
                        if safe_issubclass(t_arg, BaseDoc):
                            # nested Document: recurse with extended prefix
                            names_types_fields.extend(
                                cls._flatten_schema(t_arg, name_prefix=inner_prefix)
                            )
                        else:
                            names_types_fields.append(
                                (name_prefix + field_name, t_arg, field_)
                            )
            else:
                raise ValueError(
                    f'Union type {t_} is not supported. Only Union of subclasses of AbstractTensor or Union[type, None] are supported.'
                )
        elif safe_issubclass(t_, BaseDoc):
            # nested Document: recurse with extended prefix
            names_types_fields.extend(
                cls._flatten_schema(t_, name_prefix=inner_prefix)
            )
        elif safe_issubclass(t_, AbstractTensor):
            names_types_fields.append(
                (name_prefix + field_name, AbstractTensor, field_)
            )
        else:
            names_types_fields.append((name_prefix + field_name, t_, field_))
    return names_types_fields
def _create_column_infos(self, schema: Type[BaseDoc]) -> Dict[str, _ColumnInfo]:
    """Collects information about every column that is implied by a given schema.

    :param schema: The schema (subclass of BaseDoc) to analyze and parse
        columns from
    :returns: A dictionary mapping from column names to column information.
    """
    infos: Dict[str, _ColumnInfo] = {}
    for name, type_, field_ in self._flatten_schema(schema):
        # Union types are handled in _flatten_schema
        if safe_issubclass(type_, AnyDocArray):
            # nested DocLists are stored in a subindex, not a DB column
            infos[name] = _ColumnInfo(
                docarray_type=type_, db_type=None, config=dict(), n_dim=None
            )
        else:
            infos[name] = self._create_single_column(field_, type_)
    return infos
def _create_single_column(self, field: 'ModelField', type_: Type) -> _ColumnInfo:
    """Build the `_ColumnInfo` for a single non-nested schema field.

    Resolves the DB column type (an explicit `col_type` from the field's
    extra config wins over `python_type_to_db_type`), merges the backend's
    default column config with the field's custom config, and extracts
    `n_dim` from parametrized tensor types.

    :param field: the pydantic field object of the schema attribute
    :param type_: the flattened python type of the column
    :return: the assembled `_ColumnInfo`
    :raises ValueError: if an explicit `col_type` is not a known db type
    """
    # field-level extra config lives in different places in pydantic v1 vs v2
    custom_config = (
        field.json_schema_extra if is_pydantic_v2 else field.field_info.extra
    )
    if custom_config is None:
        custom_config = dict()
    if 'col_type' in custom_config.keys():
        # the user explicitly chose a DB column type in `Field(...)`
        db_type = custom_config['col_type']
        custom_config.pop('col_type')
        if db_type not in self._db_config.default_column_config.keys():
            raise ValueError(
                f'The given col_type is not a valid db type: {db_type}'
            )
    else:
        db_type = self.python_type_to_db_type(type_)

    # custom (per-field) config overrides the backend defaults
    config = self._db_config.default_column_config[db_type].copy()
    config.update(custom_config)

    # parse n_dim from parametrized tensor type
    field_type = field.annotation if is_pydantic_v2 else field.type_
    if (
        hasattr(field_type, '__docarray_target_shape__')
        and field_type.__docarray_target_shape__
    ):
        # 1-D target shapes collapse to a scalar n_dim; otherwise keep the tuple
        if len(field_type.__docarray_target_shape__) == 1:
            n_dim = field_type.__docarray_target_shape__[0]
        else:
            n_dim = field_type.__docarray_target_shape__
    else:
        n_dim = None
    return _ColumnInfo(
        docarray_type=type_, db_type=db_type, config=config, n_dim=n_dim
    )
def _init_subindex(
self,
):
"""Initialize subindices if any column is subclass of AnyDocArray."""
for col_name, col in self._column_infos.items():
if safe_issubclass(col.docarray_type, AnyDocArray):
sub_db_config = copy.deepcopy(self._db_config)
sub_db_config.index_name = f'{self.index_name}__{col_name}'
self._subindices[col_name] = self.__class__[col.docarray_type.doc_type]( # type: ignore
db_config=sub_db_config, subindex=True
)
def _validate_docs(
    self, docs: Union[BaseDoc, Sequence[BaseDoc]]
) -> DocList[BaseDoc]:
    """Validates Document against the schema of the Document Index.
    For validation to pass, the schema of `docs` and the schema of the Document
    Index need to evaluate to the same flattened columns.
    If Validation fails, a ValueError is raised.

    :param docs: Document to evaluate. If this is a DocList, validation is
        performed using its `doc_type` (parametrization), without having to check
        every Document in `docs`. If this check fails, or if `docs` is not a
        DocList, evaluation is performed for every Document in `docs`.
    :return: A DocList containing the Documents in `docs`
    :raises ValueError: if a Document does not match the index schema
    """
    if isinstance(docs, BaseDoc):
        docs = [docs]
    if isinstance(docs, DocList):
        # validation shortcut for DocList; only look at the schema
        reference_schema_flat = self._flatten_schema(
            cast(Type[BaseDoc], self._schema)
        )
        reference_names = [name for (name, _, _) in reference_schema_flat]
        reference_types = [t_ for (_, t_, _) in reference_schema_flat]

        try:
            input_schema_flat = self._flatten_schema(docs.doc_type)
        except ValueError:
            # input schema cannot be flattened; fall back to per-Document
            # validation below
            pass
        else:
            input_names = [name for (name, _, _) in input_schema_flat]
            input_types = [t_ for (_, t_, _) in input_schema_flat]
            # this could be relaxed in the future,
            # see schema translation ideas in the design doc
            names_compatible = reference_names == input_names
            types_compatible = all(
                (safe_issubclass(t2, t1))
                for (t1, t2) in zip(reference_types, input_types)
            )
            if names_compatible and types_compatible:
                return docs

    # slow path: re-parse every Document against the index schema
    out_docs = []
    for i in range(len(docs)):
        # validate the data
        try:
            out_docs.append(
                cast(Type[BaseDoc], self._schema).parse_obj(dict(docs[i]))
            )
        except (ValueError, ValidationError) as e:
            raise ValueError(
                'The schema of the input Documents is not compatible with the schema of the Document Index.'
                ' Ensure that the field names of your data match the field names of the Document Index schema,'
                ' and that the types of your data match the types of the Document Index schema.'
                f'original error {e}'
            )

    return DocList[BaseDoc].construct(out_docs)
def _validate_search_field(self, search_field: Union[str, None]) -> bool:
"""
Validate if the given `search_field` corresponds to one of the
columns that was parsed from the schema.
Some backends, like weaviate, don't use search fields, so the function
returns True if `search_field` is empty or None.
:param search_field: search field to validate.
:return: True if the field exists, False otherwise.
"""
if not search_field or search_field in self._column_infos.keys():
if not search_field:
self._logger.info('Empty search field was passed')
return True
else:
valid_search_fields = ', '.join(self._column_infos.keys())
raise ValueError(
f'{search_field} is not a valid search field. Valid search fields are: {valid_search_fields}'
)
def _to_numpy(self, val: Any, allow_passthrough=False) -> Any:
"""
Converts a value to a numpy array, if possible.
:param val: The value to convert
:param allow_passthrough: If True, the value is returned as-is if it is not convertible to a numpy array.
If False, a `ValueError` is raised if the value is not convertible to a numpy array.
:return: The value as a numpy array, or as-is if `allow_passthrough` is True and the value is not convertible
"""
if isinstance(val, np.ndarray):
return val
if tf is not None and isinstance(val, TensorFlowTensor):
return val.unwrap().numpy()
if isinstance(val, (list, tuple)):
return np.array(val)
if torch is not None and isinstance(val, torch.Tensor):
return val.detach().numpy()
if tf is not None and isinstance(val, tf.Tensor):
return val.numpy()
if allow_passthrough:
return val
raise ValueError(f'Unsupported input type for {type(self)}: {type(val)}')
def _convert_dict_to_doc(
    self, doc_dict: Dict[str, Any], schema: Type[BaseDoc], inner=False
) -> BaseDoc:
    """
    Convert a dict to a Document object.

    :param doc_dict: A dict that contains all the flattened fields of a Document, the field names are the keys and follow the pattern {field_name} or {field_name}__{nested_name}
    :param schema: The schema of the Document object
    :param inner: True for recursive calls on nested Documents; the original
        (non-subindex) schema is only substituted at the top level
    :return: A Document object
    """
    for field_name, _ in schema._docarray_fields().items():
        t_ = schema._get_field_annotation(field_name)

        if not is_union_type(t_) and safe_issubclass(t_, AnyDocArray):
            # nested DocLists live in a subindex; fetch them by parent id
            self._get_subindex_doclist(doc_dict, field_name)

        if is_optional_type(t_):
            # unwrap Optional[X] to X before checking for a nested Document
            for t_arg in get_args(t_):
                if t_arg is not type(None):
                    t_ = t_arg

        if not is_union_type(t_) and safe_issubclass(t_, BaseDoc):
            # collect the flattened `field__subfield` keys back into one dict
            # and recurse to rebuild the nested Document
            inner_dict = {}

            fields = [
                key for key in doc_dict.keys() if key.startswith(f'{field_name}__')
            ]
            for key in fields:
                nested_name = key[len(f'{field_name}__') :]
                inner_dict[nested_name] = doc_dict.pop(key)

            doc_dict[field_name] = self._convert_dict_to_doc(
                inner_dict, t_, inner=True
            )

    if self._is_subindex and not inner:
        # subindex rows carry a bookkeeping `parent_id`; strip it and use the
        # user-facing (original) schema at the top level
        doc_dict.pop('parent_id', None)
        schema_cls = cast(Type[BaseDoc], self._ori_schema)
    else:
        schema_cls = cast(Type[BaseDoc], schema)
    doc = schema_cls(**doc_dict)
    return doc
def _dict_list_to_docarray(self, dict_list: Sequence[Dict[str, Any]]) -> DocList:
    """Convert a list of docs in dict type to a DocList of the schema type."""
    converted = [
        self._convert_dict_to_doc(row, self._schema) for row in dict_list  # type: ignore
    ]
    # a subindex presents its docs using the original (user-facing) schema
    if self._is_subindex:
        docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], self._ori_schema))
    else:
        docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], self._schema))
    return docs_cls(converted)
def __len__(self) -> int:
return self.num_docs()
def _index_subindex(self, column_to_data: Dict[str, Generator[Any, None, None]]):
"""Index subindex documents in the corresponding subindex.
:param column_to_data: A dictionary from column name to a generator
"""
for col_name, col in self._column_infos.items():
if safe_issubclass(col.docarray_type, AnyDocArray):
docs = [
doc for doc_list in column_to_data[col_name] for doc in doc_list
]
self._subindices[col_name].index(docs)
column_to_data.pop(col_name, None)
def _get_subindex_doclist(self, doc: Dict[str, Any], field_name: str):
"""Get subindex Documents from the index and assign them to `field_name`.
:param doc: a dictionary mapping from column name to value
:param field_name: field name of the subindex Documents
"""
if field_name not in doc.keys():
parent_id = doc['id']
nested_docs_id = self._subindices[field_name]._filter_by_parent_id(
parent_id
)
if nested_docs_id:
doc[field_name] = self._subindices[field_name].__getitem__(
nested_docs_id
)
def _find_subdocs(
    self,
    query: Union[AnyTensor, BaseDoc],
    subindex: str = '',
    search_field: str = '',
    limit: int = 10,
    **kwargs,
) -> FindResult:
    """Find documents in the subindex and return subindex docs and scores.

    :param query: query vector for KNN/ANN search
    :param subindex: name of the subindex to search on; nesting levels are
        separated with `'__'`, e.g. 'chunks__pages'
    :param search_field: name of the field to search on
    :param limit: maximum number of documents to return
    :return: a named tuple containing `documents` and `scores`
    :raises ValueError: if `subindex` is empty or its first segment is not
        an AnyDocArray field of the schema
    """
    fields = subindex.split('__')
    if not subindex or not safe_issubclass(
        self._schema._get_field_annotation(fields[0]), AnyDocArray  # type: ignore
    ):
        raise ValueError(f'subindex {subindex} is not valid')

    if len(fields) == 1:
        return self._subindices[fields[0]].find(
            query, search_field=search_field, limit=limit, **kwargs
        )

    # Recurse into the nested subindex.
    # BUGFIX: the remaining path segments must be re-joined with '__'
    # (double underscore) — the separator used by every sibling method
    # (find_subindex, find_batched, filter_subindex, _get_root_doc_id).
    # The previous '___' (triple underscore) produced a path that could
    # never match a schema field on the next recursion level.
    return self._subindices[fields[0]]._find_subdocs(
        query,
        subindex='__'.join(fields[1:]),
        search_field=search_field,
        limit=limit,
        **kwargs,
    )
def _get_root_doc_id(self, id: str, root: str, sub: str) -> str:
"""Get the root_id given the id of a subindex Document and the root and subindex name
:param id: id of the subindex Document
:param root: root index name
:param sub: subindex name
:return: the root_id of the Document
"""
subindex = self._subindices[root]
if not sub:
sub_doc = subindex._get_items([id])
parent_id = (
sub_doc[0]['parent_id']
if isinstance(sub_doc[0], dict)
else sub_doc[0].parent_id
)
return parent_id
else:
fields = sub.split('__')
cur_root_id = subindex._get_root_doc_id(
id, fields[0], '__'.join(fields[1:])
)
return self._get_root_doc_id(cur_root_id, root, '')
def subindex_contains(self, item: BaseDoc) -> bool:
    """Checks if a given BaseDoc item is contained in the index or any of its subindices.

    :param item: the given BaseDoc
    :return: if the given BaseDoc item is contained in the index/subindices
    """
    if self._is_index_empty:
        return False
    if not safe_issubclass(type(item), BaseDoc):
        raise TypeError(
            f"item must be an instance of BaseDoc or its subclass, not '{type(item).__name__}'"
        )
    if self.__contains__(item):
        return True
    # Fall back to a recursive lookup through every subindex.
    return any(sub.subindex_contains(item) for sub in self._subindices.values())
def filter_docs(
    docs: AnyDocArray,
    query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
    """
    Filter the Documents in the index according to the given filter query.

    Filter queries use the same syntax as the MongoDB query language
    (https://www.mongodb.com/docs/manual/tutorial/query-documents/#specify-conditions-using-query-operators).
    You can see a list of the supported operators here
    (https://www.mongodb.com/docs/manual/reference/operator/query/#std-label-query-selectors).

    :param docs: the DocList where to apply the filter
    :param query: the query to filter by, given as a dict / list of dicts or a
        JSON string encoding one
    :return: A DocList containing the Documents in `docs` that fulfill the
        filter conditions in the `query`
    """
    from docarray.utils._internal.query_language.query_parser import QueryParser

    if not query:
        # An empty query matches everything.
        return docs

    if isinstance(query, str):
        query = json.loads(query)
    parser = QueryParser(query)
    return DocList.__class_getitem__(docs.doc_type)(
        doc for doc in docs if parser.evaluate(doc)
    )
class FindResult(NamedTuple):
    # Result of a search: matched documents plus, aligned by position,
    # their similarity/distance scores.
    documents: DocList
    scores: AnyTensor
The provided code snippet includes necessary dependencies for implementing the `_execute_find_and_filter_query` function. Write a Python function `def _execute_find_and_filter_query( doc_index: BaseDocIndex, query: List[Tuple[str, Dict]], reverse_order: bool = False ) -> FindResult` to solve the following problem:
Executes all find calls from query first using `doc_index.find()`, and filtering queries after that using DocArray's `filter_docs()`. Text search is not supported. :param doc_index: Document index instance. Either InMemoryExactNNIndex or HnswDocumentIndex. :param query: Dictionary containing search and filtering configuration. :param reverse_order: Flag indicating whether to sort in descending order. If set to False (default), the sorting will be in ascending order. This option is necessary because, depending on the index, lower scores can correspond to better matches, and vice versa. :return: Sorted documents and their corresponding scores.
Here is the function:
def _execute_find_and_filter_query(
    doc_index: BaseDocIndex, query: List[Tuple[str, Dict]], reverse_order: bool = False
) -> FindResult:
    """
    Executes all find calls from query first using `doc_index.find()`,
    and filtering queries after that using DocArray's `filter_docs()`.
    Text search is not supported.

    :param doc_index: Document index instance.
        Either InMemoryExactNNIndex or HnswDocumentIndex.
    :param query: list of (operation, kwargs) pairs; supported operations are
        'find' and 'filter'.
    :param reverse_order: Flag indicating whether to sort in descending order.
        If set to False (default), the sorting will be in ascending order.
        This option is necessary because, depending on the index, lower scores
        can correspond to better matches, and vice versa.
    :return: Sorted documents and their corresponding scores.
    """
    docs_found = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))([])
    filter_conditions = []
    filter_limit = None
    # Maps document id -> score from the 'find' step(s); used to re-attach
    # scores after filtering.
    doc_to_score: Dict[str, Any] = {}
    for op, op_kwargs in query:
        if op == 'find':
            docs, scores = doc_index.find(**op_kwargs)
            docs_found.extend(docs)
            # `docs.id` is the column of ids, aligned positionally with scores.
            doc_to_score.update(zip(docs.__getattribute__('id'), scores))
        elif op == 'filter':
            filter_conditions.append(op_kwargs['filter_query'])
            # Only the limit of the last 'filter' op takes effect.
            filter_limit = op_kwargs.get('limit')
        else:
            raise ValueError(f'Query operation is not supported: {op}')
    doc_index._logger.debug(f'Executing query {query}')
    # Apply every filter condition in turn on the union of find results.
    docs_filtered = docs_found
    for cond in filter_conditions:
        docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))
        docs_filtered = docs_cls(filter_docs(docs_filtered, cond))

    if filter_limit:
        docs_filtered = docs_filtered[:filter_limit]

    doc_index._logger.debug(f'{len(docs_filtered)} results found')
    docs_and_scores = zip(
        docs_filtered, (doc_to_score[doc.id] for doc in docs_filtered)
    )
    docs_sorted = sorted(docs_and_scores, key=lambda x: x[1], reverse=reverse_order)
    # NOTE(review): if nothing survives filtering, `zip(*[])` yields no items
    # and this unpacking raises ValueError -- confirm callers never hit the
    # empty-result case, or guard it.
    out_docs, out_scores = zip(*docs_sorted)
    return FindResult(documents=out_docs, scores=out_scores)
6,660 | from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model
from docarray.utils._internal.pydantic import is_pydantic_v2
# Compatibility shim: `create_model_from_typeddict` was removed in pydantic v2,
# so under v2 we expose a stub that fails loudly when called.
if not is_pydantic_v2:
    from pydantic import create_model_from_typeddict
else:

    def create_model_from_typeddict(*args, **kwargs):
        raise NotImplementedError(
            "This function is not compatible with pydantic v2 anymore"
        )
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.utils._internal._typing import safe_issubclass
def safe_issubclass(x: type, a_tuple: type) -> bool:
    """
    This is a modified version of the built-in 'issubclass' function to support non-class input.
    Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).

    :param x: A class 'x'
    :param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
        Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
    """
    # Generic aliases (list[int], Union[...], ...), TypeVars and ForwardRefs
    # are not classes, so issubclass() would raise TypeError on them.
    # Cleanup: the original condition tested `is_typevar(x)` twice.
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) == ForwardRef)
    ):
        return False
    return issubclass(x, a_tuple)
The provided code snippet includes necessary dependencies for implementing the `create_doc_from_typeddict` function. Write a Python function `def create_doc_from_typeddict( typeddict_cls: Type['TypedDict'], # type: ignore **kwargs: Any, )` to solve the following problem:
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict. --- ```python from typing_extensions import TypedDict from docarray import BaseDoc from docarray.documents import Audio from docarray.documents.helper import create_doc_from_typeddict from docarray.typing.tensor.audio import AudioNdArray class MyAudio(TypedDict): title: str tensor: AudioNdArray Doc = create_doc_from_typeddict(MyAudio, __base__=Audio) assert safe_issubclass(Doc, BaseDoc) assert safe_issubclass(Doc, Audio) ``` --- :param typeddict_cls: TypedDict class to use for the new Document class :param kwargs: extra arguments to pass to `create_model_from_typeddict` :return: the new Document class
Here is the function:
def create_doc_from_typeddict(
    typeddict_cls: Type['TypedDict'],  # type: ignore
    **kwargs: Any,
):
    """
    Create a subclass of BaseDoc based on the fields of a `TypedDict`.
    This is a wrapper around pydantic's create_model_from_typeddict.

    :param typeddict_cls: TypedDict class to use for the new Document class
    :param kwargs: extra arguments to pass to `create_model_from_typeddict`
    :return: the new Document class
    :raises ValueError: if a '__base__' is supplied that is not a BaseDoc subclass
    """
    if '__base__' not in kwargs:
        # Default to BaseDoc so the result is always a Document class.
        kwargs['__base__'] = BaseDoc
    elif not safe_issubclass(kwargs['__base__'], BaseDoc):
        raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
    return create_model_from_typeddict(typeddict_cls, **kwargs)
6,661 | from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model
from docarray.utils._internal.pydantic import is_pydantic_v2
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.utils._internal._typing import safe_issubclass
def create_doc(
    __model_name: str,
    *,
    __config__: Optional[Type[BaseConfig]] = None,
    __base__: Type['T_doc'] = BaseDoc,  # type: ignore
    __module__: str = __name__,
    __validators__: Dict[str, 'AnyClassMethod'] = None,  # type: ignore
    __cls_kwargs__: Dict[str, Any] = None,  # type: ignore
    __slots__: Optional[Tuple[str, ...]] = None,
    **field_definitions: Any,
) -> Type['T_doc']:
    """
    Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.

    !!! note
        To pickle a dynamically created BaseDoc subclass:

        - the class must be defined globally
        - it must provide `__module__`

    ```python
    from docarray.documents import Audio
    from docarray.documents.helper import create_doc
    from docarray.typing.tensor.audio import AudioNdArray

    MyAudio = create_doc(
        'MyAudio',
        __base__=Audio,
        title=(str, ...),
        tensor=(AudioNdArray, ...),
    )

    assert safe_issubclass(MyAudio, BaseDoc)
    assert safe_issubclass(MyAudio, Audio)
    ```

    :param __model_name: name of the created model
    :param __config__: config class to use for the new model
    :param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
    :param __module__: module of the created model
    :param __validators__: a dict of method names and @validator class methods
    :param __cls_kwargs__: a dict for class creation
    :param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
    :param field_definitions: fields of the model (or extra fields if a base is supplied)
        in the format `<name>=(<type>, <default default>)` or `<name>=<default value>`
    :return: the new Document class
    :raises ValueError: if `__base__` is not BaseDoc or one of its subclasses
    """
    if not safe_issubclass(__base__, BaseDoc):
        # NOTE(review): this renders type(__base__) (usually the metaclass)
        # rather than __base__ itself -- possibly unintended; confirm.
        raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
    # Delegate the actual class creation to pydantic.
    doc = create_model(
        __model_name,
        __config__=__config__,
        __base__=__base__,
        __module__=__module__,
        __validators__=__validators__,
        __cls_kwargs__=__cls_kwargs__,
        __slots__=__slots__,
        **field_definitions,
    )
    return doc
The provided code snippet includes necessary dependencies for implementing the `create_doc_from_dict` function. Write a Python function `def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']` to solve the following problem:
Create a subclass of BaseDoc based on example data given as a dictionary. In case the example contains None as a value, corresponding field will be viewed as the type Any. --- ```python import numpy as np from docarray.documents import ImageDoc from docarray.documents.helper import create_doc_from_dict data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'} MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict) assert safe_issubclass(MyDoc, BaseDoc) ``` --- :param model_name: Name of the new Document class :param data_dict: Dictionary of field types to their corresponding values. :return: the new Document class
Here is the function:
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
    """
    Create a subclass of BaseDoc based on example data given as a dictionary.

    In case the example contains None as a value,
    the corresponding field will be viewed as the type Any.

    :param model_name: Name of the new Document class
    :param data_dict: Dictionary of field types to their corresponding values.
    :return: the new Document class
    :raises ValueError: if ``data_dict`` is empty
    """
    if not data_dict:
        raise ValueError('`data_dict` should contain at least one item')
    # BUGFIX: check for None explicitly instead of truthiness, so that falsy
    # example values such as 0, '' or False still yield their concrete type
    # (int, str, bool) instead of silently degrading to Any, as the docstring
    # promises only for None.
    field_types = {
        field: (type(value) if value is not None else Any, ...)
        for field, value in data_dict.items()
    }
    return create_doc(__model_name=model_name, **field_types)  # type: ignore
6,662 | from typing import (
Any,
Iterable,
List,
Sequence,
TypeVar,
Union,
cast,
no_type_check,
overload,
)
import numpy as np
from typing_extensions import SupportsIndex
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
def _is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it | null |
6,663 | import base64
import csv
import io
import os
import pathlib
import pickle
from abc import abstractmethod
from contextlib import nullcontext
from io import StringIO, TextIOWrapper
from itertools import compress
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
ContextManager,
Dict,
Generator,
Iterable,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import orjson
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.helper import (
_access_path_dict_to_nested_dict,
_all_access_paths_valid,
_dict_to_access_paths,
)
from docarray.utils._internal.compress import _decompress_bytes, _get_compress_ctx
from docarray.utils._internal.misc import import_library, ProtocolType
ALLOWED_PROTOCOLS = ARRAY_PROTOCOLS.union(SINGLE_PROTOCOLS)
ALLOWED_COMPRESSIONS = {'lz4', 'bz2', 'lzma', 'zlib', 'gzip'}
ProtocolType = Literal[
'protobuf', 'pickle', 'json', 'json-array', 'protobuf-array', 'pickle-array'
]
The provided code snippet includes necessary dependencies for implementing the `_protocol_and_compress_from_file_path` function. Write a Python function `def _protocol_and_compress_from_file_path( file_path: Union[pathlib.Path, str], default_protocol: Optional[ProtocolType] = None, default_compress: Optional[str] = None, ) -> Tuple[Optional[ProtocolType], Optional[str]]` to solve the following problem:
Extract protocol and compression algorithm from a string, use defaults if not found. :param file_path: path of a file. :param default_protocol: default serialization protocol used in case not found. :param default_compress: default compression method used in case not found. Examples: >>> _protocol_and_compress_from_file_path('./docarray_fashion_mnist.protobuf.gzip') ('protobuf', 'gzip') >>> _protocol_and_compress_from_file_path('/Documents/docarray_fashion_mnist.protobuf') ('protobuf', None) >>> _protocol_and_compress_from_file_path('/Documents/docarray_fashion_mnist.gzip') (None, gzip)
Here is the function:
def _protocol_and_compress_from_file_path(
    file_path: Union[pathlib.Path, str],
    default_protocol: Optional[ProtocolType] = None,
    default_compress: Optional[str] = None,
) -> Tuple[Optional[ProtocolType], Optional[str]]:
    """Extract protocol and compression algorithm from a string, use defaults if not found.

    E.g. 'mnist.protobuf.gzip' -> ('protobuf', 'gzip'),
    'mnist.protobuf' -> ('protobuf', default_compress),
    'mnist.gzip' -> (default_protocol, 'gzip').

    :param file_path: path of a file.
    :param default_protocol: default serialization protocol used in case not found.
    :param default_compress: default compression method used in case not found.
    :return: the detected (protocol, compression) pair.
    """
    protocol: Optional[ProtocolType] = default_protocol
    compress: Optional[str] = default_compress
    # Inspect every suffix so both '.protobuf' and '.gzip' are picked up.
    for suffix in pathlib.Path(file_path).suffixes:
        extension = suffix.replace('.', '')
        if extension in ALLOWED_PROTOCOLS:
            protocol = cast(ProtocolType, extension)
        elif extension in ALLOWED_COMPRESSIONS:
            compress = extension
    return protocol, compress
6,664 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
NONE_NDARRAY_PROTO_SHAPE = (0,)
NONE_NDARRAY_PROTO_DTYPE = 'None'
def _none_ndarray_proto() -> 'NdArrayProto':
    """Build the sentinel NdArrayProto that encodes a ``None`` tensor value."""
    from docarray.proto import NdArrayProto

    sentinel = parse_obj_as(NdArray, np.zeros(NONE_NDARRAY_PROTO_SHAPE))
    proto = NdArrayProto()
    # Encode the sentinel: an empty buffer/shape plus the reserved 'None' dtype tag.
    proto.dense.buffer = sentinel.tobytes()
    proto.dense.ClearField('shape')
    proto.dense.shape.extend(list(sentinel.shape))
    proto.dense.dtype = NONE_NDARRAY_PROTO_DTYPE
    return proto
6,665 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
def _none_docvec_proto() -> 'DocVecProto':
    # An empty DocVecProto (no columns at all) serves as the wire-format
    # sentinel for a ``None`` DocVec value.
    from docarray.proto import DocVecProto

    return DocVecProto()
6,666 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
def _none_list_of_docvec_proto() -> 'ListOfDocVecProto':
    # An empty ListOfDocVecProto (no entries) is the sentinel for a ``None``
    # list-of-DocVec value. (Return annotation corrected: the function returns
    # a ListOfDocVecProto, not a 'ListOfDocArrayProto'.)
    from docarray.proto import ListOfDocVecProto

    return ListOfDocVecProto()
6,667 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
NONE_NDARRAY_PROTO_SHAPE = (0,)
NONE_NDARRAY_PROTO_DTYPE = 'None'
def _is_none_ndarray_proto(proto: 'NdArrayProto') -> bool:
    """Return True iff ``proto`` is the sentinel encoding of a ``None`` tensor."""
    shape_is_sentinel = proto.dense.shape == list(NONE_NDARRAY_PROTO_SHAPE)
    dtype_is_sentinel = proto.dense.dtype == NONE_NDARRAY_PROTO_DTYPE
    return shape_is_sentinel and dtype_is_sentinel
6,668 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
def _is_none_docvec_proto(proto: 'DocVecProto') -> bool:
    """Return True iff ``proto`` is the sentinel (all columns empty) DocVecProto."""
    column_maps = (
        proto.tensor_columns,
        proto.doc_columns,
        proto.docs_vec_columns,
        proto.any_columns,
    )
    return all(columns == {} for columns in column_maps)
6,669 | import base64
import io
import pathlib
from abc import abstractmethod
from contextlib import nullcontext
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Optional,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
import orjson
from pydantic import parse_obj_as
from docarray.array.doc_list.io import (
SINGLE_PROTOCOLS,
IOMixinDocList,
_LazyRequestReader,
)
from docarray.array.doc_vec.column_storage import ColumnStorage
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.base_doc import BaseDoc
from docarray.base_doc.mixins.io import _type_to_protobuf
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
from docarray.utils._internal.misc import ProtocolType
def _is_none_list_of_docvec_proto(proto: 'ListOfDocVecProto') -> bool:
    """Return True iff ``proto`` is an empty ListOfDocVecProto sentinel."""
    from docarray.proto import ListOfDocVecProto

    if not isinstance(proto, ListOfDocVecProto):
        return False
    return len(proto.data) == 0
6,670 | from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Type, TypeVar
from typing_inspect import get_origin
from docarray.utils._internal._typing import safe_issubclass
def _similar_schemas(model1, model2):
    # Two schemas are considered "similar" when they declare exactly the same
    # field names with the same annotations; class identity is ignored.
    return model1.__annotations__ == model2.__annotations__
6,671 | import base64
import pickle
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from typing import _GenericAlias as GenericAlias
from typing import get_origin
import numpy as np
from typing_inspect import get_args, is_union_type
from docarray.base_doc.base_node import BaseNode
from docarray.typing import NdArray
from docarray.typing.proto_register import _PROTO_TYPE_NAME_TO_CLASS
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.compress import _compress_bytes, _decompress_bytes
from docarray.utils._internal.misc import ProtocolType, import_library
from docarray.utils._internal.pydantic import is_pydantic_v2
class BaseNode(ABC):
    """
    A DocumentNode is an object that can be nested inside a Document.
    A Document itself is a DocumentNode as well as prebuilt type
    """

    # Name under which this node type is registered for protobuf round-trips;
    # None for node types without their own proto representation.
    _proto_type_name: Optional[str] = None

    def _to_node_protobuf(self) -> 'NodeProto':
        """Convert itself into a NodeProto message. This function should
        be called when the self is nested into another Document that need to be
        converted into a protobuf

        :return: the nested item protobuf message
        """
        ...

    # NOTE(review): takes `cls`, so this looks like it should carry a
    # @classmethod (and likely @abstractmethod) decorator -- possibly lost in
    # transcription; confirm against the original module.
    def from_protobuf(cls: Type[T], pb_msg: T) -> T:
        ...

    def _docarray_to_json_compatible(self):
        """
        Convert itself into a json compatible object
        """
        ...
The provided code snippet includes necessary dependencies for implementing the `_type_to_protobuf` function. Write a Python function `def _type_to_protobuf(value: Any) -> 'NodeProto'` to solve the following problem:
Convert any type to a NodeProto :param value: any object that need to be serialized :return: a NodeProto
Here is the function:
def _type_to_protobuf(value: Any) -> 'NodeProto':
    """Convert any type to a NodeProto

    :param value: any object that need to be serialized
    :return: a NodeProto
    :raises ValueError: if ``value`` (or a dict key) has an unsupported type
    """
    from docarray.proto import NodeProto

    # Dispatch tables mapping a Python type to the NodeProto oneof field name.
    # NOTE: insertion order matters -- bool must be tested before int, since
    # isinstance(True, int) is True; the dict ordering guarantees this.
    basic_type_to_key = {
        str: 'text',
        bool: 'boolean',
        int: 'integer',
        float: 'float',
        bytes: 'blob',
    }

    container_type_to_key = {list: 'list', set: 'set', tuple: 'tuple'}

    nested_item: 'NodeProto'

    # Document nodes know how to serialize themselves.
    if isinstance(value, BaseNode):
        nested_item = value._to_node_protobuf()
        return nested_item

    # Native framework tensors get wrapped into docarray tensor types first.
    # (`torch` / `tf` are presumably module-level names set to None when the
    # framework is unavailable -- defined outside this chunk; confirm.)
    base_node_wrap: BaseNode
    if torch is not None:
        if isinstance(value, torch.Tensor):
            base_node_wrap = TorchTensor._docarray_from_native(value)
            return base_node_wrap._to_node_protobuf()

    if tf is not None:
        if isinstance(value, tf.Tensor):
            base_node_wrap = TensorFlowTensor._docarray_from_native(value)
            return base_node_wrap._to_node_protobuf()

    if isinstance(value, np.ndarray):
        base_node_wrap = NdArray._docarray_from_native(value)
        return base_node_wrap._to_node_protobuf()

    for basic_type, key_name in basic_type_to_key.items():
        if isinstance(value, basic_type):
            nested_item = NodeProto(**{key_name: value})
            return nested_item

    # Containers are serialized element-wise, recursively.
    for container_type, key_name in container_type_to_key.items():
        if isinstance(value, container_type):
            from docarray.proto import ListOfAnyProto

            lvalue = ListOfAnyProto()
            for item in value:
                lvalue.data.append(_type_to_protobuf(item))
            nested_item = NodeProto(**{key_name: lvalue})
            return nested_item

    if isinstance(value, dict):
        from docarray.proto import DictOfAnyProto

        data = {}
        for key, content in value.items():
            # Protobuf map keys must be strings; fail early otherwise.
            if not isinstance(key, str):
                raise ValueError(
                    f'Protobuf only support string as key, but got {type(key)}'
                )
            data[key] = _type_to_protobuf(content)
        struct = DictOfAnyProto(data=data)
        nested_item = NodeProto(dict=struct)
        return nested_item
    elif value is None:
        # None maps to an empty NodeProto (no oneof field set).
        nested_item = NodeProto()
        return nested_item
    else:
        raise ValueError(f'{type(value)} is not supported with protobuf')
6,672 | from typing import Any, Callable, Dict, Type
import orjson
from docarray.utils._internal.pydantic import is_pydantic_v2
def orjson_dumps(v, *, default=None) -> bytes:
    # Serialize ``v`` to JSON bytes with orjson; numpy arrays are handled
    # natively via OPT_SERIALIZE_NUMPY.
    # NOTE(review): the ``default`` parameter is accepted (callers may pass
    # one) but ignored in favour of the module's ``_default_orjson`` --
    # confirm this is intentional.
    return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
    # Same as orjson_dumps, but returns a str (UTF-8 decoded) instead of bytes.
    return orjson_dumps(v, default=default).decode()
6,673 | import glob
import itertools
import os
import re
from types import LambdaType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Type,
Union,
)
import numpy as np
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
def _is_access_path_valid(doc_type: Type['BaseDoc'], access_path: str) -> bool:
    """
    Check if a given access path ("__"-separated) is a valid path for a given Document class.
    """
    # A resolvable annotation means every segment of the path exists.
    return _get_field_annotation_by_access_path(doc_type, access_path) is not None
The provided code snippet includes necessary dependencies for implementing the `_all_access_paths_valid` function. Write a Python function `def _all_access_paths_valid( doc_type: Type['BaseDoc'], access_paths: List[str] ) -> List[bool]` to solve the following problem:
Check if all access paths ("__"-separated) are valid for a given Document class.
Here is the function:
def _all_access_paths_valid(
    doc_type: Type['BaseDoc'], access_paths: List[str]
) -> List[bool]:
    """
    Check if all access paths ("__"-separated) are valid for a given Document class.
    """
    results: List[bool] = []
    for path in access_paths:
        results.append(_is_access_path_valid(doc_type, path))
    return results
6,674 | import glob
import itertools
import os
import re
from types import LambdaType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Type,
Union,
)
import numpy as np
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
def _access_path_to_dict(access_path: str, value) -> Dict[str, Any]:
"""
Convert an access path ("__"-separated) and its value to a (potentially) nested dict.
```python
assert access_path_to_dict('image__url', 'img.png') == {'image': {'url': 'img.png'}}
```
"""
fields = access_path.split('__')
for field in reversed(fields):
result = {field: value}
value = result
return result
def _is_none_like(val: Any) -> bool:
"""
:param val: any value
:return: true iff `val` equals to `None`, `'None'` or `''`
"""
# Convoluted implementation, but fixes https://github.com/docarray/docarray/issues/1821
# tensor-like types can have unexpected (= broadcast) `==`/`in` semantics,
# so treat separately
is_np_arr = isinstance(val, np.ndarray)
if is_np_arr:
return False
is_torch_tens = is_torch_available() and isinstance(val, torch.Tensor)
if is_torch_tens:
return False
is_tf_tens = is_tf_available() and isinstance(val, tf.Tensor)
if is_tf_tens:
return False
is_jax_arr = is_jax_available() and isinstance(val, jax.numpy.ndarray)
if is_jax_arr:
return False
# "normal" case
return val in ['', 'None', None]
def _update_nested_dicts(
to_update: Dict[Any, Any], update_with: Dict[Any, Any]
) -> None:
"""
Update a dict with another one, while considering shared nested keys.
```python
d1 = {'image': {'tensor': None}, 'title': 'hello'}
d2 = {'image': {'url': 'some.png'}}
update_nested_dicts(d1, d2)
assert d1 == {'image': {'tensor': None, 'url': 'some.png'}, 'title': 'hello'}
```
:param to_update: dict that should be updated
:param update_with: dict to update with
:return: merged dict
"""
for k, v in update_with.items():
if k not in to_update.keys():
to_update[k] = v
else:
_update_nested_dicts(to_update[k], update_with[k])
The provided code snippet includes necessary dependencies for implementing the `_access_path_dict_to_nested_dict` function. Write a Python function `def _access_path_dict_to_nested_dict(access_path2val: Dict[str, Any]) -> Dict[Any, Any]` to solve the following problem:
Convert a dict, where the keys are access paths ("__"-separated) to a nested dictionary. --- ```python access_path2val = {'image__url': 'some.png'} assert access_path_dict_to_nested_dict(access_path2val) == { 'image': {'url': 'some.png'} } ``` --- :param access_path2val: dict with access_paths as keys :return: nested dict where the access path keys are split into separate field names and nested keys
Here is the function:
def _access_path_dict_to_nested_dict(access_path2val: Dict[str, Any]) -> Dict[Any, Any]:
"""
Convert a dict, where the keys are access paths ("__"-separated) to a nested dictionary.
---
```python
access_path2val = {'image__url': 'some.png'}
assert access_path_dict_to_nested_dict(access_path2val) == {
'image': {'url': 'some.png'}
}
```
---
:param access_path2val: dict with access_paths as keys
:return: nested dict where the access path keys are split into separate field names and nested keys
"""
nested_dict: Dict[Any, Any] = {}
for access_path, value in access_path2val.items():
field2val = _access_path_to_dict(
access_path=access_path,
value=None if _is_none_like(value) else value,
)
_update_nested_dicts(to_update=nested_dict, update_with=field2val)
return nested_dict | Convert a dict, where the keys are access paths ("__"-separated) to a nested dictionary. --- ```python access_path2val = {'image__url': 'some.png'} assert access_path_dict_to_nested_dict(access_path2val) == { 'image': {'url': 'some.png'} } ``` --- :param access_path2val: dict with access_paths as keys :return: nested dict where the access path keys are split into separate field names and nested keys |
6,675 | import glob
import itertools
import os
import re
from types import LambdaType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Type,
Union,
)
import numpy as np
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
The provided code snippet includes necessary dependencies for implementing the `_dict_to_access_paths` function. Write a Python function `def _dict_to_access_paths(d: dict) -> Dict[str, Any]` to solve the following problem:
Convert a (nested) dict to a Dict[access_path, value]. Access paths are defined as a path of field(s) separated by "__". ```python assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url', 'img.png'} ```
Here is the function:
def _dict_to_access_paths(d: dict) -> Dict[str, Any]:
"""
Convert a (nested) dict to a Dict[access_path, value].
Access paths are defined as a path of field(s) separated by "__".
```python
assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url', 'img.png'}
```
"""
result = {}
for k, v in d.items():
if isinstance(v, dict):
v = _dict_to_access_paths(v)
for nested_k, nested_v in v.items():
new_key = '__'.join([k, nested_k])
result[new_key] = nested_v
else:
result[k] = v
return result | Convert a (nested) dict to a Dict[access_path, value]. Access paths are defined as a path of field(s) separated by "__". ```python assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url', 'img.png'} ``` |
6,676 | import glob
import itertools
import os
import re
from types import LambdaType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Type,
Union,
)
import numpy as np
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
The provided code snippet includes necessary dependencies for implementing the `get_paths` function. Write a Python function `def get_paths( patterns: Union[str, List[str]], recursive: bool = True, size: Optional[int] = None, exclude_regex: Optional[str] = None, ) -> Generator[str, None, None]` to solve the following problem:
Yield file paths described by `patterns`. --- ```python from typing import Optional from docarray import BaseDoc, DocList from docarray.helper import get_paths from docarray.typing import TextUrl, ImageUrl class Banner(BaseDoc): text_url: TextUrl image_url: Optional[ImageUrl] # you can call it in the constructor docs = DocList[Banner]([Banner(text_url=url) for url in get_paths(patterns='*.txt')]) # and call it after construction to set the urls docs.image_url = list(get_paths(patterns='*.jpg', exclude_regex='test')) for doc in docs: assert doc.image_url.endswith('.txt') assert doc.text_url.endswith('.jpg') ``` --- :param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]' :param recursive: If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories :param size: the maximum number of the files :param exclude_regex: if set, then filenames that match to this pattern are not included. :yield: file paths
Here is the function:
def get_paths(
patterns: Union[str, List[str]],
recursive: bool = True,
size: Optional[int] = None,
exclude_regex: Optional[str] = None,
) -> Generator[str, None, None]:
"""
Yield file paths described by `patterns`.
---
```python
from typing import Optional
from docarray import BaseDoc, DocList
from docarray.helper import get_paths
from docarray.typing import TextUrl, ImageUrl
class Banner(BaseDoc):
text_url: TextUrl
image_url: Optional[ImageUrl]
# you can call it in the constructor
docs = DocList[Banner]([Banner(text_url=url) for url in get_paths(patterns='*.txt')])
# and call it after construction to set the urls
docs.image_url = list(get_paths(patterns='*.jpg', exclude_regex='test'))
for doc in docs:
assert doc.image_url.endswith('.txt')
assert doc.text_url.endswith('.jpg')
```
---
:param patterns: The pattern may contain simple shell-style wildcards,
e.g. '\*.py', '[\*.zip, \*.gz]'
:param recursive: If recursive is true, the pattern '**' will match any
files and zero or more directories and subdirectories
:param size: the maximum number of the files
:param exclude_regex: if set, then filenames that match to this pattern
are not included.
:yield: file paths
"""
if isinstance(patterns, str):
patterns = [patterns]
regex_to_exclude = None
if exclude_regex:
try:
regex_to_exclude = re.compile(exclude_regex)
except re.error:
raise ValueError(f'`{exclude_regex}` is not a valid regex.')
def _iter_file_extensions(ps):
return itertools.chain.from_iterable(
glob.iglob(os.path.expanduser(p), recursive=recursive) for p in ps
)
num_docs = 0
for file_path in _iter_file_extensions(patterns):
if regex_to_exclude and regex_to_exclude.match(file_path):
continue
yield file_path
num_docs += 1
if size is not None and num_docs >= size:
break | Yield file paths described by `patterns`. --- ```python from typing import Optional from docarray import BaseDoc, DocList from docarray.helper import get_paths from docarray.typing import TextUrl, ImageUrl class Banner(BaseDoc): text_url: TextUrl image_url: Optional[ImageUrl] # you can call it in the constructor docs = DocList[Banner]([Banner(text_url=url) for url in get_paths(patterns='*.txt')]) # and call it after construction to set the urls docs.image_url = list(get_paths(patterns='*.jpg', exclude_regex='test')) for doc in docs: assert doc.image_url.endswith('.txt') assert doc.text_url.endswith('.jpg') ``` --- :param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]' :param recursive: If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories :param size: the maximum number of the files :param exclude_regex: if set, then filenames that match to this pattern are not included. :yield: file paths |
6,677 | import glob
import itertools
import os
import re
from types import LambdaType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Type,
Union,
)
import numpy as np
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
def _shallow_copy_doc(doc):
return doc.__class__._shallow_copy(doc) | null |
6,678 | import warnings
from typing import Any, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
The provided code snippet includes necessary dependencies for implementing the `_expand_if_single_axis` function. Write a Python function `def _expand_if_single_axis(*matrices: np.ndarray) -> List[np.ndarray]` to solve the following problem:
Expands arrays that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be expanded :return: List of the input matrices, where single axis matrices are expanded at dim 0.
Here is the function:
def _expand_if_single_axis(*matrices: np.ndarray) -> List[np.ndarray]:
"""Expands arrays that only have one axis, at dim 0.
This ensures that all outputs can be treated as matrices, not vectors.
:param matrices: Matrices to be expanded
:return: List of the input matrices,
where single axis matrices are expanded at dim 0.
"""
expanded = []
for m in matrices:
if len(m.shape) == 1:
expanded.append(np.expand_dims(m, axis=0))
else:
expanded.append(m)
return expanded | Expands arrays that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be expanded :return: List of the input matrices, where single axis matrices are expanded at dim 0. |
6,679 | import warnings
from typing import Any, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
def _expand_if_scalar(arr: np.ndarray) -> np.ndarray:
if len(arr.shape) == 0: # avoid scalar output
arr = np.expand_dims(arr, axis=0)
return arr | null |
6,680 | import warnings
from typing import Any, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
def identity(array: np.ndarray) -> np.ndarray:
return array | null |
6,681 | import typing
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import TensorFlowTensor
from docarray.utils._internal.misc import import_library
The provided code snippet includes necessary dependencies for implementing the `_unsqueeze_if_single_axis` function. Write a Python function `def _unsqueeze_if_single_axis(*matrices: tf.Tensor) -> List[tf.Tensor]` to solve the following problem:
Unsqueezes tensors that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be unsqueezed :return: List of the input matrices, where single axis matrices are unsqueezed at dim 0.
Here is the function:
def _unsqueeze_if_single_axis(*matrices: tf.Tensor) -> List[tf.Tensor]:
"""
Unsqueezes tensors that only have one axis, at dim 0.
This ensures that all outputs can be treated as matrices, not vectors.
:param matrices: Matrices to be unsqueezed
:return: List of the input matrices,
where single axis matrices are unsqueezed at dim 0.
"""
unsqueezed = []
for m in matrices:
if len(m.shape) == 1:
unsqueezed.append(tf.expand_dims(m, axis=0))
else:
unsqueezed.append(m)
return unsqueezed | Unsqueezes tensors that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be unsqueezed :return: List of the input matrices, where single axis matrices are unsqueezed at dim 0. |
6,682 | import typing
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import TensorFlowTensor
from docarray.utils._internal.misc import import_library
The provided code snippet includes necessary dependencies for implementing the `_unsqueeze_if_scalar` function. Write a Python function `def _unsqueeze_if_scalar(t: tf.Tensor) -> tf.Tensor` to solve the following problem:
Unsqueezes tensor of a scalar, from shape () to shape (1,). :param t: tensor to unsqueeze. :return: unsqueezed tf.Tensor
Here is the function:
def _unsqueeze_if_scalar(t: tf.Tensor) -> tf.Tensor:
"""
Unsqueezes tensor of a scalar, from shape () to shape (1,).
:param t: tensor to unsqueeze.
:return: unsqueezed tf.Tensor
"""
if len(t.shape) == 0: # avoid scalar output
t = tf.expand_dims(t, 0)
return t | Unsqueezes tensor of a scalar, from shape () to shape (1,). :param t: tensor to unsqueeze. :return: unsqueezed tf.Tensor |
6,683 | import typing
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import TensorFlowTensor
from docarray.utils._internal.misc import import_library
def norm_left(t: tf.Tensor) -> TensorFlowTensor:
return TensorFlowTensor(tensor=t) | null |
6,684 | import typing
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import TensorFlowTensor
from docarray.utils._internal.misc import import_library
def norm_right(t: TensorFlowTensor) -> tf.Tensor:
return t.tensor | null |
6,685 | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import JaxArray
from docarray.utils._internal.misc import import_library
The provided code snippet includes necessary dependencies for implementing the `_expand_if_single_axis` function. Write a Python function `def _expand_if_single_axis(*matrices: jnp.ndarray) -> List[jnp.ndarray]` to solve the following problem:
Expands arrays that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be expanded :return: List of the input matrices, where single axis matrices are expanded at dim 0.
Here is the function:
def _expand_if_single_axis(*matrices: jnp.ndarray) -> List[jnp.ndarray]:
"""Expands arrays that only have one axis, at dim 0.
This ensures that all outputs can be treated as matrices, not vectors.
:param matrices: Matrices to be expanded
:return: List of the input matrices,
where single axis matrices are expanded at dim 0.
"""
expanded = []
for m in matrices:
if len(m.shape) == 1:
expanded.append(jnp.expand_dims(m, axis=0))
else:
expanded.append(m)
return expanded | Expands arrays that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be expanded :return: List of the input matrices, where single axis matrices are expanded at dim 0. |
6,686 | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import JaxArray
from docarray.utils._internal.misc import import_library
def _expand_if_scalar(arr: jnp.ndarray) -> jnp.ndarray:
if len(arr.shape) == 0: # avoid scalar output
arr = jnp.expand_dims(arr, axis=0)
return arr | null |
6,687 | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import JaxArray
from docarray.utils._internal.misc import import_library
def norm_left(t: jnp.ndarray) -> JaxArray:
return JaxArray(tensor=t) | null |
6,688 | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
from docarray.typing import JaxArray
from docarray.utils._internal.misc import import_library
def norm_right(t: JaxArray) -> jnp.ndarray:
return t.tensor | null |
6,689 | from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.utils._internal.misc import import_library
The provided code snippet includes necessary dependencies for implementing the `_unsqueeze_if_single_axis` function. Write a Python function `def _unsqueeze_if_single_axis(*matrices: torch.Tensor) -> List[torch.Tensor]` to solve the following problem:
Unsqueezes tensors that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be unsqueezed :return: List of the input matrices, where single axis matrices are unsqueezed at dim 0.
Here is the function:
def _unsqueeze_if_single_axis(*matrices: torch.Tensor) -> List[torch.Tensor]:
"""Unsqueezes tensors that only have one axis, at dim 0.
This ensures that all outputs can be treated as matrices, not vectors.
:param matrices: Matrices to be unsqueezed
:return: List of the input matrices,
where single axis matrices are unsqueezed at dim 0.
"""
unsqueezed = []
for m in matrices:
if len(m.shape) == 1:
unsqueezed.append(m.unsqueeze(0))
else:
unsqueezed.append(m)
return unsqueezed | Unsqueezes tensors that only have one axis, at dim 0. This ensures that all outputs can be treated as matrices, not vectors. :param matrices: Matrices to be unsqueezed :return: List of the input matrices, where single axis matrices are unsqueezed at dim 0. |
6,690 | from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import numpy as np
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
from docarray.utils._internal.misc import import_library
def _unsqueeze_if_scalar(t: torch.Tensor):
if len(t.shape) == 0: # avoid scalar output
t = t.unsqueeze(0)
return t | null |
6,691 | from contextlib import nullcontext
from typing import Dict, Iterable, Iterator, NoReturn, Optional, Sequence, Type, TypeVar
from rich import filesize
from typing_extensions import TYPE_CHECKING, Protocol
from docarray.utils._internal.misc import ProtocolType
from docarray.utils._internal.progress_bar import _get_progressbar
__version__ = '0.40.1'
The provided code snippet includes necessary dependencies for implementing the `get_version_info` function. Write a Python function `def get_version_info() -> Dict` to solve the following problem:
Get the version of libraries used in Jina and environment variables. :return: Version information and environment variables
Here is the function:
def get_version_info() -> Dict:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import platform
from uuid import getnode
import google.protobuf
from google.protobuf.internal import api_implementation
from docarray import __version__
return {
'docarray': __version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation.Type(),
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
} | Get the version of libraries used in Jina and environment variables. :return: Version information and environment variables |
6,692 | from contextlib import nullcontext
from typing import Dict, Iterable, Iterator, NoReturn, Optional, Sequence, Type, TypeVar
from rich import filesize
from typing_extensions import TYPE_CHECKING, Protocol
from docarray.utils._internal.misc import ProtocolType
from docarray.utils._internal.progress_bar import _get_progressbar
The provided code snippet includes necessary dependencies for implementing the `ibatch` function. Write a Python function `def ibatch(iterable: Sequence, batch_size: int = 32) -> Iterable` to solve the following problem:
Get an iterator of batched items from Sequence.
Here is the function:
def ibatch(iterable: Sequence, batch_size: int = 32) -> Iterable:
"""Get an iterator of batched items from Sequence."""
seq_len = len(iterable)
for offset in range(0, seq_len, batch_size):
yield iterable[offset : min(offset + batch_size, seq_len)] | Get an iterator of batched items from Sequence. |
6,693 | from contextlib import nullcontext
from typing import Dict, Iterable, Iterator, NoReturn, Optional, Sequence, Type, TypeVar
from rich import filesize
from typing_extensions import TYPE_CHECKING, Protocol
from docarray.utils._internal.misc import ProtocolType
from docarray.utils._internal.progress_bar import _get_progressbar
The provided code snippet includes necessary dependencies for implementing the `raise_req_error` function. Write a Python function `def raise_req_error(resp: 'requests.Response') -> NoReturn` to solve the following problem:
Definitely raise an error from a response.
Here is the function:
def raise_req_error(resp: 'requests.Response') -> NoReturn:
"""Definitely raise an error from a response."""
resp.raise_for_status()
raise ValueError(f'Unexpected response status: {resp.status_code}') | Definitely raise an error from a response. |
6,694 | from contextlib import nullcontext
from typing import Dict, Iterable, Iterator, NoReturn, Optional, Sequence, Type, TypeVar
from rich import filesize
from typing_extensions import TYPE_CHECKING, Protocol
from docarray.utils._internal.misc import ProtocolType
from docarray.utils._internal.progress_bar import _get_progressbar
ProtocolType = Literal[
'protobuf', 'pickle', 'json', 'json-array', 'protobuf-array', 'pickle-array'
]
def _get_progressbar(description: str, disable: bool, total: Optional[int]):
progress = _get_pbar(disable, total)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
def _to_binary_stream(
iterator: Iterator['Streamable'],
total: Optional[int] = None,
protocol: ProtocolType = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
) -> Iterator[bytes]:
if show_progress:
pbar, t = _get_progressbar(
'Serializing', disable=not show_progress, total=total
)
else:
pbar = nullcontext()
with pbar:
if show_progress:
_total_size = 0
count = 0
pbar.start_task(t)
for item in iterator:
item_bytes = item.to_bytes(protocol=protocol, compress=compress)
len_item_as_bytes = len(item_bytes).to_bytes(4, 'big', signed=False)
all_bytes = len_item_as_bytes + item_bytes
yield all_bytes
if show_progress:
_total_size += len(all_bytes)
count += 1
pbar.update(t, advance=1, total_size=str(filesize.decimal(_total_size)))
yield int(0).to_bytes(4, 'big', signed=False) | null |
6,695 | from contextlib import nullcontext
from typing import Dict, Iterable, Iterator, NoReturn, Optional, Sequence, Type, TypeVar
from rich import filesize
from typing_extensions import TYPE_CHECKING, Protocol
from docarray.utils._internal.misc import ProtocolType
from docarray.utils._internal.progress_bar import _get_progressbar
class ReadableBytes(Protocol):
def read(self, size: int = -1) -> bytes:
...
def close(self):
...
T = TypeVar('T', bound=Streamable)
ProtocolType = Literal[
'protobuf', 'pickle', 'json', 'json-array', 'protobuf-array', 'pickle-array'
]
def _get_progressbar(description: str, disable: bool, total: Optional[int]):
progress = _get_pbar(disable, total)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
def _from_binary_stream(
cls: Type[T],
stream: ReadableBytes,
total: Optional[int] = None,
protocol: ProtocolType = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
) -> Iterator['T']:
try:
if show_progress:
pbar, t = _get_progressbar(
'Deserializing', disable=not show_progress, total=total
)
else:
pbar = nullcontext()
with pbar:
if show_progress:
_total_size = 0
pbar.start_task(t)
while True:
len_bytes = stream.read(4)
if len(len_bytes) < 4:
raise ValueError('Unexpected end of stream')
len_item = int.from_bytes(len_bytes, 'big', signed=False)
if len_item == 0:
break
item_bytes = stream.read(len_item)
if len(item_bytes) < len_item:
raise ValueError('Unexpected end of stream')
item = cls.from_bytes(item_bytes, protocol=protocol, compress=compress)
yield item
if show_progress:
_total_size += len_item + 4
pbar.update(
t, advance=1, total_size=str(filesize.decimal(_total_size))
)
finally:
stream.close() | null |
6,696 | from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
Reducing 2 DocLists consists in adding Documents in the second DocList
to the first DocList if they do not exist.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
casted = left.doc_type(**doc.__dict__)
left.append(casted)
return left
The provided code snippet includes necessary dependencies for implementing the `reduce_all` function. Write a Python function `def reduce_all(docs: List[DocList]) -> DocList` to solve the following problem:
Reduces a list of DocLists into one DocList. Changes are applied to the first DocList in-place. The resulting DocList contains Documents of all DocLists. If a Document exists (identified by their ID) in many DocLists, data properties are merged with priority to the left-most DocLists (that is, if a data attribute is set in a Document belonging to many DocLists, the attribute value of the left-most DocList is kept). Nested DocLists belonging to many DocLists are also reduced in the same way. !!! note - Nested DocLists order does not follow any specific rule. You might want to re-sort them in a later step. - The final result depends on the order of DocLists when applying reduction. :param docs: List of DocLists to be reduced :return: the resulting DocList
Here is the function:
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
'In order to reduce DocLists' ' we should have more than one DocList'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left | Reduces a list of DocLists into one DocList. Changes are applied to the first DocList in-place. The resulting DocList contains Documents of all DocLists. If a Document exists (identified by their ID) in many DocLists, data properties are merged with priority to the left-most DocLists (that is, if a data attribute is set in a Document belonging to many DocLists, the attribute value of the left-most DocList is kept). Nested DocLists belonging to many DocLists are also reduced in the same way. !!! note - Nested DocLists order does not follow any specific rule. You might want to re-sort them in a later step. - The final result depends on the order of DocLists when applying reduction. :param docs: List of DocLists to be reduced :return: the resulting DocList |
6,697 | from contextlib import nullcontext
from math import ceil
from multiprocessing.pool import Pool, ThreadPool
from typing import Callable, Generator, Optional, TypeVar, Union
from rich.progress import track
from docarray import BaseDoc
from docarray.array.any_array import AnyDocArray
from docarray.helper import _is_lambda_or_partial_or_local_function
T = TypeVar('T', bound=AnyDocArray)
T_doc = TypeVar('T_doc', bound=BaseDoc)
def _get_pool(backend, num_worker) -> Union[Pool, ThreadPool]:
"""
Get Pool instance for multiprocessing or ThreadPool instance for multithreading.
"""
if backend == 'thread':
return ThreadPool(processes=num_worker)
elif backend == 'process':
return Pool(processes=num_worker)
else:
raise ValueError(
f'`backend` must be either `process` or `thread`, receiving {backend}'
)
def _is_lambda_or_partial_or_local_function(func: Callable[[Any], Any]) -> bool:
"""
Return True if `func` is lambda, local or partial function, else False.
"""
return (
(isinstance(func, LambdaType) and func.__name__ == '<lambda>')
or not hasattr(func, '__qualname__')
or ('<locals>' in func.__qualname__)
)
The provided code snippet includes necessary dependencies for implementing the `map_docs` function. Write a Python function `def map_docs( docs: T, func: Callable[[T_doc], T_doc], backend: str = 'thread', num_worker: Optional[int] = None, pool: Optional[Union[Pool, ThreadPool]] = None, show_progress: bool = False, ) -> Generator[T_doc, None, None]` to solve the following problem:
Return an iterator that applies `func` to every Document in `docs` in parallel, yielding the results. --- ```python from docarray import DocList from docarray.documents import ImageDoc from docarray.utils.map import map_docs def load_url_to_tensor(img: ImageDoc) -> ImageDoc: img.tensor = img.url.load() return img url = ( 'https://upload.wikimedia.org/wikipedia/commons/8/80/' 'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg' ) docs = DocList[ImageDoc]([ImageDoc(url=url) for _ in range(100)]) docs = DocList[ImageDoc]( list(map_docs(docs, load_url_to_tensor, backend='thread')) ) # threading is usually a good option for IO-bound tasks such as loading an # ImageDoc from url for doc in docs: assert doc.tensor is not None ``` --- :param docs: DocList to apply function to :param func: a function that takes a [`BaseDoc`][docarray.base_doc.doc.BaseDoc] as input and outputs a [`BaseDoc`][docarray.base_doc.doc.BaseDoc]. :param backend: `thread` for multithreading and `process` for multiprocessing. Defaults to `thread`. In general, if `func` is IO-bound then `thread` is a good choice. On the other hand, if `func` is CPU-bound, then you may use `process`. In practice, you should try yourselves to figure out the best value. However, if you wish to modify the elements in-place, regardless of IO/CPU-bound, you should always use `thread` backend. Note that computation that is offloaded to non-python code (e.g. through np/torch/tf) falls under the "IO-bound" category. !!! warning When using `process` backend, your `func` should not modify elements in-place. This is because the multiprocessing backend passes the variable via pickle and works in another process. The passed object and the original object do **not** share the same memory. :param num_worker: the number of parallel workers. If not given, the number of CPUs in the system will be used. :param pool: use an existing/external process or thread pool. If given, you will be responsible for closing the pool. 
:param show_progress: show a progress bar. Defaults to False. :return: yield Documents returned from `func`
Here is the function:
def map_docs(
    docs: T,
    func: Callable[[T_doc], T_doc],
    backend: str = 'thread',
    num_worker: Optional[int] = None,
    pool: Optional[Union[Pool, ThreadPool]] = None,
    show_progress: bool = False,
) -> Generator[T_doc, None, None]:
    """Apply `func` to every Document in `docs` in parallel, yielding the
    results one by one.

    :param docs: DocList whose Documents are mapped.
    :param func: function taking a BaseDoc and returning a BaseDoc.
    :param backend: 'thread' (good for IO-bound work and in-place mutation)
        or 'process' (good for CPU-bound work). With 'process', `func` must
        be picklable and must not rely on in-place mutation, since each
        worker operates on a pickled copy.
    :param num_worker: number of parallel workers; defaults to the number
        of CPUs in the system.
    :param pool: reuse an existing process/thread pool; if given, the caller
        stays responsible for closing it.
    :param show_progress: whether to render a progress bar.
    :return: generator over the Documents returned by `func`.
    :raises ValueError: if `backend` is 'process' and `func` is a lambda,
        partial or locally defined function (these cannot be pickled).
    """
    if backend == 'process' and _is_lambda_or_partial_or_local_function(func):
        raise ValueError(
            f'Multiprocessing does not allow functions that are local, lambda or partial: {func}'
        )

    # When the caller supplies a pool we must not close it, so enter a
    # no-op context instead of the pool itself.
    if pool:
        worker_pool: Union[Pool, ThreadPool] = pool
        ctx: Union[nullcontext, Union[Pool, ThreadPool]] = nullcontext()
    else:
        worker_pool = _get_pool(backend, num_worker)
        ctx = worker_pool

    with ctx:
        results = worker_pool.imap(func, docs)
        for doc in track(results, total=len(docs), disable=not show_progress):
            yield doc
:param show_progress: show a progress bar. Defaults to False. :return: yield Documents returned from `func` |
6,698 | from contextlib import nullcontext
from math import ceil
from multiprocessing.pool import Pool, ThreadPool
from typing import Callable, Generator, Optional, TypeVar, Union
from rich.progress import track
from docarray import BaseDoc
from docarray.array.any_array import AnyDocArray
from docarray.helper import _is_lambda_or_partial_or_local_function
T = TypeVar('T', bound=AnyDocArray)
T_doc = TypeVar('T_doc', bound=BaseDoc)
def _get_pool(backend, num_worker) -> Union[Pool, ThreadPool]:
"""
Get Pool instance for multiprocessing or ThreadPool instance for multithreading.
"""
if backend == 'thread':
return ThreadPool(processes=num_worker)
elif backend == 'process':
return Pool(processes=num_worker)
else:
raise ValueError(
f'`backend` must be either `process` or `thread`, receiving {backend}'
)
def _is_lambda_or_partial_or_local_function(func: Callable[[Any], Any]) -> bool:
"""
Return True if `func` is lambda, local or partial function, else False.
"""
return (
(isinstance(func, LambdaType) and func.__name__ == '<lambda>')
or not hasattr(func, '__qualname__')
or ('<locals>' in func.__qualname__)
)
The provided code snippet includes necessary dependencies for implementing the `map_docs_batched` function. Write a Python function `def map_docs_batched( docs: T, func: Callable[[T], Union[T, T_doc]], batch_size: int, backend: str = 'thread', num_worker: Optional[int] = None, shuffle: bool = False, pool: Optional[Union[Pool, ThreadPool]] = None, show_progress: bool = False, ) -> Generator[Union[T, T_doc], None, None]` to solve the following problem:
Return an iterator that applies `func` to every **minibatch** of iterable in parallel, yielding the results. Each element in the returned iterator is an `AnyDocArray`. --- ```python from docarray import BaseDoc, DocList from docarray.utils.map import map_docs_batched class MyDoc(BaseDoc): name: str def upper_case_name(docs: DocList[MyDoc]) -> DocList[MyDoc]: docs.name = [n.upper() for n in docs.name] return docs batch_size = 16 docs = DocList[MyDoc]([MyDoc(name='my orange cat') for _ in range(100)]) it = map_docs_batched(docs, upper_case_name, batch_size=batch_size) for i, d in enumerate(it): docs[i * batch_size : (i + 1) * batch_size] = d assert len(docs) == 100 print(docs.name[:3]) ``` --- ``` ['MY ORANGE CAT', 'MY ORANGE CAT', 'MY ORANGE CAT'] ``` --- :param docs: DocList to apply function to :param batch_size: Size of each generated batch (except the last one, which might be smaller). :param shuffle: If set, shuffle the Documents before dividing into minibatches. :param func: a function that takes an :class:`AnyDocArray` as input and outputs an :class:`AnyDocArray` or a :class:`BaseDoc`. :param backend: `thread` for multithreading and `process` for multiprocessing. Defaults to `thread`. In general, if `func` is IO-bound then `thread` is a good choice. On the other hand, if `func` is CPU-bound, then you may use `process`. In practice, you should try yourselves to figure out the best value. However, if you wish to modify the elements in-place, regardless of IO/CPU-bound, you should always use `thread` backend. Note that computation that is offloaded to non-python code (e.g. through np/torch/tf) falls under the "IO-bound" category. !!! warning When using `process` backend, your `func` should not modify elements in-place. This is because the multiprocessing backend passes the variable via pickle and works in another process. The passed object and the original object do **not** share the same memory. :param num_worker: the number of parallel workers. 
If not given, then the number of CPUs in the system will be used. :param show_progress: show a progress bar :param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool. :return: yield DocLists returned from `func`
Here is the function:
def map_docs_batched(
    docs: T,
    func: Callable[[T], Union[T, T_doc]],
    batch_size: int,
    backend: str = 'thread',
    num_worker: Optional[int] = None,
    shuffle: bool = False,
    pool: Optional[Union[Pool, ThreadPool]] = None,
    show_progress: bool = False,
) -> Generator[Union[T, T_doc], None, None]:
    """Apply `func` to every minibatch of `docs` in parallel, yielding the
    per-batch results.

    :param docs: DocList to process.
    :param func: function taking an AnyDocArray batch and returning an
        AnyDocArray or a BaseDoc.
    :param batch_size: size of each generated batch (the last one may be
        smaller).
    :param backend: 'thread' (good for IO-bound work and in-place mutation)
        or 'process' (good for CPU-bound work). With 'process', `func` must
        be picklable and must not rely on in-place mutation, since each
        worker operates on a pickled copy.
    :param num_worker: number of parallel workers; defaults to the number
        of CPUs in the system.
    :param shuffle: shuffle the Documents before batching when set.
    :param pool: reuse an existing pool; if given, `backend` is ignored and
        the caller stays responsible for closing it.
    :param show_progress: whether to render a progress bar.
    :return: generator over the values returned by `func` for each batch.
    :raises ValueError: if `backend` is 'process' and `func` is a lambda,
        partial or locally defined function (these cannot be pickled).
    """
    if backend == 'process' and _is_lambda_or_partial_or_local_function(func):
        raise ValueError(
            f'Multiprocessing does not allow functions that are local, lambda or partial: {func}'
        )

    # When the caller supplies a pool we must not close it, so enter a
    # no-op context instead of the pool itself.
    if pool:
        worker_pool: Union[Pool, ThreadPool] = pool
        ctx: Union[nullcontext, Union[Pool, ThreadPool]] = nullcontext()
    else:
        worker_pool = _get_pool(backend, num_worker)
        ctx = worker_pool

    n_batches = ceil(len(docs) / batch_size)
    with ctx:
        batches = docs._batch(batch_size=batch_size, shuffle=shuffle)
        for batch_result in track(
            worker_pool.imap(func, batches),
            total=n_batches,
            disable=not show_progress,
        ):
            yield batch_result
If not given, then the number of CPUs in the system will be used. :param show_progress: show a progress bar :param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool. :return: yield DocLists returned from `func` |
6,699 | from typing import IO, TYPE_CHECKING, Callable, Optional
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import ( # noqa: F401
JaxArray,
JaxArrayEmbedding,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor.audio import AudioJaxArray # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageJaxArray # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoJaxArray # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _compress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
data = frame.compress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.compress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.compress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.compress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.compress(data)
return data | null |
6,700 | from typing import IO, TYPE_CHECKING, Callable, Optional
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import ( # noqa: F401
JaxArray,
JaxArrayEmbedding,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor.audio import AudioJaxArray # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageJaxArray # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoJaxArray # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
def _decompress_bytes(data: bytes, algorithm: Optional[str] = None) -> bytes:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
data = frame.decompress(data)
elif algorithm == 'bz2':
import bz2
data = bz2.decompress(data)
elif algorithm == 'lzma':
import lzma
data = lzma.decompress(data)
elif algorithm == 'zlib':
import zlib
data = zlib.decompress(data)
elif algorithm == 'gzip':
import gzip
data = gzip.decompress(data)
return data | null |
6,701 | from typing import IO, TYPE_CHECKING, Callable, Optional
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import ( # noqa: F401
JaxArray,
JaxArrayEmbedding,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor.audio import AudioJaxArray # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageJaxArray # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoJaxArray # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_compress_ctx(algorithm: Optional[str] = None) -> Optional[Callable]:
if algorithm == 'lz4':
if TYPE_CHECKING:
from lz4 import frame
else:
lz4 = import_library('lz4', raise_error=True) # noqa: F841
from lz4 import frame
def _fun(x: IO[bytes]):
return frame.LZ4FrameFile(x, 'wb')
compress_ctx = _fun
elif algorithm == 'gzip':
import gzip
def _fun(x: IO[bytes]):
return gzip.GzipFile(fileobj=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'bz2':
import bz2
def _fun(x: IO[bytes]):
return bz2.BZ2File(filename=x, mode='wb')
compress_ctx = _fun
elif algorithm == 'lzma':
import lzma
def _fun(x: IO[bytes]):
return lzma.LZMAFile(filename=x, mode='wb')
compress_ctx = _fun
else:
compress_ctx = None
return compress_ctx | null |
6,702 | import os
from functools import lru_cache
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `_get_cache_path` function. Write a Python function `def _get_cache_path() -> Path` to solve the following problem:
Get the path to the cache directory. :return: The path to the cache directory.
Here is the function:
def _get_cache_path() -> Path:
"""
Get the path to the cache directory.
:return: The path to the cache directory.
"""
cache_path = Path.home() / '.cache' / 'docarray'
if "DOCARRAY_CACHE" in os.environ:
cache_path = Path(os.environ["DOCARRAY_CACHE"])
cache_path.mkdir(parents=True, exist_ok=True)
return cache_path | Get the path to the cache directory. :return: The path to the cache directory. |
6,703 | import importlib
import os
import re
import types
from typing import Any, Optional, Literal
import numpy as np
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path | null |
6,704 | import importlib
import os
import re
import types
from typing import Any, Optional, Literal
import numpy as np
def is_np_int(item: Any) -> bool:
    """Return True if `item` is a zero-dimensional numpy integer scalar."""
    dtype = getattr(item, 'dtype', None)
    ndim = getattr(item, 'ndim', None)
    # objects without array metadata (plain ints, strings, ...) are not numpy ints
    if dtype is None or ndim is None:
        return False
    try:
        return ndim == 0 and np.issubdtype(dtype, np.integer)
    except TypeError:
        # issubdtype can reject exotic dtype-like objects
        return False
6,705 | import importlib
import os
import re
import types
from typing import Any, Optional, Literal
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `is_notebook` function. Write a Python function `def is_notebook() -> bool` to solve the following problem:
Check if we're running in a Jupyter notebook, using magic command `get_ipython` that only available in Jupyter. :return: True if run in a Jupyter notebook else False.
Here is the function:
def is_notebook() -> bool:
    """Detect whether we are running inside a Jupyter notebook by probing
    the IPython magic `get_ipython`, which only exists under IPython.

    :return: True when run in a Jupyter notebook, else False.
    """
    try:
        shell_name = get_ipython().__class__.__name__  # type: ignore
    except NameError:
        # get_ipython is undefined outside IPython entirely
        return False
    # ZMQInteractiveShell = Jupyter notebook/lab; Shell = Google Colab;
    # anything else (e.g. TerminalInteractiveShell) is not a notebook
    return shell_name in ('ZMQInteractiveShell', 'Shell')
6,706 | import re
from functools import partial
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
PLACEHOLDER_PATTERN = re.compile(r'\{\s*([a-zA-Z0-9_]*)\s*}')
def dunder_get(_dict: Any, key: str) -> Any:
    """Return the value for a "dunder separated key".

    A "dunder separated key" is a field name that may contain '__' to
    reference nested keys in a dict or object, e.g.::

        >>> data = {'a': {'b': 1}}
        >>> dunder_get(data, 'a__b')

    :param _dict: dict, sequence or object to index into
    :param key: a first-level or '__'-nested key
    :return: the value found under the key
    """
    if _dict is None:
        return None

    # peel off the first path segment; `tail` is empty when no '__' remains
    head, _sep, tail = key.partition('__')

    index: Union[str, int] = head
    try:
        index = int(head)  # numeric segments index into sequences
    except ValueError:
        pass

    if isinstance(index, int):
        value = _dict[index]
    elif isinstance(_dict, dict):
        value = _dict[index]
    elif isinstance(_dict, Sequence):
        value = _dict[int(index)]
    else:
        value = getattr(_dict, index)

    return dunder_get(value, tail) if tail else value
def point_partition(key: str) -> Tuple[str, Optional[str]]:
    """Splits a dot-separated key into 2 parts.
    The first part is everything before the final dot;
    the second part is everything after it, or None when the key
    contains no dot.
    >>> point_partition('a.b.c')
    >>> ('a.b', 'c')
    """
    parts = key.rsplit('.', 1)
    return (parts[0], parts[1]) if len(parts) > 1 else (parts[0], None)
iff_not_none = partial(iff, lambda x: x is not None)
def guard_str(val: Any) -> str:
    """Return `val` unchanged if it is a str; otherwise raise LookupyError."""
    if isinstance(val, str):
        return val
    raise LookupyError('Value not a {classinfo}'.format(classinfo=str))
def guard_iter(val: Any) -> Iterator:
    """Return `val` unchanged if it is iterable; otherwise raise LookupyError."""
    try:
        iter(val)
    except TypeError:
        raise LookupyError('Value not an iterable')
    return val
The provided code snippet includes necessary dependencies for implementing the `lookup` function. Write a Python function `def lookup(key: str, val: Any, doc: Any) -> bool` to solve the following problem:
Checks if key-val pair exists in doc using various lookup types The lookup types are derived from the `key` and then used to check if the lookup holds true for the document:: >>> lookup('text.exact', 'hello', doc) The above will return True if doc.text == 'hello' else False. And >>> lookup('text.exact', '{tags__name}', doc) will return True if doc.text == doc.tags.name else False :param key: the field name to find :param val: object to match the value in the document against :param doc: the document to match
Here is the function:
def lookup(key: str, val: Any, doc: Any) -> bool:
    """Checks if key-val pair exists in doc using various lookup types
    The lookup types are derived from the `key` and then used to check
    if the lookup holds true for the document::
        >>> lookup('text.exact', 'hello', doc)
    The above will return True if doc.text == 'hello' else False. And
        >>> lookup('text.exact', '{tags__name}', doc)
    will return True if doc.text == doc.tags.name else False
    :param key: the field name to find
    :param val: object to match the value in the document against
    :param doc: the document to match
    """
    # split 'field.operator' into the field path and the trailing operator,
    # e.g. 'text.exact' -> ('text', 'exact')
    get_key, last = point_partition(key)
    # a '{...}' placeholder in `val` is resolved against `doc` itself,
    # so one field can be compared with another field of the same document
    if isinstance(val, str) and val.startswith('{'):
        r = PLACEHOLDER_PATTERN.findall(val)
        if r and len(r) == 1:
            val = getattr(doc, r[0], None)
        else:
            raise ValueError(f'The placeholder `{val}` is illegal')
    # resolve the field on the document; '__' keys walk nested structures
    field_exists = True
    try:
        if '__' in get_key:
            value = dunder_get(doc, get_key)
        else:
            value = getattr(doc, get_key)
    except (AttributeError, KeyError):
        field_exists = False
        # only the 'exists' operator is meaningful on a missing field;
        # every other operator fails immediately
        if last != 'exists':
            return False
    # dispatch on the comparison operator extracted from the key
    if last == 'exact':
        return value == val
    elif last == 'neq':
        return value != val
    elif last == 'contains':
        val = guard_str(val)
        return iff_not_none(value, lambda y: val in y)
    elif last == 'icontains':
        val = guard_str(val)
        return iff_not_none(value, lambda y: val.lower() in y.lower())
    elif last == 'in':
        val = guard_iter(val)
        return value in val
    elif last == 'nin':
        val = guard_iter(val)
        return value not in val
    elif last == 'startswith':
        val = guard_str(val)
        return iff_not_none(value, lambda y: y.startswith(val))
    elif last == 'istartswith':
        val = guard_str(val)
        return iff_not_none(value, lambda y: y.lower().startswith(val.lower()))
    elif last == 'endswith':
        val = guard_str(val)
        return iff_not_none(value, lambda y: y.endswith(val))
    elif last == 'iendswith':
        val = guard_str(val)
        return iff_not_none(value, lambda y: y.lower().endswith(val.lower()))
    elif last == 'gt':
        return iff_not_none(value, lambda y: y > val)
    elif last == 'gte':
        return iff_not_none(value, lambda y: y >= val)
    elif last == 'lt':
        return iff_not_none(value, lambda y: y < val)
    elif last == 'lte':
        return iff_not_none(value, lambda y: y <= val)
    elif last == 'regex':
        # some field types provide a dedicated string form for regex matching;
        # fall back to the raw value when they don't
        v = getattr(value, '_get_string_for_regex_filter', lambda *args: value)()
        return iff_not_none(v, lambda y: re.search(val, y) is not None)
    elif last == 'size':
        return iff_not_none(value, lambda y: len(y) == val)
    elif last == 'exists':
        if not isinstance(val, bool):
            raise ValueError(
                '$exists operator can only accept True/False as value for comparison'
            )
        if val:
            return field_exists
        else:
            return not field_exists
    else:
        raise ValueError(
            f'The given compare operator "{last}" (derived from "{key}")'
            f' is not supported'
        )
6,707 | import re
from functools import partial
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `iff` function. Write a Python function `def iff(precond: Callable, val: Any, f: Callable) -> bool` to solve the following problem:
If and only if the precond is True Shortcut function for precond(val) and f(val). It is mainly used to create partial functions for commonly required preconditions :param precond : (function) represents the precondition :param val : (mixed) value to which the functions are applied :param f : (function) the actual function
Here is the function:
def iff(precond: Callable, val: Any, f: Callable) -> bool:
    """Apply ``f`` to ``val`` only when ``precond(val)`` holds.

    Shorthand for ``precond(val) and f(val)``; mainly used to build
    partial functions around commonly required preconditions.

    :param precond: callable representing the precondition
    :param val: value both callables are applied to
    :param f: the actual function
    """
    if precond(val):
        return f(val)
    return False
6,708 | from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
# Maps Mongo-style logical operator tokens to the internal representation:
# '$and'/'$or' become the LookupNode ``op`` string, while '$not' maps to the
# boolean ``negate`` flag (see how _parse_lookups consumes this table).
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
    '$and': 'and',
    '$or': 'or',
    '$not': True,
}
# Union of every supported field-level operator table.
# NOTE(review): COMPARISON_OPERATORS, ARRAY_OPERATORS, REGEX_OPERATORS and
# MEMBERSHIP_OPERATORS are not defined in this excerpt — presumably defined
# or imported earlier in the module; confirm before relying on this mapping.
SUPPORTED_OPERATORS = {
    **COMPARISON_OPERATORS,
    **ARRAY_OPERATORS,
    **REGEX_OPERATORS,
    **MEMBERSHIP_OPERATORS,
}
class LookupTreeElem(object):
    """Base class for a child in the lookup expression tree.

    Subclasses implement :meth:`evaluate`; combining two elements with
    ``|`` or ``&`` produces a :class:`LookupNode` with the matching op.
    """

    def __init__(self):
        # When True, subclasses invert the result of ``evaluate``.
        self.negate = False

    def evaluate(self, item: Any) -> bool:
        raise NotImplementedError

    def _combine(self, other: 'LookupTreeElem', op: str) -> 'LookupNode':
        # Shared builder behind the ``|`` and ``&`` operators.
        node = LookupNode()
        node.op = op
        node.add_child(self)
        node.add_child(other)
        return node

    def __or__(self, other: 'LookupTreeElem'):
        return self._combine(other, 'or')

    def __and__(self, other: 'LookupTreeElem'):
        return self._combine(other, 'and')
class LookupNode(LookupTreeElem):
    """A node (element having children) in the lookup expression tree
    Typically it's any object composed of two ``Q`` objects eg::
        >>> Q(language.neq='Ruby') | Q(framework.startswith='S')
        >>> ~Q(language.exact='PHP')
    """

    def __init__(self, op: Union[str, bool] = 'and', negate: bool = False):
        """
        :param op: how child results are combined ('and' or 'or')
        :param negate: when True, the combined result is inverted
        """
        super(LookupNode, self).__init__()
        # Children can be nodes or leaves, so type them by the common base
        # (the previous List[LookupNode] annotation was too narrow).
        self.children: List[LookupTreeElem] = []
        self.op = op
        self.negate = negate

    def add_child(self, child) -> None:
        self.children.append(child)

    def evaluate(self, doc: Any) -> bool:
        """Evaluates the expression represented by the object for the document
        :param doc: the document to match
        :return: returns true if lookup passed
        """
        results = map(lambda x: x.evaluate(doc), self.children)
        result = any(results) if self.op == 'or' else all(results)
        return not result if self.negate else result

    def __invert__(self):
        # Bug fix: the inverted copy must keep the original combining op.
        # Previously a default ('and') node was created here, so ``~(A | B)``
        # evaluated as ``not (A and B)`` instead of ``not (A or B)``.
        newnode = LookupNode(op=self.op)
        for c in self.children:
            newnode.add_child(c)
        newnode.negate = not self.negate
        return newnode

    def __repr__(self):
        return f'{self.op}: [{self.children}]'
class LookupLeaf(LookupTreeElem):
    """A leaf in the lookup expression tree: a bundle of field lookups."""

    def __init__(self, **kwargs):
        super(LookupLeaf, self).__init__()
        # Mapping of ``field.op`` keys to the values matched against them.
        self.lookups = kwargs

    def evaluate(self, doc: Any) -> bool:
        """Evaluates the expression represented by the object for the document
        :param doc: the document to match
        :return: returns true if lookup passed
        """
        matched = True
        for key, val in self.lookups.items():
            # Short-circuit on the first failing lookup, like all() would.
            if not lookup(key, val, doc):
                matched = False
                break
        return not matched if self.negate else matched

    def __invert__(self):
        inverted = LookupLeaf(**self.lookups)
        inverted.negate = not self.negate
        return inverted

    def __repr__(self):
        return f'{self.lookups}'


Q = LookupLeaf
def _parse_lookups(
    data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
    """Recursively convert a Mongo-style filter spec into a lookup tree.

    :param data: filter spec, e.g. ``{'text': {'$eq': 'hi'}}`` or a list of
        such dicts (as produced under ``$and`` / ``$or``)
    :param root_node: tree built so far; new nodes are attached under it
    :return: the root of the resulting expression tree, or None for empty input

    NOTE(review): the mutable default ``data={}`` is shared across calls but
    never mutated here (only iterated), so it is harmless — still worth a
    B006-style cleanup at some point.
    """
    if isinstance(data, dict):
        for key, value in data.items():
            node: Optional[LookupTreeElem] = None
            # A leaf cannot take children; wrap it in a node before attaching
            # further sub-expressions under the same root.
            if isinstance(root_node, LookupLeaf):
                root = LookupNode()
                root.add_child(root_node)
                root_node = root
            if key in LOGICAL_OPERATORS:
                if key == '$not':
                    node = LookupNode(negate=True)
                else:
                    node = LookupNode(op=LOGICAL_OPERATORS[key])
                node = _parse_lookups(value, root_node=node)
            elif key.startswith('$'):
                # Any other '$'-prefixed key is an unknown top-level operator.
                raise ValueError(
                    f'The operator {key} is not supported yet,'
                    f' please double check the given filters!'
                )
            else:
                # ``key`` is a field name; ``value`` must be an operator dict.
                if not value or not isinstance(value, dict):
                    raise ValueError(
                        '''Not a valid query. It should follow the format:
                    { <field1>: { <operator1>: <value1> }, ... }
                    '''
                    )
                items = list(value.items())
                if len(items) == 1:
                    op, val = items[0]
                    if op in LOGICAL_OPERATORS:
                        if op == '$not':
                            node = LookupNode(negate=True)
                        else:
                            node = LookupNode(op=LOGICAL_OPERATORS[op])
                        node = _parse_lookups(val, root_node=node)
                    elif op in SUPPORTED_OPERATORS:
                        # Leaf lookup key is '<field>.<internal op name>'.
                        node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
                    else:
                        raise ValueError(
                            f'The operator {op} is not supported yet, '
                            f'please double check the given filters!'
                        )
                else:
                    # Multiple operators on one field are implicitly AND-ed
                    # (LookupNode defaults to op='and').
                    node = LookupNode()
                    for op, val in items:
                        _node = _parse_lookups({key: {op: val}})
                        node.add_child(_node)
            if root_node and node:
                if isinstance(root_node, LookupNode):
                    root_node.add_child(node)
            elif node:
                root_node = node
    elif isinstance(data, list):
        # A list of sub-filters (children of $and / $or): parse each and
        # attach them all under the current root.
        for d in data:
            node = _parse_lookups(d)
            if root_node and node:
                if isinstance(root_node, LookupNode):
                    root_node.add_child(node)
            elif node:
                root_node = node
    else:
        raise ValueError(f'The query is illegal: `{data}`')
    return root_node
6,709 | from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
def is_type_tensor(type_: Any) -> bool:
    """Return True if type is a type Tensor or an Optional Tensor type."""
    # Local import — presumably deferred to avoid a circular import; confirm.
    from docarray.typing.tensor.abstract_tensor import AbstractTensor
    # Must be an actual class before the subclass check is attempted.
    return isinstance(type_, type) and safe_issubclass(type_, AbstractTensor)
def safe_issubclass(x: type, a_tuple: type) -> bool:
    """
    This is a modified version of the built-in 'issubclass' function to support non-class input.
    Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
    :param x: A class 'x'
    :param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
        Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
    """
    # Generic aliases (list[int], Union[...]), TypeVars and forward references
    # are not classes, so issubclass() would raise TypeError on them.
    # (The original condition tested is_typevar(x) twice; the duplicate check
    # was redundant and has been removed.)
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) is ForwardRef)  # identity check; classes compare by identity here
    ):
        return False
    return issubclass(x, a_tuple)
The provided code snippet includes necessary dependencies for implementing the `is_tensor_union` function. Write a Python function `def is_tensor_union(type_: Any) -> bool` to solve the following problem:
Return True if type is a Union of type Tensors.
Here is the function:
def is_tensor_union(type_: Any) -> bool:
    """Return True if type is a Union of type Tensors."""
    is_union = is_union_type(type_)
    # NOTE(review): is_union_type is expected to return a bool, so this None
    # guard looks defensive/dead — confirm before removing it.
    if is_union is None:
        return False
    else:
        # Every member of the Union must be a tensor type or NoneType, so
        # Optional[Tensor]-style unions also qualify.
        return is_union and all(
            (is_type_tensor(t) or safe_issubclass(t, type(None)))
            for t in get_args(type_)
        )
6,710 | from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
The provided code snippet includes necessary dependencies for implementing the `change_cls_name` function. Write a Python function `def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None` to solve the following problem:
Change the name of a class. :param cls: the class to change the name of :param new_name: the new name :param scope: the scope in which the class is defined
Here is the function:
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
    """Change the name of a class.

    :param cls: the class to change the name of
    :param new_name: the new name
    :param scope: the scope in which the class is defined
    """
    if scope:
        # NOTE(review): an empty dict is falsy, so registration is skipped for
        # an empty scope — confirm whether `scope is not None` was intended.
        scope[new_name] = cls
    # Preserve any enclosing qualname prefix (e.g. 'Outer.') while renaming.
    prefix_len = len(cls.__qualname__) - len(cls.__name__)
    cls.__qualname__ = cls.__qualname__[:prefix_len] + new_name
    cls.__name__ = new_name
6,711 | from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, create_model
from pydantic.fields import FieldInfo
from docarray.base_doc.doc import BaseDocWithoutId
from docarray import BaseDoc, DocList
from docarray.typing import AnyTensor
from docarray.utils._internal._typing import safe_issubclass
from docarray.utils._internal.pydantic import is_pydantic_v2
def safe_issubclass(x: type, a_tuple: type) -> bool:
    """
    This is a modified version of the built-in 'issubclass' function to support non-class input.
    Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
    :param x: A class 'x'
    :param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
        Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
    """
    # NOTE(review): get_origin / is_typevar / ForwardRef are not imported in
    # this excerpt — confirm the enclosing module provides them. Also note
    # is_typevar(x) appears twice below; the second occurrence is redundant.
    if (
        (get_origin(x) in (list, tuple, dict, set, Union))
        or is_typevar(x)
        or (type(x) == ForwardRef)
        or is_typevar(x)
    ):
        return False
    return issubclass(x, a_tuple)
# True when the installed pydantic major version is 2.x.
# NOTE(review): ``pydantic`` itself is not imported in this excerpt — confirm
# the surrounding module imports it before this line runs.
is_pydantic_v2 = pydantic.__version__.startswith('2.')
if not is_pydantic_v2:
    from pydantic.validators import bytes_validator
else:
    # On pydantic v2 the v1 validators live under the ``pydantic.v1`` shim.
    from pydantic.v1.validators import bytes_validator
The provided code snippet includes necessary dependencies for implementing the `create_pure_python_type_model` function. Write a Python function `def create_pure_python_type_model(model: BaseModel) -> BaseDoc` to solve the following problem:
Take a Pydantic model and cast DocList fields into List fields. This may be necessary due to limitations in Pydantic: https://github.com/docarray/docarray/issues/1521 https://github.com/pydantic/pydantic/issues/1457 --- ```python from docarray import BaseDoc class MyDoc(BaseDoc): tensor: Optional[AnyTensor] url: ImageUrl title: str texts: DocList[TextDoc] MyDocCorrected = create_new_model_cast_doclist_to_list(CustomDoc) ``` --- :param model: The input model :return: A new subclass of BaseDoc, where every DocList type in the schema is replaced by List.
Here is the function:
def create_pure_python_type_model(model: BaseModel) -> BaseDoc:
    """
    Take a Pydantic model and cast DocList fields into List fields.
    This may be necessary due to limitations in Pydantic:
    https://github.com/docarray/docarray/issues/1521
    https://github.com/pydantic/pydantic/issues/1457
    ---
    ```python
    from docarray import BaseDoc
    class MyDoc(BaseDoc):
        tensor: Optional[AnyTensor]
        url: ImageUrl
        title: str
        texts: DocList[TextDoc]
    MyDocCorrected = create_new_model_cast_doclist_to_list(CustomDoc)
    ```
    ---
    :param model: The input model
    :return: A new subclass of BaseDoc, where every DocList type in the schema is replaced by List.
    """
    # Maps field name -> (type, FieldInfo) tuples for pydantic's create_model.
    fields: Dict[str, Any] = {}
    import copy
    # Deep-copied so the input model's field metadata is never mutated.
    fields_copy = copy.deepcopy(model.__fields__)
    annotations_copy = copy.deepcopy(model.__annotations__)
    for field_name, field in annotations_copy.items():
        # Skip annotations that have no matching field entry — presumably
        # ClassVar/private attributes; confirm against pydantic's behavior.
        if field_name not in fields_copy:
            continue
        if is_pydantic_v2:
            # Pydantic v2 stores FieldInfo objects directly in __fields__ ...
            field_info = fields_copy[field_name]
        else:
            # ... while v1 nests them under ModelField.field_info.
            field_info = fields_copy[field_name].field_info
        try:
            if safe_issubclass(field, DocList):
                # Recurse so nested DocList element types are converted too.
                t: Any = field.doc_type
                t_aux = create_pure_python_type_model(t)
                fields[field_name] = (List[t_aux], field_info)
            else:
                fields[field_name] = (field, field_info)
        except TypeError:
            # Non-class annotations can still trip the subclass check;
            # keep such fields unchanged.
            fields[field_name] = (field, field_info)
    return create_model(model.__name__, __base__=model, __doc__=model.__doc__, **fields)
6,712 | import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
    """Build the submitit CLI on top of the detection arg parser and parse argv."""
    base_parser = detection.get_args_parser()
    # Inherit every detection flag, then add the cluster-scheduling knobs.
    parser = argparse.ArgumentParser("Submitit for detection", parents=[base_parser])
    cluster_flags = [
        ("--ngpus", 8, int, "Number of gpus to request on each node"),
        ("--nodes", 4, int, "Number of nodes to request"),
        ("--timeout", 60, int, "Duration of the job"),
        ("--job_dir", "", str, "Job dir. Leave empty for automatic."),
    ]
    for flag, default, flag_type, help_text in cluster_flags:
        parser.add_argument(flag, default=default, type=flag_type, help=help_text)
    return parser.parse_args()
6,713 | import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def get_shared_folder() -> Path:
    """Return (creating it if needed) the per-user experiments folder.

    Raises RuntimeError when the shared /checkpoint mount is absent.
    """
    user = os.getenv("USER")
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    shared = Path(f"/checkpoint/{user}/experiments")
    shared.mkdir(exist_ok=True)
    return shared
def get_init_file():
    """Return a fresh (non-existent) init-file path in the shared folder."""
    # The init file must not exist, but its parent dir must exist.
    os.makedirs(str(get_shared_folder()), exist_ok=True)
    candidate = get_shared_folder() / f"{uuid.uuid4().hex}_init"
    if candidate.exists():
        # Extremely unlikely uuid collision: clear the stale file.
        os.remove(str(candidate))
    return candidate
6,714 | import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
    """Build the command-line argument parser for DETR training/evaluation.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False`` so
    it can also be used as a parent parser (e.g. by the submitit launcher).
    Sections: optimization, model (backbone/transformer/segmentation),
    losses and matcher costs, dataset paths, runtime and distributed setup.
    """
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_backbone', default=1e-5, type=float)
    parser.add_argument('--batch_size', default=2, type=int)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--lr_drop', default=200, type=int)
    parser.add_argument('--clip_max_norm', default=0.1, type=float,
                        help='gradient clipping max norm')
    # Model parameters
    parser.add_argument('--frozen_weights', type=str, default=None,
                        help="Path to the pretrained model. If set, only the mask head will be trained")
    # * Backbone
    parser.add_argument('--backbone', default='resnet50', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--dilation', action='store_true',
                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                        help="Type of positional embedding to use on top of the image features")
    # * Transformer
    parser.add_argument('--enc_layers', default=6, type=int,
                        help="Number of encoding layers in the transformer")
    parser.add_argument('--dec_layers', default=6, type=int,
                        help="Number of decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=2048, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.1, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=100, type=int,
                        help="Number of query slots")
    parser.add_argument('--pre_norm', action='store_true')
    # * Segmentation
    parser.add_argument('--masks', action='store_true',
                        help="Train segmentation head if the flag is provided")
    # Loss
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
                        help="Disables auxiliary decoding losses (loss at each layer)")
    # * Matcher
    parser.add_argument('--set_cost_class', default=1, type=float,
                        help="Class coefficient in the matching cost")
    parser.add_argument('--set_cost_bbox', default=5, type=float,
                        help="L1 box coefficient in the matching cost")
    parser.add_argument('--set_cost_giou', default=2, type=float,
                        help="giou box coefficient in the matching cost")
    # * Loss coefficients
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--bbox_loss_coef', default=5, type=float)
    parser.add_argument('--giou_loss_coef', default=2, type=float)
    parser.add_argument('--eos_coef', default=0.1, type=float,
                        help="Relative classification weight of the no-object class")
    # dataset parameters
    parser.add_argument('--dataset_file', default='coco')
    parser.add_argument('--coco_path', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=2, type=int)
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
6,715 | import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen(cfg, is_train)` to solve the following problem:
Create a list of :class:`TransformGen` from config. Returns: list[TransformGen]
Here is the function:
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Returns:
        list[TransformGen]
    """
    input_cfg = cfg.INPUT
    if is_train:
        min_size = input_cfg.MIN_SIZE_TRAIN
        max_size = input_cfg.MAX_SIZE_TRAIN
        sample_style = input_cfg.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size = input_cfg.MIN_SIZE_TEST
        max_size = input_cfg.MAX_SIZE_TEST
        sample_style = "choice"
    if sample_style == "range":
        # A range needs exactly a (low, high) pair of min sizes.
        assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
    logger = logging.getLogger(__name__)
    tfm_gens = []
    if is_train:
        # Random horizontal flip is applied only during training.
        tfm_gens.append(T.RandomFlip())
    tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    if is_train:
        logger.info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
6,716 | from detectron2.config import CfgNode as CN
The provided code snippet includes necessary dependencies for implementing the `add_detr_config` function. Write a Python function `def add_detr_config(cfg)` to solve the following problem:
Add config for DETR.
Here is the function:
def add_detr_config(cfg):
    """
    Add config for DETR.

    Registers the MODEL.DETR namespace (plus DETR-specific solver options)
    on the given CfgNode; values match the defaults used elsewhere in this
    codebase (6 enc/dec layers, 256-dim embeddings, 100 queries, ...).
    """
    cfg.MODEL.DETR = CN()
    cfg.MODEL.DETR.NUM_CLASSES = 80
    # For Segmentation
    cfg.MODEL.DETR.FROZEN_WEIGHTS = ''
    # LOSS
    cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
    cfg.MODEL.DETR.L1_WEIGHT = 5.0
    cfg.MODEL.DETR.DEEP_SUPERVISION = True
    cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
    # TRANSFORMER
    cfg.MODEL.DETR.NHEADS = 8
    cfg.MODEL.DETR.DROPOUT = 0.1
    cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
    cfg.MODEL.DETR.ENC_LAYERS = 6
    cfg.MODEL.DETR.DEC_LAYERS = 6
    cfg.MODEL.DETR.PRE_NORM = False
    cfg.MODEL.DETR.HIDDEN_DIM = 256
    cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100
    # Solver: DETR trains with AdamW and a reduced backbone learning rate.
    cfg.SOLVER.OPTIMIZER = "ADAMW"
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
6,717 | import os
import sys
import itertools
import time
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from d2.detr import DetrDatasetMapper, add_detr_config
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
from detectron2.solver.build import maybe_add_gradient_clipping
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(args)` to solve the following problem:
Create configs and perform basic setups.
Here is the function:
def setup(args):
    """
    Create configs and perform basic setups.

    Merges the DETR-specific options, the config file and any command-line
    overrides into one CfgNode, then freezes it and runs default_setup.
    """
    cfg = get_cfg()
    add_detr_config(cfg)
    cfg.merge_from_file(args.config_file)
    # Command-line opts take precedence over the file.
    cfg.merge_from_list(args.opts)
    # Freeze so downstream code cannot mutate the config accidentally.
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
6,718 | import json
import argparse
import numpy as np
import torch
def parse_args():
    """Parse the command line for the D2 model converter."""
    parser = argparse.ArgumentParser("D2 model converter")
    for flag, help_text in (
        ("--source_model", "Path or url to the DETR model to convert"),
        ("--output_model", "Path where to save the converted model"),
    ):
        parser.add_argument(flag, default="", type=str, help=help_text)
    return parser.parse_args()
6,719 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    # Assemble a DETR model (optionally wrapped with a segmentation head)
    # around the named backbone (e.g. "resnet50").
    hidden_dim = 256  # transformer embedding size used throughout DETR
    backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    # Joiner pairs backbone features with their positional encodings.
    backbone_with_pos_enc = Joiner(backbone, pos_enc)
    backbone_with_pos_enc.num_channels = backbone.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
    if mask:
        # Wrap with the segmentation head.
        return DETRsegm(detr)
    return detr
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        # Per-query class probabilities; the last column is the no-object
        # class and is excluded when picking the best class per query.
        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        # One result dict per image in the batch.
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
The provided code snippet includes necessary dependencies for implementing the `detr_resnet50` function. Write a Python function `def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False)` to solve the following problem:
DETR R50 with 6 encoder and 6 decoder layers. Achieves 42/62.4 AP/AP50 on COCO val5k.
Here is the function:
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR R50 with 6 encoder and 6 decoder layers.
    Achieves 42/62.4 AP/AP50 on COCO val5k.

    :param pretrained: if True, download and load the released COCO checkpoint
    :param num_classes: number of output classes (91 covers COCO category ids)
    :param return_postprocessor: if True, also return a ``PostProcess`` instance
    """
    model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
6,720 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    # Assemble a DETR model (optionally wrapped with a segmentation head)
    # around the named backbone (e.g. "resnet50").
    hidden_dim = 256  # transformer embedding size used throughout DETR
    backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    # Joiner pairs backbone features with their positional encodings.
    backbone_with_pos_enc = Joiner(backbone, pos_enc)
    backbone_with_pos_enc.num_channels = backbone.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
    if mask:
        # Wrap with the segmentation head.
        return DETRsegm(detr)
    return detr
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        # Per-query class probabilities; the last column is the no-object
        # class and is excluded when picking the best class per query.
        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        # One result dict per image in the batch.
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
The provided code snippet includes necessary dependencies for implementing the `detr_resnet50_dc5` function. Write a Python function `def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False)` to solve the following problem:
DETR-DC5 R50 with 6 encoder and 6 decoder layers. The last block of ResNet-50 has dilation to increase output resolution. Achieves 43.3/63.1 AP/AP50 on COCO val5k.
Here is the function:
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR-DC5 R50 with 6 encoder and 6 decoder layers.
    The last block of ResNet-50 has dilation to increase
    output resolution.
    Achieves 43.3/63.1 AP/AP50 on COCO val5k.

    :param pretrained: if True, download and load the released COCO checkpoint
    :param num_classes: number of output classes (91 covers COCO category ids)
    :param return_postprocessor: if True, also return a ``PostProcess`` instance
    """
    model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
6,721 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    # Assemble a DETR model (optionally wrapped with a segmentation head)
    # around the named backbone (e.g. "resnet101").
    hidden_dim = 256  # transformer embedding size used throughout DETR
    backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    # Joiner pairs backbone features with their positional encodings.
    backbone_with_pos_enc = Joiner(backbone, pos_enc)
    backbone_with_pos_enc.num_channels = backbone.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
    if mask:
        # Wrap with the segmentation head.
        return DETRsegm(detr)
    return detr
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        # Per-query class probabilities; the last column is the no-object
        # class and is excluded when picking the best class per query.
        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        # One result dict per image in the batch.
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
The provided code snippet includes necessary dependencies for implementing the `detr_resnet101` function. Write a Python function `def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False)` to solve the following problem:
DETR-DC5 R101 with 6 encoder and 6 decoder layers. Achieves 43.5/63.8 AP/AP50 on COCO val5k.
Here is the function:
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR R101 with 6 encoder and 6 decoder layers.
    Achieves 43.5/63.8 AP/AP50 on COCO val5k.

    (The previous docstring said "DETR-DC5 R101", but ``dilation=False``
    below, so this builds the plain R101 variant.)

    :param pretrained: if True, download and load the released COCO checkpoint
    :param num_classes: number of output classes (91 covers COCO category ids)
    :param return_postprocessor: if True, also return a ``PostProcess`` instance
    """
    model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
6,722 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    # Assemble a DETR model (optionally wrapped with a segmentation head)
    # around the named backbone (e.g. "resnet101").
    hidden_dim = 256  # transformer embedding size used throughout DETR
    backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    # Joiner pairs backbone features with their positional encodings.
    backbone_with_pos_enc = Joiner(backbone, pos_enc)
    backbone_with_pos_enc.num_channels = backbone.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
    if mask:
        # Wrap with the segmentation head.
        return DETRsegm(detr)
    return detr
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        # Per-query class probabilities; the last column is the no-object
        # class and is excluded when picking the best class per query.
        prob = F.softmax(out_logits, -1)
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        # One result dict per image in the batch.
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
The provided code snippet includes necessary dependencies for implementing the `detr_resnet101_dc5` function. Write a Python function `def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False)` to solve the following problem:
DETR-DC5 R101 with 6 encoder and 6 decoder layers. The last block of ResNet-101 has dilation to increase output resolution. Achieves 44.9/64.7 AP/AP50 on COCO val5k.
Here is the function:
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
    """
    DETR-DC5 R101 with 6 encoder and 6 decoder layers.
    The last block of ResNet-101 has dilation to increase
    output resolution.
    Achieves 44.9/64.7 AP/AP50 on COCO val5k.
    """
    model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
    if pretrained:
        # Weights are fetched once and cached by torch.hub; hash-checked download.
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(state["model"])
    return (model, PostProcess()) if return_postprocessor else model
6,723 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Assemble a DETR model (optionally with segmentation head) on the given backbone."""
    # All DETR variants share a 256-d transformer width.
    hidden_dim = 256
    cnn = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    position_encoding = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    joined_backbone = Joiner(cnn, position_encoding)
    joined_backbone.num_channels = cnn.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    model = DETR(joined_backbone, transformer, num_classes=num_classes, num_queries=100)
    # Wrap with the panoptic/instance segmentation head when masks are requested.
    return DETRsegm(model) if mask else model
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API (a PNG-encoded segment-id image plus a list of segment descriptors)."""

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
            is_thing_map: dict whose keys are the class ids, and the values a boolean indicating whether
                the class is a thing (True) or a stuff (False) class
            threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            # Normalize sizes given either as plain tuples or as tensors.
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            # (the last logit channel is the "no object" class)
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            # Upsample the kept mask logits to the (pre-padding) processed image size.
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    # Each pixel is assigned to the mask with the highest score there.
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # Resize the id map to the requested output size; ids are packed
                # into RGB so NEAREST resampling keeps them exact.
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                # Pixel count of each segment in the final (resized) image.
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter out empty (area <= 4 px) masks, repeating until none remain
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                # No segment survived: emit a single dummy segment so the output is well-formed.
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
The provided code snippet includes necessary dependencies for implementing the `detr_resnet50_panoptic` function. Write a Python function `def detr_resnet50_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False )` to solve the following problem:
DETR R50 with 6 encoder and 6 decoder layers. Achieves 43.4 PQ on COCO val5k. threshold is the minimum confidence required for keeping segments in the prediction
Here is the function:
def detr_resnet50_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR R50 with 6 encoder and 6 decoder layers.
    Achieves 43.4 PQ on COCO val5k.
    threshold is the minimum confidence required for keeping segments in the prediction
    """
    model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
    # COCO panoptic: category ids up to 90 are "things", the rest are "stuff".
    is_thing_map = {cat_id: cat_id <= 90 for cat_id in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(state["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
6,724 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Assemble a DETR model (optionally with segmentation head) on the given backbone."""
    # All DETR variants share a 256-d transformer width.
    hidden_dim = 256
    cnn = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    position_encoding = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    joined_backbone = Joiner(cnn, position_encoding)
    joined_backbone.num_channels = cnn.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    model = DETR(joined_backbone, transformer, num_classes=num_classes, num_queries=100)
    # Wrap with the panoptic/instance segmentation head when masks are requested.
    return DETRsegm(model) if mask else model
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API (a PNG-encoded segment-id image plus a list of segment descriptors)."""

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
            is_thing_map: dict whose keys are the class ids, and the values a boolean indicating whether
                the class is a thing (True) or a stuff (False) class
            threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            # Normalize sizes given either as plain tuples or as tensors.
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            # (the last logit channel is the "no object" class)
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            # Upsample the kept mask logits to the (pre-padding) processed image size.
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    # Each pixel is assigned to the mask with the highest score there.
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # Resize the id map to the requested output size; ids are packed
                # into RGB so NEAREST resampling keeps them exact.
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                # Pixel count of each segment in the final (resized) image.
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter out empty (area <= 4 px) masks, repeating until none remain
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                # No segment survived: emit a single dummy segment so the output is well-formed.
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
The provided code snippet includes necessary dependencies for implementing the `detr_resnet50_dc5_panoptic` function. Write a Python function `def detr_resnet50_dc5_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False )` to solve the following problem:
DETR-DC5 R50 with 6 encoder and 6 decoder layers. The last block of ResNet-50 has dilation to increase output resolution. Achieves 44.6 on COCO val5k. threshold is the minimum confidence required for keeping segments in the prediction
Here is the function:
def detr_resnet50_dc5_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR-DC5 R50 with 6 encoder and 6 decoder layers.
    The last block of ResNet-50 has dilation to increase
    output resolution.
    Achieves 44.6 on COCO val5k.
    threshold is the minimum confidence required for keeping segments in the prediction
    """
    model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
    # COCO panoptic: category ids up to 90 are "things", the rest are "stuff".
    is_thing_map = {cat_id: cat_id <= 90 for cat_id in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(state["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
6,725 | import torch
from models.backbone import Backbone, Joiner
from models.detr import DETR, PostProcess
from models.position_encoding import PositionEmbeddingSine
from models.segmentation import DETRsegm, PostProcessPanoptic
from models.transformer import Transformer
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
    """Assemble a DETR model (optionally with segmentation head) on the given backbone."""
    # All DETR variants share a 256-d transformer width.
    hidden_dim = 256
    cnn = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
    position_encoding = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
    joined_backbone = Joiner(cnn, position_encoding)
    joined_backbone.num_channels = cnn.num_channels
    transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
    model = DETR(joined_backbone, transformer, num_classes=num_classes, num_queries=100)
    # Wrap with the panoptic/instance segmentation head when masks are requested.
    return DETRsegm(model) if mask else model
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API (a PNG-encoded segment-id image plus a list of segment descriptors)."""

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
            is_thing_map: dict whose keys are the class ids, and the values a boolean indicating whether
                the class is a thing (True) or a stuff (False) class
            threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            # Normalize sizes given either as plain tuples or as tensors.
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            # (the last logit channel is the "no object" class)
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            # Upsample the kept mask logits to the (pre-padding) processed image size.
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    # Each pixel is assigned to the mask with the highest score there.
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # Resize the id map to the requested output size; ids are packed
                # into RGB so NEAREST resampling keeps them exact.
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                # Pixel count of each segment in the final (resized) image.
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter out empty (area <= 4 px) masks, repeating until none remain
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                # No segment survived: emit a single dummy segment so the output is well-formed.
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
The provided code snippet includes necessary dependencies for implementing the `detr_resnet101_panoptic` function. Write a Python function `def detr_resnet101_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False )` to solve the following problem:
DETR R101 with 6 encoder and 6 decoder layers. Achieves 45.1 PQ on COCO val5k. threshold is the minimum confidence required for keeping segments in the prediction
Here is the function:
def detr_resnet101_panoptic(
    pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
    """
    DETR R101 with 6 encoder and 6 decoder layers (no dilation in the last block).
    Achieves 45.1 PQ on COCO val5k.
    threshold is the minimum confidence required for keeping segments in the prediction
    """
    model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
    # COCO panoptic: category ids up to 90 are "things", the rest are "stuff".
    is_thing_map = {cat_id: cat_id <= 90 for cat_id in range(250)}
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(state["model"])
    if return_postprocessor:
        return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
    return model
6,726 | import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
def convert_to_xywh(boxes):
    """Convert corner-format boxes [x0, y0, x1, y1] to COCO's [x, y, width, height]."""
    x0, y0, x1, y1 = boxes.unbind(1)
    return torch.stack((x0, y0, x1 - x0, y1 - y0), dim=1)
6,727 | import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
def merge(img_ids, eval_imgs):
    """Gather per-process COCO evaluation results and deduplicate image ids.

    Returns a sorted array of unique image ids and the corresponding slice of
    the concatenated per-image evaluation array.
    """
    gathered_ids = all_gather(img_ids)
    gathered_evals = all_gather(eval_imgs)

    flat_ids = []
    for ids in gathered_ids:
        flat_ids.extend(ids)
    flat_evals = [chunk for chunk in gathered_evals]

    flat_ids = np.array(flat_ids)
    flat_evals = np.concatenate(flat_evals, 2)

    # keep only unique (and in sorted order) images
    unique_ids, first_idx = np.unique(flat_ids, return_index=True)
    return unique_ids, flat_evals[..., first_idx]
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    """Install the merged, deduplicated evaluation state into ``coco_eval`` in place."""
    merged_ids, merged_evals = merge(img_ids, eval_imgs)
    coco_eval.evalImgs = list(merged_evals.flatten())
    coco_eval.params.imgIds = list(merged_ids)
    # Snapshot the params so COCOeval.accumulate() sees what was evaluated.
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
6,728 | import os
import contextlib
import copy
import numpy as np
import torch
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from util.misc import all_gather
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(self)` to solve the following problem:
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs :return: None
Here is the function:
def evaluate(self):
    '''
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs.

    Unlike upstream pycocotools, this patched version also returns the image ids
    and the per-image results reshaped to (num categories, num area ranges, num images).
    :return: (p.imgIds, evalImgs)
    '''
    # tic = time.time()
    # print('Running per image evaluation...')
    p = self.params
    # add backward compatibility if useSegm is specified in params
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    # print('Evaluate annotation type *{}*'.format(p.iouType))
    # Deduplicate and sort the image/category ids before evaluation.
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p
    self._prepare()
    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]
    # Pick the IoU routine matching the annotation type (OKS for keypoints).
    if p.iouType == 'segm' or p.iouType == 'bbox':
        computeIoU = self.computeIoU
    elif p.iouType == 'keypoints':
        computeIoU = self.computeOks
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds}
    evaluateImg = self.evaluateImg
    maxDet = p.maxDets[-1]
    evalImgs = [
        evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside:
    # reshape so results can be merged across distributed processes.
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    # toc = time.time()
    # print('DONE (t={:0.2f}s).'.format(toc-tic))
    return p.imgIds, evalImgs
6,729 | import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def crop(image, target, region):
    """Crop ``image`` to ``region`` = (i, j, h, w) and update ``target`` annotations.

    Boxes are shifted into crop coordinates and clipped, masks are sliced, and
    instances whose box/mask ends up empty are dropped from every per-instance field.
    Returns the cropped image and a new target dict (the input dict is not mutated).
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    # Per-instance fields that must stay aligned when instances are filtered out below.
    fields = ["labels", "area", "iscrowd"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Shift boxes into crop coordinates, then clip corners to [0, w] x [0, h].
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        # Recompute each instance's area from the clipped box.
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep]
    return cropped_image, target
6,730 | import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def hflip(image, target):
    """Horizontally flip the image together with its box/mask annotations."""
    flipped = F.hflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # Mirror the x-coordinates and swap x0/x1 so boxes stay in [x0, y0, x1, y1] order.
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = boxes
    if "masks" in target:
        # Masks flip along their last (width) axis.
        target['masks'] = target['masks'].flip(-1)
    return flipped, target
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.