language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 226242,
"end": 228676
}
|
class ____(sgqlc.types.Interface):
    """sgqlc binding for a GraphQL interface describing an authored,
    editable comment: its author, body in three renderings (raw
    markdown, HTML, plain text), timestamps, and edit history.

    Auto-generated from ``graphql_schema``; the snake_case attribute
    names below map to the camelCase ``graphql_name`` values, so this
    class should be regenerated rather than edited by hand.
    """

    __schema__ = graphql_schema
    # Attributes selectable on this type, kept in schema order.
    __field_names__ = (
        "author",
        "author_association",
        "body",
        "body_html",
        "body_text",
        "created_at",
        "created_via_email",
        "editor",
        "id",
        "includes_created_edit",
        "last_edited_at",
        "published_at",
        "updated_at",
        "user_content_edits",
        "viewer_did_author",
    )
    # Fields not wrapped in non_null() are nullable per the schema
    # (e.g. author/editor may be absent -- presumably deleted accounts;
    # confirm against the upstream GraphQL schema).
    author = sgqlc.types.Field(Actor, graphql_name="author")
    author_association = sgqlc.types.Field(
        sgqlc.types.non_null(CommentAuthorAssociation), graphql_name="authorAssociation"
    )
    body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
    body_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bodyHTML")
    body_text = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="bodyText")
    created_at = sgqlc.types.Field(
        sgqlc.types.non_null(DateTime), graphql_name="createdAt"
    )
    created_via_email = sgqlc.types.Field(
        sgqlc.types.non_null(Boolean), graphql_name="createdViaEmail"
    )
    editor = sgqlc.types.Field(Actor, graphql_name="editor")
    id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
    includes_created_edit = sgqlc.types.Field(
        sgqlc.types.non_null(Boolean), graphql_name="includesCreatedEdit"
    )
    # Nullable timestamps: unset until the comment is edited/published.
    last_edited_at = sgqlc.types.Field(DateTime, graphql_name="lastEditedAt")
    published_at = sgqlc.types.Field(DateTime, graphql_name="publishedAt")
    updated_at = sgqlc.types.Field(
        sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
    )
    # String target is a forward reference resolved by sgqlc at schema
    # build time.  Args are Relay-style cursor pagination parameters.
    user_content_edits = sgqlc.types.Field(
        "UserContentEditConnection",
        graphql_name="userContentEdits",
        args=sgqlc.types.ArgDict(
            (
                ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
                (
                    "before",
                    sgqlc.types.Arg(String, graphql_name="before", default=None),
                ),
                ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
                ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
            )
        ),
    )
    viewer_did_author = sgqlc.types.Field(
        sgqlc.types.non_null(Boolean), graphql_name="viewerDidAuthor"
    )
|
Comment
|
python
|
doocs__leetcode
|
solution/1600-1699/1668.Maximum Repeating Substring/Solution.py
|
{
"start": 0,
"end": 199
}
|
class ____:
def maxRepeating(self, sequence: str, word: str) -> int:
for k in range(len(sequence) // len(word), -1, -1):
if word * k in sequence:
return k
|
Solution
|
python
|
scikit-learn__scikit-learn
|
sklearn/model_selection/_split.py
|
{
"start": 52997,
"end": 57979
}
|
class ____(GroupsConsumerMixin, BaseCrossValidator):
    """Leave P Group(s) Out cross-validator.

    Provides train/test indices to split data according to a third-party
    provided group. This group information can be used to encode arbitrary
    domain specific stratifications of the samples as integers.

    For instance the groups could be the year of collection of the samples
    and thus allow for cross-validation against time-based splits.

    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned the same groups.

    Read more in the :ref:`User Guide <leave_p_groups_out>`.

    Parameters
    ----------
    n_groups : int
        Number of groups (``p``) to leave out in the test split.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeavePGroupsOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([1, 2, 1])
    >>> groups = np.array([1, 2, 3])
    >>> lpgo = LeavePGroupsOut(n_groups=2)
    >>> lpgo.get_n_splits(groups=groups)
    3
    >>> print(lpgo)
    LeavePGroupsOut(n_groups=2)
    >>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}, group={groups[train_index]}")
    ...     print(f"  Test:  index={test_index}, group={groups[test_index]}")
    Fold 0:
      Train: index=[2], group=[3]
      Test:  index=[0 1], group=[1 2]
    Fold 1:
      Train: index=[1], group=[2]
      Test:  index=[0 2], group=[1 3]
    Fold 2:
      Train: index=[0], group=[1]
      Test:  index=[1 2], group=[2 3]

    See Also
    --------
    GroupKFold : K-fold iterator variant with non-overlapping groups.
    """

    def __init__(self, n_groups):
        # Stored unvalidated (sklearn convention); validated when the
        # splitter is actually used.
        self.n_groups = n_groups

    def _iter_test_masks(self, X, y, groups):
        # Yield one boolean test mask per combination of ``n_groups``
        # distinct group labels; the base class derives train/test
        # indices from these masks.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(
            groups, input_name="groups", copy=True, ensure_2d=False, dtype=None
        )
        unique_groups = np.unique(groups)
        # Need strictly more unique groups than n_groups, otherwise some
        # split would have an empty training set.
        if self.n_groups >= len(unique_groups):
            raise ValueError(
                "The groups parameter contains fewer than (or equal to) "
                "n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
                "expects that at least n_groups + 1 (%d) unique groups be "
                "present" % (self.n_groups, unique_groups, self.n_groups + 1)
            )
        combi = combinations(range(len(unique_groups)), self.n_groups)
        for indices in combi:
            test_index = np.zeros(_num_samples(X), dtype=bool)
            # Mark every sample belonging to one of the held-out groups.
            for l in unique_groups[np.array(indices)]:
                test_index[groups == l] = True
            yield test_index

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), default=None
            Always ignored, exists for API compatibility.

        y : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. This 'groups' parameter must always be specified to
            calculate the number of splits, though the other parameters can be
            omitted.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
        # Number of splits is C(n_unique_groups, n_groups).
        return int(comb(len(np.unique(groups)), self.n_groups, exact=True))

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            The target variable for supervised learning problems.

        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        return super().split(X, y, groups)
|
LeavePGroupsOut
|
python
|
doocs__leetcode
|
solution/2100-2199/2150.Find All Lonely Numbers in the Array/Solution.py
|
{
"start": 0,
"end": 215
}
|
class ____:
def findLonely(self, nums: List[int]) -> List[int]:
cnt = Counter(nums)
return [
x for x, v in cnt.items() if v == 1 and cnt[x - 1] == 0 and cnt[x + 1] == 0
]
|
Solution
|
python
|
pytorch__pytorch
|
test/jit/test_typing.py
|
{
"start": 483,
"end": 21075
}
|
class ____(JitTestCase):
    """TorchScript typing tests.

    Exercises how ``torch.jit.script`` handles Python container and
    typing constructs — dict/list/tuple operations, Optional
    refinement, NamedTuple support — and the diagnostics emitted for
    unsupported or mismatched type annotations.  Note: scripting
    compiles the *source text* of the inner functions, so their exact
    spelling is part of what is being tested.
    """

    def test_dict_in_not_in(self):
        def test_in_dict(x):
            # type: (Dict[str, int]) -> bool
            return "hi" in x

        self.checkScript(test_in_dict, ({"hi": 2, "bye": 3},))
        self.checkScript(test_in_dict, ({"bye": 3},))

        # Check evaluation order
        @torch.jit.script
        def a():
            print("a")
            return 3

        @torch.jit.script
        def b():
            print("b")
            return {3: 2, 4: 1}

        @torch.jit.script
        def fn():
            return a() in b()

        with self.capture_stdout() as captured:
            self.assertTrue(fn())
        if not IS_WINDOWS:
            # no stdout capturing on windows
            self.assertEqual(captured[0], "a\nb\n")

        def test_not_in_dict(a):
            # type: (Dict[str, int]) -> bool
            if "hello" not in a:
                return False
            else:
                return True

        self.checkScript(test_not_in_dict, ({"hello": 1, "world": 2},))
        self.checkScript(test_not_in_dict, ({"world": 2},))

        def test_dict_tensor_key(a, t):
            # type: (Dict[Tensor, int], Tensor) -> bool
            if t in a:
                return True
            else:
                return False

        inp1 = torch.tensor(3)
        inp2 = torch.tensor(5)
        dict_a = {inp1: 1, inp2: 3}
        self.checkScript(test_dict_tensor_key, (dict_a, torch.tensor(4)))
        self.checkScript(test_dict_tensor_key, (dict_a, torch.tensor(3)))
        self.checkScript(test_dict_tensor_key, (dict_a, inp1))
        self.checkScript(test_dict_tensor_key, (dict_a, inp2))

    def test_list_type_refinement_annotation_element_mismatch(self):
        def fn():
            l: List[int] = [1, 2, "foo", 3]
            return l

        with self.assertRaisesRegex(
            RuntimeError,
            "List type annotation"
            r" `List\[int\]` did not match the "
            "types of the given list elements",
        ):
            torch.jit.script(fn)

    def test_dict_type_refinement_annotation_key_mismatch(self):
        def fn():
            l1 = [1, 2, "foo", 3]
            l2 = ["foo", "bar", "baz", "qux"]
            d: Dict[int, str] = dict(zip(l1, l2))
            return d

        with self.assertRaisesRegex(
            RuntimeError,
            "Dicts may only "
            "contain homogeneous keys, but the "
            "type of the first generated key "
            r"was Union\[int, str\]",
        ):
            torch.jit.script(fn)

    def test_dict_type_refinement_annotation_value_mismatch(self):
        def fn():
            l1 = ["foo", "bar", "baz", "qux"]
            l2 = [1, 2, "foo", 3]
            d: Dict[str, int] = dict(zip(l1, l2))
            return d

        with self.assertRaisesRegex(
            RuntimeError,
            "Dict type annotation"
            r" `Dict\[str, int\]` did not match"
            " the type of an actual value type"
            r" `Union\[int, str\]`",
        ):
            torch.jit.script(fn)

    def test_dict_invalid_annotations(self):
        # Check for invalid value type annotation
        def wrong_value_type(dictionary: Dict[str, torch.jit.ScriptModule]):
            return

        with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
            torch.jit.script(wrong_value_type)

        # Check for invalid key type annotation
        def wrong_key_type(dictionary: Dict[torch.jit.ScriptModule, str]):
            return

        with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
            torch.jit.script(wrong_key_type)

        # Check for invalid key and value type annotation
        def wrong_key_value_type(
            dictionary: Dict[torch.jit.ScriptModule, torch.jit.ScriptModule],
        ):
            return

        with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
            torch.jit.script(wrong_key_value_type)

    def test_tuple_specialization(self):
        @torch.jit.script
        def f(t, s):
            # type: (Tuple[Tensor, Tuple[int, Tensor]], str) -> Tensor
            x, t2 = t
            _, y = t2
            return x + y

        t = (
            torch.randn(2, 2),
            (1, torch.randn(2, 2)),
        )
        f(t, "hi")
        # Inspect the compiled graph to confirm tuple elements were
        # specialized to concrete TensorTypes.
        graph = f.graph_for(t, "hi")
        input_types = list(next(graph.inputs()).type().elements())
        w = input_types[0]
        self.assertEqual(input_types[0].kind(), "TensorType")
        self.assertEqual(input_types[1].elements()[1].kind(), "TensorType")

    def test_tuple_io(self):
        def stuff(x):
            # type: (Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]
            a, b = x
            return b, a

        a = (torch.rand(3), torch.rand(3))
        self.checkScript(stuff, (a,))

    def test_tuple_keyword(self):
        def bar():
            f = tuple((1, 2))  # noqa: C409
            return f

        self.checkScript(bar, ())

        def foo():
            return tuple(1, 2)

        self.checkScriptRaisesRegex(foo, (), Exception, "1 argument")

        def cant_infer_size():
            return tuple([1, 2, 3])  # noqa: C409

        with self.assertRaisesRegex(Exception, "cannot statically infer the expected"):
            torch.jit.script(cant_infer_size)

    def test_tuple_create_return(self):
        def stuff2(x):
            # type: (int) -> Tuple[Tensor, Tensor]
            a = (torch.ones(x), torch.zeros(x))
            return a

        self.checkScript(stuff2, (3,))

    def test_list_io(self):
        def stuff3(x):
            # type: (List[int]) -> Tuple[Tensor, List[int]]
            return torch.ones(x), x

        self.checkScript(stuff3, ([3, 2],))

    def test_bool_list_io(self):
        @torch.jit.script
        def stuff4(x):
            # type: (List[bool]) -> Tuple[List[bool], List[bool], List[List[bool]]]
            return x, [True, False], [[True]]

        li_1, li_2, li_3 = stuff4([True])
        li_3 = li_3[0]
        # Elements must come back as real Python bools, not ints/tensors.
        for li in [li_1, li_2, li_3]:
            self.assertTrue(type(li[0]) is bool)

    def test_nested_list(self):
        def foo(z):
            # type: (Tuple[int, List[List[int]]]) -> int
            x, y = z
            return y[0][1]

        self.checkScript(foo, ((1, [[1, 2], [3, 4]]),))

    def test_list_sum(self):
        def fn(x: List[int]) -> int:
            return sum(x)

        def fn1(x: List[float]):
            return sum(x)

        def fn2(x: List[bool]):
            return sum(x)

        self.checkScript(fn, ([1, 2, 3],))
        self.checkScript(fn1, ([1.0, 2.0, 3.0],))
        self.checkScript(fn1, ([1, 2.8, 3],))
        self.checkScript(fn2, ([True, False, False],))
        self.checkScript(fn2, ([False, False, False],))
        self.checkScript(fn2, ([0, 1, 1, 0],))

    def test_list_unification(self):
        def fn():
            return [1, None, 2]

        def fn2(x):
            return [torch.ones(2, 2), None, x]

        self.checkScript(fn, [])
        self.checkScript(fn2, (torch.ones(2, 2),))

    # to avoid defining sum_list in multiple tests
    def get_sum_list_fn(self):
        def sum_list(a):
            # type: (List[int]) -> int
            sum = 0
            for i in a:
                sum += i
            return sum

        return sum_list

    def test_sum_list_diff_elms(self):
        self.checkScript(self.get_sum_list_fn(), ([1, 2, 3, 4, 5],))

    def test_sum_list_empty(self):
        self.checkScript(self.get_sum_list_fn(), ([],))

    def test_sum_list_one(self):
        self.checkScript(self.get_sum_list_fn(), ([1],))

    def test_sum_list_literal(self):
        def sum_list():
            # type: () -> int
            sum = 0
            for i in [1, 2, 3, 4, 5]:
                sum += i
            return sum

        self.checkScript(sum_list, ())

    def test_sum_list_wrong_type(self):
        with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):

            @torch.jit.script
            def sum_list(a):
                # type: (int) -> int
                sum = 0
                for i in a:  # noqa: T484
                    sum += i
                return sum

            sum_list(1)

    def test_list_iterables(self):
        with self.assertRaisesRegex(
            RuntimeError, "List of iterables is not supported currently"
        ):
            cu = torch.jit.CompilationUnit(
                """
            def list_iterables(x):
                for i, j in [2, 3, 4], [5, 6, 7]:
                    x += i
                    x += j
                return x
            """
            )

    def test_for_in_string(self):
        def test_strings(x):
            # type: (str) -> str
            reverse = ""
            for c in x:
                reverse = c + reverse
            return reverse

        self.checkScript(test_strings, ("hello",))
        self.checkScript(test_strings, ("",))

        def test_list_strings(x):
            # type: (List[str]) -> str
            result = ""
            for sub_str in x:
                result += sub_str
            return result

        self.checkScript(test_list_strings, (["hello", "world"],))
        self.checkScript(test_list_strings, (["hello", " ", "world", ""],))

    def test_for_in_dict(self):
        def test_dicts(x):
            # type: (Dict[str, int]) -> int
            sum = 0
            for key in x:
                sum += x[key]
            return sum

        self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))

        def test_dict_keys_values(x):
            # type: (Dict[str, int]) -> Tuple[str, int]
            key_str = ""
            sum = 0
            for key in x:
                key_str += key
            for val in x.values():
                sum += val
            return key_str, sum

        # NOTE(review): this re-checks test_dicts; test_dict_keys_values is
        # never passed to checkScript — looks unintentional, confirm upstream.
        self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))

    def test_for_tuple_unpack(self):
        def for_tuple_unpack(x, y):
            for i, j in [[3, 4], [5, 6], [7, 8]]:
                x += i
                y += j
            return x, y

        self.checkScript(for_tuple_unpack, (torch.tensor(3), torch.tensor(5)))

        def nested_tuple_unpack(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for i, (j, k), v in zip(x, enumerate(x), y):
                sum += i + j + k + v
            return sum

        self.checkScript(nested_tuple_unpack, ([1, 3, 5], [2, 4, 6]))

    def test_dict_comprehension(self):
        def fn():
            return {i: chr(i + 65) for i in range(4)}

        self.checkScript(fn, ())

    def test_dict_comprehension_with_type_annotation(self):
        def fn():
            d: Dict[int, str] = {i: chr(i + 65) for i in range(4)}
            return d

        self.checkScript(fn, ())

        with self.assertRaisesRegex(RuntimeError, ""):
            with self.assertRaisesRegex(
                AssertionError,
                "Expected Dict "
                "type annotation for dict "
                "comprehension, found "
                "Tuple[int, str]",
            ):

                @torch.jit.script
                def fn():
                    d: Tuple[int, str] = {i: chr(i + 65) for i in range(4)}
                    return d

    def test_dict_comprehension_scope(self):
        def comprehension_can_access_outer_scope_variables():
            lst = ["foo", "bar", "baz"]
            return {l: len(l) for l in lst}

        self.checkScript(comprehension_can_access_outer_scope_variables, ())

        with self.assertRaisesRegex(RuntimeError, "undefined value i"):

            @torch.jit.script
            def outer_scope_cannot_access_comprehension_variables():
                d = {i: chr(i + 65) for i in range(4)}
                i = i + 1  # noqa: F821

    def test_for_tuple_assign(self):
        def test_simple_assign(x):
            # type: (Tuple[int, float]) -> float
            sum = 0.0
            for a in x:
                sum += float(a)
            return sum

        self.checkScript(test_simple_assign, ((1, 2.5),))

        def test_tuple_assign(x):
            # type: (Tuple[Tuple[int, int], Tuple[int, int]]) -> int
            sum = 0
            for a in x:
                sum += a[0]
                sum += a[1]
            return sum

        self.checkScript(test_tuple_assign, (((1, 2), (4, 7)),))

    def test_single_starred_lhs(self):
        with self.assertRaisesRegex(
            RuntimeError,
            "A Starred expression may only appear on the lhs within the presence"
            " of another non-starred expression",
        ):
            cu = torch.jit.CompilationUnit(
                """
            def single_starred_lhs(x):
                a = (x, x, x)
                *b, = a
                return b
            """
            )

    def test_singleton_tuple_unpack(self):
        def foo(a):
            (b,) = (a,)
            return b + 1

        self.checkScript(foo, (torch.rand(3),))

    def test_tuple_assignments(self):
        def var_tuple_assign(x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
            (a, b), c = x, y
            return a + b + c

        tuple_inputs = (torch.randn(1, 4), torch.randn(3, 4))
        self.checkScript(var_tuple_assign, (tuple_inputs, torch.randn(3, 4)))

        def nested_tuple_assign(x, y, z):
            # type: (int, Tuple[int, Tuple[int, int]], Tuple[int, int]) -> int
            a, (b, (c, d)), (e, f) = x, y, z
            return a + b + c + d + e + f

        self.checkScript(nested_tuple_assign, ((1, (2, (3, 4)), (5, 6))))

        def subscript_tuple_assign(a, x, i):
            # type: (List[int], Tensor, int) -> Tuple[int, Tensor, int]
            a[i], (x[i], b) = 1, (2, 3)
            return a[i] + 1, x + 5, b

        self.checkScript(
            subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0)
        )

        def star_tuple_assign():
            # type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
            a, (b, *c), *d = 1, (2, 3, 4), 5, 6
            return a, b, c, d

        self.checkScript(star_tuple_assign, ())

        def subscript_tuple_augmented_assign(a):
            # type: (Tuple[int, int]) -> Tuple[int, int]
            a[0] += 1
            return a

        with self.assertRaisesRegex(RuntimeError, "does not support augmented assign"):
            scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)

    def test_multiple_assign(self):
        def test():
            a = b, c = d, f = (1, 1)

            # side effect
            ten = torch.tensor(1)
            ten1 = ten2 = ten.add_(1)

            # ordering
            x = 1
            y = 3
            x, y = y, x + y

            return a, b, c, d, f, ten, ten1, ten2, x, y

        self.checkScript(test, ())

    def test_opt_opt_refinement(self):
        @torch.jit.script
        def test_unify(weight, bias):
            # type: (Optional[int], Optional[int]) -> Optional[int]
            if weight is not None:
                opt = None
            else:
                if bias is not None:
                    opt = 1
                else:
                    opt = None
            return opt

    def test_optional_refinement(self):
        @torch.jit.script
        def test_if_none_assignment(x):
            # type: (Optional[int]) -> int
            if x is None:
                x = 1
            return x + 1

        self.assertEqual(test_if_none_assignment(1), 2)

    def test_optional_conversion(self):
        @torch.jit.script
        def other_fn(x=None):
            # type: (Optional[int]) -> int
            return torch.jit._unwrap_optional(x)

        @torch.jit.script
        def fn(x):
            # type: (int) -> int
            return other_fn(x)

        self.assertEqual(fn(2), 2)

        @torch.jit.script
        def unify_to_optional(x):
            # type: (bool) -> Optional[int]
            if x:
                a = None
            else:
                a = 2
            return a

        self.assertEqual(unify_to_optional(True), None)
        self.assertEqual(unify_to_optional(False), 2)

        @torch.jit.script
        def opt_list(x):
            # type: (Optional[List[float]]) -> int
            return 2

        @torch.jit.script
        def broadcast_opt_list(x):
            # type: (Optional[BroadcastingList2[float]]) -> int
            return 2

        @torch.jit.script
        def opt_list_tuple_caller(x):
            # type: (Tuple[float, float]) -> int
            return opt_list(x) + broadcast_opt_list(x)

        self.assertEqual(opt_list_tuple_caller((2.0, 3.0)), 4)

    def test_optional_tuple(self):
        def fn(x=None):
            # type: (Optional[Tuple[int, int]]) -> Tuple[int, int]
            if x is None:
                new_x = (1, 2)
            else:
                new_x = x
            return new_x

        self.checkScript(fn, ((3, 4),))
        self.checkScript(fn, ())

    def test_namedtuple_redefine(self):
        global _1, _2
        _1 = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
        _2 = namedtuple("GoogLeNetOutputs", ["different"])
        with self.assertRaisesRegex(RuntimeError, r"redefine"):

            @torch.jit.script
            def foo(x, y):
                # type: (_1, _2) -> _1
                return x

    def test_namedtuple_py2(self):
        global _GoogLeNetOutputs  # see [local resolution in python]
        _GoogLeNetOutputs = namedtuple(
            "GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"]
        )

        @torch.jit.script
        def foo(x):
            # type: (_GoogLeNetOutputs) -> _GoogLeNetOutputs
            return x

        vals = torch.rand(3), torch.rand(4), torch.rand(5)
        out = foo(
            _GoogLeNetOutputs(logits=vals[0], aux_logits2=vals[1], aux_logits1=vals[2])
        )
        self.assertEqual(out.logits, vals[0])
        self.assertEqual(out.aux_logits2, vals[1])
        self.assertEqual(out.aux_logits1, vals[2])

    def test_namedtuple_good_error(self):
        global _GoogLeNetOutputs  # see [local resolution in python]
        _GoogLeNetOutputs = namedtuple(
            "GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"]
        )

        @torch.jit.script
        def foo(x):
            # type: (_GoogLeNetOutputs) -> _GoogLeNetOutputs
            return x

        with self.assertRaisesRegex(
            RuntimeError, r"aka NamedTuple\(logits, aux_logits2, aux_logits1\)"
        ):
            out = foo(_GoogLeNetOutputs(logits="3", aux_logits2="4", aux_logits1="5"))

    def test_namedtuple_error_source_attribution(self):
        class _NamedTupleBadMemberType(NamedTuple):
            f1: torch.Tensor
            f2: "ABadForwardRefType"  # noqa: F821

        make_global(_NamedTupleBadMemberType)  # see [local resolution in python]

        def fn(x: _NamedTupleBadMemberType) -> torch.Tensor:
            return x.f1.relu()

        # assert that this has a location associated with the error.
        # note the " +" is regex (i.e. "at least one space")
        with self.assertRaisesRegex(ValueError, "at +File"):
            torch.jit.script(fn)

    def test_inherited_annotations_python_310(self):
        # See #104484
        # In python >=3.10, inspect.get_annotations doesn't always return the same values.
        # Sometimes it will show all annotations; other times it will show only annotations
        # that show in that class, not classes it inherits fro.
        class BaseModule(torch.nn.Module):
            state: List[int]

            def forward(self, x):
                pass

        def do_something_with_list(x: List[int]):
            if x:
                return x[-1]
            return 5

        class Submodule(BaseModule):
            def __init__(self, self_x_value):
                super().__init__()
                self.x = self_x_value
                self.state = []

            def forward(self, x):
                return self.x + x + do_something_with_list(self.state)

        class LowestModule(Submodule):
            def __init__(self) -> None:
                super().__init__(123)

        # Scripting twice must succeed despite inconsistent inherited
        # annotation reporting across Python versions.
        mod = LowestModule()
        mod2 = LowestModule()
        mod_s = torch.jit.script(mod)
        mod2_s = torch.jit.script(mod2)
if __name__ == "__main__":
    # This suite is meant to be run through test/test_jit.py, not directly.
    raise_on_run_directly("test/test_jit.py")
|
TestTyping
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/events.py
|
{
"start": 69082,
"end": 74822
}
|
class ____(Request):
    """
    Return the debug image per metric and variant for the provided iteration

    :param task: Task ID
    :type task: str
    :param metric: Metric name
    :type metric: str
    :param variant: Metric variant
    :type variant: str
    :param iteration: The iteration to bring debug image from. If not specified
        then the latest reported image is retrieved
    :type iteration: int
    :param refresh: If set then scroll state will be refreshed to reflect the
        latest changes in the debug images
    :type refresh: bool
    :param scroll_id: Scroll ID from the previous call to get_debug_image_sample or
        empty
    :type scroll_id: str
    :param navigate_current_metric: If set then subsequent navigation with
        next_debug_image_sample is done on the debug images for the passed metric only.
        Otherwise for all the metrics
    :type navigate_current_metric: bool
    """

    # Endpoint routing: events.get_debug_image_sample, API version 2.20.
    _service = "events"
    _action = "get_debug_image_sample"
    _version = "2.20"
    # JSON schema describing the request payload; mirrors the server-side
    # contract (auto-generated binding — keep in sync with the backend).
    _schema = {
        "definitions": {},
        "properties": {
            "iteration": {
                "description": "The iteration to bring debug image from. If not specified then the latest reported image is retrieved",
                "type": "integer",
            },
            "metric": {"description": "Metric name", "type": "string"},
            "navigate_current_metric": {
                "default": True,
                "description": "If set then subsequent navigation with next_debug_image_sample is done on the debug images for the passed metric only. Otherwise for all the metrics",
                "type": "boolean",
            },
            "refresh": {
                "description": "If set then scroll state will be refreshed to reflect the latest changes in the debug images",
                "type": "boolean",
            },
            "scroll_id": {
                "description": "Scroll ID from the previous call to get_debug_image_sample or empty",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
            "variant": {"description": "Metric variant", "type": "string"},
        },
        "required": ["task", "metric", "variant"],
        "type": "object",
    }

    def __init__(
        self,
        task: str,
        metric: str,
        variant: str,
        iteration: Optional[int] = None,
        refresh: Optional[bool] = None,
        scroll_id: Optional[str] = None,
        navigate_current_metric: Optional[bool] = True,
        **kwargs: Any
    ) -> None:
        super(GetDebugImageSampleRequest, self).__init__(**kwargs)
        # These assignments route through the schema_property setters
        # below, so each value is type-checked on construction.
        self.task = task
        self.metric = metric
        self.variant = variant
        self.iteration = iteration
        self.refresh = refresh
        self.scroll_id = scroll_id
        self.navigate_current_metric = navigate_current_metric

    # Each property below validates on assignment via assert_isinstance;
    # assigning None always clears the stored value.
    @schema_property("task")
    def task(self) -> str:
        return self._property_task

    @task.setter
    def task(self, value: str) -> None:
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("metric")
    def metric(self) -> str:
        return self._property_metric

    @metric.setter
    def metric(self, value: str) -> None:
        if value is None:
            self._property_metric = None
            return
        self.assert_isinstance(value, "metric", six.string_types)
        self._property_metric = value

    @schema_property("variant")
    def variant(self) -> str:
        return self._property_variant

    @variant.setter
    def variant(self, value: str) -> None:
        if value is None:
            self._property_variant = None
            return
        self.assert_isinstance(value, "variant", six.string_types)
        self._property_variant = value

    @schema_property("iteration")
    def iteration(self) -> Optional[int]:
        return self._property_iteration

    @iteration.setter
    def iteration(self, value: Optional[int]) -> None:
        if value is None:
            self._property_iteration = None
            return
        # Accept whole-number floats and coerce to int to match the
        # schema's "integer" type.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "iteration", six.integer_types)
        self._property_iteration = value

    @schema_property("refresh")
    def refresh(self) -> Optional[bool]:
        return self._property_refresh

    @refresh.setter
    def refresh(self, value: Optional[bool]) -> None:
        if value is None:
            self._property_refresh = None
            return
        self.assert_isinstance(value, "refresh", (bool,))
        self._property_refresh = value

    @schema_property("scroll_id")
    def scroll_id(self) -> Optional[str]:
        return self._property_scroll_id

    @scroll_id.setter
    def scroll_id(self, value: Optional[str]) -> None:
        if value is None:
            self._property_scroll_id = None
            return
        self.assert_isinstance(value, "scroll_id", six.string_types)
        self._property_scroll_id = value

    @schema_property("navigate_current_metric")
    def navigate_current_metric(self) -> Optional[bool]:
        return self._property_navigate_current_metric

    @navigate_current_metric.setter
    def navigate_current_metric(self, value: Optional[bool]) -> None:
        if value is None:
            self._property_navigate_current_metric = None
            return
        self.assert_isinstance(value, "navigate_current_metric", (bool,))
        self._property_navigate_current_metric = value
|
GetDebugImageSampleRequest
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_fitpack2.py
|
{
"start": 1050,
"end": 17361
}
|
class ____:
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(), [1, 3])
assert_array_almost_equal(lut.get_coeffs(), [3, 3])
assert abs(lut.get_residual()) < 1e-10
assert_array_almost_equal(lut([1, 1.5, 2]), [3, 3, 3])
@pytest.mark.parametrize("bc_type", [None, 'periodic'])
def test_linear_constant_periodic(self, bc_type):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
spl = make_splrep(x, y, k=1, s=len(x), bc_type=bc_type)
xp_assert_close(spl.t[1:-1], lut.get_knots(), atol=1e-15)
xp_assert_close(spl.c, lut.get_coeffs(), atol=1e-15)
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert shape(arg) == shape(lut(arg))
assert shape(arg) == shape(lut(arg, nu=1))
arg = [1.5, 2, 2.5]
assert shape(arg) == shape(lut(arg))
assert shape(arg) == shape(lut(arg, nu=1))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert abs(lut.get_residual()) < 1e-15
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
xp_assert_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
xp_assert_equal(spl([]), array([]))
def test_roots(self):
x = [1, 3, 5, 7, 9]
y = [0, 4, 9, 12, 21]
spl = UnivariateSpline(x, y, k=3)
assert_almost_equal(spl.roots()[0], 1.050290639101332)
def test_roots_length(self): # for gh18335
x = np.linspace(0, 50 * np.pi, 1000)
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
assert len(spl.roots()) == 50
def test_derivatives(self):
x = [1, 3, 5, 7, 9]
y = [0, 4, 9, 12, 21]
spl = UnivariateSpline(x, y, k=3)
assert_almost_equal(spl.derivatives(3.5),
[5.5152902, 1.7146577, -0.1830357, 0.3125])
def test_derivatives_2(self):
x = np.arange(8)
y = x**3 + 2.*x**2
tck = splrep(x, y, s=0)
ders = spalde(3, tck)
xp_assert_close(ders, [45., # 3**3 + 2*(3)**2
39., # 3*(3)**2 + 4*(3)
22., # 6*(3) + 4
6.], # 6*3**0
atol=1e-15)
spl = UnivariateSpline(x, y, s=0, k=3)
xp_assert_close(spl.derivatives(3),
ders,
atol=1e-15)
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
xp_assert_close(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
xp_assert_close(spl(xp, ext=ext), xp**3, atol=1e-16)
xp_assert_close(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
xp_assert_close(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
xp_assert_close(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
xp_assert_close(spl(xp, ext=ext), xp_clip**3, atol=2e-16)
xp_assert_close(cls(x, y, ext=ext)(xp), xp_clip**3, atol=2e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
xp_assert_close(spl(xp, ext=0), xp**3, atol=1e-16)
xp_assert_close(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
xp_assert_close(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
xp_assert_close(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
xp_assert_close(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
def test_derivative_extrapolation(self):
# Regression test for gh-10195: for a const-extrapolation spline
# its derivative evaluates to zero for extrapolation
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 5]
f = UnivariateSpline(x_values, y_values, ext='const', k=3)
x = [-1, 0, -0.5, 9, 9.5, 10]
xp_assert_close(f.derivative()(x), np.zeros_like(x), atol=1e-15)
def test_integral_out_of_bounds(self):
# Regression test for gh-7906: .integral(a, b) is wrong if both
# a and b are out-of-bounds
x = np.linspace(0., 1., 7)
for ext in range(4):
f = UnivariateSpline(x, x, s=0, ext=ext)
for (a, b) in [(1, 1), (1, 5), (2, 5),
(0, 0), (-2, 0), (-2, -1)]:
assert abs(f.integral(a, b)) < 1e-15
def test_nan(self):
# bail out early if the input data contains nans
x = np.arange(10, dtype=float)
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(x, y, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
y_end = y[-1]
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, check_finite=True))
y[-1] = y_end # check valid y but invalid w
w[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_strictly_increasing_x(self):
# Test the x is required to be strictly increasing for
# UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
# but merely increasing for UnivariateSpline if s>0
# and for LSQUnivariateSpline; see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0]
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, s=0, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
def test_increasing_x(self):
# Test that x is required to be increasing, see gh-8535
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0] - 1.0
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_invalid_input_for_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
UnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
UnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
UnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
with assert_raises(ValueError) as info:
UnivariateSpline(x_values, y_values, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_invalid_input_for_interpolated_univariate_spline(self):
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
InterpolatedUnivariateSpline(x_values, y_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [-1.0, 1.0, 1.0, 1.0]
InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-1)
InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
InterpolatedUnivariateSpline(x_values, y_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_invalid_input_for_lsq_univariate_spline(self):
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
spl = UnivariateSpline(x_values, y_values, check_finite=True)
t_values = spl.get_knots()[3:4] # interior knots w/ default k=3
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5]
LSQUnivariateSpline(x_values, y_values, t_values)
assert "x and y should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x_values = [1, 2, 4, 6, 8.5]
y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
w_values = [1.0, 1.0, 1.0, 1.0]
LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
assert "x, y, and w should have a same length" in str(info.value)
message = "Interior knots t must satisfy Schoenberg-Whitney conditions"
with assert_raises(ValueError, match=message) as info:
bbox = (100, -100)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
with assert_raises(ValueError) as info:
bbox = (-1)
LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
assert "bbox shape should be (2,)" in str(info.value)
with assert_raises(ValueError) as info:
LSQUnivariateSpline(x_values, y_values, t_values, k=6)
assert "k should be 1 <= k <= 5" in str(info.value)
def test_array_like_input(self):
x_values = np.array([1, 2, 4, 6, 8.5])
y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
bbox = np.array([-100, 100])
# np.array input
spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
bbox=bbox)
# list input
spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
w=w_values.tolist(), bbox=bbox.tolist())
xp_assert_close(spl1([0.1, 0.5, 0.9, 0.99]),
spl2([0.1, 0.5, 0.9, 0.99]))
def test_fpknot_oob_crash(self):
# https://github.com/scipy/scipy/issues/3691
x = range(109)
y = [0., 0., 0., 0., 0., 10.9, 0., 11., 0.,
0., 0., 10.9, 0., 0., 0., 0., 0., 0.,
10.9, 0., 0., 0., 11., 0., 0., 0., 10.9,
0., 0., 0., 10.5, 0., 0., 0., 10.7, 0.,
0., 0., 11., 0., 0., 0., 0., 0., 0.,
10.9, 0., 0., 10.7, 0., 0., 0., 10.6, 0.,
0., 0., 10.5, 0., 0., 10.7, 0., 0., 10.5,
0., 0., 11.5, 0., 0., 0., 10.7, 0., 0.,
10.7, 0., 0., 10.9, 0., 0., 10.8, 0., 0.,
0., 10.7, 0., 0., 10.6, 0., 0., 0., 10.4,
0., 0., 10.6, 0., 0., 10.5, 0., 0., 0.,
10.7, 0., 0., 0., 10.4, 0., 0., 0., 10.8, 0.]
msg = r"does not satisfy the condition abs\(fp-s\)/s < tol"
with pytest.warns(UserWarning, match=msg):
UnivariateSpline(x, y, k=1)
def test_concurrency(self):
# Check that no segfaults appear with concurrent access to
# UnivariateSpline
xx = np.arange(100, dtype=float)
yy = xx**3
x = np.arange(100, dtype=float)
x[1] = x[0]
spl = UnivariateSpline(xx, yy, check_finite=True)
def worker_fn(_, interp, x):
interp(x)
_run_concurrent_barrier(10, worker_fn, spl, x)
|
TestUnivariateSpline
|
python
|
urllib3__urllib3
|
test/test_collections.py
|
{
"start": 184,
"end": 3032
}
|
class ____:
def test_maxsize(self) -> None:
d: Container[int, str] = Container(5)
for i in range(5):
d[i] = str(i)
assert len(d) == 5
for i in range(5):
assert d[i] == str(i)
d[i + 1] = str(i + 1)
assert len(d) == 5
assert 0 not in d
assert (i + 1) in d
def test_maxsize_0(self) -> None:
d: Container[int, int] = Container(0)
d[1] = 1
assert len(d) == 0
def test_expire(self) -> None:
d: Container[int, str] = Container(5)
for i in range(5):
d[i] = str(i)
for i in range(5):
d.get(0)
# Add one more entry
d[5] = "5"
# Check state
assert list(d._container.keys()) == [2, 3, 4, 0, 5]
def test_same_key(self) -> None:
d: Container[str, int] = Container(5)
for i in range(10):
d["foo"] = i
assert list(d._container.keys()) == ["foo"]
assert len(d) == 1
def test_access_ordering(self) -> None:
d: Container[int, bool] = Container(5)
for i in range(10):
d[i] = True
# Keys should be ordered by access time
assert list(d._container.keys()) == [5, 6, 7, 8, 9]
new_order = [7, 8, 6, 9, 5]
for k in new_order:
d[k]
assert list(d._container.keys()) == new_order
def test_delete(self) -> None:
d: Container[int, bool] = Container(5)
for i in range(5):
d[i] = True
del d[0]
assert 0 not in d
d.pop(1)
assert 1 not in d
d.pop(1, None)
def test_get(self) -> None:
d: Container[int, bool | int] = Container(5)
for i in range(5):
d[i] = True
r = d.get(4)
assert r is True
r = d.get(5)
assert r is None
r = d.get(5, 42)
assert r == 42
with pytest.raises(KeyError):
d[5]
def test_disposal(self) -> None:
evicted_items: list[int] = []
def dispose_func(arg: int) -> None:
# Save the evicted datum for inspection
evicted_items.append(arg)
d: Container[int, int] = Container(5, dispose_func=dispose_func)
for i in range(5):
d[i] = i
assert list(d._container.keys()) == list(range(5))
assert evicted_items == [] # Nothing disposed
d[5] = 5
assert list(d._container.keys()) == list(range(1, 6))
assert evicted_items == [0]
del d[1]
assert evicted_items == [0, 1]
d.clear()
assert evicted_items == [0, 1, 2, 3, 4, 5]
def test_iter(self) -> None:
d: Container[str, str] = Container()
with pytest.raises(NotImplementedError):
d.__iter__()
|
TestLRUContainer
|
python
|
pypa__packaging
|
src/packaging/_musllinux.py
|
{
"start": 327,
"end": 2707
}
|
class ____(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> _MuslVersion | None:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache
def _get_musl_version(executable: str) -> _MuslVersion | None:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
return None
proc = subprocess.run([ld], check=False, stderr=subprocess.PIPE, text=True)
return _parse_musl_version(proc.stderr)
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param archs: Sequence of compatible architectures.
The first one shall be the closest to the actual architecture and be the part of
platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
The ``linux_`` prefix is assumed as a prerequisite for the current platform to
be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for arch in archs:
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
|
_MuslVersion
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-number-of-k-even-arrays.py
|
{
"start": 1412,
"end": 1957
}
|
class ____(object):
def countOfArrays(self, n, m, k):
"""
:type n: int
:type m: int
:type k: int
:rtype: int
"""
MOD = 10**9+7
even, odd = m//2, (m+1)//2
dp = [[0]*(k+1) for _ in xrange(2)]
dp[0][0], dp[1][0] = even, odd
for _ in xrange(n-1):
for i in reversed(xrange(k+1)):
dp[0][i], dp[1][i] = (((dp[0][i-1] if i-1 >= 0 else 0)+dp[1][i])*even)%MOD, ((dp[0][i]+dp[1][i])*odd)%MOD
return (dp[0][k]+dp[1][k])%MOD
|
Solution2
|
python
|
jazzband__django-simple-history
|
simple_history/tests/tests/test_models.py
|
{
"start": 73905,
"end": 74416
}
|
class ____(TestCase):
def setUp(self):
self.model = ForeignKeyToSelfModel
self.history_model = self.model.history.model
def test_foreign_key_to_self_using_model_str(self):
self.assertEqual(
self.model, self.history_model.fk_to_self.field.remote_field.model
)
def test_foreign_key_to_self_using_self_str(self):
self.assertEqual(
self.model, self.history_model.fk_to_self_using_str.field.remote_field.model
)
|
ForeignKeyToSelfTest
|
python
|
urllib3__urllib3
|
test/test_queue_monkeypatch.py
|
{
"start": 178,
"end": 254
}
|
class ____(Exception):
"""
This should not be raised.
"""
|
BadError
|
python
|
scipy__scipy
|
scipy/stats/tests/test_multivariate.py
|
{
"start": 49511,
"end": 58025
}
|
class ____:
def test_bad_input(self):
# Check that bad inputs raise errors
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
# Incorrect dimensions
assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
assert_raises(ValueError, matrix_normal, M, U, U)
assert_raises(ValueError, matrix_normal, M, V, V)
assert_raises(ValueError, matrix_normal, M.T, U, V)
e = np.linalg.LinAlgError
# Singular covariance for the rvs method of a non-frozen instance
assert_raises(e, matrix_normal.rvs,
M, U, np.ones((num_cols, num_cols)))
assert_raises(e, matrix_normal.rvs,
M, np.ones((num_rows, num_rows)), V)
# Singular covariance for a frozen instance
assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
def test_default_inputs(self):
# Check that default argument handling works
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
Z = np.zeros((num_rows, num_cols))
Zr = np.zeros((num_rows, 1))
Zc = np.zeros((1, num_cols))
Ir = np.identity(num_rows)
Ic = np.identity(num_cols)
I1 = np.identity(1)
assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(mean=M).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(rowcov=U).shape,
(num_rows, 1))
assert_equal(matrix_normal.rvs(colcov=V).shape,
(1, num_cols))
assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
(num_rows, num_cols))
assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
(num_rows, num_cols))
assert_equal(matrix_normal(mean=M).rowcov, Ir)
assert_equal(matrix_normal(mean=M).colcov, Ic)
assert_equal(matrix_normal(rowcov=U).mean, Zr)
assert_equal(matrix_normal(rowcov=U).colcov, I1)
assert_equal(matrix_normal(colcov=V).mean, Zc)
assert_equal(matrix_normal(colcov=V).rowcov, I1)
assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
def test_covariance_expansion(self):
# Check that covariance can be specified with scalar or vector
num_rows = 4
num_cols = 3
M = np.full((num_rows, num_cols), 0.3)
Uv = np.full(num_rows, 0.2)
Us = 0.2
Vv = np.full(num_cols, 0.1)
Vs = 0.1
Ir = np.identity(num_rows)
Ic = np.identity(num_cols)
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
0.2*Ir)
assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
0.1*Ic)
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
0.2*Ir)
assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
0.1*Ic)
def test_frozen_matrix_normal(self):
for i in range(1,5):
for j in range(1,5):
M = np.full((i,j), 0.3)
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
rvs1 = frozen.rvs(random_state=1234)
rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
random_state=1234)
assert_equal(rvs1, rvs2)
X = frozen.rvs(random_state=1234)
pdf1 = frozen.pdf(X)
pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
assert_equal(pdf1, pdf2)
logpdf1 = frozen.logpdf(X)
logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
assert_equal(logpdf1, logpdf2)
def test_matches_multivariate(self):
# Check that the pdfs match those obtained by vectorising and
# treating as a multivariate normal.
for i in range(1,5):
for j in range(1,5):
M = np.full((i,j), 0.3)
U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X = frozen.rvs(random_state=1234)
pdf1 = frozen.pdf(X)
logpdf1 = frozen.logpdf(X)
entropy1 = frozen.entropy()
vecX = X.T.flatten()
vecM = M.T.flatten()
cov = np.kron(V,U)
pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
entropy2 = multivariate_normal.entropy(mean=vecM, cov=cov)
assert_allclose(pdf1, pdf2, rtol=1E-10)
assert_allclose(logpdf1, logpdf2, rtol=1E-10)
assert_allclose(entropy1, entropy2)
def test_array_input(self):
# Check array of inputs has the same output as the separate entries.
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
N = 10
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X1 = frozen.rvs(size=N, random_state=1234)
X2 = frozen.rvs(size=N, random_state=4321)
X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
assert_equal(X.shape, (2, N, num_rows, num_cols))
array_logpdf = frozen.logpdf(X)
assert_equal(array_logpdf.shape, (2, N))
for i in range(2):
for j in range(N):
separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
rowcov=U, colcov=V)
assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
def test_moments(self):
# Check that the sample moments match the parameters
num_rows = 4
num_cols = 3
M = np.full((num_rows,num_cols), 0.3)
U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
N = 1000
frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
X = frozen.rvs(size=N, random_state=1234)
sample_mean = np.mean(X,axis=0)
assert_allclose(sample_mean, M, atol=0.1)
sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
assert_allclose(sample_colcov, V, atol=0.1)
sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
N*num_cols,num_rows).T)
assert_allclose(sample_rowcov, U, atol=0.1)
def test_samples(self):
# Regression test to ensure that we always generate the same stream of
# random variates.
actual = matrix_normal.rvs(
mean=np.array([[1, 2], [3, 4]]),
rowcov=np.array([[4, -1], [-1, 2]]),
colcov=np.array([[5, 1], [1, 10]]),
random_state=np.random.default_rng(0),
size=2
)
expected = np.array(
[[[1.56228264238181, -1.24136424071189],
[2.46865788392114, 6.22964440489445]],
[[3.86405716144353, 10.73714311429529],
[2.59428444080606, 5.79987854490876]]]
)
assert_allclose(actual, expected)
|
TestMatrixNormal
|
python
|
tiangolo__fastapi
|
tests/test_pydantic_v1_v2_mixed.py
|
{
"start": 564,
"end": 620
}
|
class ____(NewBaseModel):
new_sub_name: str
|
NewSubItem
|
python
|
Textualize__textual
|
tests/test_animation.py
|
{
"start": 144,
"end": 5249
}
|
class ____(App):
CSS = """
#foo {
height: 1;
}
"""
def compose(self) -> ComposeResult:
yield Static("foo", id="foo")
async def test_animate_height() -> None:
"""Test animating styles.height works."""
# Styles.height is a scalar, which makes it more complicated to animate
app = AnimApp()
async with app.run_test() as pilot:
static = app.query_one(Static)
assert static.size.height == 1
assert static.styles.height.value == 1
static.styles.animate("height", 100, duration=0.5, easing="linear")
start = perf_counter()
# Wait for the animation to finished
await pilot.wait_for_animation()
elapsed = perf_counter() - start
# Check that the full time has elapsed
assert elapsed >= 0.5
# Check the height reached the maximum
assert static.styles.height.value == 100
async def test_scheduling_animation() -> None:
"""Test that scheduling an animation works."""
app = AnimApp()
delay = 0.1
async with app.run_test() as pilot:
styles = app.query_one(Static).styles
styles.background = "black"
styles.animate("background", "white", delay=delay, duration=0)
# Still black immediately after call, animation hasn't started yet due to `delay`
assert styles.background.rgb == (0, 0, 0)
await pilot.wait_for_scheduled_animations()
assert styles.background.rgb == (255, 255, 255)
async def test_wait_for_current_animations() -> None:
"""Test that we can wait only for the current animations taking place."""
app = AnimApp()
delay = 10
async with app.run_test() as pilot:
styles = app.query_one(Static).styles
styles.animate("height", 100, duration=0.1)
start = perf_counter()
styles.animate("height", 200, duration=0.1, delay=delay)
# Wait for the first animation to finish
await pilot.wait_for_animation()
elapsed = perf_counter() - start
assert elapsed < (delay / 2)
async def test_wait_for_current_and_scheduled_animations() -> None:
"""Test that we can wait for current and scheduled animations."""
app = AnimApp()
async with app.run_test() as pilot:
styles = app.query_one(Static).styles
start = perf_counter()
styles.animate("height", 50, duration=0.01)
styles.animate("background", "black", duration=0.01, delay=0.05)
await pilot.wait_for_scheduled_animations()
elapsed = perf_counter() - start
assert elapsed >= 0.06
assert styles.background.rgb == (0, 0, 0)
async def test_reverse_animations() -> None:
"""Test that you can create reverse animations.
Regression test for #1372 https://github.com/Textualize/textual/issues/1372
"""
app = AnimApp()
async with app.run_test() as pilot:
static = app.query_one(Static)
styles = static.styles
# Starting point.
styles.background = "black"
assert styles.background.rgb == (0, 0, 0)
# First, make sure we can go from black to white and back, step by step.
styles.animate("background", "white", duration=0.01)
await pilot.wait_for_animation()
assert styles.background.rgb == (255, 255, 255)
styles.animate("background", "black", duration=0.01)
await pilot.wait_for_animation()
assert styles.background.rgb == (0, 0, 0)
# Now, the actual test is to make sure we go back to black if creating both at once.
styles.animate("background", "white", duration=0.01)
styles.animate("background", "black", duration=0.01)
await pilot.wait_for_animation()
assert styles.background.rgb == (0, 0, 0)
async def test_schedule_reverse_animations() -> None:
"""Test that you can schedule reverse animations.
Regression test for #1372 https://github.com/Textualize/textual/issues/1372
"""
app = AnimApp()
async with app.run_test() as pilot:
static = app.query_one(Static)
styles = static.styles
# Starting point.
styles.background = "black"
assert styles.background.rgb == (0, 0, 0)
# First, make sure we can go from black to white and back, step by step.
styles.animate("background", "white", delay=0.01, duration=0.01)
await pilot.wait_for_scheduled_animations()
assert styles.background.rgb == (255, 255, 255)
styles.animate("background", "black", delay=0.01, duration=0.01)
await pilot.wait_for_scheduled_animations()
assert styles.background.rgb == (0, 0, 0)
# Now, the actual test is to make sure we go back to black if scheduling both at once.
styles.animate("background", "white", delay=0.025, duration=0.05)
# While the black -> white animation runs, start the white -> black animation.
styles.animate("background", "black", delay=0.05, duration=0.01)
await pilot.wait_for_scheduled_animations()
assert styles.background.rgb == (0, 0, 0)
|
AnimApp
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1434164,
"end": 1434542
}
|
class ____(VegaLiteSchema):
"""
TooltipContent schema wrapper.
Parameters
----------
content : Literal['encoding', 'data']
"""
_schema = {"$ref": "#/definitions/TooltipContent"}
def __init__(
self, content: Optional[Literal["encoding", "data"]] = Undefined, **kwds
):
super().__init__(content=content, **kwds)
|
TooltipContent
|
python
|
keras-team__keras
|
keras/src/metrics/regression_metrics_test.py
|
{
"start": 151,
"end": 1191
}
|
class ____(testing.TestCase):
def test_config(self):
# TODO
pass
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mse_obj.update_state(y_true, y_pred)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
|
MeanSquaredErrorTest
|
python
|
numba__numba
|
numba/tests/npyufunc/test_dufunc.py
|
{
"start": 4272,
"end": 5840
}
|
class ____(TestCase):
@functools.cache
def _generate_jit(self, ufunc, kind, identity=None):
assert kind in ('reduce', 'reduceat', 'at')
if kind == 'reduce':
if ufunc.nin == 2:
vec = vectorize(identity=identity)(lambda a, b: ufunc(a, b))
else:
vec = vectorize(identity=identity)(lambda a: ufunc(a))
@njit
def fn(array, axis=0, initial=None):
return vec.reduce(array, axis=axis, initial=initial)
return fn
elif kind == 'reduceat':
if ufunc.nin != 2:
raise ValueError('reduceat only supported for binary functions')
vec = vectorize(identity=identity)(lambda a, b: ufunc(a, b))
@njit
def fn(array, indices, axis=0, dtype=None, out=None):
return vec.reduceat(array, indices, axis, dtype, out)
return fn
else:
if ufunc.nin == 2:
vec = vectorize(identity=identity)(lambda a, b: ufunc(a, b))
else:
vec = vectorize(identity=identity)(lambda a: ufunc(a))
@njit
def fn(*args):
return vec.at(*args)
return fn
def _reduce(self, ufunc, identity):
return self._generate_jit(ufunc, 'reduce', identity=identity)
def _reduceat(self, ufunc, identity):
return self._generate_jit(ufunc, 'reduceat', identity=identity)
def _at(self, ufunc):
return self._generate_jit(ufunc, 'at')
|
TestDUFuncMethodsBase
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_color.py
|
{
"start": 14893,
"end": 14962
}
|
class ____(scale_color_gradient):
pass
@alias
|
scale_colour_gradient
|
python
|
Netflix__metaflow
|
metaflow/plugins/cards/card_modules/test_cards.py
|
{
"start": 4163,
"end": 5363
}
|
class ____(MetaflowCard):
"""
This card takes components and helps test the `current.card.components["A"].update()`
interface
"""
HTML_TEMPLATE = REFRESHABLE_HTML_TEMPLATE
RUNTIME_UPDATABLE = True
ALLOW_USER_COMPONENTS = True
# Not implementing Reload Policy here since the reload Policy is set to always
RELOAD_POLICY = MetaflowCard.RELOAD_POLICY_ONCHANGE
type = "test_component_refresh_card"
def __init__(self, components=[], **kwargs):
self._components = components
def render(self, task) -> str:
# Calling `render`/`render_runtime` wont require the `data` object
return self.HTML_TEMPLATE.replace(
"[REPLACE_CONTENT_HERE]", json.dumps(self._components)
).replace("[PATHSPEC]", task.pathspec)
def render_runtime(self, task, data):
return self.render(task)
def refresh(self, task, data):
# Govers the information passed in the data update
return data["components"]
def reload_content_token(self, task, data):
if task.finished:
return "final"
return "runtime-%s" % _component_values_to_hash(data["components"])
|
TestRefreshComponentCard
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/query/query_transform/base.py
|
{
"start": 4853,
"end": 6873
}
|
class ____(BaseQueryTransform):
"""
Decompose query transform.
Decomposes query into a subquery given the current index struct.
Performs a single step transformation.
Args:
llm_predictor (Optional[LLM]): LLM for generating
hypothetical documents
"""
def __init__(
self,
llm: Optional[LLM] = None,
decompose_query_prompt: Optional[DecomposeQueryTransformPrompt] = None,
verbose: bool = False,
) -> None:
"""Init params."""
super().__init__()
self._llm = llm or Settings.llm
self._decompose_query_prompt: BasePromptTemplate = (
decompose_query_prompt or DEFAULT_DECOMPOSE_QUERY_TRANSFORM_PROMPT
)
self.verbose = verbose
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"decompose_query_prompt": self._decompose_query_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "decompose_query_prompt" in prompts:
self._decompose_query_prompt = prompts["decompose_query_prompt"]
def _run(self, query_bundle: QueryBundle, metadata: Dict) -> QueryBundle:
"""Run query transform."""
# currently, just get text from the index structure
index_summary = cast(str, metadata.get("index_summary", "None"))
# given the text from the index, we can use the query bundle to generate
# a new query bundle
query_str = query_bundle.query_str
new_query_str = self._llm.predict(
self._decompose_query_prompt,
query_str=query_str,
context_str=index_summary,
)
if self.verbose:
print_text(f"> Current query: {query_str}\n", color="yellow")
print_text(f"> New query: {new_query_str}\n", color="pink")
return QueryBundle(
query_str=new_query_str,
custom_embedding_strs=[new_query_str],
)
|
DecomposeQueryTransform
|
python
|
google__jax
|
tests/pallas/mgpu_collective_matmul_test.py
|
{
"start": 1294,
"end": 5407
}
|
class ____(jtu.JaxTestCase):
  """Multi-process tests for the Mosaic GPU collective (all-gather) matmul.

  Prerequisites enforced in ``setUp``: a CUDA device with compute capability
  exactly 9.0 (sm90a / Hopper), NVSHMEM-backed cross-device collectives, and
  more than one JAX process; otherwise the tests are skipped.
  """
  def setUp(self):
    super().setUp()
    # Skip (or fail loudly when FAIL_ON_NVSHMEM_UNAVAILABLE is set) if any
    # hardware / library prerequisite is missing.
    if collective_matmul_mgpu is None:
      self.skipTest("Mosaic GPU not available.")
    if (not jtu.test_device_matches(["cuda"]) or
        not jtu.is_cuda_compute_capability_equal("9.0")):
      self.skipTest("Only works on GPU with capability sm90a")
    if not mgpu.supports_cross_device_collectives():
      if "FAIL_ON_NVSHMEM_UNAVAILABLE" in os.environ:
        raise ValueError("NVSHMEM library unavailable.")
      else:
        self.skipTest("NVSHMEM library unavailable.")
    if jax.process_count() == 1:
      self.skipTest("Test requires multiple processes.")
    if os.environ.get("XLA_PYTHON_CLIENT_ALLOCATOR", "") == "platform":
      self.skipTest("NVSHMEM doesn't work with the platform allocator.")
    # Route pallas_call through the Mosaic GPU backend for the whole test.
    self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True))
    # Single explicitly-sharded mesh axis "x" spanning all devices.
    num_devices = jax.device_count()
    mesh = jax.make_mesh(
        (num_devices,), ("x",), axis_types=(jax.sharding.AxisType.Explicit,)
    )
    self.enter_context(jax.set_mesh(mesh))
  @parameterized.product(
      m_shard=(3072,),
      n_shard=(256, 576),
      k=(4096,),
      tile_m=(64, 128, 192),
      tile_n=(64, 128, 192),
      tile_k=(64, 128),
      grid_minor_dim=(collective_matmul_mgpu.MatmulDimension.N,),
      grid_tile_width=(1,),
      wg_dimension=(collective_matmul_mgpu.MatmulDimension.N,),
      max_concurrent_steps=(2, 4),
      dtype=(jnp.bfloat16,),
  )
  def test_all_gather_lhs_matmul(
      self,
      m_shard,
      n_shard,
      k,
      tile_m,
      tile_n,
      tile_k,
      max_concurrent_steps,
      grid_minor_dim,
      grid_tile_width,
      wg_dimension,
      dtype,
  ):
    """Checks the fused all-gather + matmul kernel against lax.all_gather @ y."""
    num_devices = jax.device_count()
    # Estimate SMEM usage for this tile configuration and skip configurations
    # that cannot fit. NOTE(review): the doubled tile along the warp-group
    # dimension and the 228000-byte budget presumably mirror the kernel's
    # internal layout on sm90a -- confirm against collective_matmul_mgpu.
    epi_tile_size = 64 * 64
    num_epi_tiles = tile_m * tile_n // epi_tile_size
    cta_tile_m = tile_m * (1 + (wg_dimension == collective_matmul_mgpu.MatmulDimension.M))
    cta_tile_n = tile_n * (1 + (wg_dimension == collective_matmul_mgpu.MatmulDimension.N))
    if (
        (cta_tile_m + cta_tile_n) * tile_k * max_concurrent_steps
        + 2 * min(2, num_epi_tiles) * epi_tile_size
    ) * 2 > 228000:
      self.skipTest("Tile too big to fit into SMEM")
    if n_shard % cta_tile_n:
      self.skipTest("n_shard must be divisible by block_n for now.")
    if m_shard % cta_tile_m:
      self.skipTest("m_shard must be divisible by block_m for now.")
    # LHS is sharded along M (rows), RHS along N (columns).
    k1, k2 = random.split(random.key(1234), num=2)
    lhs = random.normal(k1, (num_devices * m_shard, k), dtype)
    rhs = random.normal(k2, (k, num_devices * n_shard), dtype)
    lhs = jax.sharding.reshard(lhs, P("x", None))
    rhs = jax.sharding.reshard(rhs, P(None, "x"))
    def run(body):
      # Run `body` under shard_map, then gather the N-sharded result so it can
      # be compared on the host.
      out = jax.jit(
          jax.shard_map(body, out_specs=P(None, "x"), check_vma=False)
      )(lhs, rhs)
      # Gather output, for NumPy comparison on the host.
      out = jax.shard_map(
          lambda x: lax.all_gather(x, "x", axis=1, tiled=True),
          out_specs=P(None), check_vma=False,
      )(out)
      return out
    # Reference: materialize the full LHS with all_gather, then a plain matmul.
    ref_out = run(lambda x, y: lax.all_gather(x, "x", axis=0, tiled=True) @ y)
    config = collective_matmul_mgpu.TuningConfig(
        tile_m=tile_m,
        tile_n=tile_n,
        tile_k=tile_k,
        max_concurrent_steps=max_concurrent_steps,
        grid_minor_dim=grid_minor_dim,
        grid_tile_width=grid_tile_width,
        wg_dimension=wg_dimension,
    )
    out = run(
        functools.partial(
            collective_matmul_mgpu.all_gather_lhs_matmul,
            axis_name="x",
            config=config,
            dtype=dtype,
        )
    )
    np.testing.assert_allclose(out, ref_out)
if __name__ == "__main__":
  # This test doesn't work with the platform allocator, so we override it
  # if it's ran alone. If it's part of a larger test suite and the platform
  # allocator is used, setUp will skip the test.
  # NOTE(review): the tiny memory fraction presumably leaves headroom for
  # NVSHMEM's own GPU allocations -- confirm.
  os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.01"
  os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "default"
  # --xla_gpu_autotune_level=0 turns off XLA GPU autotuning.
  os.environ["XLA_FLAGS"] = (
      os.environ.get("XLA_FLAGS", "") + " --xla_gpu_autotune_level=0"
  )
  jt_multiprocess.main()
|
CollectiveMatmulTestCase
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_clear_expired_snoozes.py
|
{
"start": 437,
"end": 3571
}
|
class ____(TestCase):
    """Tests for the ``clear_expired_snoozes`` periodic task."""

    def test_task_persistent_name(self) -> None:
        # The task name is persisted in the queue, so it must never change.
        assert clear_expired_snoozes.name == "sentry.tasks.clear_expired_snoozes"

    @patch("sentry.signals.issue_unignored.send_robust")
    def test_simple(self, send_robust: MagicMock) -> None:
        # One group whose snooze already expired, one whose snooze is active.
        expired_group = self.create_group(status=GroupStatus.IGNORED)
        expired_snooze = GroupSnooze.objects.create(
            group=expired_group, until=timezone.now() - timedelta(minutes=1)
        )
        active_group = self.create_group(status=GroupStatus.IGNORED)
        active_snooze = GroupSnooze.objects.create(
            group=active_group, until=timezone.now() + timedelta(minutes=1)
        )

        clear_expired_snoozes()

        expired_group.refresh_from_db()
        active_group.refresh_from_db()

        # The expired snooze is lifted and its group becomes ongoing again.
        assert expired_group.status == GroupStatus.UNRESOLVED
        assert expired_group.substatus == GroupSubStatus.ONGOING
        # Check if unexpired snooze got cleared
        assert active_group.status == GroupStatus.IGNORED
        assert not GroupSnooze.objects.filter(id=expired_snooze.id).exists()
        assert GroupSnooze.objects.filter(id=active_snooze.id).exists()
        assert GroupHistory.objects.filter(
            group=expired_group, status=GroupHistoryStatus.ONGOING
        ).exists()
        assert not GroupHistory.objects.filter(
            group=active_group, status=GroupHistoryStatus.ONGOING
        ).exists()
        assert send_robust.called

    @patch("sentry.signals.issue_unignored.send_robust")
    def test_simple_with_escalating_issues(self, send_robust: MagicMock) -> None:
        expired_group = self.create_group(status=GroupStatus.IGNORED)
        expired_snooze = GroupSnooze.objects.create(
            group=expired_group, until=timezone.now() - timedelta(minutes=1)
        )
        active_group = self.create_group(status=GroupStatus.IGNORED)
        active_snooze = GroupSnooze.objects.create(
            group=active_group, until=timezone.now() + timedelta(minutes=1)
        )

        clear_expired_snoozes()

        expired_group.refresh_from_db()
        active_group.refresh_from_db()

        assert expired_group.status == GroupStatus.UNRESOLVED
        assert expired_group.substatus == GroupSubStatus.ONGOING
        # Check if unexpired snooze got cleared
        assert active_group.status == GroupStatus.IGNORED
        assert not GroupSnooze.objects.filter(id=expired_snooze.id).exists()
        assert GroupSnooze.objects.filter(id=active_snooze.id).exists()
        assert GroupHistory.objects.filter(
            group=expired_group, status=GroupHistoryStatus.ONGOING
        ).exists()
        assert not GroupHistory.objects.filter(
            group=active_group, status=GroupHistoryStatus.UNIGNORED
        ).exists()
        assert send_robust.called

    def test_resolved_group(self) -> None:
        resolved_group = self.create_group(status=GroupStatus.RESOLVED)
        stale_snooze = GroupSnooze.objects.create(
            group=resolved_group, until=timezone.now() - timedelta(minutes=1)
        )

        clear_expired_snoozes()

        resolved_group.refresh_from_db()
        assert resolved_group.status == GroupStatus.RESOLVED
        # Validate that even though the group wasn't modified, we still remove the snooze
        assert not GroupSnooze.objects.filter(id=stale_snooze.id).exists()
|
ClearExpiredSnoozesTest
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/auto_suggest.py
|
{
"start": 4998,
"end": 5798
}
|
class ____(AutoSuggest):
    """
    Auto-suggest class that can dynamically return any `AutoSuggest` instance.

    :param get_auto_suggest: Callable that returns an
        :class:`.AutoSuggest` instance (or `None` to disable suggestions).
    """

    def __init__(self, get_auto_suggest: Callable[[], AutoSuggest | None]) -> None:
        self.get_auto_suggest = get_auto_suggest

    def get_suggestion(self, buff: Buffer, document: Document) -> Suggestion | None:
        # Fall back to a no-op suggester when the callable returns None.
        auto_suggest = self.get_auto_suggest() or DummyAutoSuggest()
        return auto_suggest.get_suggestion(buff, document)

    async def get_suggestion_async(
        self, buff: Buffer, document: Document
    ) -> Suggestion | None:
        auto_suggest = self.get_auto_suggest() or DummyAutoSuggest()
        return await auto_suggest.get_suggestion_async(buff, document)
|
DynamicAutoSuggest
|
python
|
langchain-ai__langchain
|
libs/partners/qdrant/langchain_qdrant/vectorstores.py
|
{
"start": 1981,
"end": 94727
}
|
class ____(VectorStore):
"""`Qdrant` vector store.
```python
from qdrant_client import QdrantClient
from langchain_qdrant import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
```
"""
CONTENT_KEY: str = "page_content"
METADATA_KEY: str = "metadata"
VECTOR_NAME: str | None = None
def __init__(
self,
client: Any,
collection_name: str,
embeddings: Embeddings | None = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: str | None = VECTOR_NAME,
async_client: Any | None = None,
embedding_function: Callable | None = None, # deprecated
) -> None:
"""Initialize with necessary components."""
if not isinstance(client, QdrantClient):
msg = (
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
raise TypeError(msg)
if async_client is not None and not isinstance(async_client, AsyncQdrantClient):
msg = (
f"async_client should be an instance of qdrant_client.AsyncQdrantClient"
f"got {type(async_client)}"
)
raise ValueError(msg)
if embeddings is None and embedding_function is None:
msg = "`embeddings` value can't be None. Pass `embeddings` instance."
raise ValueError(msg)
if embeddings is not None and embedding_function is not None:
msg = (
"Both `embeddings` and `embedding_function` are passed. "
"Use `embeddings` only."
)
raise ValueError(msg)
self._embeddings = embeddings
self._embeddings_function = embedding_function
self.client: QdrantClient = client
self.async_client: AsyncQdrantClient | None = async_client
self.collection_name = collection_name
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
self.vector_name = vector_name or self.VECTOR_NAME
if embedding_function is not None:
warnings.warn(
"Using `embedding_function` is deprecated. "
"Pass `Embeddings` instance to `embeddings` instead.",
stacklevel=2,
)
if not isinstance(embeddings, Embeddings):
warnings.warn(
"`embeddings` should be an instance of `Embeddings`."
"Using `embeddings` as `embedding_function` which is deprecated",
stacklevel=2,
)
self._embeddings_function = embeddings
self._embeddings = None
self.distance_strategy = distance_strategy.upper()
    @property
    def embeddings(self) -> Embeddings | None:
        """The `Embeddings` instance, or `None` when the deprecated
        `embedding_function` callable is used instead."""
        return self._embeddings
def add_texts(
self,
texts: Iterable[str],
metadatas: list[dict] | None = None,
ids: Sequence[str] | None = None,
batch_size: int = 64,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the `VectorStore`.
Args:
texts: Iterable of strings to add to the `VectorStore`.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: `64`
**kwargs: Additional keyword arguments.
Returns:
List of ids from adding the texts into the `VectorStore`.
"""
added_ids = []
for batch_ids, points in self._generate_rest_batches(
texts, metadatas, ids, batch_size
):
self.client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
@sync_call_fallback
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: list[dict] | None = None,
ids: Sequence[str] | None = None,
batch_size: int = 64,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the `VectorStore`.
Args:
texts: Iterable of strings to add to the `VectorStore`.
metadatas: Optional list of metadatas associated with the texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
batch_size:
How many vectors upload per-request.
Default: `64`
**kwargs: Additional keyword arguments.
Returns:
List of ids from adding the texts into the `VectorStore`.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
msg = "QdrantLocal cannot interoperate with sync and async clients"
raise NotImplementedError(msg)
added_ids = []
async for batch_ids, points in self._agenerate_rest_batches(
texts, metadatas, ids, batch_size
):
await self.async_client.upsert(
collection_name=self.collection_name, points=points, **kwargs
)
added_ids.extend(batch_ids)
return added_ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of `Document` objects most similar to the query.
"""
results = self.similarity_search_with_score(
query,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
**kwargs: Additional keyword arguments.
Returns:
List of `Document` objects most similar to the query.
"""
results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
return self.similarity_search_with_score_by_vector(
self._embed_query(query),
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
query_embedding = await self._aembed_query(query)
return await self.asimilarity_search_with_score_by_vector(
query_embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def similarity_search_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of `Document` objects most similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def asimilarity_search_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.Search().
Returns:
List of `Document` objects most similar to the query.
"""
results = await self.asimilarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
search_params=search_params,
offset=offset,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def similarity_search_with_score_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
stacklevel=2,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # LangChain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result,
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
result.score,
)
for result in results
]
@sync_call_fallback
async def asimilarity_search_with_score_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
offset: int = 0,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
filter: Filter by metadata.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to
AsyncQdrantClient.Search().
Returns:
List of documents most similar to the query text and distance for each.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
msg = "QdrantLocal cannot interoperate with sync and async clients"
raise NotImplementedError(msg)
if filter is not None and isinstance(filter, dict):
warnings.warn(
"Using dict as a `filter` is deprecated. Please use qdrant-client "
"filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/",
DeprecationWarning,
stacklevel=2,
)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, embedding) # type: ignore[assignment]
results = await self.async_client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=qdrant_filter,
search_params=search_params,
limit=k,
offset=offset,
with_payload=True,
with_vectors=False, # LangChain does not expect vectors to be returned
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return [
(
self._document_from_scored_point(
result,
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree
of diversity among the results with `0` corresponding to maximum
diversity and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of `Document` objects selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
@sync_call_fallback
async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree
of diversity among the results with `0` corresponding
to maximum diversity and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- `int` - number of replicas to query, values should present in all
queried replicas
- `'majority'` - query all replicas, but return values present in the
majority of replicas
- `'quorum'` - query the majority of replicas, return values present in
all of them
- `'all'` - query all replicas, and return values present in all
replicas
**kwargs:
Any other named arguments to pass through to
`AsyncQdrantClient.Search()`.
Returns:
List of `Document` objects selected by maximal marginal relevance.
"""
query_embedding = await self._aembed_query(query)
return await self.amax_marginal_relevance_search_by_vector(
query_embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree
of diversity among the results with `0` corresponding
to maximum diversity and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
e.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- `int` - number of replicas to query, values should present in all
queried replicas
- `'majority'` - query all replicas, but return values present in the
majority of replicas
- `'quorum'` - query the majority of replicas, return values present in
all of them
- `'all'` - query all replicas, and return values present in all
replicas
**kwargs:
Any other named arguments to pass through to `QdrantClient.search()`
Returns:
List of `Document` objects selected by maximal marginal relevance.
"""
results = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
@sync_call_fallback
async def amax_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of `Document` objects to return.
fetch_k: Number of `Document` to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree
of diversity among the results with `0` corresponding
to maximum diversity and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- `int` - number of replicas to query, values should present in all
queried replicas
- `'majority'` - query all replicas, but return values present in the
majority of replicas
- `'quorum'` - query the majority of replicas, return values present in
all of them
- `'all'` - query all replicas, and return values present in all
replicas
**kwargs:
Any other named arguments to pass through to
`AsyncQdrantClient.Search()`.
Returns:
List of `Document` objects selected by maximal marginal relevance and
distance for each.
"""
results = await self.amax_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
search_params=search_params,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
return list(map(itemgetter(0), results))
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree of
diversity among the results with `0` corresponding to maximum diversity
and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of `Document` objects selected by maximal marginal relevance and
distance for each.
"""
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
results = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=search_params,
limit=fetch_k,
with_payload=True,
with_vectors=True,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
embeddings = [
result.vector.get(self.vector_name) # type: ignore[index, union-attr]
if self.vector_name is not None
else result.vector
for result in results
]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point(
results[i],
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
results[i].score,
)
for i in mmr_selected
]
@sync_call_fallback
async def amax_marginal_relevance_search_with_score_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: MetadataFilter | None = None, # noqa: A002
search_params: models.SearchParams | None = None,
score_threshold: float | None = None,
consistency: models.ReadConsistency | None = None,
**kwargs: Any,
) -> list[tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between `0` and `1` that determines the degree of
diversity among the results with `0` corresponding to maximum diversity
and `1` to minimum diversity.
filter: Filter by metadata.
search_params: Additional search params.
score_threshold: Define a minimal score threshold for the result.
consistency: Read consistency of the search.
**kwargs: Additional keyword arguments.
Returns:
List of `Document` objects selected by maximal marginal relevance and
distance for each.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
msg = "QdrantLocal cannot interoperate with sync and async clients"
raise NotImplementedError(msg)
query_vector = embedding
if self.vector_name is not None:
query_vector = (self.vector_name, query_vector) # type: ignore[assignment]
results = await self.async_client.search(
collection_name=self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=search_params,
limit=fetch_k,
with_payload=True,
with_vectors=True,
score_threshold=score_threshold,
consistency=consistency,
**kwargs,
)
embeddings = [
result.vector.get(self.vector_name) # type: ignore[index, union-attr]
if self.vector_name is not None
else result.vector
for result in results
]
mmr_selected = maximal_marginal_relevance(
np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
)
return [
(
self._document_from_scored_point(
results[i],
self.collection_name,
self.content_payload_key,
self.metadata_payload_key,
),
results[i].score,
)
for i in mmr_selected
]
def delete(self, ids: list[str] | None = None, **kwargs: Any) -> bool | None:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
True if deletion is successful, `False` otherwise.
"""
result = self.client.delete(
collection_name=self.collection_name,
points_selector=ids,
)
return result.status == models.UpdateStatus.COMPLETED
@sync_call_fallback
async def adelete(self, ids: list[str] | None = None, **kwargs: Any) -> bool | None:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
True if deletion is successful, `False` otherwise.
"""
if self.async_client is None or isinstance(
self.async_client._client, AsyncQdrantLocal
):
msg = "QdrantLocal cannot interoperate with sync and async clients"
raise NotImplementedError(msg)
result = await self.async_client.delete(
collection_name=self.collection_name,
points_selector=ids,
)
return result.status == models.UpdateStatus.COMPLETED
@classmethod
def from_texts(
cls: type[Qdrant],
texts: list[str],
embedding: Embeddings,
metadatas: list[dict] | None = None,
ids: Sequence[str] | None = None,
location: str | None = None,
url: str | None = None,
port: int | None = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False, # noqa: FBT001, FBT002
https: bool | None = None, # noqa: FBT001
api_key: str | None = None,
prefix: str | None = None,
timeout: int | None = None,
host: str | None = None,
path: str | None = None,
collection_name: str | None = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: str | None = VECTOR_NAME,
batch_size: int = 64,
shard_number: int | None = None,
replication_factor: int | None = None,
write_consistency_factor: int | None = None,
on_disk_payload: bool | None = None, # noqa: FBT001
hnsw_config: models.HnswConfigDiff | None = None,
optimizers_config: models.OptimizersConfigDiff | None = None,
wal_config: models.WalConfigDiff | None = None,
quantization_config: models.QuantizationConfig | None = None,
init_from: models.InitFrom | None = None,
on_disk: bool | None = None, # noqa: FBT001
force_recreate: bool = False, # noqa: FBT001, FBT002
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If ':memory:' - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "scheme | None, host, port | None,
prefix | None".
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gPRC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key:
API key for authentication in Qdrant Cloud. Default: None
Can also be set via environment variable `QDRANT_API_KEY`.
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Have effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Have effect only in distributed mode.
on_disk_payload:
If true - point`s payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
on_disk:
If true - vectors will be stored on disk, reducing memory usage.
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
```python
from langchain_qdrant import Qdrant
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
```
"""
qdrant = cls.construct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
qdrant.add_texts(texts, metadatas, ids, batch_size)
return qdrant
@classmethod
def from_existing_collection(
cls: type[Qdrant],
embedding: Embeddings,
path: str | None = None,
collection_name: str | None = None,
location: str | None = None,
url: str | None = None,
port: int | None = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False, # noqa: FBT001, FBT002
https: bool | None = None, # noqa: FBT001
api_key: str | None = None,
prefix: str | None = None,
timeout: int | None = None,
host: str | None = None,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
distance_strategy: str = "COSINE",
vector_name: str | None = VECTOR_NAME,
**kwargs: Any,
) -> Qdrant:
"""Get instance of an existing Qdrant collection.
This method will return the instance of the store without inserting any new
embeddings.
"""
if collection_name is None:
msg = "Must specify collection_name. Received None."
raise ValueError(msg)
client, async_client = cls._generate_clients(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
return cls(
client=client,
async_client=async_client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_strategy,
vector_name=vector_name,
)
@classmethod
@sync_call_fallback
async def afrom_texts(
cls: type[Qdrant],
texts: list[str],
embedding: Embeddings,
metadatas: list[dict] | None = None,
ids: Sequence[str] | None = None,
location: str | None = None,
url: str | None = None,
port: int | None = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False, # noqa: FBT001, FBT002
https: bool | None = None, # noqa: FBT001
api_key: str | None = None,
prefix: str | None = None,
timeout: int | None = None,
host: str | None = None,
path: str | None = None,
collection_name: str | None = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: str | None = VECTOR_NAME,
batch_size: int = 64,
shard_number: int | None = None,
replication_factor: int | None = None,
write_consistency_factor: int | None = None,
on_disk_payload: bool | None = None, # noqa: FBT001
hnsw_config: models.HnswConfigDiff | None = None,
optimizers_config: models.OptimizersConfigDiff | None = None,
wal_config: models.WalConfigDiff | None = None,
quantization_config: models.QuantizationConfig | None = None,
init_from: models.InitFrom | None = None,
on_disk: bool | None = None, # noqa: FBT001
force_recreate: bool = False, # noqa: FBT001, FBT002
**kwargs: Any,
) -> Qdrant:
"""Construct Qdrant wrapper from a list of texts.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
ids:
Optional list of ids to associate with the texts. Ids have to be
uuid-like strings.
location:
If ':memory:' - use in-memory Qdrant instance.
If `str` - use it as a `url` parameter.
If `None` - fallback to relying on `host` and `port` parameters.
url: either host or str of "scheme | None, host, port | None,
prefix | None".
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If true - use gPRC interface whenever possible in custom methods.
Default: False
https: If true - use HTTPS(SSL) protocol. Default: None
api_key:
API key for authentication in Qdrant Cloud. Default: None
Can also be set via environment variable `QDRANT_API_KEY`.
prefix:
If not None - add prefix to the REST URL path.
Example: service/v1 will result in
http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
Default: None
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: None
path:
Path in which the vectors will be stored while using local mode.
Default: None
collection_name:
Name of the Qdrant collection to be used. If not provided,
it will be created randomly. Default: None
distance_func:
Distance function. One of: "Cosine" / "Euclid" / "Dot".
Default: "Cosine"
content_payload_key:
A payload key used to store the content of the document.
Default: "page_content"
metadata_payload_key:
A payload key used to store the metadata of the document.
Default: "metadata"
vector_name:
Name of the vector to be used internally in Qdrant.
Default: None
batch_size:
How many vectors upload per-request.
Default: 64
shard_number: Number of shards in collection. Default is 1, minimum is 1.
replication_factor:
Replication factor for collection. Default is 1, minimum is 1.
Defines how many copies of each shard will be created.
Have effect only in distributed mode.
write_consistency_factor:
Write consistency factor for collection. Default is 1, minimum is 1.
Defines how many replicas should apply the operation for us to consider
it successful. Increasing this number will make the collection more
resilient to inconsistencies, but will also make it fail if not enough
replicas are available.
Does not have any performance impact.
Have effect only in distributed mode.
on_disk_payload:
If true - point`s payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
hnsw_config: Params for HNSW index
optimizers_config: Params for optimizer
wal_config: Params for Write-Ahead-Log
quantization_config:
Params for quantization, if None - quantization will be disabled
init_from:
Use data stored in another collection to initialize this collection
on_disk:
If true - point`s payload will not be stored in memory.
It will be read from the disk every time it is requested.
This setting saves RAM by (slightly) increasing the response time.
Note: those payload values that are involved in filtering and are
indexed - remain in RAM.
force_recreate:
Force recreating the collection
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Creates embeddings, one for each text
2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
```python
from langchain_qdrant import Qdrant
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = await Qdrant.afrom_texts(texts, embeddings, "localhost")
```
"""
qdrant = await cls.aconstruct_instance(
texts,
embedding,
location,
url,
port,
grpc_port,
prefer_grpc,
https,
api_key,
prefix,
timeout,
host,
path,
collection_name,
distance_func,
content_payload_key,
metadata_payload_key,
vector_name,
shard_number,
replication_factor,
write_consistency_factor,
on_disk_payload,
hnsw_config,
optimizers_config,
wal_config,
quantization_config,
init_from,
on_disk,
force_recreate,
**kwargs,
)
await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
return qdrant
    @classmethod
    def construct_instance(
        cls: type[Qdrant],
        texts: list[str],
        embedding: Embeddings,
        location: str | None = None,
        url: str | None = None,
        port: int | None = 6333,
        grpc_port: int = 6334,
        prefer_grpc: bool = False,  # noqa: FBT001, FBT002
        https: bool | None = None,  # noqa: FBT001
        api_key: str | None = None,
        prefix: str | None = None,
        timeout: int | None = None,
        host: str | None = None,
        path: str | None = None,
        collection_name: str | None = None,
        distance_func: str = "Cosine",
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        vector_name: str | None = VECTOR_NAME,
        shard_number: int | None = None,
        replication_factor: int | None = None,
        write_consistency_factor: int | None = None,
        on_disk_payload: bool | None = None,  # noqa: FBT001
        hnsw_config: models.HnswConfigDiff | None = None,
        optimizers_config: models.OptimizersConfigDiff | None = None,
        wal_config: models.WalConfigDiff | None = None,
        quantization_config: models.QuantizationConfig | None = None,
        init_from: models.InitFrom | None = None,
        on_disk: bool | None = None,  # noqa: FBT001
        force_recreate: bool = False,  # noqa: FBT001, FBT002
        **kwargs: Any,
    ) -> Qdrant:
        """Create a `Qdrant` instance, creating the collection if needed.

        Embeds a single text to discover the vector size, creates the sync
        and async clients, and then either validates an existing collection's
        vector configuration against the requested one or creates a new
        collection with the requested configuration.

        Raises:
            QdrantException: If an existing collection is incompatible with
                the requested vector name, dimensionality, or distance
                function (unless `force_recreate` is set).
            ValueError: If the existing collection's vector configuration is
                not a `models.VectorParams` instance.
        """
        # Just do a single quick embedding to get vector size
        partial_embeddings = embedding.embed_documents(texts[:1])
        vector_size = len(partial_embeddings[0])
        collection_name = collection_name or uuid.uuid4().hex
        distance_func = distance_func.upper()
        client, async_client = cls._generate_clients(
            location=location,
            url=url,
            port=port,
            grpc_port=grpc_port,
            prefer_grpc=prefer_grpc,
            https=https,
            api_key=api_key,
            prefix=prefix,
            timeout=timeout,
            host=host,
            path=path,
            **kwargs,
        )
        collection_exists = client.collection_exists(collection_name)
        # Drop and recreate from scratch when the caller explicitly asked.
        if collection_exists and force_recreate:
            client.delete_collection(collection_name)
            collection_exists = False
        if collection_exists:
            # Get the vector configuration of the existing collection and vector, if it
            # was specified. If the old configuration does not match the current one,
            # an exception is raised.
            collection_info = client.get_collection(collection_name=collection_name)
            current_vector_config = collection_info.config.params.vectors
            if isinstance(current_vector_config, dict) and vector_name is not None:
                if vector_name not in current_vector_config:
                    msg = (
                        f"Existing Qdrant collection {collection_name} does not "
                        f"contain vector named {vector_name}. Did you mean one of the "
                        f"existing vectors: {', '.join(current_vector_config.keys())}? "
                        f"If you want to recreate the collection, set `force_recreate` "
                        f"parameter to `True`."
                    )
                    raise QdrantException(msg)
                current_vector_config = current_vector_config.get(vector_name)  # type: ignore[assignment]
            elif isinstance(current_vector_config, dict) and vector_name is None:
                msg = (
                    f"Existing Qdrant collection {collection_name} uses named vectors. "
                    f"If you want to reuse it, please set `vector_name` to any of the "
                    f"existing named vectors: "
                    f"{', '.join(current_vector_config.keys())}."
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
                raise QdrantException(msg)
            elif (
                not isinstance(current_vector_config, dict) and vector_name is not None
            ):
                msg = (
                    f"Existing Qdrant collection {collection_name} doesn't use named "
                    f"vectors. If you want to reuse it, please set `vector_name` to "
                    f"`None`. If you want to recreate the collection, set "
                    f"`force_recreate` parameter to `True`."
                )
                raise QdrantException(msg)
            if not isinstance(current_vector_config, models.VectorParams):
                msg = (
                    "Expected current_vector_config to be an instance of "
                    f"models.VectorParams, but got {type(current_vector_config)}"
                )
                raise ValueError(msg)
            # Check if the vector configuration has the same dimensionality.
            if current_vector_config.size != vector_size:
                msg = (
                    f"Existing Qdrant collection is configured for vectors with "
                    f"{current_vector_config.size} "
                    f"dimensions. Selected embeddings are {vector_size}-dimensional. "
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
                raise QdrantException(msg)
            # Distance names are compared case-insensitively (both upper-cased).
            current_distance_func = (
                current_vector_config.distance.name.upper()  # type: ignore[union-attr]
            )
            if current_distance_func != distance_func:
                msg = (
                    f"Existing Qdrant collection is configured for "
                    f"{current_distance_func} similarity, but requested "
                    f"{distance_func}. Please set `distance_func` parameter to "
                    f"`{current_distance_func}` if you want to reuse it. "
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
                raise QdrantException(msg)
        else:
            vectors_config = models.VectorParams(
                size=vector_size,
                distance=models.Distance[distance_func],
                on_disk=on_disk,
            )
            # If vector name was provided, we're going to use the named vectors feature
            # with just a single vector.
            if vector_name is not None:
                vectors_config = {  # type: ignore[assignment]
                    vector_name: vectors_config,
                }
            client.create_collection(
                collection_name=collection_name,
                vectors_config=vectors_config,
                shard_number=shard_number,
                replication_factor=replication_factor,
                write_consistency_factor=write_consistency_factor,
                on_disk_payload=on_disk_payload,
                hnsw_config=hnsw_config,
                optimizers_config=optimizers_config,
                wal_config=wal_config,
                quantization_config=quantization_config,
                init_from=init_from,
                timeout=timeout,  # type: ignore[arg-type]
            )
        return cls(
            client=client,
            collection_name=collection_name,
            embeddings=embedding,
            content_payload_key=content_payload_key,
            metadata_payload_key=metadata_payload_key,
            distance_strategy=distance_func,
            vector_name=vector_name,
            async_client=async_client,
        )
@classmethod
async def aconstruct_instance(
cls: type[Qdrant],
texts: list[str],
embedding: Embeddings,
location: str | None = None,
url: str | None = None,
port: int | None = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False, # noqa: FBT001, FBT002
https: bool | None = None, # noqa: FBT001
api_key: str | None = None,
prefix: str | None = None,
timeout: int | None = None,
host: str | None = None,
path: str | None = None,
collection_name: str | None = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
vector_name: str | None = VECTOR_NAME,
shard_number: int | None = None,
replication_factor: int | None = None,
write_consistency_factor: int | None = None,
on_disk_payload: bool | None = None, # noqa: FBT001
hnsw_config: models.HnswConfigDiff | None = None,
optimizers_config: models.OptimizersConfigDiff | None = None,
wal_config: models.WalConfigDiff | None = None,
quantization_config: models.QuantizationConfig | None = None,
init_from: models.InitFrom | None = None,
on_disk: bool | None = None, # noqa: FBT001
force_recreate: bool = False, # noqa: FBT001, FBT002
**kwargs: Any,
) -> Qdrant:
# Just do a single quick embedding to get vector size
partial_embeddings = await embedding.aembed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client, async_client = cls._generate_clients(
location=location,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
path=path,
**kwargs,
)
collection_exists = client.collection_exists(collection_name)
if collection_exists and force_recreate:
client.delete_collection(collection_name)
collection_exists = False
if collection_exists:
# Get the vector configuration of the existing collection and vector, if it
# was specified. If the old configuration does not match the current one,
# an exception is raised.
collection_info = client.get_collection(collection_name=collection_name)
current_vector_config = collection_info.config.params.vectors
if isinstance(current_vector_config, dict) and vector_name is not None:
if vector_name not in current_vector_config:
msg = (
f"Existing Qdrant collection {collection_name} does not "
f"contain vector named {vector_name}. Did you mean one of the "
f"existing vectors: {', '.join(current_vector_config.keys())}? "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
raise QdrantException(msg)
current_vector_config = current_vector_config.get(vector_name) # type: ignore[assignment]
elif isinstance(current_vector_config, dict) and vector_name is None:
msg = (
f"Existing Qdrant collection {collection_name} uses named vectors. "
f"If you want to reuse it, please set `vector_name` to any of the "
f"existing named vectors: "
f"{', '.join(current_vector_config.keys())}."
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
raise QdrantException(msg)
elif (
not isinstance(current_vector_config, dict) and vector_name is not None
):
msg = (
f"Existing Qdrant collection {collection_name} doesn't use named "
f"vectors. If you want to reuse it, please set `vector_name` to "
f"`None`. If you want to recreate the collection, set "
f"`force_recreate` parameter to `True`."
)
raise QdrantException(msg)
if not isinstance(current_vector_config, models.VectorParams):
msg = (
"Expected current_vector_config to be an instance of "
f"models.VectorParams, but got {type(current_vector_config)}"
)
raise ValueError(msg)
# Check if the vector configuration has the same dimensionality.
if current_vector_config.size != vector_size:
msg = (
f"Existing Qdrant collection is configured for vectors with "
f"{current_vector_config.size} "
f"dimensions. Selected embeddings are {vector_size}-dimensional. "
f"If you want to recreate the collection, set `force_recreate` "
f"parameter to `True`."
)
raise QdrantException(msg)
current_distance_func = (
current_vector_config.distance.name.upper() # type: ignore[union-attr]
)
if current_distance_func != distance_func:
msg = (
f"Existing Qdrant collection is configured for "
f"{current_vector_config.distance} " # type: ignore[union-attr]
f"similarity. Please set `distance_func` parameter to "
f"`{distance_func}` if you want to reuse it. If you want to "
f"recreate the collection, set `force_recreate` parameter to "
f"`True`."
)
raise QdrantException(msg)
else:
vectors_config = models.VectorParams(
size=vector_size,
distance=models.Distance[distance_func],
on_disk=on_disk,
)
# If vector name was provided, we're going to use the named vectors feature
# with just a single vector.
if vector_name is not None:
vectors_config = { # type: ignore[assignment]
vector_name: vectors_config,
}
client.create_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
init_from=init_from,
timeout=timeout, # type: ignore[arg-type]
)
return cls(
client=client,
collection_name=collection_name,
embeddings=embedding,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
distance_strategy=distance_func,
vector_name=vector_name,
async_client=async_client,
)
@staticmethod
def _cosine_relevance_score_fn(distance: float) -> float:
"""Normalize the distance to a score on a scale [0, 1]."""
return (distance + 1.0) / 2.0
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""Your 'correct' relevance function may differ depending on a few things.
For example:
- The distance / similarity metric used by the VectorStore
- The scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- Embedding dimensionality
- etc.
"""
if self.distance_strategy == "COSINE":
return self._cosine_relevance_score_fn
if self.distance_strategy == "DOT":
return self._max_inner_product_relevance_score_fn
if self.distance_strategy == "EUCLID":
return self._euclidean_relevance_score_fn
msg = (
"Unknown distance strategy, must be cosine, max_inner_product, or euclidean"
)
raise ValueError(msg)
def _similarity_search_with_relevance_scores(
    self,
    query: str,
    k: int = 4,
    **kwargs: Any,
) -> list[tuple[Document, float]]:
    """Return docs and relevance scores in the range `[0, 1]`.

    `0` is dissimilar, `1` is most similar. This implementation simply
    delegates to ``similarity_search_with_score``; the backend is assumed
    to already return scores on a `[0, 1]` scale — TODO confirm for
    non-cosine distance strategies.

    Args:
        query: Input text.
        k: Number of Documents to return.
        **kwargs: Kwargs forwarded to ``similarity_search_with_score``.
            May include `score_threshold`, an optional floating point value
            between `0` and `1` to filter the resulting set of retrieved docs.

    Returns:
        List of tuples of `(doc, similarity_score)`.
    """
    return self.similarity_search_with_score(query, k, **kwargs)
@sync_call_fallback
async def _asimilarity_search_with_relevance_scores(
    self,
    query: str,
    k: int = 4,
    **kwargs: Any,
) -> list[tuple[Document, float]]:
    """Return docs and relevance scores in the range `[0, 1]` (async).

    `0` is dissimilar, `1` is most similar. This implementation simply
    delegates to ``asimilarity_search_with_score``; the backend is assumed
    to already return scores on a `[0, 1]` scale — TODO confirm for
    non-cosine distance strategies.

    Args:
        query: Input text.
        k: Number of Documents to return.
        **kwargs: Kwargs forwarded to ``asimilarity_search_with_score``.
            May include `score_threshold`, an optional floating point value
            between `0` and `1` to filter the resulting set of retrieved docs.

    Returns:
        List of tuples of `(doc, similarity_score)`.
    """
    return await self.asimilarity_search_with_score(query, k, **kwargs)
@classmethod
def _build_payloads(
    cls,
    texts: Iterable[str],
    metadatas: list[dict] | None,
    content_payload_key: str,
    metadata_payload_key: str,
) -> list[dict]:
    """Pair each text with its metadata as a Qdrant payload dict.

    Args:
        texts: Texts to store under ``content_payload_key``.
        metadatas: Optional metadata dicts, parallel to ``texts``.
        content_payload_key: Payload key for the raw text.
        metadata_payload_key: Payload key for the metadata dict.

    Returns:
        One payload dict per input text.

    Raises:
        ValueError: If any text is ``None``.
    """
    payloads: list[dict] = []
    for index, text in enumerate(texts):
        if text is None:
            msg = (
                "At least one of the texts is None. Please remove it before "
                "calling .from_texts or .add_texts on Qdrant instance."
            )
            raise ValueError(msg)
        payloads.append(
            {
                content_payload_key: text,
                metadata_payload_key: (
                    None if metadatas is None else metadatas[index]
                ),
            }
        )
    return payloads
@classmethod
def _document_from_scored_point(
    cls,
    scored_point: Any,
    collection_name: str,
    content_payload_key: str,
    metadata_payload_key: str,
) -> Document:
    """Convert a Qdrant scored point into a ``Document``.

    The point's id and the collection name are injected into the metadata
    under the reserved ``_id`` and ``_collection_name`` keys.
    """
    payload = scored_point.payload
    metadata = payload.get(metadata_payload_key) or {}
    metadata["_id"] = scored_point.id
    metadata["_collection_name"] = collection_name
    page_content = payload.get(content_payload_key, "")
    return Document(page_content=page_content, metadata=metadata)
def _build_condition(self, key: str, value: Any) -> list[models.FieldCondition]:
    """Recursively flatten a metadata filter value into Qdrant field conditions.

    Dicts descend with dotted keys, lists of dicts descend through the
    ``[]`` array accessor, and scalars become exact-match conditions on
    ``{metadata_payload_key}.{key}``.
    """
    conditions: list[models.FieldCondition] = []
    if isinstance(value, dict):
        for sub_key, sub_value in value.items():
            conditions.extend(self._build_condition(f"{key}.{sub_key}", sub_value))
    elif isinstance(value, list):
        for item in value:
            # Qdrant addresses elements of an array of objects via "key[]".
            suffix = "[]" if isinstance(item, dict) else ""
            conditions.extend(self._build_condition(f"{key}{suffix}", item))
    else:
        conditions.append(
            models.FieldCondition(
                key=f"{self.metadata_payload_key}.{key}",
                match=models.MatchValue(value=value),
            )
        )
    return conditions
def _qdrant_filter_from_dict(
    self, filter_: DictFilter | None
) -> models.Filter | None:
    """Translate a dict-style metadata filter into a Qdrant ``Filter``.

    Returns ``None`` when the filter is empty or missing, so callers can
    pass the result straight through to the client API.
    """
    if not filter_:
        return None
    must_conditions: list[models.FieldCondition] = []
    for key, value in filter_.items():  # type: ignore[union-attr]
        must_conditions.extend(self._build_condition(key, value))
    return models.Filter(must=must_conditions)
def _embed_query(self, query: str) -> list[float]:
"""Embed query text.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = self.embeddings.embed_query(query)
elif self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
msg = "Neither of embeddings or embedding_function is set"
raise ValueError(msg)
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
async def _aembed_query(self, query: str) -> list[float]:
"""Embed query text asynchronously.
Used to provide backward compatibility with `embedding_function` argument.
Args:
query: Query text.
Returns:
List of floats representing the query embedding.
"""
if self.embeddings is not None:
embedding = await self.embeddings.aembed_query(query)
elif self._embeddings_function is not None:
embedding = self._embeddings_function(query)
else:
msg = "Neither of embeddings or embedding_function is set"
raise ValueError(msg)
return embedding.tolist() if hasattr(embedding, "tolist") else embedding
def _embed_texts(self, texts: Iterable[str]) -> list[list[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = self.embeddings.embed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embeddings, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
msg = "Neither of embeddings or embedding_function is set"
raise ValueError(msg)
return embeddings
async def _aembed_texts(self, texts: Iterable[str]) -> list[list[float]]:
"""Embed search texts.
Used to provide backward compatibility with `embedding_function` argument.
Args:
texts: Iterable of texts to embed.
Returns:
List of floats representing the texts embedding.
"""
if self.embeddings is not None:
embeddings = await self.embeddings.aembed_documents(list(texts))
if hasattr(embeddings, "tolist"):
embeddings = embeddings.tolist()
elif self._embeddings_function is not None:
embeddings = []
for text in texts:
embedding = self._embeddings_function(text)
if hasattr(embeddings, "tolist"):
embedding = embedding.tolist()
embeddings.append(embedding)
else:
msg = "Neither of embeddings or embedding_function is set"
raise ValueError(msg)
return embeddings
def _generate_rest_batches(
    self,
    texts: Iterable[str],
    metadatas: list[dict] | None = None,
    ids: Sequence[str] | None = None,
    batch_size: int = 64,
) -> Generator[tuple[list[str], list[models.PointStruct]], None, None]:
    """Yield ``(batch_ids, batch_points)`` tuples ready for the Qdrant REST API.

    Args:
        texts: Iterable of texts to embed and upload.
        metadatas: Optional metadata dicts, parallel to ``texts``.
        ids: Optional point ids, parallel to ``texts``; random uuid4 hex ids
            are generated when missing.
        batch_size: Maximum number of points per yielded batch.

    Yields:
        Tuples of ``(batch_ids, batch_points)``.
    """
    if not ids:
        # Bug fix: the original built ids via a second `iter(texts)` pass,
        # which exhausts one-shot iterables (generators) before batching
        # even starts. Materialize `texts` once, then derive ids from it.
        texts = list(texts)
        ids = [uuid.uuid4().hex for _ in texts]
    texts_iterator = iter(texts)
    metadatas_iterator = iter(metadatas or [])
    ids_iterator = iter(ids)
    while batch_texts := list(islice(texts_iterator, batch_size)):
        # Take the corresponding metadata and id for each text in a batch
        batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
        batch_ids = list(islice(ids_iterator, batch_size))
        # Generate the embeddings for all the texts in a batch
        batch_embeddings = self._embed_texts(batch_texts)
        points = [
            models.PointStruct(
                id=point_id,
                vector=vector  # type: ignore[arg-type]
                if self.vector_name is None
                else {self.vector_name: vector},
                payload=payload,
            )
            for point_id, vector, payload in zip(
                batch_ids,
                batch_embeddings,
                self._build_payloads(
                    batch_texts,
                    batch_metadatas,
                    self.content_payload_key,
                    self.metadata_payload_key,
                ),
                strict=False,
            )
        ]
        yield batch_ids, points
async def _agenerate_rest_batches(
    self,
    texts: Iterable[str],
    metadatas: list[dict] | None = None,
    ids: Sequence[str] | None = None,
    batch_size: int = 64,
) -> AsyncGenerator[tuple[list[str], list[models.PointStruct]], None]:
    """Asynchronously yield ``(batch_ids, batch_points)`` tuples for upload.

    Args:
        texts: Iterable of texts to embed and upload.
        metadatas: Optional metadata dicts, parallel to ``texts``.
        ids: Optional point ids, parallel to ``texts``; random uuid4 hex ids
            are generated when missing.
        batch_size: Maximum number of points per yielded batch.

    Yields:
        Tuples of ``(batch_ids, batch_points)``.
    """
    if not ids:
        # Bug fix: the original built ids via a second `iter(texts)` pass,
        # which exhausts one-shot iterables (generators) before batching
        # even starts. Materialize `texts` once, then derive ids from it.
        texts = list(texts)
        ids = [uuid.uuid4().hex for _ in texts]
    texts_iterator = iter(texts)
    metadatas_iterator = iter(metadatas or [])
    ids_iterator = iter(ids)
    while batch_texts := list(islice(texts_iterator, batch_size)):
        # Take the corresponding metadata and id for each text in a batch
        batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
        batch_ids = list(islice(ids_iterator, batch_size))
        # Generate the embeddings for all the texts in a batch
        batch_embeddings = await self._aembed_texts(batch_texts)
        points = [
            models.PointStruct(
                id=point_id,
                vector=vector  # type: ignore[arg-type]
                if self.vector_name is None
                else {self.vector_name: vector},
                payload=payload,
            )
            for point_id, vector, payload in zip(
                batch_ids,
                batch_embeddings,
                self._build_payloads(
                    batch_texts,
                    batch_metadatas,
                    self.content_payload_key,
                    self.metadata_payload_key,
                ),
                strict=False,
            )
        ]
        yield batch_ids, points
@staticmethod
def _generate_clients(
    location: str | None = None,
    url: str | None = None,
    port: int | None = 6333,
    grpc_port: int = 6334,
    prefer_grpc: bool = False,  # noqa: FBT001, FBT002
    https: bool | None = None,  # noqa: FBT001
    api_key: str | None = None,
    prefix: str | None = None,
    timeout: int | None = None,
    host: str | None = None,
    path: str | None = None,
    **kwargs: Any,
) -> tuple[QdrantClient, AsyncQdrantClient | None]:
    """Build a sync client plus, when possible, a matching async client.

    Falls back to the ``QDRANT_API_KEY`` environment variable when no
    ``api_key`` argument is given. For local mode (``:memory:`` or an
    on-disk ``path``) no async client is created, since a local Qdrant
    instance cannot serve sync and async clients at the same time.
    """
    if api_key is None:
        api_key = os.getenv("QDRANT_API_KEY")
    # Both clients are configured identically, so share one kwargs dict.
    client_options = dict(
        location=location,
        url=url,
        port=port,
        grpc_port=grpc_port,
        prefer_grpc=prefer_grpc,
        https=https,
        api_key=api_key,
        prefix=prefix,
        timeout=timeout,
        host=host,
        path=path,
        **kwargs,
    )
    sync_client = QdrantClient(**client_options)
    async_client: AsyncQdrantClient | None
    if location == ":memory:" or path is not None:
        async_client = None
    else:
        async_client = AsyncQdrantClient(**client_options)
    return sync_client, async_client
|
Qdrant
|
python
|
openai__openai-python
|
src/openai/_base_client.py
|
{
"start": 27818,
"end": 28486
}
|
class ____(httpx.Client):
def __init__(self, **kwargs: Any) -> None:
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
kwargs.setdefault("follow_redirects", True)
super().__init__(**kwargs)
if TYPE_CHECKING:
DefaultHttpxClient = httpx.Client
"""An alias to `httpx.Client` that provides the same defaults that this SDK
uses internally.
This is useful because overriding the `http_client` with your own instance of
`httpx.Client` will result in httpx's defaults being used, not ours.
"""
else:
DefaultHttpxClient = _DefaultHttpxClient
|
_DefaultHttpxClient
|
python
|
run-llama__llama_index
|
llama-index-core/tests/prompts/test_mixin.py
|
{
"start": 190,
"end": 653
}
|
class ____(PromptMixin):
def __init__(self) -> None:
self._prompt_dict_2 = {
"abc": PromptTemplate("{abc} {def}"),
}
def _get_prompts(self) -> PromptDictType:
return self._prompt_dict_2
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
if "abc" in prompts:
self._prompt_dict_2["abc"] = prompts["abc"]
|
MockObject2
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_doc_building.py
|
{
"start": 947,
"end": 3312
}
|
class ____(TestCase):
def test_command_not_recorded(self):
api_client = mock.MagicMock()
build_env = LocalBuildEnvironment(api_client=api_client)
with build_env:
build_env.run("true", record=False)
self.assertEqual(len(build_env.commands), 0)
api_client.command.post.assert_not_called()
def test_record_command_as_success(self):
api_client = mock.MagicMock()
api_client.command().patch.return_value = {
"id": 1,
}
project = APIProject(**get(Project).__dict__)
build_env = LocalBuildEnvironment(
project=project,
build={
"id": 1,
},
api_client=api_client,
)
with build_env:
build_env.run(
"false",
record_as_success=True,
# Use a directory that exists so the command doesn't fail.
cwd="/tmp",
)
self.assertEqual(len(build_env.commands), 1)
command = build_env.commands[0]
assert command.exit_code == 0
assert command.id == 1
api_client.command.post.assert_called_once_with(
{
"build": mock.ANY,
"command": command.get_command(),
"output": "",
"exit_code": None,
"start_time": None,
"end_time": None,
}
)
api_client.command().patch.assert_called_once_with(
{
"build": mock.ANY,
"command": command.get_command(),
"output": command.output,
"exit_code": 0,
"start_time": command.start_time,
"end_time": command.end_time,
}
)
# TODO: translate these tests into
# `readthedocs/projects/tests/test_docker_environment.py`. I've started the
# work there but it requires a good amount of work to mock it properly and
# reliably. I think we can skip these tests (3) for now since we are raising
# BuildAppError on these cases which we are already handling in other test
# cases.
#
# Once we mock the DockerBuildEnvironment properly, we could also translate the
# new tests from `readthedocs/projects/tests/test_build_tasks.py` to use this
# mocks.
@pytest.mark.skip
|
TestLocalBuildEnvironment
|
python
|
doocs__leetcode
|
solution/1400-1499/1466.Reorder Routes to Make All Paths Lead to the City Zero/Solution2.py
|
{
"start": 0,
"end": 495
}
|
class ____:
def minReorder(self, n: int, connections: List[List[int]]) -> int:
g = [[] for _ in range(n)]
for a, b in connections:
g[a].append((b, 1))
g[b].append((a, 0))
q = deque([0])
vis = {0}
ans = 0
while q:
a = q.popleft()
for b, c in g[a]:
if b not in vis:
vis.add(b)
q.append(b)
ans += c
return ans
|
Solution
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_table09.py
|
{
"start": 315,
"end": 2262
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table09.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("B:K", 10.288)
worksheet.write_string("A1", "Column1")
worksheet.write_string("B1", "Column2")
worksheet.write_string("C1", "Column3")
worksheet.write_string("D1", "Column4")
worksheet.write_string("E1", "Column5")
worksheet.write_string("F1", "Column6")
worksheet.write_string("G1", "Column7")
worksheet.write_string("H1", "Column8")
worksheet.write_string("I1", "Column9")
worksheet.write_string("J1", "Column10")
worksheet.write_string("K1", "Total")
data = [0, 0, 0, None, None, 0, 0, 0, 0, 0]
worksheet.write_row("B4", data)
worksheet.write_row("B5", data)
worksheet.add_table(
"B3:K6",
{
"total_row": 1,
"columns": [
{"total_string": "Total"},
{},
{"total_function": "average"},
{"total_function": "count"},
{"total_function": "count_nums"},
{"total_function": "max"},
{"total_function": "min"},
{"total_function": "sum"},
{"total_function": "stdDev"},
{"total_function": "var"},
],
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
readthedocs__readthedocs.org
|
readthedocs/search/api/v3/views.py
|
{
"start": 1026,
"end": 1164
}
|
class ____(UserRateThrottle):
"""Rate limit for the search API for authenticated users."""
rate = RATE_LIMIT
|
SearchUserRateThrottle
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/renderer.py
|
{
"start": 11015,
"end": 11969
}
|
class ____(Dict[str, bool]):
"""
Cache for remember which style strings don't render the default output
style (default fg/bg, no underline and no reverse and no blink). That way
we know that we should render these cells, even when they're empty (when
they contain a space).
Note: we don't consider bold/italic/hidden because they don't change the
output if there's no text in the cell.
"""
def __init__(self, style_string_to_attrs: dict[str, Attrs]) -> None:
self.style_string_to_attrs = style_string_to_attrs
def __missing__(self, style_str: str) -> bool:
attrs = self.style_string_to_attrs[style_str]
is_default = bool(
attrs.color
or attrs.bgcolor
or attrs.underline
or attrs.strike
or attrs.blink
or attrs.reverse
)
self[style_str] = is_default
return is_default
|
_StyleStringHasStyleCache
|
python
|
numpy__numpy
|
numpy/_core/arrayprint.py
|
{
"start": 49849,
"end": 50195
}
|
class ____:
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
self.truestr = ' True' if data.shape != () else 'True'
def __call__(self, x):
return self.truestr if x else "False"
|
BoolFormat
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/event/attr.py
|
{
"start": 3297,
"end": 7928
}
|
class ____(RefCollection[_ET]):
"""Class-level events on :class:`._Dispatch` classes."""
__slots__ = (
"clsname",
"name",
"arg_names",
"has_kw",
"legacy_signatures",
"_clslevel",
"__weakref__",
)
clsname: str
name: str
arg_names: Sequence[str]
has_kw: bool
legacy_signatures: MutableSequence[legacy._LegacySignatureType]
_clslevel: MutableMapping[
Type[_ET], _ListenerFnSequenceType[_ListenerFnType]
]
def __init__(
self,
parent_dispatch_cls: Type[_HasEventsDispatch[_ET]],
fn: _ListenerFnType,
):
self.name = fn.__name__
self.clsname = parent_dispatch_cls.__name__
argspec = util.inspect_getfullargspec(fn)
self.arg_names = argspec.args[1:]
self.has_kw = bool(argspec.varkw)
self.legacy_signatures = list(
reversed(
sorted(
getattr(fn, "_legacy_signatures", []), key=lambda s: s[0]
)
)
)
fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn)
self._clslevel = weakref.WeakKeyDictionary()
def _adjust_fn_spec(
self, fn: _ListenerFnType, named: bool
) -> _ListenerFnType:
if named:
fn = self._wrap_fn_for_kw(fn)
if self.legacy_signatures:
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
pass
else:
fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
return fn
def _wrap_fn_for_kw(self, fn: _ListenerFnType) -> _ListenerFnType:
def wrap_kw(*args: Any, **kw: Any) -> Any:
argdict = dict(zip(self.arg_names, args))
argdict.update(kw)
return fn(**argdict)
return wrap_kw
def _do_insert_or_append(
self, event_key: _EventKey[_ET], is_append: bool
) -> None:
target = event_key.dispatch_target
assert isinstance(
target, type
), "Class-level Event targets must be classes."
if not getattr(target, "_sa_propagate_class_events", True):
raise exc.InvalidRequestError(
f"Can't assign an event directly to the {target} class"
)
cls: Type[_ET]
for cls in util.walk_subclasses(target):
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self.update_subclass(cls)
if is_append:
self._clslevel[cls].append(event_key._listen_fn)
else:
self._clslevel[cls].appendleft(event_key._listen_fn)
registry._stored_in_collection(event_key, self)
def insert(self, event_key: _EventKey[_ET], propagate: bool) -> None:
self._do_insert_or_append(event_key, is_append=False)
def append(self, event_key: _EventKey[_ET], propagate: bool) -> None:
self._do_insert_or_append(event_key, is_append=True)
def update_subclass(self, target: Type[_ET]) -> None:
if target not in self._clslevel:
if getattr(target, "_sa_propagate_class_events", True):
self._clslevel[target] = collections.deque()
else:
self._clslevel[target] = _empty_collection()
clslevel = self._clslevel[target]
cls: Type[_ET]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend(
[fn for fn in self._clslevel[cls] if fn not in clslevel]
)
def remove(self, event_key: _EventKey[_ET]) -> None:
target = event_key.dispatch_target
cls: Type[_ET]
for cls in util.walk_subclasses(target):
if cls in self._clslevel:
self._clslevel[cls].remove(event_key._listen_fn)
registry._removed_from_collection(event_key, self)
def clear(self) -> None:
"""Clear all class level listeners"""
to_clear: Set[_ListenerFnType] = set()
for dispatcher in self._clslevel.values():
to_clear.update(dispatcher)
dispatcher.clear()
registry._clear(self, to_clear)
def for_modify(self, obj: _Dispatch[_ET]) -> _ClsLevelDispatch[_ET]:
"""Return an event collection which can be modified.
For _ClsLevelDispatch at the class level of
a dispatcher, this returns self.
"""
return self
|
_ClsLevelDispatch
|
python
|
huggingface__transformers
|
src/transformers/models/aya_vision/modular_aya_vision.py
|
{
"start": 9509,
"end": 12817
}
|
class ____(LlavaForConditionalGeneration):
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, AyaVisionCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoProcessor, AyaVisionForConditionalGeneration
>>> import torch
>>> torch_device = "cuda:0"
>>> processor = AutoProcessor.from_pretrained("CohereForAI/aya-vision-8b", use_fast=True)
>>> model = AyaVisionForConditionalGeneration.from_pretrained("CohereForAI/aya-vision-8b", device_map=torch_device)
>>> messages = [
... {
... "role": "user",
... "content": [
... {
... "type": "image",
... "url": "https://pbs.twimg.com/media/Fx7YvfQWYAIp6rZ?format=jpg&name=medium",
... },
... {"type": "text", "text": "चित्र में लिखा पाठ क्या कहता है?"},
... ],
... }
... ]
>>> inputs = processor.apply_chat_template(
... messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", device=torch_device
... ).to(model.device)
>>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3)
>>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
```"""
super().forward(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
labels=labels,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
image_sizes=image_sizes,
**kwargs,
)
__all__ = ["AyaVisionForConditionalGeneration", "AyaVisionPreTrainedModel", "AyaVisionModel"]
|
AyaVisionForConditionalGeneration
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axes_grid1/axes_size.py
|
{
"start": 1612,
"end": 1960
}
|
class ____(_Base):
"""
Sum of two sizes.
"""
def __init__(self, a, b):
self._a = a
self._b = b
def get_size(self, renderer):
a_rel_size, a_abs_size = self._a.get_size(renderer)
b_rel_size, b_abs_size = self._b.get_size(renderer)
return a_rel_size + b_rel_size, a_abs_size + b_abs_size
|
Add
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/python_test_framework_unittest.py
|
{
"start": 640,
"end": 884
}
|
class ____(unittest.TestCase):
def test_issue(self):
# Expected False Negative
_test_sink(_test_source())
# Whole file is ignored
def false_negative():
# Expected False Negative
_test_sink(_test_source())
|
TestWithIssue
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vsts/test_provider.py
|
{
"start": 823,
"end": 2538
}
|
class ____(TestCase):
@responses.activate
def test_exchange_token(self) -> None:
view = VSTSOAuth2CallbackView(
access_token_url="https://app.vssps.visualstudio.com/oauth2/token",
client_id="vsts-client-id",
client_secret="vsts-client-secret",
)
request = Mock()
pipeline = Mock(
config={"redirect_url": "https://app.vssps.visualstudio.com/oauth2/authorize"}
)
responses.add(
responses.POST,
"https://app.vssps.visualstudio.com/oauth2/token",
json={
"access_token": "xxxxxxxxx",
"token_type": "jwt-bearer",
"expires_in": "3599",
"refresh_token": "zzzzzzzzzz",
},
)
result = view.exchange_token(request, pipeline, "oauth-code")
mock_request = responses.calls[0].request
req_params = parse_qs(mock_request.body)
assert req_params["grant_type"] == ["urn:ietf:params:oauth:grant-type:jwt-bearer"]
assert req_params["assertion"] == ["oauth-code"]
assert req_params["redirect_uri"] == ["https://app.vssps.visualstudio.com/oauth2/authorize"]
assert req_params["client_assertion_type"] == [
"urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
]
assert req_params["client_assertion"] == ["vsts-client-secret"]
assert result["access_token"] == "xxxxxxxxx"
assert result["token_type"] == "jwt-bearer"
assert result["expires_in"] == "3599"
assert result["refresh_token"] == "zzzzzzzzzz"
@control_silo_test
@override_options({"vsts.consent-prompt": True})
|
TestVSTSOAuthCallbackView
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/resources/models.py
|
{
"start": 11148,
"end": 11460
}
|
class ____:
def __init__(self, models: Models) -> None:
self._models = models
self.retrieve = _legacy_response.to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
models.list,
)
|
ModelsWithRawResponse
|
python
|
marshmallow-code__marshmallow
|
src/marshmallow/fields.py
|
{
"start": 44745,
"end": 46293
}
|
class ____(_TemporalField[dt.datetime]):
"""A formatted datetime string.
Example: ``'2014-12-22T03:12:58.019077+00:00'``
:param format: Either ``"rfc"`` (for RFC822), ``"iso"`` (for ISO8601),
``"timestamp"``, ``"timestamp_ms"`` (for a POSIX timestamp) or a date format string.
If `None`, defaults to "iso".
:param kwargs: The same keyword arguments that :class:`Field` receives.
.. versionchanged:: 3.0.0rc9
Does not modify timezone information on (de)serialization.
.. versionchanged:: 3.19
Add timestamp as a format.
"""
SERIALIZATION_FUNCS: dict[str, typing.Callable[[dt.datetime], str | float]] = {
"iso": dt.datetime.isoformat,
"iso8601": dt.datetime.isoformat,
"rfc": email.utils.format_datetime,
"rfc822": email.utils.format_datetime,
"timestamp": utils.timestamp,
"timestamp_ms": utils.timestamp_ms,
}
DESERIALIZATION_FUNCS: dict[str, typing.Callable[[str], dt.datetime]] = {
"iso": dt.datetime.fromisoformat,
"iso8601": dt.datetime.fromisoformat,
"rfc": email.utils.parsedate_to_datetime,
"rfc822": email.utils.parsedate_to_datetime,
"timestamp": utils.from_timestamp,
"timestamp_ms": utils.from_timestamp_ms,
}
DEFAULT_FORMAT = "iso"
OBJ_TYPE = "datetime"
SCHEMA_OPTS_VAR_NAME = "datetimeformat"
@staticmethod
def _make_object_from_format(value, data_format) -> dt.datetime:
return dt.datetime.strptime(value, data_format)
|
DateTime
|
python
|
streamlit__streamlit
|
lib/streamlit/elements/vega_charts.py
|
{
"start": 3141,
"end": 7053
}
|
class ____(TypedDict, total=False):
"""
The schema for the Vega-Lite event state.
The event state is stored in a dictionary-like object that supports both
key and attribute notation. Event states cannot be programmatically
changed or set through Session State.
Only selection events are supported at this time.
Attributes
----------
selection : dict
The state of the ``on_select`` event. This attribute returns a
dictionary-like object that supports both key and attribute notation.
The name of each Vega-Lite selection parameter becomes an attribute in
the ``selection`` dictionary. The format of the data within each
attribute is determined by the selection parameter definition within
Vega-Lite.
Examples
--------
The following two examples have equivalent definitions. Each one has a
point and interval selection parameter include in the chart definition.
The point selection parameter is named ``"point_selection"``. The interval
or box selection parameter is named ``"interval_selection"``.
**Example 1: Chart selections with ``st.altair_chart``**
>>> import altair as alt
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = pd.DataFrame(rng(0).standard_normal((20, 3)), columns=["a", "b", "c"])
>>>
>>> point_selector = alt.selection_point("point_selection")
>>> interval_selector = alt.selection_interval("interval_selection")
>>> chart = (
... alt.Chart(df)
... .mark_circle()
... .encode(
... x="a",
... y="b",
... size="c",
... color="c",
... tooltip=["a", "b", "c"],
... fillOpacity=alt.condition(point_selector, alt.value(1), alt.value(0.3)),
... )
... .add_params(point_selector, interval_selector)
... )
>>>
>>> event = st.altair_chart(chart, key="alt_chart", on_select="rerun")
>>>
>>> event
**Example 2: Chart selections with ``st.vega_lite_chart``**
>>> import pandas as pd
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = pd.DataFrame(rng(0).standard_normal((20, 3)), columns=["a", "b", "c"])
>>>
>>> spec = {
... "mark": {"type": "circle", "tooltip": True},
... "params": [
... {"name": "interval_selection", "select": "interval"},
... {"name": "point_selection", "select": "point"},
... ],
... "encoding": {
... "x": {"field": "a", "type": "quantitative"},
... "y": {"field": "b", "type": "quantitative"},
... "size": {"field": "c", "type": "quantitative"},
... "color": {"field": "c", "type": "quantitative"},
... "fillOpacity": {
... "condition": {"param": "point_selection", "value": 1},
... "value": 0.3,
... },
... },
... }
>>>
>>> event = st.vega_lite_chart(df, spec, key="vega_chart", on_select="rerun")
>>>
>>> event
Try selecting points in this interactive example. When you click a point,
the selection will appear under the attribute, ``"point_selection"``, which
is the name given to the point selection parameter. Similarly, when you
make an interval selection, it will appear under the attribute
``"interval_selection"``. You can give your selection parameters other
names if desired.
If you hold ``Shift`` while selecting points, existing point selections
will be preserved. Interval selections are not preserved when making
additional selections.
.. output::
https://doc-chart-events-vega-lite-state.streamlit.app
height: 600px
"""
selection: Required[AttributeDictionary]
@dataclass
|
VegaLiteState
|
python
|
getsentry__sentry
|
src/sentry/replays/usecases/ingest/event_logger.py
|
{
"start": 9825,
"end": 14467
}
|
class ____(TypedDict):
component_name: str
node: dict[str, Any]
project_id: int
replay_event: dict[str, Any]
replay_id: str
selector: str
timestamp: int
url: str
def gen_rage_clicks(
event_meta: ParsedEventMeta,
project_id: int,
replay_id: str,
replay_event: dict[str, Any] | None,
) -> Generator[RageClickIssue]:
if not replay_event:
return None
for click in filter(lambda c: c.is_rage and c.url, event_meta.click_events):
yield {
"component_name": click.component_name,
"node": {
"id": click.node_id,
"tagName": click.tag,
"attributes": {
"id": click.id,
"class": " ".join(click.classes),
"aria-label": click.aria_label,
"role": click.role,
"alt": click.alt,
"data-testid": click.testid,
"title": click.title,
"data-sentry-component": click.component_name,
},
"textContent": click.text,
},
"project_id": project_id,
"replay_event": replay_event,
"replay_id": replay_id,
"selector": click.selector,
"timestamp": click.timestamp,
"url": str(click.url),
}
@sentry_sdk.trace
def report_rage_click(
event_meta: ParsedEventMeta,
project_id: int,
replay_id: str,
replay_event: dict[str, Any] | None,
context: ProcessorContext,
) -> None:
clicks = list(gen_rage_clicks(event_meta, project_id, replay_id, replay_event))
if len(clicks) == 0 or not _should_report_rage_click_issue(project_id, context):
return None
metrics.incr("replay.rage_click_detected", amount=len(clicks))
for click in clicks:
report_rage_click_issue_with_replay_event(
click["project_id"],
click["replay_id"],
click["timestamp"],
click["selector"],
click["url"],
click["node"],
click["component_name"],
click["replay_event"],
)
def _largest_attr(ti: TraceItem) -> tuple[str, int]:
    """Return (name, byte size) of the largest attribute on *ti*, or ("", 0)."""
    attributes = ti.attributes
    if not attributes:
        return ("", 0)
    largest = max(attributes, key=lambda name: attributes[name].ByteSize())
    return largest, attributes[largest].ByteSize()
def _attr_stats(ti: TraceItem) -> tuple[int, int]:
    """Return (attribute count, summed attribute byte size) for *ti*."""
    attributes = ti.attributes
    if not attributes:
        return (0, 0)
    sizes = [value.ByteSize() for value in attributes.values()]
    return (len(sizes), sum(sizes))
@sentry_sdk.trace
def emit_trace_items_to_eap(trace_items: list[TraceItem]) -> None:
    """Write trace items to EAP, recording attribute telemetry on the span."""
    # Single pass: track the overall largest attribute plus running totals.
    largest = None  # (trace_item, attr_name, attr_size) or None when empty
    total_attr_count = 0
    total_attr_size_bytes = 0
    for trace_item in trace_items:
        name, size = _largest_attr(trace_item)
        # Strict ">" keeps the first maximum on ties, matching max() semantics.
        if largest is None or size > largest[2]:
            largest = (trace_item, name, size)
        count, size_sum = _attr_stats(trace_item)
        total_attr_count += count
        total_attr_size_bytes += size_sum

    with sentry_sdk.start_span(op="process", name="write_trace_items") as span:
        span.set_data("attribute_count_total", total_attr_count)
        span.set_data("attribute_size_total_bytes", total_attr_size_bytes)
        if largest:
            trace_item, name, size = largest
            span.set_data("largest_attr_trace_id", trace_item.trace_id)
            span.set_data("largest_attr_name", name)
            span.set_data("largest_attr_size_bytes", size)
        write_trace_items(trace_items)
@sentry_sdk.trace
def _should_report_hydration_error_issue(project_id: int, context: ProcessorContext) -> bool:
    """Return whether hydration-error issue reporting is enabled.

    Prefers the processor's options cache when populated; otherwise reads
    the project option, which is controlled by a project owner.
    """
    cache = context["options_cache"]
    if cache:
        return cache[project_id][0]
    return ProjectOption.objects.get_value(
        project_id,
        "sentry:replay_hydration_error_issues",
        default=True,
    )
@sentry_sdk.trace
def _should_report_rage_click_issue(project_id: int, context: ProcessorContext) -> bool:
    """Return whether rage-click issue reporting is enabled.

    Prefers the processor's options cache when populated; otherwise reads
    the project option, which is controlled by a project owner.
    """
    cache = context["options_cache"]
    if cache:
        return cache[project_id][1]
    return ProjectOption.objects.get_value(
        project_id,
        "sentry:replay_rage_click_issues",
        default=True,
    )
def encode_as_uuid(message: str) -> str:
    """Map *message* deterministically onto a UUID string via its MD5 digest."""
    digest = md5(message.encode()).hexdigest()
    return str(uuid.UUID(digest))
|
RageClickIssue
|
python
|
django__django
|
tests/generic_views/test_base.py
|
{
"start": 1236,
"end": 1413
}
|
class ____(TemplateView):
    """Test view: template named via the class attribute rather than as_view()."""

    template_name = "generic_views/about.html"

    def get(self, request):
        # Renders with an explicitly empty context instead of the default one.
        return self.render_to_response(context={})
|
AboutTemplateAttributeView
|
python
|
huggingface__transformers
|
src/transformers/models/sam/modeling_sam.py
|
{
"start": 36335,
"end": 38669
}
|
class ____(SamVisionAttention):
    """
    Multi-head Attention block with relative position embeddings.
    Using SDPA instead of the default attention.
    """

    def __init__(self, config, window_size):
        super().__init__(config, window_size)

    def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
        # NOTE(review): despite the -> torch.Tensor annotation, this returns a
        # (attn_output, None) tuple; the None mirrors the eager path's attention
        # weights, which SDPA cannot expose.
        if output_attentions:
            logger.warning_once(
                f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
                "be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
            )

        batch_size, height, width, _ = hidden_states.shape
        # qkv with shape (3, B, nHead, H * W, C)
        qkv = (
            self.qkv(hidden_states)
            .reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
            .permute(2, 0, 3, 1, 4)
        )

        # q, k, v with shape (B * nHead, H * W, C)
        query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)

        # Relative position embeddings are folded in as an additive attention
        # bias so SDPA can consume them.
        attn_bias = None
        if self.use_rel_pos:
            decomposed_rel_pos = self.get_decomposed_rel_pos(
                query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )
            decomposed_rel_pos = decomposed_rel_pos.reshape(
                batch_size, self.num_attention_heads, height * width, height * width
            )
            attn_bias = decomposed_rel_pos

        query = query.view(batch_size, self.num_attention_heads, height * width, -1)
        key = key.view(batch_size, self.num_attention_heads, height * width, -1)
        value = value.view(batch_size, self.num_attention_heads, height * width, -1)

        attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attn_bias)

        # Back to (B, H, W, C) channel-last layout before the output projection.
        attn_output = (
            attn_output.view(batch_size, self.num_attention_heads, height, width, -1)
            .permute(0, 2, 3, 1, 4)
            .reshape(batch_size, height, width, -1)
        )

        attn_output = self.proj(attn_output)

        return attn_output, None
# Maps the `attn_implementation` config key to the vision-attention class used.
SAM_VISION_ATTENTION_CLASSES = {
    "eager": SamVisionAttention,
    "sdpa": SamVisionSdpaAttention,
}
|
SamVisionSdpaAttention
|
python
|
scrapy__scrapy
|
tests/test_pipelines.py
|
{
"start": 2404,
"end": 5102
}
|
class ____:
    """Crawl-level checks that each supported pipeline flavour passes items through."""

    @classmethod
    def setup_class(cls):
        # One shared mock HTTP server for every test in the class.
        cls.mockserver = MockServer()
        cls.mockserver.__enter__()

    @classmethod
    def teardown_class(cls):
        cls.mockserver.__exit__(None, None, None)

    def _on_item_scraped(self, item):
        # item_scraped signal handler: every item must carry the marker the
        # pipeline under test sets.
        assert isinstance(item, dict)
        assert item.get("pipeline_passed")
        self.items.append(item)

    def _create_crawler(self, pipeline_class):
        # Build a crawler wired with exactly one pipeline and collect its items.
        settings = {
            "ITEM_PIPELINES": {pipeline_class: 1},
        }
        crawler = get_crawler(ItemSpider, settings)
        crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
        self.items = []
        return crawler

    @inlineCallbacks
    def test_simple_pipeline(self):
        crawler = self._create_crawler(SimplePipeline)
        yield crawler.crawl(mockserver=self.mockserver)
        assert len(self.items) == 1

    @inlineCallbacks
    def test_deferred_pipeline(self):
        crawler = self._create_crawler(DeferredPipeline)
        yield crawler.crawl(mockserver=self.mockserver)
        assert len(self.items) == 1

    @inlineCallbacks
    def test_asyncdef_pipeline(self):
        crawler = self._create_crawler(AsyncDefPipeline)
        yield crawler.crawl(mockserver=self.mockserver)
        assert len(self.items) == 1

    @pytest.mark.only_asyncio
    @inlineCallbacks
    def test_asyncdef_asyncio_pipeline(self):
        crawler = self._create_crawler(AsyncDefAsyncioPipeline)
        yield crawler.crawl(mockserver=self.mockserver)
        assert len(self.items) == 1

    @pytest.mark.only_not_asyncio
    @inlineCallbacks
    def test_asyncdef_not_asyncio_pipeline(self):
        crawler = self._create_crawler(AsyncDefNotAsyncioPipeline)
        yield crawler.crawl(mockserver=self.mockserver)
        assert len(self.items) == 1

    @deferred_f_from_coro_f
    async def test_deprecated_spider_arg(self, mockserver: MockServer) -> None:
        # Each legacy spider-argument signature must emit its own deprecation
        # warning, and the item must still flow through.
        crawler = self._create_crawler(DeprecatedSpiderArgPipeline)
        with (
            pytest.warns(
                ScrapyDeprecationWarning,
                match=r"DeprecatedSpiderArgPipeline.open_spider\(\) requires a spider argument",
            ),
            pytest.warns(
                ScrapyDeprecationWarning,
                match=r"DeprecatedSpiderArgPipeline.close_spider\(\) requires a spider argument",
            ),
            pytest.warns(
                ScrapyDeprecationWarning,
                match=r"DeprecatedSpiderArgPipeline.process_item\(\) requires a spider argument",
            ),
        ):
            await maybe_deferred_to_future(crawler.crawl(mockserver=mockserver))
        assert len(self.items) == 1
|
TestPipeline
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/msgraph.py
|
{
"start": 2673,
"end": 4017
}
|
class ____(ResponseHandler):
    """DefaultResponseHandler returns JSON payload or content in bytes or response headers."""

    @staticmethod
    def get_value(response: Response) -> Any:
        """Extract the most useful value from *response*.

        Returns the parsed JSON payload when the body parses as JSON, else
        the raw content bytes, else (when the body is empty) the response
        headers as a plain dict — e.g. the Location header on a 302.
        """
        with suppress(JSONDecodeError):
            return response.json()
        content = response.content
        if not content:
            # dict(...) instead of an identity dict-comprehension (ruff C416).
            return dict(response.headers.items())
        return content

    async def handle_response_async(
        self, response: NativeResponseType, error_map: dict[str, ParsableFactory] | None
    ) -> Any:
        """
        Invoke this callback method when a response is received.

        :param response: The type of the native response object.
        :param error_map: The error dict to use in case of a failed request.
        :raises AirflowBadRequest: for HTTP 400 responses.
        :raises AirflowNotFoundException: for HTTP 404 responses.
        :raises AirflowException: for any other non-success status.
        """
        resp: Response = cast("Response", response)
        value = self.get_value(resp)
        if resp.status_code not in {200, 201, 202, 204, 302}:
            # Prefer the response body as the error message; fall back to the
            # HTTP reason phrase when the body was empty/unparseable-to-falsy.
            message = value or resp.reason_phrase
            status_code = HTTPStatus(resp.status_code)
            if status_code == HTTPStatus.BAD_REQUEST:
                raise AirflowBadRequest(message)
            if status_code == HTTPStatus.NOT_FOUND:
                raise AirflowNotFoundException(message)
            raise AirflowException(message)
        return value
|
DefaultResponseHandler
|
python
|
apache__airflow
|
providers/apache/beam/src/airflow/providers/apache/beam/triggers/beam.py
|
{
"start": 1053,
"end": 2682
}
|
class ____(BaseTrigger):
    """Base class for Beam Pipeline Triggers."""

    @staticmethod
    def _get_async_hook(*args, **kwargs) -> BeamAsyncHook:
        # Thin factory so subclasses (and tests) can obtain the async hook.
        return BeamAsyncHook(*args, **kwargs)

    @staticmethod
    def file_has_gcs_path(file_path: str):
        # True when *file_path* points into Google Cloud Storage.
        return file_path.lower().startswith("gs://")

    @staticmethod
    async def provide_gcs_tempfile(gcs_file, gcp_conn_id):
        """Download *gcs_file* into a local temporary file and return the file object.

        Requires the Google provider; raises
        AirflowOptionalProviderFeatureException when it is not installed.
        """
        try:
            from airflow.providers.google.cloud.hooks.gcs import GCSAsyncHook
        except ImportError:
            from airflow.exceptions import AirflowOptionalProviderFeatureException

            raise AirflowOptionalProviderFeatureException(
                "Failed to import GCSAsyncHook. To use the GCSAsyncHook functionality, please install the "
                "apache-airflow-google-provider."
            )
        async_gcs_hook = GCSAsyncHook(gcp_conn_id=gcp_conn_id)
        sync_gcs_hook = await async_gcs_hook.get_sync_hook()
        loop = asyncio.get_running_loop()
        # Running synchronous `enter_context()` method in a separate
        # thread using the default executor `None`. The `run_in_executor()` function returns the
        # file object, which is created using gcs function `provide_file()`, asynchronously.
        # This means we can perform asynchronous operations with this file.
        # NOTE(review): the ExitStack below is created inline and immediately
        # discarded, so its __exit__ never runs and the temp file is only cleaned
        # up at garbage collection — confirm this is intentional.
        create_tmp_file_call = sync_gcs_hook.provide_file(object_url=gcs_file)
        tmp_gcs_file: IO[str] = await loop.run_in_executor(
            None,
            contextlib.ExitStack().enter_context,
            create_tmp_file_call,
        )
        return tmp_gcs_file
|
BeamPipelineBaseTrigger
|
python
|
urllib3__urllib3
|
src/urllib3/exceptions.py
|
{
"start": 470,
"end": 624
}
|
class ____(Warning):
    """Base warning used by this module."""


# Shape of a __reduce__ result: (reconstructing callable, positional args).
_TYPE_REDUCE_RESULT = tuple[typing.Callable[..., object], tuple[object, ...]]
|
HTTPWarning
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/_margin.py
|
{
"start": 235,
"end": 5400
}
|
class ____(_BaseLayoutHierarchyType):
    """Auto-generated accessor class for the `layout.margin` node."""

    # Path metadata used by the plotly base class for validation/serialization.
    _parent_path_str = "layout"
    _path_str = "layout.margin"
    _valid_props = {"autoexpand", "b", "l", "pad", "r", "t"}

    @property
    def autoexpand(self):
        """
        Turns on/off margin expansion computations. Legends, colorbars,
        updatemenus, sliders, axis rangeselector and rangeslider are
        allowed to push the margins by defaults.

        The 'autoexpand' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["autoexpand"]

    @autoexpand.setter
    def autoexpand(self, val):
        self["autoexpand"] = val

    @property
    def b(self):
        """
        Sets the bottom margin (in px).

        The 'b' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["b"]

    @b.setter
    def b(self, val):
        self["b"] = val

    @property
    def l(self):
        """
        Sets the left margin (in px).

        The 'l' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["l"]

    @l.setter
    def l(self, val):
        self["l"] = val

    @property
    def pad(self):
        """
        Sets the amount of padding (in px) between the plotting area
        and the axis lines

        The 'pad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["pad"]

    @pad.setter
    def pad(self, val):
        self["pad"] = val

    @property
    def r(self):
        """
        Sets the right margin (in px).

        The 'r' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["r"]

    @r.setter
    def r(self, val):
        self["r"] = val

    @property
    def t(self):
        """
        Sets the top margin (in px).

        The 't' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["t"]

    @t.setter
    def t(self, val):
        self["t"] = val

    @property
    def _prop_descriptions(self):
        # Human-readable summaries consumed by base-class help/error messages.
        return """\
        autoexpand
            Turns on/off margin expansion computations. Legends,
            colorbars, updatemenus, sliders, axis rangeselector and
            rangeslider are allowed to push the margins by
            defaults.
        b
            Sets the bottom margin (in px).
        l
            Sets the left margin (in px).
        pad
            Sets the amount of padding (in px) between the plotting
            area and the axis lines
        r
            Sets the right margin (in px).
        t
            Sets the top margin (in px).
        """

    def __init__(
        self,
        arg=None,
        autoexpand=None,
        b=None,
        l=None,
        pad=None,
        r=None,
        t=None,
        **kwargs,
    ):
        """
        Construct a new Margin object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.Margin`
        autoexpand
            Turns on/off margin expansion computations. Legends,
            colorbars, updatemenus, sliders, axis rangeselector and
            rangeslider are allowed to push the margins by
            defaults.
        b
            Sets the bottom margin (in px).
        l
            Sets the left margin (in px).
        pad
            Sets the amount of padding (in px) between the plotting
            area and the axis lines
        r
            Sets the right margin (in px).
        t
            Sets the top margin (in px).

        Returns
        -------
        Margin
        """
        super().__init__("margin")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Accept a dict, another Margin instance, or nothing as the first arg.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Margin
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Margin`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in `arg`.
        self._set_property("autoexpand", arg, autoexpand)
        self._set_property("b", arg, b)
        self._set_property("l", arg, l)
        self._set_property("pad", arg, pad)
        self._set_property("r", arg, r)
        self._set_property("t", arg, t)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
Margin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-servers-that-communicate.py
|
{
"start": 37,
"end": 621
}
|
class ____(object):
    def countServers(self, grid):
        """
        Count servers that can communicate with at least one other server
        (sharing a row or a column).

        :type grid: List[List[int]]
        :rtype: int
        """
        # Fix: the original used Python-2-only `xrange`, which raises
        # NameError on Python 3; counting is now done with sum/zip/enumerate.
        rows = [sum(row) for row in grid]          # servers per row
        cols = [sum(col) for col in zip(*grid)]    # servers per column
        # A server communicates iff its row or column holds another server.
        return sum(
            1
            for i, row in enumerate(grid)
            for j, cell in enumerate(row)
            if cell and (rows[i] > 1 or cols[j] > 1)
        )
|
Solution
|
python
|
streamlit__streamlit
|
lib/streamlit/errors.py
|
{
"start": 19074,
"end": 19600
}
|
class ____(LocalizableStreamlitException):
    """Raised when a value cannot be interpreted as a color."""

    def __init__(
        self, color: str | Collection[Any] | tuple[int, int, int, int]
    ) -> None:
        # NOTE(review): these adjacent string literals concatenate with no
        # separator, yielding "...formats:* Hex string..." — confirm whether
        # newline breaks are missing between the bullet items.
        super().__init__(
            "This does not look like a valid color: {color}.\n\n"
            "Colors must be in one of the following formats:"
            "* Hex string with 3, 4, 6, or 8 digits. Example: `'#00ff00'`"
            "* List or tuple with 3 or 4 components. Example: `[1.0, 0.5, 0, 0.2]`",
            color=repr(color),
        )
|
StreamlitInvalidColorError
|
python
|
modin-project__modin
|
asv_bench/benchmarks/benchmarks.py
|
{
"start": 23628,
"end": 24274
}
|
class ____:
    """ASV benchmark: cost of ``DataFrame.reset_index`` across shapes and options."""

    # asv parameter grid: dataframe shape, drop flag, and level to reset.
    param_names = ["shape", "drop", "level"]
    params = [
        get_benchmark_shapes("TimeResetIndex"),
        [False, True],
        [None, "level_1"],
    ]

    def setup(self, shape, drop, level):
        self.df = generate_dataframe("int", *shape, RAND_LOW, RAND_HIGH)
        if level:
            # Swap in a two-level MultiIndex so resetting a single named
            # level ("level_1") is actually exercised.
            index = IMPL.MultiIndex.from_product(
                [self.df.index[: shape[0] // 2], ["bar", "foo"]],
                names=["level_1", "level_2"],
            )
            self.df.index = index

    def time_reset_index(self, shape, drop, level):
        execute(self.df.reset_index(drop=drop, level=level))
|
TimeResetIndex
|
python
|
astropy__astropy
|
astropy/nddata/ccddata.py
|
{
"start": 2523,
"end": 29839
}
|
class ____(NDDataArray):
    """A class describing basic CCD data.

    The CCDData class is based on the NDData object and includes a data array,
    uncertainty frame, mask frame, flag frame, meta data, units, and WCS
    information for a single CCD image.

    Parameters
    ----------
    data : `~astropy.nddata.CCDData`-like or array-like
        The actual data contained in this `~astropy.nddata.CCDData` object.
        Note that the data will always be saved by *reference*, so you should
        make a copy of the ``data`` before passing it in if that's the desired
        behavior.

    uncertainty : `~astropy.nddata.StdDevUncertainty`, \
            `~astropy.nddata.VarianceUncertainty`, \
            `~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
            None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        it assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
        Default is ``None``.

    mask : `numpy.ndarray` or None, optional
        Mask for the data, given as a boolean Numpy array with a shape
        matching that of the data. The values must be `False` where
        the data is *valid* and `True` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will causes the mask from the masked array to be
        ignored.
        Default is ``None``.

    flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
            optional
        Flags giving information about each pixel. These can be specified
        either as a Numpy array of any type with a shape matching that of the
        data, or as a `~astropy.nddata.FlagCollection` instance which has a
        shape matching that of the data.
        Default is ``None``.

    wcs : `~astropy.wcs.WCS` or None, optional
        WCS-object containing the world coordinate system for the data.
        Default is ``None``.

    meta : dict-like object or None, optional
        Metadata for this object. "Metadata" here means all information that
        is included with this object but not part of any other attribute
        of this particular object, e.g. creation date, unique identifier,
        simulation parameters, exposure time, telescope name, etc.

    unit : `~astropy.units.Unit` or str, optional
        The units of the data.
        Default is ``None``.

        .. warning::

            If the unit is ``None`` or not otherwise specified it will raise a
            ``ValueError``

    psf : `numpy.ndarray` or None, optional
        Image representation of the PSF at the center of this image. In order
        for convolution to be flux-preserving, this should generally be
        normalized to sum to unity.

    Raises
    ------
    ValueError
        If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
        match shape) onto ``data``.

    Methods
    -------
    read(\\*args, \\**kwargs)
        ``Classmethod`` to create an CCDData instance based on a ``FITS`` file.
        This method uses :func:`fits_ccddata_reader` with the provided
        parameters.
    write(\\*args, \\**kwargs)
        Writes the contents of the CCDData instance into a new ``FITS`` file.
        This method uses :func:`fits_ccddata_writer` with the provided
        parameters.

    Attributes
    ----------
    known_invalid_fits_unit_strings
        A dictionary that maps commonly-used fits unit name strings that are
        technically invalid to the correct valid unit type (or unit string).
        This is primarily for variant names like "ELECTRONS/S" which are not
        formally valid, but are unambiguous and frequently enough encountered
        that it is convenient to map them to the correct unit.

    Notes
    -----
    `~astropy.nddata.CCDData` objects can be easily converted to a regular
    Numpy array using `numpy.asarray`.

    For example::

        >>> from astropy.nddata import CCDData
        >>> import numpy as np
        >>> x = CCDData([1,2,3], unit='adu')
        >>> np.asarray(x)
        array([1, 2, 3])

    This is useful, for example, when plotting a 2D image using
    matplotlib.

        >>> from astropy.nddata import CCDData
        >>> from matplotlib import pyplot as plt   # doctest: +SKIP
        >>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
        >>> plt.imshow(x)   # doctest: +SKIP
    """

    def __init__(self, *args, **kwd):
        # ``header`` is accepted as a FITS-flavored alias for ``meta``;
        # passing both is ambiguous and rejected.
        if "meta" not in kwd:
            kwd["meta"] = kwd.pop("header", None)
        if "header" in kwd:
            raise ValueError("can't have both header and meta.")

        super().__init__(*args, **kwd)
        if self._wcs is not None:
            llwcs = self._wcs.low_level_wcs
            if not isinstance(llwcs, WCS):
                raise TypeError("the wcs must be a WCS instance.")
            self._wcs = llwcs

        # Check if a unit is set. This can be temporarily disabled by the
        # _CCDDataUnit contextmanager.
        if _config_ccd_requires_unit and self.unit is None:
            raise ValueError("a unit for CCDData must be specified.")

    def _slice_wcs(self, item):
        """
        Override the WCS slicing behaviour so that the wcs attribute continues
        to be an `astropy.wcs.WCS`.
        """
        if self.wcs is None:
            return None

        try:
            return self.wcs[item]
        except Exception as err:
            self._handle_wcs_slicing_error(err, item)

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    @property
    def wcs(self):
        return self._wcs

    @wcs.setter
    def wcs(self, value):
        if value is not None and not isinstance(value, WCS):
            raise TypeError("the wcs must be a WCS instance.")

        self._wcs = value

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = u.Unit(value)

    @property
    def psf(self):
        return self._psf

    @psf.setter
    def psf(self, value):
        if value is not None and not isinstance(value, np.ndarray):
            raise TypeError("The psf must be a numpy array.")
        self._psf = value

    @property
    def header(self):
        # FITS-flavored alias for ``meta``.
        return self._meta

    @header.setter
    def header(self, value):
        self.meta = value

    @property
    def uncertainty(self):
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, value):
        if value is not None:
            if isinstance(value, NDUncertainty):
                if getattr(value, "_parent_nddata", None) is not None:
                    # Re-wrap so an uncertainty already bound to another
                    # NDData is not shared between parents.
                    value = value.__class__(value, copy=False)
                self._uncertainty = value
            elif isinstance(value, np.ndarray):
                if value.shape != self.shape:
                    raise ValueError("uncertainty must have same shape as data.")
                self._uncertainty = StdDevUncertainty(value)
                log.info(
                    "array provided for uncertainty; assuming it is a "
                    "StdDevUncertainty."
                )
            else:
                raise TypeError(
                    "uncertainty must be an instance of a "
                    "NDUncertainty object or a numpy array."
                )
            self._uncertainty.parent_nddata = self
        else:
            self._uncertainty = value

    def to_hdu(
        self,
        hdu_mask="MASK",
        hdu_uncertainty="UNCERT",
        hdu_flags=None,
        wcs_relax=True,
        key_uncertainty_type="UTYPE",
        as_image_hdu=False,
        hdu_psf="PSFIMAGE",
    ):
        """Creates an HDUList object from a CCDData object.

        Parameters
        ----------
        hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
            If it is a string append this attribute to the HDUList as
            `~astropy.io.fits.ImageHDU` with the string as extension name.
            Flags are not supported at this time. If ``None`` this attribute
            is not appended.
            Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
            ``'PSFIMAGE'`` for psf, and `None` for flags.

        wcs_relax : bool
            Value of the ``relax`` parameter to use in converting the WCS to a
            FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
            ``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
            preserved.

        key_uncertainty_type : str, optional
            The header key name for the class name of the uncertainty (if any)
            that is used to store the uncertainty type in the uncertainty hdu.
            Default is ``UTYPE``.

            .. versionadded:: 3.1

        as_image_hdu : bool
            If this option is `True`, the first item of the returned
            `~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
            of the default `~astropy.io.fits.PrimaryHDU`.

        Raises
        ------
        ValueError
            - If ``self.mask`` is set but not a `numpy.ndarray`.
            - If ``self.uncertainty`` is set but not a astropy uncertainty type.
            - If ``self.uncertainty`` is set but has another unit then
              ``self.data``.

        NotImplementedError
            Saving flags is not supported.

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
        """
        if isinstance(self.header, fits.Header):
            # Copy here so that we can modify the HDU header by adding WCS
            # information without changing the header of the CCDData object.
            header = self.header.copy()
        else:
            # Because _insert_in_metadata_fits_safe is written as a method
            # we need to create a dummy CCDData instance to hold the FITS
            # header we are constructing. This probably indicates that
            # _insert_in_metadata_fits_safe should be rewritten in a more
            # sensible way...
            dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
            for k, v in self.header.items():
                dummy_ccd._insert_in_metadata_fits_safe(k, v)
            header = dummy_ccd.header
        if self.unit is not u.dimensionless_unscaled:
            header["bunit"] = self.unit.to_string()
        if self.wcs:
            # Simply extending the FITS header with the WCS can lead to
            # duplicates of the WCS keywords; iterating over the WCS
            # header should be safer.
            #
            # Turns out if I had read the io.fits.Header.extend docs more
            # carefully, I would have realized that the keywords exist to
            # avoid duplicates and preserve, as much as possible, the
            # structure of the commentary cards.
            #
            # Note that until astropy/astropy#3967 is closed, the extend
            # will fail if there are comment cards in the WCS header but
            # not header.
            wcs_header = self.wcs.to_header(relax=wcs_relax)
            header.extend(wcs_header, useblanks=False, update=True)

        if as_image_hdu:
            hdus = [fits.ImageHDU(self.data, header)]
        else:
            hdus = [fits.PrimaryHDU(self.data, header)]

        if hdu_mask and self.mask is not None:
            # Always assuming that the mask is a np.ndarray (check that it has
            # a 'shape').
            if not hasattr(self.mask, "shape"):
                raise ValueError("only a numpy.ndarray mask can be saved.")

            # Convert boolean mask to uint since io.fits cannot handle bool.
            hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
            hdus.append(hduMask)

        if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information which uncertainty was
            # used so that loading the HDUList can infer the uncertainty type.
            # No idea how this can be done so only allow StdDevUncertainty.
            uncertainty_cls = self.uncertainty.__class__
            if uncertainty_cls not in _known_uncertainties:
                raise ValueError(
                    f"only uncertainties of type {_known_uncertainties} can be saved."
                )
            uncertainty_name = _unc_cls_to_name[uncertainty_cls]

            hdr_uncertainty = fits.Header()
            hdr_uncertainty[key_uncertainty_type] = uncertainty_name

            # Assuming uncertainty is an StdDevUncertainty save just the array
            # this might be problematic if the Uncertainty has a unit differing
            # from the data so abort for different units. This is important for
            # astropy > 1.2
            if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
                if not _uncertainty_unit_equivalent_to_parent(
                    uncertainty_cls, self.uncertainty.unit, self.unit
                ):
                    raise ValueError(
                        "saving uncertainties with a unit that is not "
                        "equivalent to the unit from the data unit is not "
                        "supported."
                    )

            hduUncert = fits.ImageHDU(
                self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
            )
            hdus.append(hduUncert)

        if hdu_flags and self.flags:
            raise NotImplementedError(
                "adding the flags to a HDU is not supported at this time."
            )

        if hdu_psf and self.psf is not None:
            # The PSF is an image, so write it as a separate ImageHDU.
            hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
            hdus.append(hdu_psf)

        return fits.HDUList(hdus)

    def copy(self):
        """
        Return a copy of the CCDData object.
        """
        return self.__class__(self, copy=True)

    # Arithmetic methods wrapped so result propagation matches the ufunc.
    add = _arithmetic(np.add)(NDDataArray.add)
    subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
    multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
    divide = _arithmetic(np.true_divide)(NDDataArray.divide)

    def _insert_in_metadata_fits_safe(self, key, value):
        """
        Insert key/value pair into metadata in a way that FITS can serialize.

        Parameters
        ----------
        key : str
            Key to be inserted in dictionary.

        value : str or None
            Value to be inserted.

        Notes
        -----
        This addresses a shortcoming of the FITS standard. There are length
        restrictions on both the ``key`` (8 characters) and ``value`` (72
        characters) in the FITS standard. There is a convention for handling
        long keywords and a convention for handling long values, but the
        two conventions cannot be used at the same time.

        This addresses that case by checking the length of the ``key`` and
        ``value`` and, if necessary, shortening the key.
        """
        if len(key) > 8 and len(value) > 72:
            short_name = key[:8]
            self.meta[f"HIERARCH {key.upper()}"] = (
                short_name,
                f"Shortened name for {key}",
            )
            self.meta[short_name] = value
        else:
            self.meta[key] = value

    # A dictionary mapping "known" invalid fits unit
    known_invalid_fits_unit_strings = {
        "ELECTRONS/S": u.electron / u.s,
        "ELECTRONS": u.electron,
        "electrons": u.electron,
    }
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = ["JD-OBS", "MJD-OBS", "DATE-OBS"]
# WCS rotation/scale matrix keywords: the PC form (emitted by astropy.wcs)
# and the older CD form; used below to strip inconsistent leftovers.
_PCs = {"PC1_1", "PC1_2", "PC2_1", "PC2_2"}
_CDs = {"CD1_1", "CD1_2", "CD2_1", "CD2_2"}
def _generate_wcs_and_update_header(hdr):
    """
    Generate a WCS object from a header and remove the WCS-specific
    keywords from the header.

    Parameters
    ----------
    hdr : astropy.io.fits.header or other dict-like
        Header to extract WCS information from; not modified in place.

    Returns
    -------
    new_header, wcs
        A copy of ``hdr`` with WCS keywords removed, and the extracted
        `~astropy.wcs.WCS` (or the original header and `None` when no
        usable WCS could be built).
    """
    # Try constructing a WCS object.
    try:
        wcs = WCS(hdr)
    except Exception as exc:
        # Normally WCS only raises Warnings and doesn't fail but in rare
        # cases (malformed header) it could fail...
        log.info(
            "An exception happened while extracting WCS information from "
            f"the Header.\n{type(exc).__name__}: {str(exc)}"
        )
        return hdr, None
    # Test for success by checking to see if the wcs ctype has a non-empty
    # value, return None for wcs if ctype is empty.
    if not wcs.wcs.ctype[0]:
        return (hdr, None)

    new_hdr = hdr.copy()
    # If the keywords below are in the header they are also added to WCS.
    # It seems like they should *not* be removed from the header, though.
    wcs_header = wcs.to_header(relax=True)
    for k in wcs_header:
        if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
            new_hdr.remove(k, ignore_missing=True)

    # Check that this does not result in an inconsistent header WCS if the WCS
    # is converted back to a header.

    if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
        # The PCi_j representation is used by the astropy.wcs object,
        # so CDi_j keywords were not removed from new_hdr. Remove them now.
        for cd in _CDs:
            new_hdr.remove(cd, ignore_missing=True)

    # The other case -- CD in the header produced by astropy.wcs -- should
    # never happen based on [1], which computes the matrix in PC form.
    # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
    #
    # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
    # check for the possibility that both PC and CD are present in the result
    # so if the implementation of to_header changes in wcslib in the future
    # then the tests should catch it, and then this code will need to be
    # updated.

    # We need to check for any SIP coefficients that got left behind if the
    # header has SIP.
    if wcs.sip is not None:
        keyword = "{}_{}_{}"
        polynomials = ["A", "B", "AP", "BP"]
        for poly in polynomials:
            order = wcs.sip.__getattribute__(f"{poly.lower()}_order")
            for i, j in itertools.product(range(order), repeat=2):
                new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)

    return (new_hdr, wcs)
def fits_ccddata_reader(
filename,
hdu=0,
unit=None,
hdu_uncertainty="UNCERT",
hdu_mask="MASK",
hdu_flags=None,
key_uncertainty_type="UTYPE",
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
file from which CCDData should be initialized. If zero and
no data in the primary HDU, it will search for the first
extension HDU with data. The header will be added to the primary HDU.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contained scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
"do_not_scale_image_data": "Image data must be scaled.",
"scale_back": "Scale information is not preserved.",
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f"unsupported keyword: {key}."
raise TypeError(f"{prefix} {msg}")
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, "None")
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError("loading flags is currently not supported.")
if hdu_psf is not None and hdu_psf in hdus:
psf = hdus[hdu_psf].data
else:
psf = None
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (
hdus.info(hdu)[i][3] == "ImageHDU"
and hdus.fileinfo(i)["datSpan"] > 0
):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if "bunit" in hdr:
fits_unit_string = hdr["bunit"]
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == "adu":
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
f"The Header value for the key BUNIT ({fits_unit_string}) "
"cannot be interpreted as valid unit. To successfully read the "
"file as CCDData you can pass in a valid `unit` "
"argument explicitly or change the header of the FITS "
"file before reading it."
)
else:
log.info(
f"using the unit {unit} passed to the FITS reader instead "
f"of the unit {fits_unit_string} in the FITS file."
)
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
return CCDData(
hdus[hdu].data,
meta=hdr,
unit=use_unit,
mask=mask,
uncertainty=uncertainty,
wcs=wcs,
psf=psf,
)
def fits_ccddata_writer(
ccd_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu,
hdu_psf=hdu_psf,
)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader("fits", CCDData, fits_ccddata_reader)
registry.register_writer("fits", CCDData, fits_ccddata_writer)
registry.register_identifier("fits", CCDData, fits.connect.is_fits)
|
CCDData
|
python
|
mlflow__mlflow
|
mlflow/tracing/export/async_export_queue.py
|
{
"start": 959,
"end": 6766
}
|
class ____:
"""A queue-based asynchronous tracing export processor."""
def __init__(self):
self._queue: Queue[Task] = Queue(maxsize=MLFLOW_ASYNC_TRACE_LOGGING_MAX_QUEUE_SIZE.get())
self._lock = threading.RLock()
self._max_workers = MLFLOW_ASYNC_TRACE_LOGGING_MAX_WORKERS.get()
# Thread event that indicates the queue should stop processing tasks
self._stop_event = threading.Event()
self._is_active = False
self._atexit_callback_registered = False
self._active_tasks = set()
self._last_full_queue_warning_time = None
def put(self, task: Task):
"""Put a new task to the queue for processing."""
if not self.is_active():
self.activate()
# If stop event is set, wait for the queue to be drained before putting the task
if self._stop_event.is_set():
self._stop_event.wait()
try:
# Do not block if the queue is full, it will block the main application
self._queue.put(task, block=False)
except queue_Full:
if self._last_full_queue_warning_time is None or (
time.time() - self._last_full_queue_warning_time > 30
):
_logger.warning(
"Trace export queue is full, trace will be discarded. "
"Consider increasing the queue size through "
"`MLFLOW_ASYNC_TRACE_LOGGING_MAX_QUEUE_SIZE` environment variable or "
"number of workers through `MLFLOW_ASYNC_TRACE_LOGGING_MAX_WORKERS`"
" environment variable."
)
self._last_full_queue_warning_time = time.time()
def _consumer_loop(self) -> None:
while not self._stop_event.is_set():
self._dispatch_task()
# Drain remaining tasks when stopping
while not self._queue.empty():
self._dispatch_task()
def _dispatch_task(self) -> None:
"""Dispatch a task from the queue to the worker thread pool."""
# NB: Monitor number of active tasks being processed by the workers. If the all
# workers are busy, wait for one of them to finish before draining a new task
# from the queue. This is because ThreadPoolExecutor does not have a built-in
# mechanism to limit the number of pending tasks in the internal queue.
# This ruins the purpose of having a size bound for self._queue, because the
# TPE's internal queue can grow indefinitely and potentially run out of memory.
# Therefore, we should only dispatch a new task when there is a worker available,
# and pend the new tasks in the self._queue which has a size bound.
if len(self._active_tasks) >= self._max_workers:
_, self._active_tasks = wait(self._active_tasks, return_when=FIRST_COMPLETED)
try:
task = self._queue.get(timeout=1)
except Empty:
return
def _handle(task):
task.handle()
self._queue.task_done()
try:
future = self._worker_threadpool.submit(_handle, task)
self._active_tasks.add(future)
except Exception as e:
# In case it fails to submit the task to the worker thread pool
# such as interpreter shutdown, handle the task in this thread
_logger.debug(
f"Failed to submit task to worker thread pool. Error: {e}",
exc_info=True,
)
_handle(task)
def activate(self) -> None:
"""Activate the async queue to accept and handle incoming tasks."""
with self._lock:
if self._is_active:
return
self._set_up_threads()
# Callback to ensure remaining tasks are processed before program exit
if not self._atexit_callback_registered:
atexit.register(self._at_exit_callback)
self._atexit_callback_registered = True
self._is_active = True
def is_active(self) -> bool:
return self._is_active
def _set_up_threads(self) -> None:
"""Set up the consumer and worker threads."""
with self._lock:
self._worker_threadpool = ThreadPoolExecutor(
max_workers=self._max_workers,
thread_name_prefix="MlflowTraceLoggingWorker",
)
self._consumer_thread = threading.Thread(
target=self._consumer_loop,
name="MLflowTraceLoggingConsumer",
daemon=True,
)
self._consumer_thread.start()
def _at_exit_callback(self) -> None:
"""Callback function executed when the program is exiting."""
try:
_logger.info(
"Flushing the async trace logging queue before program exit. "
"This may take a while..."
)
self.flush(terminate=True)
except Exception as e:
_logger.error(f"Error while finishing trace export requests: {e}")
def flush(self, terminate=False) -> None:
"""
Flush the async logging queue.
Args:
terminate: If True, shut down the logging threads after flushing.
"""
if not self.is_active():
return
self._stop_event.set()
self._consumer_thread.join()
# Wait for all tasks to be processed
self._queue.join()
self._worker_threadpool.shutdown(wait=True)
self._is_active = False
# Restart threads to listen to incoming requests after flushing, if not terminating
if not terminate:
self._stop_event.clear()
self.activate()
|
AsyncTraceExportQueue
|
python
|
viewflow__viewflow
|
tests/json/test_json__basics.py
|
{
"start": 2866,
"end": 3028
}
|
class ____(Client):
approved = jsonstore.BooleanField()
personal_phone = jsonstore.CharField(max_length=250)
class Meta:
proxy = True
|
VIPClient
|
python
|
getsentry__sentry
|
fixtures/sudo_testutils.py
|
{
"start": 157,
"end": 426
}
|
class ____:
"""Stub backend
Always authenticates when the password matches self.password
"""
password = "stub"
def authenticate(self, request, username, password):
if password == self.password:
return User()
|
StubPasswordBackend
|
python
|
getsentry__sentry
|
src/sentry/db/models/query.py
|
{
"start": 915,
"end": 6580
}
|
class ____(Exception):
pass
def resolve_combined_expression(instance: Model, node: CombinedExpression) -> BaseExpression:
def _resolve(instance: Model, node: BaseExpression | F) -> BaseExpression:
if isinstance(node, Value):
return node.value
if isinstance(node, F):
return getattr(instance, node.name)
if isinstance(node, CombinedExpression):
return resolve_combined_expression(instance, node)
return node
if isinstance(node, Value):
return node.value
if not isinstance(node, CombinedExpression):
raise CannotResolveExpression
op = COMBINED_EXPRESSION_CALLBACKS.get(node.connector, None)
if not op:
raise CannotResolveExpression
if hasattr(node, "children"):
children = node.children
else:
children = [node.lhs, node.rhs]
runner = _resolve(instance, children[0])
for n in children[1:]:
runner = op(runner, _resolve(instance, n))
return runner
def _get_field(model: type[Model], key: str) -> Field[object, object]:
field = model._meta.get_field(key)
if not isinstance(field, Field):
raise TypeError(f"expected Field for {key}, got ({field})")
return field
def _handle_value(instance: BaseModel, value: Any) -> Any:
if isinstance(value, CombinedExpression):
return resolve_combined_expression(instance, value)
return value
def update(instance: BaseModel, using: str | None = None, **kwargs: Any) -> int:
"""
Updates specified attributes on the current instance.
"""
assert instance.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(instance.__class__, instance=instance)
for field in instance._meta.fields:
if getattr(field, "auto_now", False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(instance, False)
affected = (
instance.__class__.objects.using(using)
.filter(pk=instance.pk)
# Disable the post update query signal since we're going to send a more specific `post_save` signal here.
.with_post_update_signal(False)
.update(**kwargs)
)
for k, v in kwargs.items():
setattr(instance, k, _handle_value(instance, v))
if affected == 1:
post_save.send_robust(
sender=instance.__class__,
instance=instance,
created=False,
update_fields=list(kwargs.keys()),
)
return affected
elif affected == 0:
return affected
elif affected < 0:
raise ValueError(
"Somehow we have updated a negative number of rows. You seem to have a problem with your db backend."
)
else:
raise ValueError("Somehow we have updated multiple rows. This is very, very bad.")
update.alters_data = True # type: ignore[attr-defined]
def create_or_update(
model: type[Model], using: str | None = None, **kwargs: Any
) -> tuple[int, Literal[False]] | tuple[Model, Literal[True]]:
"""
Similar to get_or_create, either updates a row or creates it.
In order to determine if the row exists, this searches on all of the kwargs
besides `values` and `default`.
If the row exists, it is updated with the data in `values`. If it
doesn't, it is created with the data in `values`, `defaults`, and the remaining
kwargs.
The result will be (rows affected, False) if the row was not created,
or (instance, True) if the object is new.
>>> create_or_update(MyModel, key='value', values={
>>> 'col_name': F('col_name') + 1,
>>> }, defaults={'created_at': timezone.now()})
"""
values = kwargs.pop("values", {})
defaults = kwargs.pop("defaults", {})
if not using:
using = router.db_for_write(model)
objects = model.objects.using(using)
affected = objects.filter(**kwargs).update(**values)
if affected:
return affected, False
create_kwargs = kwargs.copy()
inst = objects.model()
for k, v in itertools.chain(values.items(), defaults.items()):
# XXX(dcramer): we want to support column shortcut on create so
# we can do create_or_update(..., {'project': 1})
if not isinstance(v, Model):
k = _get_field(model, k).attname
if isinstance(v, CombinedExpression):
create_kwargs[k] = resolve_combined_expression(inst, v)
else:
create_kwargs[k] = v
try:
with transaction.atomic(using=using):
return objects.create(**create_kwargs), True
except IntegrityError:
metrics.incr(
"db.models.query.create_or_update.integrity_error",
tags={"model": model.__name__},
sample_rate=1,
)
affected = objects.filter(**kwargs).update(**values)
return affected, False
def in_iexact(column: str, values: Any) -> Q:
"""Operator to test if any of the given values are (case-insensitive)
matching to values in the given column."""
from operator import or_
query = f"{column}__iexact"
# if values is empty, have a default value for the reduce call that will essentially resolve a column in []
query_in = f"{column}__in"
return reduce(or_, [Q(**{query: v}) for v in values], Q(**{query_in: []}))
def in_icontains(column: str, values: Any) -> Q:
"""Operator to test if any of the given values are (case-insensitively)
contained within values in the given column."""
from operator import or_
query = f"{column}__icontains"
return reduce(or_, [Q(**{query: v}) for v in values])
|
CannotResolveExpression
|
python
|
scipy__scipy
|
scipy/io/arff/_arffread.py
|
{
"start": 19502,
"end": 26143
}
|
class ____:
"""Small container to keep useful information on a ARFF dataset.
Knows about attributes names and types.
Examples
--------
::
data, meta = loadarff('iris.arff')
# This will print the attributes names of the iris.arff dataset
for i in meta:
print(i)
# This works too
meta.names()
# Getting attribute type
types = meta.types()
Methods
-------
names
types
Notes
-----
Also maintains the list of attributes in order, i.e., doing for i in
meta, where meta is an instance of MetaData, will return the
different attribute names in the order they were defined.
"""
def __init__(self, rel, attr):
self.name = rel
self._attributes = {a.name: a for a in attr}
def __repr__(self):
msg = ""
msg += f"Dataset: {self.name}\n"
for i in self._attributes:
msg += f"\t{i}'s type is {self._attributes[i].type_name}"
if self._attributes[i].range:
msg += f", range is {str(self._attributes[i].range)}"
msg += '\n'
return msg
def __iter__(self):
return iter(self._attributes)
def __getitem__(self, key):
attr = self._attributes[key]
return (attr.type_name, attr.range)
def names(self):
"""Return the list of attribute names.
Returns
-------
attrnames : list of str
The attribute names.
"""
return list(self._attributes)
def types(self):
"""Return the list of attribute types.
Returns
-------
attr_types : list of str
The attribute types.
"""
attr_types = [self._attributes[name].type_name
for name in self._attributes]
return attr_types
def loadarff(f):
"""
Read an arff file.
The data is returned as a record array, which can be accessed much like
a dictionary of NumPy arrays. For example, if one of the attributes is
called 'pressure', then its first 10 data points can be accessed from the
``data`` record array like so: ``data['pressure'][0:10]``
Parameters
----------
f : file-like or str
File-like object to read from, or filename to open.
Returns
-------
data : record array
The data of the arff file, accessible by attribute names.
meta : `MetaData`
Contains information about the arff file such as name and
type of attributes, the relation (name of the dataset), etc.
Raises
------
ParseArffError
This is raised if the given file is not ARFF-formatted.
NotImplementedError
The ARFF file has an attribute which is not supported yet.
Notes
-----
This function should be able to read most arff files. Not
implemented functionality include:
* date type attributes
* string type attributes
It can read files with numeric and nominal attributes. It cannot read
files with sparse data ({} in the file). However, this function can
read files with missing data (? in the file), representing the data
points as NaNs.
Examples
--------
>>> from scipy.io import arff
>>> from io import StringIO
>>> content = \"\"\"
... @relation foo
... @attribute width numeric
... @attribute height numeric
... @attribute color {red,green,blue,yellow,black}
... @data
... 5.0,3.25,blue
... 4.5,3.75,green
... 3.0,4.00,red
... \"\"\"
>>> f = StringIO(content)
>>> data, meta = arff.loadarff(f)
>>> data
array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')])
>>> meta
Dataset: foo
\twidth's type is numeric
\theight's type is numeric
\tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f)
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
def _loadarff(ofile):
# Parse the header file
try:
rel, attr = read_header(ofile)
except ValueError as e:
msg = "Error while parsing header, error was: " + str(e)
raise ParseArffError(msg) from e
# Check whether we have a string attribute (not supported yet)
hasstr = False
for a in attr:
if isinstance(a, StringAttribute):
hasstr = True
meta = MetaData(rel, attr)
# XXX The following code is not great
# Build the type descriptor descr and the list of converters to convert
# each attribute to the suitable type (which should match the one in
# descr).
# This can be used once we want to support integer as integer values and
# not as numeric anymore (using masked arrays ?).
if hasstr:
# How to support string efficiently ? Ideally, we should know the max
# size of the string before allocating the numpy array.
raise NotImplementedError("String attributes not supported yet, sorry")
ni = len(attr)
def generator(row_iter, delim=','):
# TODO: this is where we are spending time (~80%). I think things
# could be made more efficiently:
# - We could for example "compile" the function, because some values
# do not change here.
# - The function to convert a line to dtyped values could also be
# generated on the fly from a string and be executed instead of
# looping.
# - The regex are overkill: for comments, checking that a line starts
# by % should be enough and faster, and for empty lines, same thing
# --> this does not seem to change anything.
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(ni))
dialect = None
for raw in row_iter:
# We do not abstract skipping comments and empty lines for
# performance reasons.
if r_comment.match(raw) or r_empty.match(raw):
continue
row, dialect = split_data_line(raw, dialect)
yield tuple([attr[i].parse_data(row[i]) for i in elems])
a = list(generator(ofile))
# No error should happen here: it is a bug otherwise
data = np.array(a, [(a.name, a.dtype) for a in attr])
return data, meta
|
MetaData
|
python
|
kubernetes-client__python
|
kubernetes/base/stream/ws_client.py
|
{
"start": 1367,
"end": 9515
}
|
class ____:
def __init__(self, configuration, url, headers, capture_all, binary=False):
"""A websocket client with support for channels.
Exec command uses different channels for different streams. for
example, 0 is stdin, 1 is stdout and 2 is stderr. Some other API calls
like port forwarding can forward different pods' streams to different
channels.
"""
self._connected = False
self._channels = {}
self.binary = binary
self.newline = '\n' if not self.binary else b'\n'
if capture_all:
self._all = StringIO() if not self.binary else BytesIO()
else:
self._all = _IgnoredIO()
self.sock = create_websocket(configuration, url, headers)
self._connected = True
self._returncode = None
def peek_channel(self, channel, timeout=0):
"""Peek a channel and return part of the input,
empty string otherwise."""
self.update(timeout=timeout)
if channel in self._channels:
return self._channels[channel]
return ""
def read_channel(self, channel, timeout=0):
"""Read data from a channel."""
if channel not in self._channels:
ret = self.peek_channel(channel, timeout)
else:
ret = self._channels[channel]
if channel in self._channels:
del self._channels[channel]
return ret
def readline_channel(self, channel, timeout=None):
"""Read a line from a channel."""
if timeout is None:
timeout = float("inf")
start = time.time()
while self.is_open() and time.time() - start < timeout:
if channel in self._channels:
data = self._channels[channel]
if self.newline in data:
index = data.find(self.newline)
ret = data[:index]
data = data[index+1:]
if data:
self._channels[channel] = data
else:
del self._channels[channel]
return ret
self.update(timeout=(timeout - time.time() + start))
def write_channel(self, channel, data):
"""Write data to a channel."""
# check if we're writing binary data or not
binary = six.PY3 and type(data) == six.binary_type
opcode = ABNF.OPCODE_BINARY if binary else ABNF.OPCODE_TEXT
channel_prefix = chr(channel)
if binary:
channel_prefix = six.binary_type(channel_prefix, "ascii")
payload = channel_prefix + data
self.sock.send(payload, opcode=opcode)
def peek_stdout(self, timeout=0):
"""Same as peek_channel with channel=1."""
return self.peek_channel(STDOUT_CHANNEL, timeout=timeout)
def read_stdout(self, timeout=None):
"""Same as read_channel with channel=1."""
return self.read_channel(STDOUT_CHANNEL, timeout=timeout)
def readline_stdout(self, timeout=None):
"""Same as readline_channel with channel=1."""
return self.readline_channel(STDOUT_CHANNEL, timeout=timeout)
def peek_stderr(self, timeout=0):
"""Same as peek_channel with channel=2."""
return self.peek_channel(STDERR_CHANNEL, timeout=timeout)
def read_stderr(self, timeout=None):
"""Same as read_channel with channel=2."""
return self.read_channel(STDERR_CHANNEL, timeout=timeout)
def readline_stderr(self, timeout=None):
"""Same as readline_channel with channel=2."""
return self.readline_channel(STDERR_CHANNEL, timeout=timeout)
def read_all(self):
"""Return buffered data received on stdout and stderr channels.
This is useful for non-interactive call where a set of command passed
to the API call and their result is needed after the call is concluded.
Should be called after run_forever() or update()
TODO: Maybe we can process this and return a more meaningful map with
channels mapped for each input.
"""
out = self._all.getvalue()
self._all = self._all.__class__()
self._channels = {}
return out
def is_open(self):
"""True if the connection is still alive."""
return self._connected
def write_stdin(self, data):
"""The same as write_channel with channel=0."""
self.write_channel(STDIN_CHANNEL, data)
def update(self, timeout=0):
"""Update channel buffers with at most one complete frame of input."""
if not self.is_open():
return
if not self.sock.connected:
self._connected = False
return
# The options here are:
# select.select() - this will work on most OS, however, it has a
# limitation of only able to read fd numbers up to 1024.
# i.e. does not scale well. This was the original
# implementation.
# select.poll() - this will work on most unix based OS, but not as
# efficient as epoll. Will work for fd numbers above 1024.
# select.epoll() - newest and most efficient way of polling.
# However, only works on linux.
if hasattr(select, "poll"):
poll = select.poll()
poll.register(self.sock.sock, select.POLLIN)
if timeout is not None:
timeout *= 1_000 # poll method uses milliseconds as the time unit
r = poll.poll(timeout)
poll.unregister(self.sock.sock)
else:
r, _, _ = select.select(
(self.sock.sock, ), (), (), timeout)
if r:
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
self._connected = False
return
elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:
data = frame.data
if six.PY3 and not self.binary:
data = data.decode("utf-8", "replace")
if len(data) > 1:
channel = data[0]
if six.PY3 and not self.binary:
channel = ord(channel)
data = data[1:]
if data:
if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
# keeping all messages in the order they received
# for non-blocking call.
self._all.write(data)
if channel not in self._channels:
self._channels[channel] = data
else:
self._channels[channel] += data
def run_forever(self, timeout=None):
"""Wait till connection is closed or timeout reached. Buffer any input
received during this time."""
if timeout:
start = time.time()
while self.is_open() and time.time() - start < timeout:
self.update(timeout=(timeout - time.time() + start))
else:
while self.is_open():
self.update(timeout=None)
@property
def returncode(self):
"""
The return code, A None value indicates that the process hasn't
terminated yet.
"""
if self.is_open():
return None
else:
if self._returncode is None:
err = self.read_channel(ERROR_CHANNEL)
err = yaml.safe_load(err)
if err['status'] == "Success":
self._returncode = 0
else:
self._returncode = int(err['details']['causes'][0]['message'])
return self._returncode
def close(self, **kwargs):
"""
close websocket connection.
"""
self._connected = False
if self.sock:
self.sock.close(**kwargs)
WSResponse = collections.namedtuple('WSResponse', ['data'])
|
WSClient
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axes_grid1/axes_grid.py
|
{
"start": 10468,
"end": 22631
}
|
class ____(Grid):
"""
A grid of Axes for Image display.
This class is a specialization of `~.axes_grid1.axes_grid.Grid` for displaying a
grid of images. In particular, it forces all axes in a column to share their x-axis
and all axes in a row to share their y-axis. It further provides helpers to add
colorbars to some or all axes.
"""
def __init__(self, fig,
rect,
nrows_ncols,
n_axes=None,
direction="row",
axes_pad=0.02,
*,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Parameters
----------
fig : `.Figure`
The parent figure.
rect : (float, float, float, float) or int
The axes position, as a ``(left, bottom, width, height)`` tuple or
as a three-digit subplot position code (e.g., "121").
nrows_ncols : (int, int)
Number of rows and columns in the grid.
n_axes : int, optional
If given, only the first *n_axes* axes in the grid are created.
direction : {"row", "column"}, default: "row"
Whether axes are created in row-major ("row by row") or
column-major order ("column by column"). This also affects the
order in which axes are accessed using indexing (``grid[index]``).
axes_pad : float or (float, float), default: 0.02in
Padding or (horizontal padding, vertical padding) between axes, in
inches.
share_all : bool, default: False
Whether all axes share their x- and y-axis. Note that in any case,
all axes in a column share their x-axis and all axes in a row share
their y-axis.
aspect : bool, default: True
Whether the axes aspect ratio follows the aspect ratio of the data
limits.
label_mode : {"L", "1", "all"}, default: "L"
Determines which axes will get tick labels:
- "L": All axes on the left column get vertical tick labels;
all axes on the bottom row get horizontal tick labels.
- "1": Only the bottom left axes is labelled.
- "all": all axes are labelled.
cbar_mode : {"each", "single", "edge", None}, default: None
Whether to create a colorbar for "each" axes, a "single" colorbar
for the entire grid, colorbars only for axes on the "edge"
determined by *cbar_location*, or no colorbars. The colorbars are
stored in the :attr:`!cbar_axes` attribute.
cbar_location : {"left", "right", "bottom", "top"}, default: "right"
cbar_pad : float, default: None
Padding between the image axes and the colorbar axes.
.. versionchanged:: 3.10
``cbar_mode="single"`` no longer adds *axes_pad* between the axes
and the colorbar if the *cbar_location* is "left" or "bottom".
cbar_size : size specification (see `!.Size.from_any`), default: "5%"
Colorbar size.
cbar_set_cax : bool, default: True
If True, each axes in the grid has a *cax* attribute that is bound
to associated *cbar_axes*.
axes_class : subclass of `matplotlib.axes.Axes`, default: `.mpl_axes.Axes`
"""
_api.check_in_list(["each", "single", "edge", None],
cbar_mode=cbar_mode)
_api.check_in_list(["left", "right", "bottom", "top"],
cbar_location=cbar_location)
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
# The colorbar axes are created in _init_locators().
super().__init__(
fig, rect, nrows_ncols, n_axes,
direction=direction, axes_pad=axes_pad,
share_all=share_all, share_x=True, share_y=True, aspect=aspect,
label_mode=label_mode, axes_class=axes_class)
for ax in self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
elif self._colorbar_mode == "edge":
for index, ax in enumerate(self.axes_all):
col, row = self._get_col_row(index)
if self._colorbar_location in ("left", "right"):
ax.cax = self.cbar_axes[row]
else:
ax.cax = self.cbar_axes[col]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
    def _init_locators(self):
        # Slightly abusing this method to inject colorbar creation into init.
        #
        # Builds the horizontal (h) and vertical (v) size lists fed to the
        # divider, creates one colorbar axes per grid cell, and attaches
        # locators according to the colorbar mode/location chosen at
        # construction time.
        if self._colorbar_pad is None:
            # horizontal or vertical arrangement?
            if self._colorbar_location in ("left", "right"):
                self._colorbar_pad = self._horiz_pad_size.fixed_size
            else:
                self._colorbar_pad = self._vert_pad_size.fixed_size
        # One (initially unpositioned) colorbar axes per cell; visibility is
        # trimmed below depending on the mode.
        self.cbar_axes = [
            _cbaraxes_class_factory(self._defaultAxesClass)(
                self.axes_all[0].get_figure(root=False), self._divider.get_position(),
                orientation=self._colorbar_location)
            for _ in range(self.n_axes)]
        cb_mode = self._colorbar_mode
        cb_location = self._colorbar_location
        h = []
        v = []
        h_ax_pos = []
        h_cb_pos = []
        # A single colorbar on the left/bottom edge must be inserted *before*
        # the axes cells in the divider's size list (note: no axes_pad is
        # added between the grid and the colorbar here).
        if cb_mode == "single" and cb_location in ("left", "bottom"):
            if cb_location == "left":
                sz = self._nrows * Size.AxesX(self.axes_llc)
                h.append(Size.from_any(self._colorbar_size, sz))
                h.append(Size.from_any(self._colorbar_pad, sz))
                locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
            elif cb_location == "bottom":
                sz = self._ncols * Size.AxesY(self.axes_llc)
                v.append(Size.from_any(self._colorbar_size, sz))
                v.append(Size.from_any(self._colorbar_pad, sz))
                locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
            for i in range(self.n_axes):
                self.cbar_axes[i].set_visible(False)
            self.cbar_axes[0].set_axes_locator(locator)
            self.cbar_axes[0].set_visible(True)
        # Horizontal pass: one size entry per column, with colorbar slots for
        # "each" mode (every column) or "edge" mode (first/last column only).
        # h_ax_pos / h_cb_pos record the indices of axes / colorbar cells
        # inside ``h`` for use when building locators below.
        for col, ax in enumerate(self.axes_row[0]):
            if col != 0:
                h.append(self._horiz_pad_size)
            if ax:
                sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
            else:
                sz = Size.AxesX(self.axes_all[0],
                                aspect="axes", ref_ax=self.axes_all[0])
            if (cb_location == "left"
                    and (cb_mode == "each"
                         or (cb_mode == "edge" and col == 0))):
                h_cb_pos.append(len(h))
                h.append(Size.from_any(self._colorbar_size, sz))
                h.append(Size.from_any(self._colorbar_pad, sz))
            h_ax_pos.append(len(h))
            h.append(sz)
            if (cb_location == "right"
                    and (cb_mode == "each"
                         or (cb_mode == "edge" and col == self._ncols - 1))):
                h.append(Size.from_any(self._colorbar_pad, sz))
                h_cb_pos.append(len(h))
                h.append(Size.from_any(self._colorbar_size, sz))
        # Vertical pass, mirroring the horizontal one.  Rows are walked
        # bottom-to-top since the divider's y origin is at the bottom.
        v_ax_pos = []
        v_cb_pos = []
        for row, ax in enumerate(self.axes_column[0][::-1]):
            if row != 0:
                v.append(self._vert_pad_size)
            if ax:
                sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
            else:
                sz = Size.AxesY(self.axes_all[0],
                                aspect="axes", ref_ax=self.axes_all[0])
            if (cb_location == "bottom"
                    and (cb_mode == "each"
                         or (cb_mode == "edge" and row == 0))):
                v_cb_pos.append(len(v))
                v.append(Size.from_any(self._colorbar_size, sz))
                v.append(Size.from_any(self._colorbar_pad, sz))
            v_ax_pos.append(len(v))
            v.append(sz)
            if (cb_location == "top"
                    and (cb_mode == "each"
                         or (cb_mode == "edge" and row == self._nrows - 1))):
                v.append(Size.from_any(self._colorbar_pad, sz))
                v_cb_pos.append(len(v))
                v.append(Size.from_any(self._colorbar_size, sz))
        # Attach a locator to every axes, and (for "each"/"edge" modes) to
        # the matching colorbar axes.
        for i in range(self.n_axes):
            col, row = self._get_col_row(i)
            locator = self._divider.new_locator(nx=h_ax_pos[col],
                                                ny=v_ax_pos[self._nrows-1-row])
            self.axes_all[i].set_axes_locator(locator)
            if cb_mode == "each":
                if cb_location in ("right", "left"):
                    locator = self._divider.new_locator(
                        nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
                elif cb_location in ("top", "bottom"):
                    locator = self._divider.new_locator(
                        nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
                self.cbar_axes[i].set_axes_locator(locator)
            elif cb_mode == "edge":
                if (cb_location == "left" and col == 0
                        or cb_location == "right" and col == self._ncols - 1):
                    locator = self._divider.new_locator(
                        nx=h_cb_pos[0], ny=v_ax_pos[self._nrows - 1 - row])
                    self.cbar_axes[row].set_axes_locator(locator)
                elif (cb_location == "bottom" and row == self._nrows - 1
                        or cb_location == "top" and row == 0):
                    locator = self._divider.new_locator(nx=h_ax_pos[col],
                                                        ny=v_cb_pos[0])
                    self.cbar_axes[col].set_axes_locator(locator)
        # A single colorbar on the right/top edge goes *after* the axes
        # cells; index -2 addresses the just-appended size entry.
        if cb_mode == "single":
            if cb_location == "right":
                sz = self._nrows * Size.AxesX(self.axes_llc)
                h.append(Size.from_any(self._colorbar_pad, sz))
                h.append(Size.from_any(self._colorbar_size, sz))
                locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
            elif cb_location == "top":
                sz = self._ncols * Size.AxesY(self.axes_llc)
                v.append(Size.from_any(self._colorbar_pad, sz))
                v.append(Size.from_any(self._colorbar_size, sz))
                locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
            if cb_location in ("right", "top"):
                for i in range(self.n_axes):
                    self.cbar_axes[i].set_visible(False)
                self.cbar_axes[0].set_axes_locator(locator)
                self.cbar_axes[0].set_visible(True)
        elif cb_mode == "each":
            for i in range(self.n_axes):
                self.cbar_axes[i].set_visible(True)
        elif cb_mode == "edge":
            if cb_location in ("right", "left"):
                count = self._nrows
            else:
                count = self._ncols
            for i in range(count):
                self.cbar_axes[i].set_visible(True)
            # NOTE: relies on ``i`` leaking from the loop above; hides the
            # remaining (unused) colorbar axes.
            for j in range(i + 1, self.n_axes):
                self.cbar_axes[j].set_visible(False)
        else:
            # No colorbars requested: hide them and park them out of the way.
            for i in range(self.n_axes):
                self.cbar_axes[i].set_visible(False)
                self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
                                               which="active")
        self._divider.set_horizontal(h)
        self._divider.set_vertical(v)
AxesGrid = ImageGrid
|
ImageGrid
|
python
|
PrefectHQ__prefect
|
src/prefect/_versioning.py
|
{
"start": 12380,
"end": 14132
}
|
class ____(str, Enum):
    """Known sources of deployment version information.

    Values are the string identifiers used in configuration:
    ``prefect:simple`` for user-supplied versions and ``vcs:*`` for
    versions inferred from a version-control provider.  As a ``str``
    mixin Enum, members compare equal to their string values.
    """

    SIMPLE = "prefect:simple"
    GITHUB = "vcs:github"
    GITLAB = "vcs:gitlab"
    BITBUCKET = "vcs:bitbucket"
    AZUREDEVOPS = "vcs:azuredevops"
    GIT = "vcs:git"
async def get_inferred_version_info(
    version_type: Optional[str] = None,
) -> VersionInfo | None:
    """
    Attempts to infer version information from the environment.

    Args:
        version_type: Optional type of version info to get. If provided, only that
            type will be attempted.

    Returns:
        VersionInfo: The inferred version information, or None if no source
            could provide one (or the "simple" type was requested).

    Raises:
        ValueError: If an unknown version type is provided
    """
    # Map version types to their getter functions
    type_to_getter: Dict[str, Callable[..., Coroutine[Any, Any, Any]]] = {
        VersionType.GITHUB: get_github_version_info,
        VersionType.GITLAB: get_gitlab_version_info,
        VersionType.BITBUCKET: get_bitbucket_version_info,
        VersionType.AZUREDEVOPS: get_azuredevops_version_info,
        VersionType.GIT: get_git_version_info,
    }

    # Default order of getters to try
    default_getters = [
        get_github_version_info,
        get_gitlab_version_info,
        get_bitbucket_version_info,
        get_azuredevops_version_info,
        get_git_version_info,
    ]

    # Use equality, not identity: ``version_type`` is typed Optional[str],
    # so a caller may pass the plain string "prefect:simple".  With ``is``
    # that string would fall through and raise "Unknown version type"
    # below; VersionType is a str Enum, so ``==`` handles both forms.
    if version_type == VersionType.SIMPLE:
        return None

    if version_type:
        if version_type not in type_to_getter:
            raise ValueError(f"Unknown version type: {version_type}")
        getters = [type_to_getter[version_type]]
    else:
        getters = default_getters

    # Try each source in order; a getter signals "not applicable here" by
    # raising ValueError, which we treat as "try the next one".
    for getter in getters:
        try:
            return await getter()
        except ValueError:
            continue
    return None
|
VersionType
|
python
|
boto__boto3
|
tests/functional/test_crt.py
|
{
"start": 1041,
"end": 2017
}
|
class ____(ContextDecorator):
"""Helper class to simulate a CRT optimized EC2 instance."""
DEFAULT_LOCK_MOCK = mock.Mock()
def __init__(self, lock=DEFAULT_LOCK_MOCK, optimized=True):
self.acquire_process_lock = mock.patch(
'boto3.crt.acquire_crt_s3_process_lock'
)
self.acquire_process_lock.return_value = lock
self.is_optimized = mock.patch('awscrt.s3.is_optimized_for_system')
self.is_optimized.return_value = optimized
def __enter__(self, *args, **kwargs):
self.acquire_process_lock.start()
self.is_optimized.start()
def __exit__(self, *args, **kwargs):
self.acquire_process_lock.stop()
self.is_optimized.stop()
def create_mock_client(region_name='us-west-2'):
    """Return a mocked S3 client with a stubbed region and credentials."""
    mock_client = mock.Mock()
    mock_client.meta.region_name = region_name
    fake_credentials = Credentials('access', 'secret', 'token')
    mock_client._get_credentials.return_value = fake_credentials
    return mock_client
|
MockOptimizedInstance
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 83472,
"end": 83881
}
|
class ____:
    """Constants mirroring Excel's COM ``XlPasteSpecialOperation`` enum.

    Used as the ``operation`` argument of ``Range.PasteSpecial``.
    """

    xlPasteSpecialOperationAdd = 2  # from enum XlPasteSpecialOperation
    xlPasteSpecialOperationDivide = 5  # from enum XlPasteSpecialOperation
    xlPasteSpecialOperationMultiply = 4  # from enum XlPasteSpecialOperation
    xlPasteSpecialOperationNone = -4142  # from enum XlPasteSpecialOperation
    xlPasteSpecialOperationSubtract = 3  # from enum XlPasteSpecialOperation
|
PasteSpecialOperation
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_privacy_urls.py
|
{
"start": 18216,
"end": 18568
}
|
class ____(URLAccessMixin):
    """Exercise every public profile URL pattern for the test user."""

    def setUp(self):
        super().setUp()
        # Inject the username kwarg the profile URL patterns require;
        # equivalent to updating default_kwargs with a one-item dict.
        self.default_kwargs["username"] = self.tester.username

    def test_public_urls(self):
        from readthedocs.profiles.urls.public import urlpatterns

        self._test_url(urlpatterns)
|
PublicUserProfileMixin
|
python
|
pytorch__pytorch
|
test/distributed/test_p2p_ipc.py
|
{
"start": 484,
"end": 2062
}
|
class ____(MultiProcContinuousTest):
    @classmethod
    def backend_str(cls):
        # gloo is CPU-friendly and only used for the object broadcast /
        # barriers; the tensor sharing itself goes through device IPC.
        return "gloo"

    def _init_device(self) -> None:
        # init and pin the process to the device
        device_module.set_device(self.device)
        torch.empty(1, device=self.device)

    @property
    def device(self) -> torch.device:
        return torch.device(device_type, self.rank)

    def test_p2p_ipc(self) -> None:
        """
        Test that cross-process P2P access works, by reducing a tensor,
        and then constructing a new tensor from the reduced tensor,
        while modifying the 6-th argument.
        This test is here to help stabilize the P2P share mechanism,
        preventing bc-breakage.
        """
        self._init_device()
        tensor: torch.Tensor
        if self.rank == 0:
            tensor = torch.randn(2333, device=self.device)
            tensor_meta = reduce_tensor(tensor)
            torch.distributed.broadcast_object_list([tensor_meta], src=0)
        else:
            recv_list = [None]
            torch.distributed.broadcast_object_list(recv_list, src=0)
            tensor_meta = recv_list[0]
            func, args = tensor_meta
            args = list(args)
            # Rebind the device index (7th positional arg of the rebuild
            # function) to this rank's device before reconstructing.
            args[6] = self.rank
            tensor = func(*args)
        torch.distributed.barrier()
        if self.rank == 0:
            tensor.fill_(1)
            device_module.synchronize()
        torch.distributed.barrier()
        # Every rank must observe rank 0's fill through the shared mapping.
        # The previous assertion, ``tensor.allclose(tensor, 1)``, compared
        # the tensor against *itself* (with rtol=1) and was vacuously true;
        # compare against the expected constant instead.
        assert torch.equal(tensor, torch.ones_like(tensor))
        torch.distributed.barrier()
if __name__ == "__main__":
run_tests()
|
P2PIpcTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/pyramid-transition-matrix.py
|
{
"start": 174,
"end": 1535
}
|
class ____(object):
def pyramidTransition(self, bottom, allowed):
"""
:type bottom: str
:type allowed: List[str]
:rtype: bool
"""
def pyramidTransitionHelper(bottom, edges, lookup):
def dfs(bottom, edges, new_bottom, idx, lookup):
if idx == len(bottom)-1:
return pyramidTransitionHelper("".join(new_bottom), edges, lookup)
for i in edges[ord(bottom[idx])-ord('A')][ord(bottom[idx+1])-ord('A')]:
new_bottom[idx] = chr(i+ord('A'))
if dfs(bottom, edges, new_bottom, idx+1, lookup):
return True
return False
if len(bottom) == 1:
return True
if bottom in lookup:
return False
lookup.add(bottom)
for i in xrange(len(bottom)-1):
if not edges[ord(bottom[i])-ord('A')][ord(bottom[i+1])-ord('A')]:
return False
new_bottom = ['A']*(len(bottom)-1)
return dfs(bottom, edges, new_bottom, 0, lookup)
edges = [[[] for _ in xrange(7)] for _ in xrange(7)]
for s in allowed:
edges[ord(s[0])-ord('A')][ord(s[1])-ord('A')].append(ord(s[2])-ord('A'))
return pyramidTransitionHelper(bottom, edges, set())
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/config/test_config.py
|
{
"start": 195,
"end": 17851
}
|
class ____:
    """Unit tests for the pandas options registry (``pandas._config.config``).

    Each test runs against a pristine, empty registry thanks to the
    autouse ``clean_config`` fixture below.
    """

    @pytest.fixture(autouse=True)
    def clean_config(self, monkeypatch):
        # Swap in empty registries so tests never see (or pollute) the
        # real global option state.
        with monkeypatch.context() as m:
            m.setattr(cf, "_global_config", {})
            m.setattr(cf, "options", cf.DictWrapper(cf._global_config))
            m.setattr(cf, "_deprecated_options", {})
            m.setattr(cf, "_registered_options", {})

            # Our test fixture in conftest.py sets "chained_assignment"
            # to "raise" only after all test methods have been setup.
            # However, after this setup, there is no longer any
            # "chained_assignment" option, so re-register it.
            cf.register_option("chained_assignment", "raise")
            yield

    def test_api(self):
        # the pandas object exposes the user API
        assert hasattr(pd, "get_option")
        assert hasattr(pd, "set_option")
        assert hasattr(pd, "reset_option")
        assert hasattr(pd, "describe_option")

    def test_is_one_of_factory(self):
        v = cf.is_one_of_factory([None, 12])

        v(12)
        v(None)
        msg = r"Value must be one of None\|12"
        with pytest.raises(ValueError, match=msg):
            v(1.1)

    def test_register_option(self):
        cf.register_option("a", 1, "doc")

        # can't register an already registered option
        msg = "Option 'a' has already been registered"
        with pytest.raises(OptionError, match=msg):
            cf.register_option("a", 1, "doc")

        # can't register under a prefix that is itself a registered option
        msg = "Path prefix to option 'a' is already an option"
        with pytest.raises(OptionError, match=msg):
            cf.register_option("a.b.c.d1", 1, "doc")
        with pytest.raises(OptionError, match=msg):
            cf.register_option("a.b.c.d2", 1, "doc")

        # no python keywords
        msg = "for is a python keyword"
        with pytest.raises(ValueError, match=msg):
            cf.register_option("for", 0)
        with pytest.raises(ValueError, match=msg):
            cf.register_option("a.for.b", 0)

        # must be valid identifier (ensure attribute access works)
        msg = "oh my goddess! is not a valid identifier"
        with pytest.raises(ValueError, match=msg):
            cf.register_option("Oh my Goddess!", 0)

        # we can register options several levels deep
        # without predefining the intermediate steps
        # and we can define differently named options
        # in the same namespace
        cf.register_option("k.b.c.d1", 1, "doc")
        cf.register_option("k.b.c.d2", 1, "doc")

    def test_describe_option(self):
        cf.register_option("a", 1, "doc")
        cf.register_option("b", 1, "doc2")
        cf.deprecate_option("b", FutureWarning)
        cf.register_option("c.d.e1", 1, "doc3")
        cf.register_option("c.d.e2", 1, "doc4")
        cf.register_option("f", 1)
        cf.register_option("g.h", 1)
        cf.register_option("k", 2)
        cf.deprecate_option("g.h", FutureWarning, rkey="k")
        cf.register_option("l", "foo")

        # non-existent keys raise KeyError
        msg = r"No such keys\(s\)"
        with pytest.raises(OptionError, match=msg):
            cf.describe_option("no.such.key")

        # we can get the description for any key we registered
        assert "doc" in cf.describe_option("a", _print_desc=False)
        assert "doc2" in cf.describe_option("b", _print_desc=False)
        assert "precated" in cf.describe_option("b", _print_desc=False)
        assert "doc3" in cf.describe_option("c.d.e1", _print_desc=False)
        assert "doc4" in cf.describe_option("c.d.e2", _print_desc=False)

        # if no doc is specified we get a default message
        # saying "description not available"
        assert "available" in cf.describe_option("f", _print_desc=False)
        assert "available" in cf.describe_option("g.h", _print_desc=False)
        assert "precated" in cf.describe_option("g.h", _print_desc=False)
        assert "k" in cf.describe_option("g.h", _print_desc=False)

        # default is reported
        assert "foo" in cf.describe_option("l", _print_desc=False)
        # current value is reported
        assert "bar" not in cf.describe_option("l", _print_desc=False)
        cf.set_option("l", "bar")
        assert "bar" in cf.describe_option("l", _print_desc=False)

    @pytest.mark.parametrize("category", [DeprecationWarning, FutureWarning])
    def test_case_insensitive(self, category):
        # Option names are normalized case-insensitively on every access.
        cf.register_option("KanBAN", 1, "doc")

        assert "doc" in cf.describe_option("kanbaN", _print_desc=False)
        assert cf.get_option("kanBaN") == 1
        cf.set_option("KanBan", 2)
        assert cf.get_option("kAnBaN") == 2

        # gets of non-existent keys fail
        msg = r"No such keys\(s\): 'no_such_option'"
        with pytest.raises(OptionError, match=msg):
            cf.get_option("no_such_option")

        cf.deprecate_option("KanBan", category)
        msg = "'kanban' is deprecated, please refrain from using it."
        with pytest.raises(category, match=msg):
            cf.get_option("kAnBaN")

    def test_get_option(self):
        cf.register_option("a", 1, "doc")
        cf.register_option("b.c", "hullo", "doc2")
        cf.register_option("b.b", None, "doc2")

        # gets of existing keys succeed
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"
        assert cf.get_option("b.b") is None

        # gets of non-existent keys fail
        msg = r"No such keys\(s\): 'no_such_option'"
        with pytest.raises(OptionError, match=msg):
            cf.get_option("no_such_option")

    def test_set_option(self):
        cf.register_option("a", 1, "doc")
        cf.register_option("b.c", "hullo", "doc2")
        cf.register_option("b.b", None, "doc2")

        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"
        assert cf.get_option("b.b") is None

        cf.set_option("a", 2)
        cf.set_option("b.c", "wurld")
        cf.set_option("b.b", 1.1)

        assert cf.get_option("a") == 2
        assert cf.get_option("b.c") == "wurld"
        assert cf.get_option("b.b") == 1.1

        msg = r"No such keys\(s\): 'no.such.key'"
        with pytest.raises(OptionError, match=msg):
            cf.set_option("no.such.key", None)

    def test_set_option_empty_args(self):
        msg = "Must provide an even number of non-keyword arguments"
        with pytest.raises(ValueError, match=msg):
            cf.set_option()

    def test_set_option_uneven_args(self):
        msg = "Must provide an even number of non-keyword arguments"
        with pytest.raises(ValueError, match=msg):
            cf.set_option("a.b", 2, "b.c")

    def test_set_option_invalid_single_argument_type(self):
        msg = "Must provide an even number of non-keyword arguments"
        with pytest.raises(ValueError, match=msg):
            cf.set_option(2)

    def test_set_option_multiple(self):
        # set_option accepts alternating key/value positional pairs
        cf.register_option("a", 1, "doc")
        cf.register_option("b.c", "hullo", "doc2")
        cf.register_option("b.b", None, "doc2")

        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"
        assert cf.get_option("b.b") is None

        cf.set_option("a", "2", "b.c", None, "b.b", 10.0)

        assert cf.get_option("a") == "2"
        assert cf.get_option("b.c") is None
        assert cf.get_option("b.b") == 10.0

    def test_set_option_dict(self):
        # GH 61093
        cf.register_option("a", 1, "doc")
        cf.register_option("b.c", "hullo", "doc2")
        cf.register_option("b.b", None, "doc2")

        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"
        assert cf.get_option("b.b") is None

        options_dict = {"a": "2", "b.c": None, "b.b": 10.0}
        cf.set_option(options_dict)

        assert cf.get_option("a") == "2"
        assert cf.get_option("b.c") is None
        assert cf.get_option("b.b") == 10.0

    def test_validation(self):
        cf.register_option("a", 1, "doc", validator=cf.is_int)
        cf.register_option("d", 1, "doc", validator=cf.is_nonnegative_int)
        cf.register_option("b.c", "hullo", "doc2", validator=cf.is_text)

        msg = "Value must have type '<class 'int'>'"
        with pytest.raises(ValueError, match=msg):
            cf.register_option("a.b.c.d2", "NO", "doc", validator=cf.is_int)

        cf.set_option("a", 2)  # int is_int
        cf.set_option("b.c", "wurld")  # str is_str
        cf.set_option("d", 2)
        cf.set_option("d", None)  # non-negative int can be None

        # None not is_int
        with pytest.raises(ValueError, match=msg):
            cf.set_option("a", None)
        with pytest.raises(ValueError, match=msg):
            cf.set_option("a", "ab")

        msg = "Value must be a nonnegative integer or None"
        with pytest.raises(ValueError, match=msg):
            cf.register_option("a.b.c.d3", "NO", "doc", validator=cf.is_nonnegative_int)
        with pytest.raises(ValueError, match=msg):
            cf.register_option("a.b.c.d3", -2, "doc", validator=cf.is_nonnegative_int)

        msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>"
        with pytest.raises(ValueError, match=msg):
            cf.set_option("b.c", 1)

        validator = cf.is_one_of_factory([None, cf.is_callable])
        cf.register_option("b", lambda: None, "doc", validator=validator)
        cf.set_option("b", "%.1f".format)  # Formatter is callable
        cf.set_option("b", None)  # Formatter is none (default)
        with pytest.raises(ValueError, match="Value must be a callable"):
            cf.set_option("b", "%.1f")

    def test_reset_option(self):
        cf.register_option("a", 1, "doc", validator=cf.is_int)
        cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str)
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"

        cf.set_option("a", 2)
        cf.set_option("b.c", "wurld")
        assert cf.get_option("a") == 2
        assert cf.get_option("b.c") == "wurld"

        # resetting one option leaves the other untouched
        cf.reset_option("a")
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "wurld"
        cf.reset_option("b.c")
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"

    def test_reset_option_all(self):
        cf.register_option("a", 1, "doc", validator=cf.is_int)
        cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str)
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"

        cf.set_option("a", 2)
        cf.set_option("b.c", "wurld")
        assert cf.get_option("a") == 2
        assert cf.get_option("b.c") == "wurld"

        # the special "all" key resets every registered option
        cf.reset_option("all")
        assert cf.get_option("a") == 1
        assert cf.get_option("b.c") == "hullo"

    def test_deprecate_option(self):
        # we can deprecate non-existent options
        cf.deprecate_option("foo", FutureWarning)

        with tm.assert_produces_warning(FutureWarning, match="deprecated"):
            with pytest.raises(KeyError, match="No such keys.s.: 'foo'"):
                cf.get_option("foo")

        cf.register_option("a", 1, "doc", validator=cf.is_int)
        cf.register_option("b.c", "hullo", "doc2")
        cf.register_option("foo", "hullo", "doc2")

        cf.deprecate_option("a", FutureWarning, removal_ver="nifty_ver")
        with tm.assert_produces_warning(FutureWarning, match="eprecated.*nifty_ver"):
            cf.get_option("a")

        msg = "Option 'a' has already been defined as deprecated"
        with pytest.raises(OptionError, match=msg):
            cf.deprecate_option("a", FutureWarning)

        cf.deprecate_option("b.c", FutureWarning, "zounds!")
        with tm.assert_produces_warning(FutureWarning, match="zounds!"):
            cf.get_option("b.c")

        # test rerouting keys: reads and writes of the deprecated key are
        # transparently redirected to the replacement key.
        cf.register_option("d.a", "foo", "doc2")
        cf.register_option("d.dep", "bar", "doc2")
        assert cf.get_option("d.a") == "foo"
        assert cf.get_option("d.dep") == "bar"

        cf.deprecate_option("d.dep", FutureWarning, rkey="d.a")  # reroute d.dep to d.a
        with tm.assert_produces_warning(FutureWarning, match="eprecated"):
            assert cf.get_option("d.dep") == "foo"

        with tm.assert_produces_warning(FutureWarning, match="eprecated"):
            cf.set_option("d.dep", "baz")  # should overwrite "d.a"

        with tm.assert_produces_warning(FutureWarning, match="eprecated"):
            assert cf.get_option("d.dep") == "baz"

    def test_config_prefix(self):
        # config_prefix lets keys be registered/accessed without the prefix
        with cf.config_prefix("base"):
            cf.register_option("a", 1, "doc1")
            cf.register_option("b", 2, "doc2")
            assert cf.get_option("a") == 1
            assert cf.get_option("b") == 2

            cf.set_option("a", 3)
            cf.set_option("b", 4)
            assert cf.get_option("a") == 3
            assert cf.get_option("b") == 4

        assert cf.get_option("base.a") == 3
        assert cf.get_option("base.b") == 4
        assert "doc1" in cf.describe_option("base.a", _print_desc=False)
        assert "doc2" in cf.describe_option("base.b", _print_desc=False)

        cf.reset_option("base.a")
        cf.reset_option("base.b")

        with cf.config_prefix("base"):
            assert cf.get_option("a") == 1
            assert cf.get_option("b") == 2

    def test_callback(self):
        k = [None]
        v = [None]

        def callback(key):
            k.append(key)
            v.append(cf.get_option(key))

        cf.register_option("d.a", "foo", cb=callback)
        cf.register_option("d.b", "foo", cb=callback)

        del k[-1], v[-1]
        cf.set_option("d.a", "fooz")
        assert k[-1] == "d.a"
        assert v[-1] == "fooz"

        del k[-1], v[-1]
        cf.set_option("d.b", "boo")
        assert k[-1] == "d.b"
        assert v[-1] == "boo"

        del k[-1], v[-1]
        cf.reset_option("d.b")
        assert k[-1] == "d.b"

    def test_set_ContextManager(self):
        def eq(val):
            assert cf.get_option("a") == val

        cf.register_option("a", 0)
        eq(0)
        with cf.option_context("a", 15):
            eq(15)
            with cf.option_context("a", 25):
                eq(25)
            eq(15)
        eq(0)

        cf.set_option("a", 17)
        eq(17)

        # Test that option_context can be used as a decorator too (#34253).
        @cf.option_context("a", 123)
        def f():
            eq(123)

        f()

    def test_set_ContextManager_dict(self):
        def eq(val):
            assert cf.get_option("a") == val
            assert cf.get_option("b.c") == val

        cf.register_option("a", 0)
        cf.register_option("b.c", 0)
        eq(0)
        with cf.option_context({"a": 15, "b.c": 15}):
            eq(15)
            with cf.option_context({"a": 25, "b.c": 25}):
                eq(25)
            eq(15)
        eq(0)

        cf.set_option("a", 17)
        cf.set_option("b.c", 17)
        eq(17)

        # Test that option_context can be used as a decorator too
        @cf.option_context({"a": 123, "b.c": 123})
        def f():
            eq(123)

        f()

    def test_attribute_access(self):
        holder = []

        def f3(key):
            holder.append(True)

        cf.register_option("a", 0)
        cf.register_option("c", 0, cb=f3)
        options = cf.options

        assert options.a == 0
        with cf.option_context("a", 15):
            assert options.a == 15

        options.a = 500
        assert cf.get_option("a") == 500

        cf.reset_option("a")
        assert options.a == cf.get_option("a")

        msg = "You can only set the value of existing options"
        with pytest.raises(OptionError, match=msg):
            options.b = 1
        with pytest.raises(OptionError, match=msg):
            options.display = 1

        # make sure callback kicks when using this form of setting
        options.c = 1
        assert len(holder) == 1

    def test_option_context_scope(self):
        # Ensure that creating a context does not affect the existing
        # environment as it is supposed to be used with the `with` statement.
        # See https://github.com/pandas-dev/pandas/issues/8514

        original_value = 60
        context_value = 10
        option_name = "a"

        cf.register_option(option_name, original_value)

        # Ensure creating contexts didn't affect the current context.
        ctx = cf.option_context(option_name, context_value)
        assert cf.get_option(option_name) == original_value

        # Ensure the correct value is available inside the context.
        with ctx:
            assert cf.get_option(option_name) == context_value

        # Ensure the current context is reset
        assert cf.get_option(option_name) == original_value

    def test_dictwrapper_getattr(self):
        options = cf.options
        # GH 19789
        with pytest.raises(OptionError, match="No such option"):
            options.bananas
        assert not hasattr(options, "bananas")
def test_no_silent_downcasting_deprecated():
    # GH#59502
    # Both reading and writing the retired option must emit the
    # deprecation warning.
    with tm.assert_produces_warning(Pandas4Warning, match="is deprecated"):
        cf.get_option("future.no_silent_downcasting")

    with tm.assert_produces_warning(Pandas4Warning, match="is deprecated"):
        cf.set_option("future.no_silent_downcasting", True)
def test_option_context_invalid_option():
    # option_context must reject unknown keys up front rather than
    # silently entering the context.
    with pytest.raises(OptionError, match="No such keys"):
        with cf.option_context("invalid", True):
            pass
|
TestConfig
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/tests/models.py
|
{
"start": 4940,
"end": 5020
}
|
class ____(ModelArticle):
    """Concrete subtype of ModelArticle adding its own ``name`` column.

    NOTE(review): presumably used to exercise polymorphic queries over the
    ModelArticle hierarchy in the test suite — confirm against the tests.
    """

    name = models.CharField(max_length=300)
|
ModelPackage
|
python
|
ansible__ansible
|
lib/ansible/plugins/strategy/debug.py
|
{
"start": 1046,
"end": 1205
}
|
class ____(LinearStrategyModule):
    """Linear strategy variant that starts with the task debugger enabled."""

    def __init__(self, tqm):
        # Zero-argument super() (Python 3) instead of the old explicit
        # ``super(StrategyModule, self)`` form: the explicit form hard-codes
        # the class's global name and breaks if the class is renamed or
        # aliased; the behavior is otherwise identical.
        super().__init__(tqm)
        self.debugger_active = True
|
StrategyModule
|
python
|
huggingface__transformers
|
tests/models/bart/test_modeling_bart.py
|
{
"start": 2236,
"end": 8249
}
|
class ____:
    """Builds BART configs/inputs and shared model checks for the test class."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return a (config, inputs_dict) pair with random token ids."""
        # Clamp ids to >= 3 so special tokens (bos/eos/pad) never collide
        # with sampled content tokens.  (A previous, unclamped assignment
        # of input_ids here was dead code and has been removed.)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
            3,
        )
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_bart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return BartConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def get_pipeline_config(self):
        # Pipeline tests need a larger vocab/positions than the unit config.
        config = self.get_config()
        config.max_position_embeddings = 100
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check decoder cache consistency: with/without past must agree."""
        model = BartModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that encoder/decoder round-trip save/load standalone."""
        model = BartModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = BartEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = BartDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
|
BartModelTester
|
python
|
pypa__warehouse
|
warehouse/attestations/models.py
|
{
"start": 423,
"end": 1325
}
|
class ____(db.Model):
    """
    A table for PEP 740 provenance objects.

    Provenance objects contain one or more attestation objects.
    These attestation objects are grouped into "bundles," each of which
    contains one or more attestations along with the Trusted Publisher
    identity that produced them.
    """

    __tablename__ = "provenance"

    # One provenance row belongs to one release file; follows the file on
    # rename/delete via CASCADE.
    file_id: Mapped[UUID] = mapped_column(
        ForeignKey("release_files.id", onupdate="CASCADE", ondelete="CASCADE"),
    )
    file: Mapped[File] = orm.relationship(back_populates="provenance")

    # This JSONB has the structure of a PEP 740 provenance object.
    # Deferred so the (potentially large) blob is only loaded on access.
    provenance: Mapped[dict] = mapped_column(JSONB, nullable=False, deferred=True)

    @cached_property
    def as_model(self):
        # Validated pypi_attestations.Provenance view of the raw JSON;
        # cached per instance since validation is comparatively expensive.
        return pypi_attestations.Provenance.model_validate(self.provenance)

    __table_args__ = (Index("ix_provenance_file_id", file_id),)
|
Provenance
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 372531,
"end": 373144
}
|
class ____(sgqlc.types.relay.Connection):
    """Relay-style connection of milestone nodes.

    Auto-generated sgqlc binding for the GitHub GraphQL schema; exposes the
    standard connection fields (edges, nodes, pageInfo, totalCount).
    """

    __schema__ = graphql_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    # Edge wrappers (cursor + node pairs).
    edges = sgqlc.types.Field(
        sgqlc.types.list_of("MilestoneEdge"), graphql_name="edges"
    )
    # The milestone objects themselves, without edge/cursor wrappers.
    nodes = sgqlc.types.Field(sgqlc.types.list_of("Milestone"), graphql_name="nodes")
    # Pagination cursors/flags for this connection.
    page_info = sgqlc.types.Field(
        sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
    )
    # Total number of items in the connection, independent of page size.
    total_count = sgqlc.types.Field(
        sgqlc.types.non_null(Int), graphql_name="totalCount"
    )
|
MilestoneConnection
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl/processing_qwen3_vl.py
|
{
"start": 1878,
"end": 16041
}
|
class ____(ProcessorMixin):
    r"""
    Constructs a Qwen3VL processor which wraps a Qwen3VL image processor and a Qwen2 tokenizer into a single processor.
    [`Qwen3VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
    [`~Qwen3VLProcessor.__call__`] and [`~Qwen3VLProcessor.decode`] for more information.
    Args:
        image_processor ([`Qwen2VLImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`Qwen3VLVideoProcessor`], *optional*):
            The video processor is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
        # Prefer tokens declared on the tokenizer; fall back to the Qwen defaults.
        # Ids are resolved from the tokenizer's vocabulary when not exposed directly.
        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
        self.image_token_id = (
            tokenizer.image_token_id
            if getattr(tokenizer, "image_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.image_token)
        )
        self.video_token_id = (
            tokenizer.video_token_id
            if getattr(tokenizer, "video_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.video_token)
        )
        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
        # Vision span delimiters used when expanding video placeholders per frame.
        self.vision_start_token = (
            "<|vision_start|>" if not hasattr(tokenizer, "vision_start_token") else tokenizer.vision_start_token
        )
        self.vision_end_token = (
            "<|vision_end|>" if not hasattr(tokenizer, "vision_end_token") else tokenizer.vision_end_token
        )
        self.vision_start_token_id = (
            tokenizer.vision_start_token_id
            if getattr(tokenizer, "vision_start_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.vision_start_token)
        )
        self.vision_end_token_id = (
            tokenizer.vision_end_token_id
            if getattr(tokenizer, "vision_end_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.vision_end_token)
        )

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
        videos: VideoInput = None,
        **kwargs: Unpack[Qwen3VLProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Qwen3VLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None
        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            video_grid_thw = videos_inputs["video_grid_thw"]
            # If user has not requested video metadata, pop it
            if not kwargs.get("return_metadata"):
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
        else:
            videos_inputs = {}
            video_grid_thw = None
        if not isinstance(text, list):
            text = [text]
        text = text.copy()  # below lines change text in-place
        if image_grid_thw is not None:
            # Each image token expands to (t*h*w) // merge_size**2 placeholder tokens.
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)
        if video_grid_thw is not None:
            merge_length = self.video_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    metadata = video_metadata[index]
                    if metadata.fps is None:
                        logger.warning_once(
                            "Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                            "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                            "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
                        )
                    metadata.fps = 24 if metadata.fps is None else metadata.fps
                    # if timestamps are not provided, calculate them
                    curr_timestamp = self._calculate_timestamps(
                        metadata.frames_indices,
                        metadata.fps,
                        self.video_processor.merge_size,
                    )
                    # Build "<T seconds>" + vision_start + placeholders + vision_end per frame.
                    video_placeholder = ""
                    frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
                    for frame_idx in range(video_grid_thw[index][0]):
                        curr_time = curr_timestamp[frame_idx]
                        video_placeholder += f"<{curr_time:.1f} seconds>"
                        video_placeholder += (
                            self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
                        )
                    if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
                        text[i] = text[i].replace(
                            f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
                        )
                    else:
                        # vllm may input video token directly
                        text[i] = text[i].replace(self.video_token, video_placeholder, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.video_token)
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
        if return_mm_token_type_ids:
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.
            video_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (num_frames, height, width) per each video.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            # Copy the defaults before merging kwargs: `_defaults` is a shared
            # class-level dict and must not be mutated in place.
            images_kwargs = dict(Qwen3VLProcessorKwargs._defaults.get("images_kwargs", {}))
            images_kwargs.update(kwargs)
            merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        if video_sizes is not None:
            videos_kwargs = dict(Qwen3VLProcessorKwargs._defaults.get("videos_kwargs", {}))
            videos_kwargs.update(kwargs)
            # Bug fix: previously `merge_size` was only defined in the image branch,
            # so calling with `video_sizes` alone raised NameError.
            merge_size = videos_kwargs.get("merge_size", None) or self.video_processor.merge_size
            num_video_patches = [
                self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
                for video_size in video_sizes
            ]
            num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
            vision_data["num_video_tokens"] = num_video_tokens
        return MultiModalData(**vision_data)

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.
        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.
        Returns:
            `list[str]`: The decoded text.
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def _calculate_timestamps(self, indices: Union[list[int], np.ndarray], video_fps: float, merge_size: int = 2):
        """Return one timestamp (seconds) per temporal patch of `merge_size` frames.

        Pads `indices` by repeating the last frame so its length is a multiple of
        `merge_size`, then averages the first/last timestamp of each patch.
        """
        if not isinstance(indices, list):
            indices = indices.tolist()
        if len(indices) % merge_size != 0:
            indices.extend(indices[-1] for _ in range(merge_size - len(indices) % merge_size))
        timestamps = [idx / video_fps for idx in indices]
        # @JJJYmmm frames are merged by self.merge_size, \
        # so we need to average the timestamps between the first/last frame within the temporal patch
        timestamps = [
            (timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)
        ]
        return timestamps
__all__ = ["Qwen3VLProcessor"]
|
Qwen3VLProcessor
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_renamed.py
|
{
"start": 1023,
"end": 1195
}
|
class ____:
def test(self, arg):
return arg + 1
def kwargs_test(self, arg, *, var1, var2):
print(f"keyword parameters are {var1} and {var2}.")
|
Parent
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/project_profiling_profile.py
|
{
"start": 791,
"end": 2435
}
|
class ____(ProjectProfilingBaseEndpoint):
    def get(self, request: Request, project: Project, profile_id: str) -> HttpResponse:
        """Fetch one profile from the profiling service and attach release data.

        Returns 404 unless the organization has the profiling feature; otherwise
        proxies the service response, enriching a successful payload with a
        serialized release before returning it.
        """
        if not features.has("organizations:profiling", project.organization, actor=request.user):
            return Response(status=404)

        response = get_from_profiling_service(
            "GET",
            f"/organizations/{project.organization_id}/projects/{project.id}/profiles/{profile_id}",
            params={"format": "sample"},
        )

        # Non-200: pass the upstream body and status through unchanged.
        if response.status != 200:
            return HttpResponse(
                content=response.data,
                status=response.status,
                content_type=response.headers.get("Content-Type", "application/json"),
            )

        profile = orjson.loads(response.data)
        if "release" in profile:
            profile["release"] = get_release(project, profile["release"])
        else:
            # make sure to remove the version from the metadata
            # we're going to replace it with the release here
            version = profile.get("metadata", {}).pop("version")
            profile["metadata"]["release"] = get_release(project, version)
        return Response(profile)
def get_release(project: Project, version: str) -> Any:
    """Serialize the release matching *version* for *project*.

    Returns None for a falsy version, the serialized release when found, and a
    bare ``{"version": ...}`` dict when no matching release exists.
    """
    if not version:
        return None
    try:
        return serialize(
            Release.objects.get(
                projects=project,
                organization_id=project.organization_id,
                version=version,
            )
        )
    except Release.DoesNotExist:
        # Unknown release: hand back just the version string.
        return {"version": version}
@region_silo_endpoint
|
ProjectProfilingProfileEndpoint
|
python
|
pypa__warehouse
|
tests/unit/manage/test_views.py
|
{
"start": 210727,
"end": 216544
}
|
class ____:
    """Tests for the revoke-project-role-invitation view.

    Each test wires `find_service` so the view receives the stubbed token and
    user services; the stub call order mirrors what the view performs.
    """

    def test_revoke_invitation(self, db_request, token_service):
        """Happy path: an existing invitation is deleted and a success flash queued."""
        project = ProjectFactory.create(name="foobar")
        user = UserFactory.create(username="testuser")
        RoleInvitationFactory.create(user=user, project=project)
        owner_user = UserFactory.create()
        RoleFactory(user=owner_user, project=project, role_name="Owner")
        user_service = pretend.stub(get_user=lambda userid: user)
        # Token decodes to a well-formed role-verify payload.
        token_service.loads = pretend.call_recorder(
            lambda token: {
                "action": "email-project-role-verify",
                "desired_role": "Maintainer",
                "user_id": user.id,
                "project_id": project.id,
                "submitter_id": db_request.user.id,
            }
        )
        db_request.find_service = pretend.call_recorder(
            lambda iface, context=None, name=None: {
                ITokenService: token_service,
                IUserService: user_service,
            }.get(iface)
        )
        db_request.method = "POST"
        db_request.POST = MultiDict({"user_id": user.id, "token": "TOKEN"})
        db_request.remote_addr = "10.10.10.10"
        db_request.user = owner_user
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: "/manage/projects"
        )
        form_class = pretend.call_recorder(lambda *a, **kw: None)
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        result = views.revoke_project_role_invitation(
            project, db_request, _form_class=form_class
        )
        db_request.db.flush()
        # The invitation row must be gone after the view runs.
        assert not (
            db_request.db.query(RoleInvitation)
            .filter(RoleInvitation.user == user)
            .filter(RoleInvitation.project == project)
            .one_or_none()
        )
        assert db_request.session.flash.calls == [
            pretend.call(f"Invitation revoked from '{user.username}'.", queue="success")
        ]
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/manage/projects"

    def test_invitation_does_not_exist(self, db_request, token_service):
        """No invitation row exists: the view flashes an error and redirects."""
        project = ProjectFactory.create(name="foobar")
        user = UserFactory.create(username="testuser")
        owner_user = UserFactory.create()
        RoleFactory(user=owner_user, project=project, role_name="Owner")
        user_service = pretend.stub(get_user=lambda userid: user)
        token_service.loads = pretend.call_recorder(
            lambda token: {
                "action": "email-project-role-verify",
                "desired_role": "Maintainer",
                "user_id": user.id,
                "project_id": project.id,
                "submitter_id": db_request.user.id,
            }
        )
        db_request.find_service = pretend.call_recorder(
            lambda iface, context=None, name=None: {
                ITokenService: token_service,
                IUserService: user_service,
            }.get(iface)
        )
        db_request.method = "POST"
        db_request.POST = MultiDict({"user_id": user.id, "token": "TOKEN"})
        db_request.remote_addr = "10.10.10.10"
        db_request.user = owner_user
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: "/manage/projects"
        )
        form_class = pretend.call_recorder(lambda *a, **kw: None)
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        result = views.revoke_project_role_invitation(
            project, db_request, _form_class=form_class
        )
        db_request.db.flush()
        assert db_request.session.flash.calls == [
            pretend.call("Could not find role invitation.", queue="error")
        ]
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/manage/projects"

    def test_token_expired(self, db_request, token_service):
        """Expired token: invitation is still removed, with an 'already expired' flash."""
        project = ProjectFactory.create(name="foobar")
        user = UserFactory.create(username="testuser")
        RoleInvitationFactory.create(user=user, project=project)
        owner_user = UserFactory.create()
        RoleFactory(user=owner_user, project=project, role_name="Owner")
        user_service = pretend.stub(get_user=lambda userid: user)
        # Decoding raises TokenExpired instead of returning a payload.
        token_service.loads = pretend.call_recorder(pretend.raiser(TokenExpired))
        db_request.find_service = pretend.call_recorder(
            lambda iface, context=None, name=None: {
                ITokenService: token_service,
                IUserService: user_service,
            }.get(iface)
        )
        db_request.method = "POST"
        db_request.POST = MultiDict({"user_id": user.id, "token": "TOKEN"})
        db_request.remote_addr = "10.10.10.10"
        db_request.user = owner_user
        db_request.route_path = pretend.call_recorder(
            lambda *a, **kw: "/manage/projects/roles"
        )
        form_class = pretend.call_recorder(lambda *a, **kw: None)
        db_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        result = views.revoke_project_role_invitation(
            project, db_request, _form_class=form_class
        )
        db_request.db.flush()
        assert not (
            db_request.db.query(RoleInvitation)
            .filter(RoleInvitation.user == user)
            .filter(RoleInvitation.project == project)
            .one_or_none()
        )
        assert db_request.session.flash.calls == [
            pretend.call("Invitation already expired.", queue="success")
        ]
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/manage/projects/roles"
|
TestRevokeRoleInvitation
|
python
|
huggingface__transformers
|
tests/models/blip/test_modeling_blip.py
|
{
"start": 10890,
"end": 12411
}
|
class ____(ModelTesterMixin, unittest.TestCase):
    """Common-model-test suite for BlipTextModel, with unsupported cases skipped."""

    all_model_classes = (BlipTextModel,) if is_torch_available() else ()

    def setUp(self):
        # Shared tester builds configs/inputs; ConfigTester validates the config class.
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: the published checkpoint loads successfully.
        model_name = "Salesforce/blip-vqa-base"
        model = BlipTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
|
BlipTextModelTest
|
python
|
getsentry__sentry
|
src/sentry/search/events/types.py
|
{
"start": 7948,
"end": 9265
}
|
class ____:
    """Options and feature flags controlling how a search-events query is built."""

    # Automatically add fields required by the requested aggregates.
    auto_fields: bool = False
    auto_aggregations: bool = False
    use_aggregate_conditions: bool = False
    # Extra function names permitted beyond the default ACL; None = defaults only.
    functions_acl: list[str] | None = None
    equation_config: dict[str, bool] | None = None
    # This allows queries to be resolved without adding time constraints. Currently this is just
    # used to allow metric alerts to be built and validated before creation in snuba.
    skip_time_conditions: bool = False
    # Overrides merged into the search-syntax parser configuration.
    parser_config_overrides: Mapping[str, Any] = field(default_factory=dict)
    has_metrics: bool = False
    transform_alias_to_input_format: bool = False
    use_metrics_layer: bool = False
    # This skips converting tags back to their non-prefixed versions when processing the results
    # Currently this is only used for avoiding conflicting values when doing the first query
    # of a top events request
    skip_tag_resolution: bool = False
    on_demand_metrics_enabled: bool = False
    # Kind of on-demand metrics spec; opaque here — NOTE(review): confirm concrete type.
    on_demand_metrics_type: Any | None = None
    skip_field_validation_for_entity_subscription_deletion: bool = False
    allow_metric_aggregates: bool | None = False
    insights_metrics_override_metric_layer: bool = False
    # Allow the errors query builder to use the entity prefix for fields
    use_entity_prefix_for_fields: bool = False
@dataclass(frozen=True)
|
QueryBuilderConfig
|
python
|
TheAlgorithms__Python
|
graphs/edmonds_karp_multiple_source_and_sink.py
|
{
"start": 0,
"end": 1887
}
|
class ____:
def __init__(self, graph, sources, sinks):
self.source_index = None
self.sink_index = None
self.graph = graph
self._normalize_graph(sources, sinks)
self.vertices_count = len(graph)
self.maximum_flow_algorithm = None
# make only one source and one sink
def _normalize_graph(self, sources, sinks):
if sources is int:
sources = [sources]
if sinks is int:
sinks = [sinks]
if len(sources) == 0 or len(sinks) == 0:
return
self.source_index = sources[0]
self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(sources) > 1 or len(sinks) > 1:
max_input_flow = 0
for i in sources:
max_input_flow += sum(self.graph[i])
size = len(self.graph) + 1
for room in self.graph:
room.insert(0, 0)
self.graph.insert(0, [0] * size)
for i in sources:
self.graph[0][i + 1] = max_input_flow
self.source_index = 0
size = len(self.graph) + 1
for room in self.graph:
room.append(0)
self.graph.append([0] * size)
for i in sinks:
self.graph[i + 1][size - 1] = max_input_flow
self.sink_index = size - 1
def find_maximum_flow(self):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before.")
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def set_maximum_flow_algorithm(self, algorithm):
self.maximum_flow_algorithm = algorithm(self)
|
FlowNetwork
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 7124,
"end": 7283
}
|
class ____(models.Model):
    """Test model whose slug uses the overridden find-unique auto-slug field."""

    title = models.CharField(max_length=42)
    # Slug is auto-populated from `title` by the overridden field class.
    slug = OverridedFindUniqueAutoSlugField(populate_from="title")
|
OverridedFindUniqueModel
|
python
|
ethereum__web3.py
|
web3/types.py
|
{
"start": 13600,
"end": 13728
}
|
class ____(TypedDict, total=False):
    """Opcode-level trace result; all keys optional (``total=False``).

    NOTE(review): the field names appear to mirror a geth-style
    ``debug_traceTransaction`` response — confirm against the node API.
    """

    # Gas consumed by the traced call.
    gas: int
    # Whether the traced execution failed.
    failed: bool
    # Return data of the call, as a string.
    returnValue: str
    # Per-opcode execution log entries.
    structLogs: list[StructLog]
|
OpcodeTrace
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/dml.py
|
{
"start": 12297,
"end": 12639
}
|
class ____(DMLState):
    isdelete = True

    def __init__(self, statement: Delete, compiler: SQLCompiler, **kw: Any):
        """Capture compile-time state for a DELETE statement."""
        self.statement = statement
        self.isdelete = True
        primary_table, extra_froms = self._make_extra_froms(statement)
        self._primary_table = primary_table
        self._extra_froms = extra_froms
        # a non-empty extra-FROM list marks the statement as multi-table
        self.is_multitable = extra_froms
|
DeleteDMLState
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/inheritance/test_assorted_poly.py
|
{
"start": 69057,
"end": 72935
}
|
class ____(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    # test [ticket:4537]'s test case.
    run_create_tables = run_deletes = None
    run_setup_classes = run_setup_mappers = run_define_tables = "each"
    __dialect__ = "default"

    def _fixture(self, use_correlate_except):
        """Build a single-inheritance hierarchy plus a Common class whose
        `num_superclass` column_property counts Superclass rows, correlated
        either via correlate() or correlate_except() per the flag."""
        Base = self.DeclarativeBasic

        class Superclass(Base):
            __tablename__ = "s1"
            id = Column(Integer, primary_key=True)
            common_id = Column(ForeignKey("c.id"))
            common_relationship = relationship(
                "Common", uselist=False, innerjoin=True, lazy="noload"
            )
            discriminator_field = Column(String)
            __mapper_args__ = {
                "polymorphic_identity": "superclass",
                "polymorphic_on": discriminator_field,
            }

        class Subclass(Superclass):
            __tablename__ = "s2"
            id = Column(ForeignKey("s1.id"), primary_key=True)
            __mapper_args__ = {"polymorphic_identity": "subclass"}

        class Common(Base):
            __tablename__ = "c"
            id = Column(Integer, primary_key=True)

        # Two equivalent correlation spellings for the scalar subquery.
        if use_correlate_except:
            Common.num_superclass = column_property(
                select(func.count(Superclass.id))
                .where(Superclass.common_id == Common.id)
                .correlate_except(Superclass)
                .scalar_subquery()
            )
        if not use_correlate_except:
            Common.num_superclass = column_property(
                select(func.count(Superclass.id))
                .where(Superclass.common_id == Common.id)
                .correlate(Common)
                .scalar_subquery()
            )
        return Common, Superclass

    def test_poly_query_on_correlate(self):
        """correlate() variant compiles with the subquery correlated to c only."""
        Common, Superclass = self._fixture(False)
        with expect_noload_deprecation():
            poly = with_polymorphic(Superclass, "*")
        s = fixture_session()
        q = (
            s.query(poly)
            .options(contains_eager(poly.common_relationship))
            .join(poly.common_relationship)
            .filter(Common.id == 1)
        )
        # note the order of c.id, subquery changes based on if we
        # used correlate or correlate_except; this is only with the
        # patch in place. Not sure why this happens.
        self.assert_compile(
            q,
            "SELECT c.id AS c_id, (SELECT count(s1.id) AS count_1 "
            "FROM s1 LEFT OUTER JOIN s2 ON s1.id = s2.id "
            "WHERE s1.common_id = c.id) AS anon_1, "
            "s1.id AS s1_id, "
            "s1.common_id AS s1_common_id, "
            "s1.discriminator_field AS s1_discriminator_field, "
            "s2.id AS s2_id FROM s1 "
            "LEFT OUTER JOIN s2 ON s1.id = s2.id "
            "JOIN c ON c.id = s1.common_id WHERE c.id = :id_1",
        )

    def test_poly_query_on_correlate_except(self):
        """correlate_except() variant must compile to the same SQL."""
        Common, Superclass = self._fixture(True)
        with expect_noload_deprecation():
            poly = with_polymorphic(Superclass, "*")
        s = fixture_session()
        q = (
            s.query(poly)
            .options(contains_eager(poly.common_relationship))
            .join(poly.common_relationship)
            .filter(Common.id == 1)
        )
        self.assert_compile(
            q,
            "SELECT c.id AS c_id, (SELECT count(s1.id) AS count_1 "
            "FROM s1 LEFT OUTER JOIN s2 ON s1.id = s2.id "
            "WHERE s1.common_id = c.id) AS anon_1, "
            "s1.id AS s1_id, "
            "s1.common_id AS s1_common_id, "
            "s1.discriminator_field AS s1_discriminator_field, "
            "s2.id AS s2_id FROM s1 "
            "LEFT OUTER JOIN s2 ON s1.id = s2.id "
            "JOIN c ON c.id = s1.common_id WHERE c.id = :id_1",
        )
|
CorrelateExceptWPolyAdaptTest
|
python
|
doocs__leetcode
|
lcp/LCP 03. 机器人大冒险/Solution.py
|
{
"start": 0,
"end": 646
}
|
class ____:
def robot(self, command: str, obstacles: List[List[int]], x: int, y: int) -> bool:
vis = {(0, 0)}
i = j = 0
for c in command:
match c:
case "U":
j += 1
case "R":
i += 1
vis.add((i, j))
k = min(x // i, y // j)
if (x - k * i, y - k * j) not in vis:
return False
for a, b in obstacles:
if a > x or b > y:
continue
k = min(a // i, b // j)
if (a - k * i, b - k * j) in vis:
return False
return True
|
Solution
|
python
|
gevent__gevent
|
src/greentest/3.13/test_threading.py
|
{
"start": 3626,
"end": 46163
}
|
class ____(BaseTestCase):
maxDiff = 9999
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
def test_args_argument(self):
# bpo-45735: Using list or tuple as *args* in constructor could
# achieve the same effect.
num_list = [1]
num_tuple = (1,)
str_list = ["str"]
str_tuple = ("str",)
list_in_tuple = ([1],)
tuple_in_list = [(1,)]
test_cases = (
(num_list, lambda arg: self.assertEqual(arg, 1)),
(num_tuple, lambda arg: self.assertEqual(arg, 1)),
(str_list, lambda arg: self.assertEqual(arg, "str")),
(str_tuple, lambda arg: self.assertEqual(arg, "str")),
(list_in_tuple, lambda arg: self.assertEqual(arg, [1])),
(tuple_in_list, lambda arg: self.assertEqual(arg, (1,)))
)
for args, target in test_cases:
with self.subTest(target=target, args=args):
t = threading.Thread(target=target, args=args)
t.start()
t.join()
def test_lock_no_args(self):
threading.Lock() # works
self.assertRaises(TypeError, threading.Lock, 1)
self.assertRaises(TypeError, threading.Lock, a=1)
self.assertRaises(TypeError, threading.Lock, 1, 2, a=1, b=2)
def test_lock_no_subclass(self):
# Intentionally disallow subclasses of threading.Lock because they have
# never been allowed, so why start now just because the type is public?
with self.assertRaises(TypeError):
class MyLock(threading.Lock): pass
def test_lock_or_none(self):
import types
self.assertIsInstance(threading.Lock | None, types.UnionType)
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.current_thread().ident)
def f():
ident.append(threading.current_thread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
    def test_foreign_thread(self):
        """A thread started via _thread (not threading) can still use threading.

        Verifies that a _DummyThread is registered in threading._active for
        the foreign thread, that join()/is_alive() raise appropriately on it,
        and that it is eventually purged from _active after the thread dies.
        """
        # Check that a "foreign" thread can use the threading module.
        dummy_thread = None
        error = None
        def f(mutex):
            try:
                nonlocal dummy_thread
                nonlocal error
                # Calling current_thread() forces an entry for the foreign
                # thread to get made in the threading._active map.
                dummy_thread = threading.current_thread()
                tid = dummy_thread.ident
                self.assertIn(tid, threading._active)
                self.assertIsInstance(dummy_thread, threading._DummyThread)
                self.assertIs(threading._active.get(tid), dummy_thread)
                # gh-29376
                self.assertTrue(
                    dummy_thread.is_alive(),
                    'Expected _DummyThread to be considered alive.'
                )
                self.assertIn('_DummyThread', repr(dummy_thread))
            except BaseException as e:
                # Captured and re-raised in the main thread below.
                error = e
            finally:
                mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        with threading_helper.wait_threads_exit():
            tid = _thread.start_new_thread(f, (mutex,))
            # Wait for the thread to finish.
            mutex.acquire()
        if error is not None:
            raise error
        self.assertEqual(tid, dummy_thread.ident)
        # Issue gh-106236:
        with self.assertRaises(RuntimeError):
            dummy_thread.join()
        dummy_thread._started.clear()
        with self.assertRaises(RuntimeError):
            dummy_thread.is_alive()
        # Busy wait for the following condition: after the thread dies, the
        # related dummy thread must be removed from threading._active.
        timeout = 5
        timeout_at = time.monotonic() + timeout
        while time.monotonic() < timeout_at:
            if threading._active.get(dummy_thread.ident) is not dummy_thread:
                break
            time.sleep(.1)
        else:
            self.fail('It was expected that the created threading._DummyThread was removed from threading._active.')
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level. This test relies on ctypes to get at it.
    def test_PyThreadState_SetAsyncExc(self):
        """Exercise PyThreadState_SetAsyncExc via ctypes.

        Checks three cases: raising an async exception in the current thread,
        a nonsensical thread id (no thread state modified), and raising into a
        busy worker thread which must observe the exception.
        """
        ctypes = import_module("ctypes")
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = threading.get_ident()
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)
        try:
            result = set_async_exc(tid, exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = threading.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print("    started worker thread")
        # Try a thread id that doesn't make sense.
        if verbose:
            print("    trying nonsensical thread id")
        result = set_async_exc(-1, exception)
        self.assertEqual(result, 0) # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print("    waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print("    verifying worker hasn't exited")
        self.assertFalse(t.finished)
        if verbose:
            print("    attempting to raise asynch exception in worker")
        result = set_async_exc(t.id, exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print("    waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
        self.assertTrue(t.finished)
        if verbose:
            print("    all OK -- joining worker")
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args, **kwargs):
raise threading.ThreadError()
_start_joinable_thread = threading._start_joinable_thread
threading._start_joinable_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_joinable_thread = _start_joinable_thread
    def test_finalize_running_thread(self):
        """Subprocess test: PyGILState_Ensure/_Release may run very late at exit.

        Spawns a subprocess whose running thread owns an object with a __del__
        that re-enters the GIL machinery; the process must still exit with the
        chosen code (42) instead of crashing.
        """
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        if support.check_sanitizer(thread=True):
            # the thread running `time.sleep(100)` below will still be alive
            # at process exit
            self.skipTest("TSAN would report thread leak")
        import_module("ctypes")
        rc, out, err = assert_python_failure("-c", """if 1:
            import ctypes, sys, time, _thread
            # This lock is used as a simple event variable.
            ready = _thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            _thread.start_new_thread(waitingThread, ())
            ready.acquire() # Be sure the other thread is waiting.
            sys.exit(42)
            """)
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        """Subprocess test: sys.settrace must not deadlock threading._shutdown."""
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        if support.check_sanitizer(thread=True):
            # the thread running `time.sleep(2)` below will still be alive
            # at process exit
            self.skipTest("TSAN would report thread leak")
        assert_python_ok("-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print('program blocked; aborting')
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """)
    def test_join_nondaemon_on_shutdown(self):
        """Subprocess test: SystemExit must still join non-daemon threads.

        The child thread must wake up after the main thread raised SystemExit,
        with interpreter state (here, the `sleep` builtin) still intact.
        """
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)
            threading.Thread(target=child).start()
            raise SystemExit
        """)
        self.assertEqual(out.strip(),
            b"Woke up, sleep function is: <built-in function sleep>")
        self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
support.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
    @support.bigmemtest(size=20, memuse=72*2**20, dry_run=False)
    def test_join_from_multiple_threads(self, size):
        """Thread.join() must be safe when called concurrently from N threads."""
        # Thread.join() should be thread-safe
        errors = []
        def worker():
            time.sleep(0.005)
        def joiner(thread):
            try:
                thread.join()
            except Exception as e:
                # Collected and re-raised in the main thread below.
                errors.append(e)
        for N in range(2, 20):
            threads = [threading.Thread(target=worker)]
            for i in range(N):
                threads.append(threading.Thread(target=joiner,
                                                args=(threads[0],)))
            for t in threads:
                t.start()
            time.sleep(0.01)
            for t in threads:
                t.join()
        if errors:
            raise errors[0]
def test_join_with_timeout(self):
lock = _thread.allocate_lock()
lock.acquire()
def worker():
lock.acquire()
thread = threading.Thread(target=worker)
thread.start()
thread.join(timeout=0.01)
assert thread.is_alive()
lock.release()
thread.join()
assert not thread.is_alive()
    def test_no_refcycle_through_target(self):
        """A Thread must not keep its target's reference cycle alive after it runs.

        Uses weakrefs to verify the cycle Thread -> target/args/kwargs -> owner
        is broken when the thread completes, both on normal exit and when the
        target raises SystemExit.
        """
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        restore_default_excepthook(self)
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertIsNone(weak_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertIsNone(weak_raising_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_raising_cyclic_object())))
    def test_old_threading_api(self):
        """The deprecated camelCase aliases still exist and warn appropriately."""
        # Just a quick sanity check to make sure the old method names are
        # still present
        t = threading.Thread()
        with self.assertWarnsRegex(DeprecationWarning,
                                   r'get the daemon attribute'):
            t.isDaemon()
        with self.assertWarnsRegex(DeprecationWarning,
                                   r'set the daemon attribute'):
            t.setDaemon(True)
        with self.assertWarnsRegex(DeprecationWarning,
                                   r'get the name attribute'):
            t.getName()
        with self.assertWarnsRegex(DeprecationWarning,
                                   r'set the name attribute'):
            t.setName("name")
        e = threading.Event()
        with self.assertWarnsRegex(DeprecationWarning, 'use is_set()'):
            e.isSet()
        cond = threading.Condition()
        cond.acquire()
        with self.assertWarnsRegex(DeprecationWarning, 'use notify_all()'):
            cond.notifyAll()
        with self.assertWarnsRegex(DeprecationWarning, 'use active_count()'):
            threading.activeCount()
        with self.assertWarnsRegex(DeprecationWarning, 'use current_thread()'):
            threading.currentThread()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
    @skip_unless_reliable_fork
    def test_dummy_thread_after_fork(self):
        """Subprocess test: a dummy thread in _active must survive os.fork()."""
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import _thread, threading, os, time, warnings
            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)
            evt = threading.Event()
            _thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            with warnings.catch_warnings(record=True) as ws:
                warnings.filterwarnings(
                        "always", category=DeprecationWarning)
                if os.fork() == 0:
                    assert threading.active_count() == 1, threading.active_count()
                    os._exit(0)
                else:
                    assert ws[0].category == DeprecationWarning, ws[0]
                    assert 'fork' in str(ws[0].message), ws[0]
                    os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')
    @skip_unless_reliable_fork
    def test_is_alive_after_fork(self):
        """#18418: is_alive() must be False in the child for pre-fork threads."""
        # Try hard to trigger #18418: is_alive() could sometimes be True on
        # threads that vanished after a fork.
        old_interval = sys.getswitchinterval()
        self.addCleanup(sys.setswitchinterval, old_interval)
        # Make the bug more likely to manifest.
        test.support.setswitchinterval(1e-6)
        for i in range(20):
            t = threading.Thread(target=lambda: None)
            t.start()
            # Ignore the warning about fork with threads.
            with warnings.catch_warnings(category=DeprecationWarning,
                                         action="ignore"):
                if (pid := os.fork()) == 0:
                    # Child: exit code encodes the (possibly buggy) liveness.
                    os._exit(11 if t.is_alive() else 10)
                else:
                    t.join()
                    support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
    @skip_unless_reliable_fork
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_main_thread_after_fork(self):
        """Subprocess test: after fork from the main thread, the child's
        main_thread() is the forking thread."""
        code = """if 1:
            import os, threading
            from test import support
            ident = threading.get_ident()
            pid = os.fork()
            if pid == 0:
                print("current ident", threading.get_ident() == ident)
                main = threading.main_thread()
                print("main", main.name)
                print("main ident", main.ident == ident)
                print("current is main", threading.current_thread() is main)
            else:
                support.wait_process(pid, exitcode=0)
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode().replace('\r', '')
        self.assertEqual(err, b"")
        self.assertEqual(data,
                         "current ident True\n"
                         "main MainThread\n"
                         "main ident True\n"
                         "current is main True\n")
    @skip_unless_reliable_fork
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_main_thread_after_fork_from_nonmain_thread(self):
        """Subprocess test: forking from a non-main Thread makes that thread
        the child's main thread (and emits the fork DeprecationWarning)."""
        code = """if 1:
            import os, threading, sys, warnings
            from test import support
            def func():
                ident = threading.get_ident()
                with warnings.catch_warnings(record=True) as ws:
                    warnings.filterwarnings(
                            "always", category=DeprecationWarning)
                    pid = os.fork()
                    if pid == 0:
                        print("current ident", threading.get_ident() == ident)
                        main = threading.main_thread()
                        print("main", main.name, type(main).__name__)
                        print("main ident", main.ident == ident)
                        print("current is main", threading.current_thread() is main)
                        # stdout is fully buffered because not a tty,
                        # we have to flush before exit.
                        sys.stdout.flush()
                    else:
                        assert ws[0].category == DeprecationWarning, ws[0]
                        assert 'fork' in str(ws[0].message), ws[0]
                        support.wait_process(pid, exitcode=0)
            th = threading.Thread(target=func)
            th.start()
            th.join()
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode().replace('\r', '')
        self.assertEqual(err.decode('utf-8'), "")
        self.assertEqual(data,
                         "current ident True\n"
                         "main Thread-1 (func) Thread\n"
                         "main ident True\n"
                         "current is main True\n"
                         )
    @skip_unless_reliable_fork
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_main_thread_after_fork_from_foreign_thread(self, create_dummy=False):
        """Subprocess test: fork from a raw _thread thread.

        With create_dummy=True a _DummyThread is registered before the fork;
        either way the forking thread must become the child's MainThread and
        threading._shutdown() must succeed in the child.
        """
        code = """if 1:
            import os, threading, sys, traceback, _thread
            from test import support
            def func(lock):
                ident = threading.get_ident()
                if %s:
                    # call current_thread() before fork to allocate DummyThread
                    current = threading.current_thread()
                    print("current", current.name, type(current).__name__)
                print("ident in _active", ident in threading._active)
                # flush before fork, so child won't flush it again
                sys.stdout.flush()
                pid = os.fork()
                if pid == 0:
                    print("current ident", threading.get_ident() == ident)
                    main = threading.main_thread()
                    print("main", main.name, type(main).__name__)
                    print("main ident", main.ident == ident)
                    print("current is main", threading.current_thread() is main)
                    print("_dangling", [t.name for t in list(threading._dangling)])
                    # stdout is fully buffered because not a tty,
                    # we have to flush before exit.
                    sys.stdout.flush()
                    try:
                        threading._shutdown()
                        os._exit(0)
                    except:
                        traceback.print_exc()
                        sys.stderr.flush()
                        os._exit(1)
                else:
                    try:
                        support.wait_process(pid, exitcode=0)
                    except Exception:
                        # avoid 'could not acquire lock for
                        # <_io.BufferedWriter name='<stderr>'> at interpreter shutdown,'
                        traceback.print_exc()
                        sys.stderr.flush()
                    finally:
                        lock.release()
            join_lock = _thread.allocate_lock()
            join_lock.acquire()
            th = _thread.start_new_thread(func, (join_lock,))
            join_lock.acquire()
        """ % create_dummy
        # "DeprecationWarning: This process is multi-threaded, use of fork()
        # may lead to deadlocks in the child"
        _, out, err = assert_python_ok("-W", "ignore::DeprecationWarning", "-c", code)
        data = out.decode().replace('\r', '')
        self.assertEqual(err.decode(), "")
        self.assertEqual(data,
                         ("current Dummy-1 _DummyThread\n" if create_dummy else "") +
                         f"ident in _active {create_dummy!s}\n" +
                         "current ident True\n"
                         "main MainThread _MainThread\n"
                         "main ident True\n"
                         "current is main True\n"
                         "_dangling ['MainThread']\n")
def test_main_thread_after_fork_from_dummy_thread(self, create_dummy=False):
self.test_main_thread_after_fork_from_foreign_thread(create_dummy=True)
    def test_main_thread_during_shutdown(self):
        """Subprocess test: current_thread() must still be the main thread while
        __del__ runs during interpreter shutdown (bpo-31516)."""
        # bpo-31516: current_thread() should still point to the main thread
        # at shutdown
        code = """if 1:
            import gc, threading
            main_thread = threading.current_thread()
            assert main_thread is threading.main_thread()  # sanity check
            class RefCycle:
                def __init__(self):
                    self.cycle = self
                def __del__(self):
                    print("GC:",
                          threading.current_thread() is main_thread,
                          threading.main_thread() is main_thread,
                          threading.enumerate() == [main_thread])
            RefCycle()
            gc.collect()  # sanity check
            x = RefCycle()
        """
        _, out, err = assert_python_ok("-c", code)
        data = out.decode()
        self.assertEqual(err, b"")
        self.assertEqual(data.splitlines(),
                         ["GC: True True True"] * 2)
    def test_finalization_shutdown(self):
        """Subprocess test: Py_Finalize() waits for non-daemon thread states
        (bpo-36402) even when TLS destructors sleep."""
        # bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
        # until Python thread states of all non-daemon threads get deleted.
        #
        # Test similar to SubinterpThreadingTests.test_threads_join_2(), but
        # test the finalization of the main interpreter.
        code = """if 1:
            import os
            import threading
            import time
            import random
            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)
            class Sleeper:
                def __del__(self):
                    random_sleep()
            tls = threading.local()
            def f():
                # Sleep a bit so that the thread is still running when
                # Py_Finalize() is called.
                random_sleep()
                tls.x = Sleeper()
                random_sleep()
            threading.Thread(target=f).start()
            random_sleep()
        """
        rc, out, err = assert_python_ok("-c", code)
        self.assertEqual(err, b"")
    def test_repr_stopped(self):
        """repr(Thread) must show "stopped" on its own, without join()/is_alive()."""
        # Verify that "stopped" shows up in repr(Thread) appropriately.
        started = _thread.allocate_lock()
        finish = _thread.allocate_lock()
        started.acquire()
        finish.acquire()
        def f():
            started.release()
            finish.acquire()
        t = threading.Thread(target=f)
        t.start()
        started.acquire()
        self.assertIn("started", repr(t))
        finish.release()
        # "stopped" should appear in the repr in a reasonable amount of time.
        # Implementation detail: as of this writing, that's trivially true
        # if .join() is called, and almost trivially true if .is_alive() is
        # called. The detail we're testing here is that "stopped" shows up
        # "all on its own".
        LOOKING_FOR = "stopped"
        for i in range(500):
            if LOOKING_FOR in repr(t):
                break
            time.sleep(0.01)
        self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
        t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
    @cpython_only
    def test_frame_tstate_tracing(self):
        """#14432: a generator created in a destroyed C thread must not crash
        when a trace function is installed."""
        _testcapi = import_module("_testcapi")
        # Issue #14432: Crash when a generator is created in a C thread that is
        # destroyed while the generator is still used. The issue was that a
        # generator contains a frame, and the frame kept a reference to the
        # Python state of the destroyed C thread. The crash occurs when a trace
        # function is setup.
        def noop_trace(frame, event, arg):
            # no operation
            return noop_trace
        def generator():
            while 1:
                yield "generator"
        def callback():
            if callback.gen is None:
                callback.gen = generator()
            return next(callback.gen)
        callback.gen = None
        old_trace = sys.gettrace()
        sys.settrace(noop_trace)
        try:
            # Install a trace function
            threading.settrace(noop_trace)
            # Create a generator in a C thread which exits after the call
            _testcapi.call_in_temporary_c_thread(callback)
            # Call the generator in a different Python thread, check that the
            # generator didn't keep a reference to the destroyed thread state
            for test in range(3):
                # The trace function is still called here
                callback()
        finally:
            sys.settrace(old_trace)
            threading.settrace(old_trace)
def test_gettrace(self):
def noop_trace(frame, event, arg):
# no operation
return noop_trace
old_trace = threading.gettrace()
try:
threading.settrace(noop_trace)
trace_func = threading.gettrace()
self.assertEqual(noop_trace,trace_func)
finally:
threading.settrace(old_trace)
def test_gettrace_all_threads(self):
def fn(*args): pass
old_trace = threading.gettrace()
first_check = threading.Event()
second_check = threading.Event()
trace_funcs = []
def checker():
trace_funcs.append(sys.gettrace())
first_check.set()
second_check.wait()
trace_funcs.append(sys.gettrace())
try:
t = threading.Thread(target=checker)
t.start()
first_check.wait()
threading.settrace_all_threads(fn)
second_check.set()
t.join()
self.assertEqual(trace_funcs, [None, fn])
self.assertEqual(threading.gettrace(), fn)
self.assertEqual(sys.gettrace(), fn)
finally:
threading.settrace_all_threads(old_trace)
self.assertEqual(threading.gettrace(), old_trace)
self.assertEqual(sys.gettrace(), old_trace)
def test_getprofile(self):
def fn(*args): pass
old_profile = threading.getprofile()
try:
threading.setprofile(fn)
self.assertEqual(fn, threading.getprofile())
finally:
threading.setprofile(old_profile)
def test_getprofile_all_threads(self):
def fn(*args): pass
old_profile = threading.getprofile()
first_check = threading.Event()
second_check = threading.Event()
profile_funcs = []
def checker():
profile_funcs.append(sys.getprofile())
first_check.set()
second_check.wait()
profile_funcs.append(sys.getprofile())
try:
t = threading.Thread(target=checker)
t.start()
first_check.wait()
threading.setprofile_all_threads(fn)
second_check.set()
t.join()
self.assertEqual(profile_funcs, [None, fn])
self.assertEqual(threading.getprofile(), fn)
self.assertEqual(sys.getprofile(), fn)
finally:
threading.setprofile_all_threads(old_profile)
self.assertEqual(threading.getprofile(), old_profile)
self.assertEqual(sys.getprofile(), old_profile)
    def test_locals_at_exit(self):
        """Subprocess test: thread locals survive until __del__ runs at exit."""
        # bpo-19466: thread locals must not be deleted before destructors
        # are called
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            class Atexit:
                def __del__(self):
                    print("thread_dict.atexit = %r" % thread_dict.atexit)
            thread_dict = threading.local()
            thread_dict.atexit = "value"
            atexit = Atexit()
        """)
        self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_boolean_target(self):
# bpo-41149: A thread that had a boolean value of False would not
# run, regardless of whether it was callable. The correct behaviour
# is for a thread to do nothing if its target is None, and to call
# the target otherwise.
class BooleanTarget(object):
def __init__(self):
self.ran = False
def __bool__(self):
return False
def __call__(self):
self.ran = True
target = BooleanTarget()
thread = threading.Thread(target=target)
thread.start()
thread.join()
self.assertTrue(target.ran)
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with threading_helper.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
    def test_import_from_another_thread(self):
        """Subprocess test: first import of threading from a non-main thread
        must not make _shutdown() log an error at exit (bpo-1596321)."""
        # bpo-1596321: If the threading module is first import from a thread
        # different than the main thread, threading._shutdown() must handle
        # this case without logging an error at Python exit.
        code = textwrap.dedent('''
            import _thread
            import sys
            event = _thread.allocate_lock()
            event.acquire()
            def import_threading():
                import threading
                event.release()
            if 'threading' in sys.modules:
                raise Exception('threading is already imported')
            _thread.start_new_thread(import_threading, ())
            # wait until the threading module is imported
            event.acquire()
            event.release()
            if 'threading' not in sys.modules:
                raise Exception('threading is not imported')
            # don't wait until the thread completes
        ''')
        rc, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')
    def test_start_new_thread_at_finalization(self):
        """Subprocess test: starting a thread during interpreter shutdown fails
        cleanly with a RuntimeError message on stderr."""
        code = """if 1:
            import _thread
            def f():
                print("shouldn't be printed")
            class AtFinalization:
                def __del__(self):
                    print("OK")
                    _thread.start_new_thread(f, ())
            at_finalization = AtFinalization()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out.strip(), b"OK")
        self.assertIn(b"can't create new thread at interpreter shutdown", err)
    def test_start_new_thread_failed(self):
        """Subprocess test: a failed PyThread_start_new_thread() must clean up
        the pending thread state (gh-109746).

        Uses RLIMIT_NPROC=0 to force thread creation to fail; skipped where
        the limit is unavailable or ineffective (e.g. running as root).
        """
        # gh-109746: if Python fails to start newly created thread
        # due to failure of underlying PyThread_start_new_thread() call,
        # its state should be removed from interpreter' thread states list
        # to avoid its double cleanup
        try:
            from resource import setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        code = """if 1:
            import resource
            import _thread
            def f():
                print("shouldn't be printed")
            limits = resource.getrlimit(resource.RLIMIT_NPROC)
            [_, hard] = limits
            resource.setrlimit(resource.RLIMIT_NPROC, (0, hard))
            try:
                handle = _thread.start_joinable_thread(f)
            except RuntimeError:
                print('ok')
            else:
                print('!skip!')
                handle.join()
        """
        _, out, err = assert_python_ok("-u", "-c", code)
        out = out.strip()
        if b'!skip!' in out:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
        self.assertEqual(out, b'ok')
        self.assertEqual(err, b'')
    @skip_unless_reliable_fork
    @unittest.skipUnless(hasattr(threading, 'get_native_id'), "test needs threading.get_native_id()")
    def test_native_id_after_fork(self):
        """Subprocess test: the child's main-thread native_id differs from the
        parent's and stays consistent with get_native_id()."""
        script = """if True:
            import threading
            import os
            from test import support
            parent_thread_native_id = threading.current_thread().native_id
            print(parent_thread_native_id, flush=True)
            assert parent_thread_native_id == threading.get_native_id()
            childpid = os.fork()
            if childpid == 0:
                print(threading.current_thread().native_id, flush=True)
                assert threading.current_thread().native_id == threading.get_native_id()
            else:
                try:
                    assert parent_thread_native_id == threading.current_thread().native_id
                    assert parent_thread_native_id == threading.get_native_id()
                finally:
                    support.wait_process(childpid, exitcode=0)
        """
        rc, out, err = assert_python_ok('-c', script)
        self.assertEqual(rc, 0)
        self.assertEqual(err, b"")
        native_ids = out.strip().splitlines()
        self.assertEqual(len(native_ids), 2)
        self.assertNotEqual(native_ids[0], native_ids[1])
|
ThreadTests
|
python
|
scipy__scipy
|
scipy/signal/tests/test_ltisys.py
|
{
"start": 39020,
"end": 42914
}
|
class ____:
    """Tests for scipy.signal.bode() against hand-computed and analytic values."""
    def test_01(self):
        """Magnitude sanity check for H(s) = 1/(s+1): -20 dB/decade rolloff."""
        # H(s=0.1) ~= 0 dB, H(s=1) ~= -3 dB, H(s=10) ~= -20 dB, H(s=100) ~= -40 dB
        lowpass = lti([1], [1, 1])
        freqs = [0.1, 1, 10, 100]
        freqs, magnitude, _ = bode(lowpass, w=freqs)
        assert_almost_equal(magnitude, [0, -3, -20, -40], decimal=1)
    def test_02(self):
        """Phase sanity check for H(s) = 1/(s+1)."""
        # angle(H) at w = 0.1, 1, 10 is ~-5.7, -45, -84.3 degrees.
        lowpass = lti([1], [1, 1])
        freqs = [0.1, 1, 10]
        freqs, _, phase_deg = bode(lowpass, w=freqs)
        assert_almost_equal(phase_deg, [-5.7, -45, -84.3], decimal=1)
    def test_03(self):
        """Magnitude matches 20*log10(|H(jw)|) computed from the polynomials."""
        lowpass = lti([1], [1, 1])
        freqs = [0.1, 1, 10, 100]
        freqs, magnitude, _ = bode(lowpass, w=freqs)
        response = (np.polyval(lowpass.num, freqs * 1j)
                    / np.polyval(lowpass.den, freqs * 1j))
        assert_almost_equal(magnitude, 20.0 * np.log10(abs(response)))
    def test_04(self):
        """Phase matches atan2(Im H, Re H) in degrees computed from the polynomials."""
        lowpass = lti([1], [1, 1])
        freqs = [0.1, 1, 10, 100]
        freqs, _, phase_deg = bode(lowpass, w=freqs)
        response = (np.polyval(lowpass.num, freqs * 1j)
                    / np.polyval(lowpass.den, freqs * 1j))
        expected = np.arctan2(response.imag, response.real) * 180.0 / np.pi
        assert_almost_equal(phase_deg, expected)
    def test_05(self):
        """bode() picks a reasonable default frequency range (0.01 to 10 here)."""
        lowpass = lti([1], [1, 1])
        n_points = 10
        freqs, _, _ = bode(lowpass, n=n_points)
        assert_almost_equal(freqs, np.logspace(-2, 1, n_points))
    def test_06(self):
        """bode() must not produce NaN for a system with a pole at s = 0."""
        # integrator, pole at zero: H(s) = 1 / s
        integrator = lti([1], [1, 0])
        freqs, _, _ = bode(integrator, n=2)
        assert freqs[0] == 0.01  # a fail would give not-a-number
    def test_07(self):
        """bode() must not raise for purely imaginary poles."""
        # The test passes if bode doesn't raise an exception.
        oscillator = lti([1], [1, 0, 100])
        bode(oscillator, n=2)
    def test_08(self):
        """bode() returns a continuous (unwrapped) phase, issues/2331."""
        all_real_poles = lti([], [-10, -30, -40, -60, -70], 1)
        _, _, phase_deg = all_real_poles.bode(w=np.logspace(-3, 40, 100))
        assert_almost_equal(min(phase_deg), -450, decimal=15)
    def test_from_state_space(self):
        """bode() works on a system built from state-space matrices A, B, C, D.

        In that case system.num is a 2-D array with shape (1, n+1) where (n, n)
        is the shape of A. A 3rd-order Butterworth lowpass is used because its
        magnitude response is known analytically.
        """
        butter_den = np.array([1.0, 2.0, 2.0, 1.0])
        A = linalg.companion(butter_den).T
        B = np.array([[0.0], [0.0], [1.0]])
        C = np.array([[1.0, 0.0, 0.0]])
        D = np.array([[0.0]])
        with warnings.catch_warnings():
            # The ss->tf conversion emits BadCoefficients; irrelevant here.
            warnings.simplefilter("ignore", BadCoefficients)
            system = lti(A, B, C, D)
            freqs, magnitude, _ = bode(system, n=100)
        expected = 20 * np.log10(np.sqrt(1.0 / (1.0 + freqs**6)))
        assert_almost_equal(magnitude, expected)
|
Test_bode
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_device_counter_consumption.py
|
{
"start": 383,
"end": 5328
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'counter_set': 'str',
'counters': 'dict(str, V1Counter)'
}
attribute_map = {
'counter_set': 'counterSet',
'counters': 'counters'
}
    def __init__(self, counter_set=None, counters=None, local_vars_configuration=None):  # noqa: E501
        """V1DeviceCounterConsumption - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied;
        # it controls client-side validation in the property setters.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the properties below.
        self._counter_set = None
        self._counters = None
        self.discriminator = None
        # Assign through the property setters so validation applies.
        self.counter_set = counter_set
        self.counters = counters
    @property
    def counter_set(self):
        """Gets the counter_set of this V1DeviceCounterConsumption.  # noqa: E501

        CounterSet is the name of the set from which the counters defined will be consumed.  # noqa: E501

        :return: The counter_set of this V1DeviceCounterConsumption.  # noqa: E501
        :rtype: str
        """
        return self._counter_set
    @counter_set.setter
    def counter_set(self, counter_set):
        """Sets the counter_set of this V1DeviceCounterConsumption.

        CounterSet is the name of the set from which the counters defined will be consumed.  # noqa: E501

        :param counter_set: The counter_set of this V1DeviceCounterConsumption.  # noqa: E501
        :type: str
        """
        # counter_set is required by the API schema; reject None when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and counter_set is None:  # noqa: E501
            raise ValueError("Invalid value for `counter_set`, must not be `None`")  # noqa: E501
        self._counter_set = counter_set
    @property
    def counters(self):
        """Gets the counters of this V1DeviceCounterConsumption.  # noqa: E501

        Counters defines the counters that will be consumed by the device. The maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each).  # noqa: E501

        :return: The counters of this V1DeviceCounterConsumption.  # noqa: E501
        :rtype: dict(str, V1Counter)
        """
        return self._counters
@counters.setter
def counters(self, counters):
"""Sets the counters of this V1DeviceCounterConsumption.
Counters defines the counters that will be consumed by the device. The maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each). # noqa: E501
:param counters: The counters of this V1DeviceCounterConsumption. # noqa: E501
:type: dict(str, V1Counter)
"""
if self.local_vars_configuration.client_side_validation and counters is None: # noqa: E501
raise ValueError("Invalid value for `counters`, must not be `None`") # noqa: E501
self._counters = counters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeviceCounterConsumption):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeviceCounterConsumption):
return True
return self.to_dict() != other.to_dict()
|
V1DeviceCounterConsumption
|
python
|
walkccc__LeetCode
|
solutions/2030. Smallest K-Length Subsequence With Occurrences of a Letter/2030.py
|
{
"start": 0,
"end": 889
}
|
class ____:
def smallestSubsequence(
self,
s: str,
k: int,
letter: str,
repetition: int,
) -> str:
stack = [] # running string
required = repetition
nLetters = s.count(letter)
for i, c in enumerate(s):
# Make sure the length is sufficient:
# Len(stack) := the length of running string
# Len(s) - i := the length of remain chars
# -1 := we're going to pop a char
while (stack and stack[-1] > c
and len(stack) + len(s) - i - 1 >= k
and (stack[-1] != letter or nLetters > required)):
if stack.pop() == letter:
required += 1
if len(stack) < k:
if c == letter:
stack.append(c)
required -= 1
elif k - len(stack) > required:
stack.append(c)
if c == letter:
nLetters -= 1
return ''.join(stack)
|
Solution
|
python
|
django__django
|
tests/queries/models.py
|
{
"start": 8682,
"end": 8909
}
|
class ____(models.Model):
    """A meal at which a particular food was eaten."""

    # FK keyed on Food.name (to_field); nulled out (SET_NULL) rather than
    # cascading when the referenced Food row is deleted.
    food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
    meal = models.CharField(max_length=20)

    def __str__(self):
        return str(self.food) + " at " + str(self.meal)
|
Eaten
|
python
|
davidhalter__jedi
|
jedi/inference/compiled/subprocess/__init__.py
|
{
"start": 17839,
"end": 19369
}
|
class ____:
    """Picklable proxy for an object that lives in the compiled-inspection
    subprocess.

    Only the integer ``id`` crosses the process boundary (see
    ``__getstate__``); attribute access on the handle is forwarded to the
    subprocess, which resolves ``id`` back to the real object.
    """

    def __init__(
        self,
        subprocess: _InferenceStateProcess,
        access: DirectObjectAccess,
        id_: int,
    ) -> None:
        self.access = access
        self._subprocess = subprocess
        self.id = id_

    def add_subprocess(self, subprocess):
        # Re-attach a subprocess after unpickling (pickling keeps only `id`).
        self._subprocess = subprocess

    def __repr__(self):
        try:
            detail = self.access
        except AttributeError:
            # An unpickled handle has no `access`; fall back to the numeric id.
            detail = '#' + str(self.id)
        return '<%s of %s>' % (self.__class__.__name__, detail)

    def __getstate__(self):
        # Serialize only the id; the receiving side resolves it again.
        return self.id

    def __setstate__(self, state):
        self.id = state

    def __getattr__(self, name):
        # Core attributes must never fall through to the forwarding path; if
        # they are missing, unpickling went wrong rather than a method call.
        if name in ('id', 'access') or name.startswith('_'):
            raise AttributeError("Something went wrong with unpickling")

        # print('getattr', name, file=sys.stderr)
        # Any other attribute becomes a callable that forwards to the
        # subprocess with the method name bound.
        return partial(self._workaround, name)

    def _workaround(self, name, *args, **kwargs):
        """
        TODO Currently we're passing slice objects around. This should not
        happen. They are also the only unhashable objects that we're passing
        around.
        """
        if args and isinstance(args[0], slice):
            # Slices are unhashable, so they cannot go through the memoized
            # path below.
            return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
        return self._cached_results(name, *args, **kwargs)

    @memoize_method
    def _cached_results(self, name, *args, **kwargs):
        # Memoized per (handle, name, args): subprocess round-trips are costly.
        return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
|
AccessHandle
|
python
|
trekhleb__learn-python
|
src/classes/test_inheritance.py
|
{
"start": 617,
"end": 3263
}
|
class ____(Person):
    """A Person who additionally carries a staff id.

    Demonstrates single inheritance: the base-class initializer is reused via
    ``super()`` and ``get_name`` is inherited unchanged.
    """

    def __init__(self, name, staff_id):
        # Let the base class take care of the name, then add our own state.
        super().__init__(name)
        self.staff_id = staff_id

    def get_full_id(self):
        """Return the employee's name and staff id, comma separated."""
        return ', '.join((self.get_name(), self.staff_id))
def test_inheritance():
    """Inheritance."""
    # Instantiating a derived class works exactly like instantiating the base
    # class; method lookup walks from Employee up to Person as needed.
    person = Person('Bill')
    employee = Employee('John', 'A23')
    assert person.get_name() == 'Bill'
    assert employee.get_name() == 'John'
    assert employee.get_full_id() == 'John, A23'

    # isinstance() respects inheritance: an Employee is also a Person,
    # but a plain Person is not an Employee.
    assert isinstance(employee, Employee)
    assert not isinstance(person, Employee)
    assert isinstance(person, Person)
    assert isinstance(employee, Person)

    # issubclass() checks the class relationship itself — one direction only.
    assert issubclass(Employee, Person)
    assert not issubclass(Person, Employee)
|
Employee
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
|
{
"start": 1899,
"end": 2133
}
|
class ____(BaseModel):
    # Pydantic custom-root model: the value IS the release-stage string
    # itself, restricted to the four known stages.
    __root__: Literal["alpha", "beta", "generally_available", "custom"] = Field(
        ...,
        description="enum that describes a connector's release stage",
        title="ReleaseStage",
    )
|
ReleaseStage
|
python
|
pydantic__pydantic
|
pydantic/networks.py
|
{
"start": 11644,
"end": 18194
}
|
class ____:
    """Base class for multi-host URL types backed by
    ``pydantic_core.MultiHostUrl`` (e.g. database DSNs listing several hosts).

    Subclasses customize validation via the ``_constraints`` class variable.
    """

    _constraints: ClassVar[UrlConstraints] = UrlConstraints()
    _url: _CoreMultiHostUrl

    def __init__(self, url: str | _CoreMultiHostUrl | _BaseMultiHostUrl) -> None:
        # Run the input through a (cached) TypeAdapter so the class-level
        # constraints are enforced, then keep only the core URL object.
        self._url = _build_type_adapter(self.__class__).validate_python(url)._url

    @property
    def scheme(self) -> str:
        """The scheme part of the URL.

        e.g. `https` in `https://foo.com,bar.com/path?query#fragment`
        """
        return self._url.scheme

    @property
    def path(self) -> str | None:
        """The path part of the URL, or `None`.

        e.g. `/path` in `https://foo.com,bar.com/path?query#fragment`
        """
        return self._url.path

    @property
    def query(self) -> str | None:
        """The query part of the URL, or `None`.

        e.g. `query` in `https://foo.com,bar.com/path?query#fragment`
        """
        return self._url.query

    def query_params(self) -> list[tuple[str, str]]:
        """The query part of the URL as a list of key-value pairs.

        e.g. `[('foo', 'bar')]` in `https://foo.com,bar.com/path?foo=bar#fragment`
        """
        return self._url.query_params()

    @property
    def fragment(self) -> str | None:
        """The fragment part of the URL, or `None`.

        e.g. `fragment` in `https://foo.com,bar.com/path?query#fragment`
        """
        return self._url.fragment

    def hosts(self) -> list[MultiHostHost]:
        '''The hosts of the `MultiHostUrl` as [`MultiHostHost`][pydantic_core.MultiHostHost] typed dicts.

        ```python
        from pydantic_core import MultiHostUrl

        mhu = MultiHostUrl('https://foo.com:123,foo:bar@bar.com/path')
        print(mhu.hosts())
        """
        [
            {'username': None, 'password': None, 'host': 'foo.com', 'port': 123},
            {'username': 'foo', 'password': 'bar', 'host': 'bar.com', 'port': 443}
        ]
        ```

        Returns:
            A list of dicts, each representing a host.
        '''
        return self._url.hosts()

    def encoded_string(self) -> str:
        """The URL's encoded string representation via __str__().

        This returns the punycode-encoded host version of the URL as a string.
        """
        return str(self)

    def unicode_string(self) -> str:
        """The URL as a unicode string, unlike `__str__()` this will not punycode encode the hosts."""
        return self._url.unicode_string()

    def __str__(self) -> str:
        """The URL as a string, this will punycode encode the host if required."""
        return str(self._url)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({str(self._url)!r})'

    def __deepcopy__(self, memo: dict) -> Self:
        # Re-wrapping is enough: the underlying core URL is not mutated
        # anywhere in this class.
        return self.__class__(self._url)

    def __eq__(self, other: Any) -> bool:
        # Deliberately strict `is` on the classes: subclasses with different
        # constraints never compare equal, even for identical URL text.
        return self.__class__ is other.__class__ and self._url == other._url

    def __hash__(self) -> int:
        return hash(self._url)

    def __len__(self) -> int:
        return len(str(self._url))

    @classmethod
    def build(
        cls,
        *,
        scheme: str,
        hosts: list[MultiHostHost] | None = None,
        username: str | None = None,
        password: str | None = None,
        host: str | None = None,
        port: int | None = None,
        path: str | None = None,
        query: str | None = None,
        fragment: str | None = None,
    ) -> Self:
        """Build a new `MultiHostUrl` instance from its component parts.

        This method takes either `hosts` - a list of `MultiHostHost` typed dicts, or the individual components
        `username`, `password`, `host` and `port`.

        Args:
            scheme: The scheme part of the URL.
            hosts: Multiple hosts to build the URL from.
            username: The username part of the URL.
            password: The password part of the URL.
            host: The host part of the URL.
            port: The port part of the URL.
            path: The path part of the URL.
            query: The query part of the URL, or omit for no query.
            fragment: The fragment part of the URL, or omit for no fragment.

        Returns:
            An instance of `MultiHostUrl`
        """
        return cls(
            _CoreMultiHostUrl.build(
                scheme=scheme,
                hosts=hosts,
                username=username,
                password=password,
                host=host,
                port=port,
                path=path,
                query=query,
                fragment=fragment,
            )
        )

    @classmethod
    def serialize_url(cls, url: Any, info: core_schema.SerializationInfo) -> str | Self:
        if not isinstance(url, cls):
            raise PydanticSerializationUnexpectedValue(
                f"Expected `{cls}` but got `{type(url)}` with value `'{url}'` - serialized value may not be as expected."
            )
        # JSON mode serializes to the string form; python mode keeps the object.
        if info.mode == 'json':
            return str(url)
        return url

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source: type[_BaseMultiHostUrl], handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        def wrap_val(v, h):
            # Accept existing instances of this exact class as-is; other URL
            # wrapper types are re-validated via their string form.
            if isinstance(v, source):
                return v
            if isinstance(v, _BaseMultiHostUrl):
                v = str(v)
            core_url = h(v)
            # __new__ bypasses __init__ to avoid a redundant validation pass.
            instance = source.__new__(source)
            instance._url = core_url
            return instance

        return core_schema.no_info_wrap_validator_function(
            wrap_val,
            schema=core_schema.multi_host_url_schema(**cls._constraints.defined_constraints),
            serialization=core_schema.plain_serializer_function_ser_schema(
                cls.serialize_url, info_arg=True, when_used='always'
            ),
        )

    @classmethod
    def __get_pydantic_json_schema__(
        cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
    ) -> JsonSchemaValue:
        # we use the url schema for json schema generation, but we might have to extract it from
        # the function-wrap schema we use as a tool for validation on initialization
        inner_schema = core_schema['schema'] if core_schema['type'] == 'function-wrap' else core_schema
        return handler(inner_schema)

    # Fallback serializer used when the class is serialized directly (outside
    # a model field): just stringify.
    __pydantic_serializer__ = SchemaSerializer(core_schema.any_schema(serialization=core_schema.to_string_ser_schema()))
@lru_cache
def _build_type_adapter(cls: type[_BaseUrl | _BaseMultiHostUrl]) -> TypeAdapter:
    # Cached: building a TypeAdapter is comparatively expensive and the same
    # URL classes are validated over and over.
    return TypeAdapter(cls)
|
_BaseMultiHostUrl
|
python
|
spyder-ide__spyder
|
installers-conda/utils.py
|
{
"start": 635,
"end": 734
}
|
class ____(
RawDescriptionHelpFormatter,
ArgumentDefaultsHelpFormatter
):
pass
|
DocFormatter
|
python
|
django__django
|
tests/postgres_tests/test_indexes.py
|
{
"start": 6719,
"end": 7445
}
|
class ____(IndexTestMixin, PostgreSQLSimpleTestCase):
    """Checks GistIndex naming suffix and migration deconstruction."""

    index_class = GistIndex

    def test_suffix(self):
        self.assertEqual(GistIndex.suffix, "gist")

    def test_deconstruction(self):
        index = GistIndex(
            fields=["title"], name="test_title_gist", buffering=False, fillfactor=80
        )
        path, args, kwargs = index.deconstruct()
        # deconstruct() must round-trip every constructor option.
        expected_kwargs = {
            "fields": ["title"],
            "name": "test_title_gist",
            "buffering": False,
            "fillfactor": 80,
        }
        self.assertEqual(path, "django.contrib.postgres.indexes.GistIndex")
        self.assertEqual(args, ())
        self.assertEqual(kwargs, expected_kwargs)
|
GistIndexTests
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v/modeling_glm4v.py
|
{
"start": 5147,
"end": 5968
}
|
class ____(nn.Module):
    """Projects vision patch features, then applies a gated MLP over them."""

    def __init__(self, dim: int, context_dim: int, hidden_act: str, bias: bool = False) -> None:
        super().__init__()
        # NOTE: submodule creation order is kept identical to the reference
        # implementation so parameter registration / RNG draws are unchanged.
        self.proj = nn.Linear(dim, dim, bias=bias)
        self.post_projection_norm = LayerNorm(dim)
        self.gate_proj = nn.Linear(dim, context_dim, bias=bias)
        self.up_proj = nn.Linear(dim, context_dim, bias=bias)
        self.down_proj = nn.Linear(context_dim, dim, bias=bias)
        self.act1 = nn.GELU()
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Linear projection followed by norm + GELU, then a gated expansion
        # (gate * up) contracted back to `dim`.
        projected = self.act1(self.post_projection_norm(self.proj(hidden_state)))
        gated = self.act_fn(self.gate_proj(projected)) * self.up_proj(projected)
        return self.down_proj(gated)
|
Glm4vVisionPatchMerger
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_confidence_for_data_label_to_be_less_than_or_equal_to_threshold.py
|
{
"start": 2565,
"end": 7979
}
|
class ____(ColumnMapExpectation):
    """Expect the column values to have a DataProfiler confidence threshold less than or equal to the specified threshold for the data label.

    This function builds upon the custom column map expectations of Great Expectations. This function asks the question a yes/no question of each row in the user-specified column; namely, is the confidence threshold provided by the DataProfiler model upper bounded by the user-specified threshold.

    Args:
        column (str): The column name that you want to check.
        data_label(str): The data label for which you want to check confidences against the threshold value
        threshold (float): The value, usually as a decimal (e.g. .32), you want to use to flag low confidence predictions

    df.expect_column_values_confidence_for_data_label_to_be_greater_than_or_equal_to_threshold(
        column,
        data_label=<>,
        threshold=float(0<=1)
    )
    """

    # Gallery example data plus the test cases run against it. The last row of
    # every column is deliberately junk ("McRoomyRoom") so the profiler sees a
    # low-confidence value.
    examples = [
        {
            "data": {
                "OPEID6": ["1002", "1052", "25034", "McRoomyRoom"],
                "INSTNM": [
                    "Alabama A & M University",
                    "University of Alabama at Birmingham",
                    "Amridge University",
                    "McRoomyRoom",
                ],
                "ZIP": ["35762", "35294-0110", "36117-3553", "McRoomyRoom"],
                "ACCREDAGENCY": [
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "Southern Association of Colleges and Schools Commission on Colleges",
                    "McRoomyRoom",
                ],
                "INSTURL": [
                    "www.aamu.edu/",
                    "https://www.uab.edu",
                    "www.amridgeuniversity.edu",
                    "McRoomyRoom",
                ],
                "NPCURL": [
                    "www.aamu.edu/admissions-aid/tuition-fees/net-price-calculator.html",
                    "https://uab.studentaidcalculator.com/survey.aspx",
                    "www2.amridgeuniversity.edu:9091/",
                    "McRoomyRoom",
                ],
                "LATITUDE": ["34.783368", "33.505697", "32.362609", "McRoomyRoom"],
                "LONGITUDE": ["-86.568502", "-86.799345", "-86.17401", "McRoomyRoom"],
                "RELAFFIL": ["NULL", "NULL", "74", "McRoomyRoom"],
                "DEATH_YR2_RT": [
                    "PrivacySuppressed",
                    "PrivacySuppressed",
                    "PrivacySuppressed",
                    "McRoomyRoom",
                ],
                "SEARCH_STRING": [
                    "Alabama A & M University AAMU",
                    "University of Alabama at Birmingham ",
                    "Amridge University Southern Christian University Regions University",
                    "McRoomyRoom",
                ],
            },
            "tests": [
                # threshold=1.0 upper-bounds every confidence -> success;
                # threshold=0.0 bounds none -> failure.
                {
                    "title": "positive_test_with_column_one",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "ZIP", "data_label": "ADDRESS", "threshold": 1.00},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "failing_test_with_column_one",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "ZIP", "data_label": "ADDRESS", "threshold": 0.00},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = (
        "column_values.prediction_confidence_for_data_label_less_than_or_equal_to_threshold"
    )

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = (
        "threshold",
        "data_label",
        "mostly",
    )

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {
        "threshold": None,
        "data_label": None,
        "result_format": "BASIC",
        "catch_exceptions": False,
    }

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
        "maturity": "experimental",  # "concept_only", "experimental", "beta", or "production"
        "tags": ["dataprofiler"],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@taylorfturner",  # Don't forget to add your github handle here!
            "@stevensecreti",
        ],
    }
if __name__ == "__main__":
diagnostics_report = (
ExpectColumnValuesConfidenceForDataLabelToBeLessThanOrEqualToThreshold().run_diagnostics()
)
print(diagnostics_report.generate_checklist())
|
ExpectColumnValuesConfidenceForDataLabelToBeLessThanOrEqualToThreshold
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_quantization.py
|
{
"start": 84135,
"end": 84574
}
|
class ____(nn.Module):
    """Toy conv -> relu -> conv model built from functional conv wrappers."""

    def __init__(self) -> None:
        super().__init__()
        self.conv1 = FunctionalConv2d()
        self.relu = nn.ReLU()
        self.conv2 = FunctionalConv2d()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))

    def get_example_inputs(self) -> tuple[Any, ...]:
        # Reuse the first conv's example inputs; both convs take the same shape.
        return self.conv1.get_example_inputs()
|
FunctionalConvReluConvModel
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/Qt/__init__.py
|
{
"start": 1519,
"end": 13002
}
|
class ____(object):
"""Used to defer ImportErrors until we are sure the module is needed.
"""
def __init__(self, err):
self.err = err
def __getattr__(self, attr):
raise self.err
# Make a loadUiType function like PyQt has
# Credit:
# http://stackoverflow.com/questions/4442286/python-code-genration-with-pyside-uic/14195313#14195313
def _loadUiType(uiFile):
    """
    PySide lacks a "loadUiType" command like PyQt4's, so we have to convert
    the ui file to py code in-memory first and then execute it in a
    special frame to retrieve the form_class.

    from stackoverflow: http://stackoverflow.com/a/14195313/3781327

    seems like this might also be a legitimate solution, but I'm not sure
    how to make PyQt4 and pyside look the same...
        http://stackoverflow.com/a/8717832
    """

    # get class names from ui file
    import xml.etree.ElementTree as xml
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text

    # convert ui file to python code
    # NOTE(review): assumes the binding's CLI tool is named "<lib>-uic"
    # (e.g. pyside6-uic) and is on PATH — confirm for each supported binding.
    uic_executable = QT_LIB.lower() + '-uic'
    uipy = subprocess.check_output([uic_executable, uiFile])

    # execute python code
    pyc = compile(uipy, '<string>', 'exec')
    frame = {}
    exec(pyc, frame)

    # fetch the base_class and form class based on their type in the xml from designer
    form_class = frame['Ui_%s'%form_class]
    base_class = eval('QtWidgets.%s'%widget_class)
    return form_class, base_class
# For historical reasons, pyqtgraph maintains a Qt4-ish interface back when
# there wasn't a QtWidgets module. This _was_ done by monkey-patching all of
# QtWidgets into the QtGui module. This monkey-patching modifies QtGui at a
# global level.
# To avoid this, we now maintain a local "mirror" of QtCore, QtGui and QtWidgets.
# Thus, when monkey-patching happens later on in this file, they will only affect
# the local modules and not the global modules.
def _copy_attrs(src, dst):
for o in dir(src):
if not hasattr(dst, o):
setattr(dst, o, getattr(src, o))
from . import QtCore, QtGui, QtWidgets, compat
if QT_LIB == PYQT5:
# We're using PyQt5 which has a different structure so we're going to use a shim to
# recreate the Qt4 structure for Qt5
import PyQt5.QtCore
import PyQt5.QtGui
import PyQt5.QtWidgets
_copy_attrs(PyQt5.QtCore, QtCore)
_copy_attrs(PyQt5.QtGui, QtGui)
_copy_attrs(PyQt5.QtWidgets, QtWidgets)
try:
from PyQt5 import sip
except ImportError:
# some Linux distros package it this way (e.g. Ubuntu)
import sip
from PyQt5 import uic
try:
from PyQt5 import QtSvg
except ImportError as err:
QtSvg = FailedImport(err)
try:
from PyQt5 import QtTest
except ImportError as err:
QtTest = FailedImport(err)
VERSION_INFO = 'PyQt5 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR
elif QT_LIB == PYQT6:
import PyQt6.QtCore
import PyQt6.QtGui
import PyQt6.QtWidgets
_copy_attrs(PyQt6.QtCore, QtCore)
_copy_attrs(PyQt6.QtGui, QtGui)
_copy_attrs(PyQt6.QtWidgets, QtWidgets)
from PyQt6 import sip, uic
try:
from PyQt6 import QtSvg
except ImportError as err:
QtSvg = FailedImport(err)
try:
from PyQt6 import QtOpenGLWidgets
except ImportError as err:
QtOpenGLWidgets = FailedImport(err)
try:
from PyQt6 import QtTest
except ImportError as err:
QtTest = FailedImport(err)
VERSION_INFO = 'PyQt6 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR
elif QT_LIB == PYSIDE2:
import PySide2.QtCore
import PySide2.QtGui
import PySide2.QtWidgets
_copy_attrs(PySide2.QtCore, QtCore)
_copy_attrs(PySide2.QtGui, QtGui)
_copy_attrs(PySide2.QtWidgets, QtWidgets)
try:
from PySide2 import QtSvg
except ImportError as err:
QtSvg = FailedImport(err)
try:
from PySide2 import QtTest
except ImportError as err:
QtTest = FailedImport(err)
import PySide2
import shiboken2 as shiboken
VERSION_INFO = 'PySide2 ' + PySide2.__version__ + ' Qt ' + QtCore.__version__
elif QT_LIB == PYSIDE6:
import PySide6.QtCore
import PySide6.QtGui
import PySide6.QtWidgets
_copy_attrs(PySide6.QtCore, QtCore)
_copy_attrs(PySide6.QtGui, QtGui)
_copy_attrs(PySide6.QtWidgets, QtWidgets)
try:
from PySide6 import QtSvg
except ImportError as err:
QtSvg = FailedImport(err)
try:
from PySide6 import QtOpenGLWidgets
except ImportError as err:
QtOpenGLWidgets = FailedImport(err)
try:
from PySide6 import QtTest
except ImportError as err:
QtTest = FailedImport(err)
import PySide6
import shiboken6 as shiboken
VERSION_INFO = 'PySide6 ' + PySide6.__version__ + ' Qt ' + QtCore.__version__
else:
raise ValueError("Invalid Qt lib '%s'" % QT_LIB)
if QT_LIB in [PYQT6, PYSIDE6]:
# We're using Qt6 which has a different structure so we're going to use a shim to
# recreate the Qt5 structure
if not isinstance(QtOpenGLWidgets, FailedImport):
QtWidgets.QOpenGLWidget = QtOpenGLWidgets.QOpenGLWidget
# PySide6 incorrectly placed QFileSystemModel inside QtWidgets
if QT_LIB == PYSIDE6 and hasattr(QtWidgets, 'QFileSystemModel'):
module = getattr(QtWidgets, "QFileSystemModel")
setattr(QtGui, "QFileSystemModel", module)
else:
# Shim Qt5 namespace to match Qt6
module_whitelist = [
"QAction",
"QActionGroup",
"QFileSystemModel",
"QShortcut",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
]
for module in module_whitelist:
attr = getattr(QtWidgets, module)
setattr(QtGui, module, attr)
# Common to PySide2 and PySide6
if QT_LIB in [PYSIDE2, PYSIDE6]:
QtVersion = QtCore.__version__
QtVersionInfo = QtCore.__version_info__
loadUiType = _loadUiType
isQObjectAlive = shiboken.isValid
compat.wrapinstance = shiboken.wrapInstance
compat.unwrapinstance = lambda x : shiboken.getCppPointer(x)[0]
compat.voidptr = shiboken.VoidPtr
# Common to PyQt5 and PyQt6
if QT_LIB in [PYQT5, PYQT6]:
QtVersion = QtCore.QT_VERSION_STR
QtVersionInfo = tuple((QtCore.QT_VERSION >> i) & 0xff for i in [16,8,0])
# PyQt, starting in v5.5, calls qAbort when an exception is raised inside
# a slot. To maintain backward compatibility (and sanity for interactive
# users), we install a global exception hook to override this behavior.
if sys.excepthook == sys.__excepthook__:
sys_excepthook = sys.excepthook
def pyqt_qabort_override(*args, **kwds):
return sys_excepthook(*args, **kwds)
sys.excepthook = pyqt_qabort_override
def isQObjectAlive(obj):
return not sip.isdeleted(obj)
loadUiType = uic.loadUiType
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
compat.wrapinstance = sip.wrapinstance
compat.unwrapinstance = sip.unwrapinstance
compat.voidptr = sip.voidptr
from . import internals
App = QtWidgets.QApplication
# subclassing QApplication causes segfaults on PySide{2, 6} / Python 3.8.7+
QAPP = None
_pgAppInitialized = False
def mkQApp(name=None):
    """
    Creates new QApplication or returns current instance if existing.

    ============== ========================================================
    **Arguments:**
    name           (str) Application name, passed to Qt
    ============== ========================================================
    """
    # Module-level singletons: the QApplication instance and a flag ensuring
    # the dark-mode detection below runs only once per process.
    global QAPP
    global _pgAppInitialized

    QAPP = QtWidgets.QApplication.instance()
    if QAPP is None:
        # We do not have an already instantiated QApplication
        # let's add some sane defaults
        # enable hidpi handling for Qt5
        if QtVersionInfo[0] == 5:
            QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
            QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
            QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
                QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
            )
        # sys.argv can be empty (e.g. embedded interpreters); Qt needs argv[0].
        QAPP = QtWidgets.QApplication(sys.argv or ["pyqtgraph"])
        if QtVersionInfo[0] != 5:
            # issues with dark mode + windows + qt5
            QAPP.setStyle("fusion")

        # set the application icon
        # python 3.9 won't take "pyqtgraph.icons.peegee" directly
        traverse_path = resources.files("pyqtgraph.icons")
        peegee_traverse_path = traverse_path.joinpath("peegee")
        # as_file requires I feed in a file from the directory...
        with resources.as_file(
            peegee_traverse_path.joinpath("peegee.svg")
        ) as path:
            # need the parent directory, not the filepath
            icon_path = path.parent
        applicationIcon = QtGui.QIcon()
        applicationIcon.addFile(
            os.fsdecode(icon_path / "peegee.svg"),
        )
        # Provide raster fallbacks at several sizes alongside the SVG.
        for sz in [128, 256, 512]:
            pathname = os.fsdecode(icon_path / f"peegee_{sz}px.png")
            applicationIcon.addFile(pathname, QtCore.QSize(sz, sz))
        # handles the icon showing up on the windows taskbar
        if platform.system() == 'Windows':
            import ctypes
            my_app_id = "pyqtgraph.Qt.mkQApp"
            ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(my_app_id)
        QAPP.setWindowIcon(applicationIcon)

    if not _pgAppInitialized:
        _pgAppInitialized = True
        # determine if dark mode
        try:
            # this only works in Qt 6.5+
            darkMode = QAPP.styleHints().colorScheme() == QtCore.Qt.ColorScheme.Dark
            QAPP.styleHints().colorSchemeChanged.connect(_onColorSchemeChange)
        except AttributeError:
            # Older Qt: heuristic — text lighter than the window background
            # implies a dark theme.
            palette = QAPP.palette()
            windowTextLightness = palette.color(QtGui.QPalette.ColorRole.WindowText).lightness()
            windowLightness = palette.color(QtGui.QPalette.ColorRole.Window).lightness()
            darkMode = windowTextLightness > windowLightness
            QAPP.paletteChanged.connect(_onPaletteChange)
        QAPP.setProperty("darkMode", darkMode)

    if name is not None:
        QAPP.setApplicationName(name)
    return QAPP
def _onPaletteChange(palette):
    # Attempt to keep darkMode attribute up to date
    # QEvent.Type.PaletteChanged/ApplicationPaletteChanged will be emitted after
    # paletteChanged.emit()!
    # Using API deprecated in Qt 6.0
    app = mkQApp()
    # Heuristic: text lighter than the window background implies a dark theme.
    windowTextLightness = palette.color(QtGui.QPalette.ColorRole.WindowText).lightness()
    windowLightness = palette.color(QtGui.QPalette.ColorRole.Window).lightness()
    darkMode = windowTextLightness > windowLightness
    app.setProperty('darkMode', darkMode)
def _onColorSchemeChange(colorScheme):
    # Attempt to keep darkMode attribute up to date
    # QEvent.Type.PaletteChanged/ApplicationPaletteChanged will be emitted before
    # QStyleHint().colorSchemeChanged.emit()!
    # Uses Qt 6.5+ API
    app = mkQApp()
    # Qt reports the scheme directly here; no palette heuristics needed.
    darkMode = colorScheme == QtCore.Qt.ColorScheme.Dark
    app.setProperty('darkMode', darkMode)
# exec() is used within _loadUiType, so we define as exec_() here and rename in pg namespace
def exec_():
    # Run the application's event loop; supports both the Qt6 exec() name and
    # the legacy exec_() found in older bindings.
    app = mkQApp()
    return app.exec() if hasattr(app, 'exec') else app.exec_()
|
FailedImport
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/assertsql.py
|
{
"start": 11677,
"end": 12134
}
|
class ____(AssertRule):
    """Assert rule verifying that exactly ``count`` statements were executed."""

    def __init__(self, count):
        self.count = count
        self._statement_count = 0

    def process_statement(self, execute_observed):
        # Just tally; the assertion happens once execution is finished.
        self._statement_count += 1

    def no_more_statements(self):
        observed = self._statement_count
        if observed == self.count:
            return
        assert False, "desired statement count %d does not match %d" % (
            self.count,
            observed,
        )
|
CountStatements
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.