language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles06.py | {
"start": 380,
"end": 4363
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for border colour styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format(
{
"left": 1,
"right": 1,
"top": 1,
"bottom": 1,
"diag_border": 1,
"diag_type": 3,
"left_color": "red",
"right_color": "red",
"top_color": "red",
"bottom_color": "red",
"diag_color": "red",
}
)
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="2">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<border diagonalUp="1" diagonalDown="1">
<left style="thin">
<color rgb="FFFF0000"/>
</left>
<right style="thin">
<color rgb="FFFF0000"/>
</right>
<top style="thin">
<color rgb="FFFF0000"/>
</top>
<bottom style="thin">
<color rgb="FFFF0000"/>
</bottom>
<diagonal style="thin">
<color rgb="FFFF0000"/>
</diagonal>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="2">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="1" xfId="0" applyBorder="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | python__mypy | mypy/test/testsolve.py | {
"start": 413,
"end": 10069
} | class ____(Suite):
def setUp(self) -> None:
self.fx = TypeFixture()
def test_empty_input(self) -> None:
self.assert_solve([], [], [])
def test_simple_supertype_constraints(self) -> None:
self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.a)], [self.fx.a])
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.a), self.supc(self.fx.t, self.fx.b)],
[self.fx.a],
)
def test_simple_subtype_constraints(self) -> None:
self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.a)], [self.fx.a])
self.assert_solve(
[self.fx.t],
[self.subc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)],
[self.fx.b],
)
def test_both_kinds_of_constraints(self) -> None:
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.a)],
[self.fx.b],
)
def test_unsatisfiable_constraints(self) -> None:
# The constraints are impossible to satisfy.
self.assert_solve(
[self.fx.t], [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)], [None]
)
def test_exactly_specified_result(self) -> None:
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.b)],
[self.fx.b],
)
def test_multiple_variables(self) -> None:
self.assert_solve(
[self.fx.t, self.fx.s],
[
self.supc(self.fx.t, self.fx.b),
self.supc(self.fx.s, self.fx.c),
self.subc(self.fx.t, self.fx.a),
],
[self.fx.b, self.fx.c],
)
def test_no_constraints_for_var(self) -> None:
self.assert_solve([self.fx.t], [], [self.fx.a_uninhabited])
self.assert_solve(
[self.fx.t, self.fx.s], [], [self.fx.a_uninhabited, self.fx.a_uninhabited]
)
self.assert_solve(
[self.fx.t, self.fx.s],
[self.supc(self.fx.s, self.fx.a)],
[self.fx.a_uninhabited, self.fx.a],
)
def test_simple_constraints_with_dynamic_type(self) -> None:
self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.anyt)], [self.fx.anyt])
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.anyt)],
[self.fx.anyt],
)
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.a)],
[self.fx.anyt],
)
self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.anyt)], [self.fx.anyt])
self.assert_solve(
[self.fx.t],
[self.subc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.anyt)],
[self.fx.anyt],
)
# self.assert_solve([self.fx.t],
# [self.subc(self.fx.t, self.fx.anyt),
# self.subc(self.fx.t, self.fx.a)],
# [self.fx.anyt])
# TODO: figure out what this should be after changes to meet(any, X)
def test_both_normal_and_any_types_in_results(self) -> None:
# If one of the bounds is any, we promote the other bound to
# any as well, since otherwise the type range does not make sense.
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.anyt)],
[self.fx.anyt],
)
self.assert_solve(
[self.fx.t],
[self.supc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.a)],
[self.fx.anyt],
)
def test_poly_no_constraints(self) -> None:
self.assert_solve(
[self.fx.t, self.fx.u],
[],
[self.fx.a_uninhabited, self.fx.a_uninhabited],
allow_polymorphic=True,
)
def test_poly_trivial_free(self) -> None:
self.assert_solve(
[self.fx.t, self.fx.u],
[self.subc(self.fx.t, self.fx.a)],
[self.fx.a, self.fx.u],
[self.fx.u],
allow_polymorphic=True,
)
def test_poly_free_pair(self) -> None:
self.assert_solve(
[self.fx.t, self.fx.u],
[self.subc(self.fx.t, self.fx.u)],
[self.fx.t, self.fx.t],
[self.fx.t],
allow_polymorphic=True,
)
def test_poly_free_pair_with_bounds(self) -> None:
t_prime = self.fx.t.copy_modified(upper_bound=self.fx.b)
self.assert_solve(
[self.fx.t, self.fx.ub],
[self.subc(self.fx.t, self.fx.ub)],
[t_prime, t_prime],
[t_prime],
allow_polymorphic=True,
)
def test_poly_free_pair_with_bounds_uninhabited(self) -> None:
self.assert_solve(
[self.fx.ub, self.fx.uc],
[self.subc(self.fx.ub, self.fx.uc)],
[self.fx.a_uninhabited, self.fx.a_uninhabited],
[],
allow_polymorphic=True,
)
def test_poly_bounded_chain(self) -> None:
# B <: T <: U <: S <: A
self.assert_solve(
[self.fx.t, self.fx.u, self.fx.s],
[
self.supc(self.fx.t, self.fx.b),
self.subc(self.fx.t, self.fx.u),
self.subc(self.fx.u, self.fx.s),
self.subc(self.fx.s, self.fx.a),
],
[self.fx.b, self.fx.b, self.fx.b],
allow_polymorphic=True,
)
def test_poly_reverse_overlapping_chain(self) -> None:
# A :> T <: S :> B
self.assert_solve(
[self.fx.t, self.fx.s],
[
self.subc(self.fx.t, self.fx.s),
self.subc(self.fx.t, self.fx.a),
self.supc(self.fx.s, self.fx.b),
],
[self.fx.a, self.fx.a],
allow_polymorphic=True,
)
def test_poly_reverse_split_chain(self) -> None:
# B :> T <: S :> A
self.assert_solve(
[self.fx.t, self.fx.s],
[
self.subc(self.fx.t, self.fx.s),
self.subc(self.fx.t, self.fx.b),
self.supc(self.fx.s, self.fx.a),
],
[self.fx.b, self.fx.a],
allow_polymorphic=True,
)
def test_poly_unsolvable_chain(self) -> None:
# A <: T <: U <: S <: B
self.assert_solve(
[self.fx.t, self.fx.u, self.fx.s],
[
self.supc(self.fx.t, self.fx.a),
self.subc(self.fx.t, self.fx.u),
self.subc(self.fx.u, self.fx.s),
self.subc(self.fx.s, self.fx.b),
],
[None, None, None],
allow_polymorphic=True,
)
def test_simple_chain_closure(self) -> None:
self.assert_transitive_closure(
[self.fx.t.id, self.fx.s.id],
[
self.supc(self.fx.t, self.fx.b),
self.subc(self.fx.t, self.fx.s),
self.subc(self.fx.s, self.fx.a),
],
{(self.fx.t.id, self.fx.s.id)},
{self.fx.t.id: {self.fx.b}, self.fx.s.id: {self.fx.b}},
{self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.a}},
)
def test_reverse_chain_closure(self) -> None:
self.assert_transitive_closure(
[self.fx.t.id, self.fx.s.id],
[
self.subc(self.fx.t, self.fx.s),
self.subc(self.fx.t, self.fx.a),
self.supc(self.fx.s, self.fx.b),
],
{(self.fx.t.id, self.fx.s.id)},
{self.fx.t.id: set(), self.fx.s.id: {self.fx.b}},
{self.fx.t.id: {self.fx.a}, self.fx.s.id: set()},
)
def test_secondary_constraint_closure(self) -> None:
self.assert_transitive_closure(
[self.fx.t.id, self.fx.s.id],
[self.supc(self.fx.s, self.fx.gt), self.subc(self.fx.s, self.fx.ga)],
set(),
{self.fx.t.id: set(), self.fx.s.id: {self.fx.gt}},
{self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.ga}},
)
def assert_solve(
self,
vars: list[TypeVarLikeType],
constraints: list[Constraint],
results: list[None | Type],
free_vars: list[TypeVarLikeType] | None = None,
allow_polymorphic: bool = False,
) -> None:
if free_vars is None:
free_vars = []
actual, actual_free = solve_constraints(
vars, constraints, allow_polymorphic=allow_polymorphic
)
assert_equal(actual, results)
assert_equal(actual_free, free_vars)
def assert_transitive_closure(
self,
vars: list[TypeVarId],
constraints: list[Constraint],
graph: Graph,
lowers: Bounds,
uppers: Bounds,
) -> None:
actual_graph, actual_lowers, actual_uppers = transitive_closure(vars, constraints)
# Add trivial elements.
for v in vars:
graph.add((v, v))
assert_equal(actual_graph, graph)
assert_equal(dict(actual_lowers), lowers)
assert_equal(dict(actual_uppers), uppers)
def supc(self, type_var: TypeVarType, bound: Type) -> Constraint:
return Constraint(type_var, SUPERTYPE_OF, bound)
def subc(self, type_var: TypeVarType, bound: Type) -> Constraint:
return Constraint(type_var, SUBTYPE_OF, bound)
| SolveSuite |
python | realpython__materials | python-getter-setter/employee3.py | {
"start": 28,
"end": 704
} | class ____:
def __init__(self, name, birth_date, start_date):
self.name = name
self.birth_date = birth_date
self.start_date = start_date
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value.upper()
@property
def birth_date(self):
return self._birth_date
@birth_date.setter
def birth_date(self, value):
self._birth_date = date.fromisoformat(value)
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = date.fromisoformat(value)
| Employee |
python | huggingface__transformers | tests/models/beit/test_image_processing_beit.py | {
"start": 3604,
"end": 13761
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = BeitImageProcessor if is_vision_available() else None
fast_image_processing_class = BeitImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = BeitImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_reduce_labels"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
self.assertEqual(image_processor.do_reduce_labels, False)
image_processor = image_processing_class.from_dict(
self.image_processor_dict, size=42, crop_size=84, do_reduce_labels=True
)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
self.assertEqual(image_processor.do_reduce_labels, True)
def test_call_segmentation_maps(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
maps = []
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
image, segmentation_map = prepare_semantic_single_inputs()
encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
images, segmentation_maps = prepare_semantic_batch_inputs()
encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_reduce_labels(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
image, map = prepare_semantic_single_inputs()
encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 150)
image_processing.do_reduce_labels = True
encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
image_encoding_slow.labels.float(), image_encoding_fast.labels.float()
)
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(encoding_slow.labels.float(), encoding_fast.labels.float())
| BeitImageProcessingTest |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6096,
"end": 6152
} | class ____(ResampleReduction):
how = "max"
| ResampleMax |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/unsupervised_learning/fp_growth.py | {
"start": 380,
"end": 7664
} | class ____():
"""A method for determining frequent itemsets in a transactional database.
This is done by building a so called FP Growth tree, which can then be mined
to collect the frequent itemsets. More effective than Apriori for large transactional
databases.
Parameters:
-----------
min_sup: float
The minimum fraction of transactions an itemets needs to
occur in to be deemed frequent
"""
def __init__(self, min_sup=0.3):
self.min_sup = min_sup
# The root of the initial FP Growth Tree
self.tree_root = None
# Prefixes of itemsets in the FP Growth Tree
self.prefixes = {}
self.frequent_itemsets = []
# Count the number of transactions that contains item.
def _calculate_support(self, item, transactions):
count = 0
for transaction in transactions:
if item in transaction:
count += 1
support = count
return support
def _get_frequent_items(self, transactions):
""" Returns a set of frequent items. An item is determined to
be frequent if there are atleast min_sup transactions that contains
it. """
# Get all unique items in the transactions
unique_items = set(
item for transaction in transactions for item in transaction)
items = []
for item in unique_items:
sup = self._calculate_support(item, transactions)
if sup >= self.min_sup:
items.append([item, sup])
# Sort by support - Highest to lowest
items.sort(key=lambda item: item[1], reverse=True)
frequent_items = [[el[0]] for el in items]
# Only return the items
return frequent_items
def _insert_tree(self, node, children):
""" Recursive method which adds nodes to the tree. """
if not children:
return
# Create new node as the first item in children list
child_item = children[0]
child = FPTreeNode(item=child_item)
# If parent already contains item => increase the support
if child_item in node.children:
node.children[child.item].support += 1
else:
node.children[child.item] = child
# Execute _insert_tree on the rest of the children list
# from the new node
self._insert_tree(node.children[child.item], children[1:])
def _construct_tree(self, transactions, frequent_items=None):
if not frequent_items:
# Get frequent items sorted by support
frequent_items = self._get_frequent_items(transactions)
unique_frequent_items = list(
set(item for itemset in frequent_items for item in itemset))
# Construct the root of the FP Growth tree
root = FPTreeNode()
for transaction in transactions:
# Remove items that are not frequent according to
# unique_frequent_items
transaction = [item for item in transaction if item in unique_frequent_items]
transaction.sort(key=lambda item: frequent_items.index([item]))
self._insert_tree(root, transaction)
return root
def print_tree(self, node=None, indent_times=0):
""" Recursive method which prints the FP Growth Tree """
if not node:
node = self.tree_root
indent = " " * indent_times
print ("%s%s:%s" % (indent, node.item, node.support))
for child_key in node.children:
child = node.children[child_key]
self.print_tree(child, indent_times + 1)
def _is_prefix(self, itemset, node):
""" Makes sure that the first item in itemset is a child of node
and that every following item in itemset is reachable via that path """
for item in itemset:
if not item in node.children:
return False
node = node.children[item]
return True
def _determine_prefixes(self, itemset, node, prefixes=None):
""" Recursive method that adds prefixes to the itemset by traversing the
FP Growth Tree"""
if not prefixes:
prefixes = []
# If the current node is a prefix to the itemset
# add the current prefixes value as prefix to the itemset
if self._is_prefix(itemset, node):
itemset_key = self._get_itemset_key(itemset)
if not itemset_key in self.prefixes:
self.prefixes[itemset_key] = []
self.prefixes[itemset_key] += [{"prefix": prefixes, "support": node.children[itemset[0]].support}]
for child_key in node.children:
child = node.children[child_key]
# Recursive call with child as new node. Add the child item as potential
# prefix.
self._determine_prefixes(itemset, child, prefixes + [child.item])
def _get_itemset_key(self, itemset):
""" Determines the look of the hashmap key for self.prefixes
List of more strings than one gets joined by '-' """
if len(itemset) > 1:
itemset_key = "-".join(itemset)
else:
itemset_key = str(itemset[0])
return itemset_key
def _determine_frequent_itemsets(self, conditional_database, suffix):
# Calculate new frequent items from the conditional database
# of suffix
frequent_items = self._get_frequent_items(conditional_database)
cond_tree = None
if suffix:
cond_tree = self._construct_tree(conditional_database, frequent_items)
# Output new frequent itemset as the suffix added to the frequent
# items
self.frequent_itemsets += [el + suffix for el in frequent_items]
# Find larger frequent itemset by finding prefixes
# of the frequent items in the FP Growth Tree for the conditional
# database.
self.prefixes = {}
for itemset in frequent_items:
# If no suffix (first run)
if not cond_tree:
cond_tree = self.tree_root
# Determine prefixes to itemset
self._determine_prefixes(itemset, cond_tree)
conditional_database = []
itemset_key = self._get_itemset_key(itemset)
# Build new conditional database
if itemset_key in self.prefixes:
for el in self.prefixes[itemset_key]:
# If support = 4 => add 4 of the corresponding prefix set
for _ in range(el["support"]):
conditional_database.append(el["prefix"])
# Create new suffix
new_suffix = itemset + suffix if suffix else itemset
self._determine_frequent_itemsets(conditional_database, suffix=new_suffix)
def find_frequent_itemsets(self, transactions, suffix=None, show_tree=False):
self.transactions = transactions
# Build the FP Growth Tree
self.tree_root = self._construct_tree(transactions)
if show_tree:
print ("FP-Growth Tree:")
self.print_tree(self.tree_root)
self._determine_frequent_itemsets(transactions, suffix=None)
return self.frequent_itemsets
| FPGrowth |
python | scikit-learn__scikit-learn | sklearn/base.py | {
"start": 37928,
"end": 40992
} | class ____:
"""Mixin class for all outlier detection estimators in scikit-learn.
This mixin defines the following functionality:
- set estimator type to `"outlier_detector"` through the `estimator_type` tag;
- `fit_predict` method that default to `fit` and `predict`.
Examples
--------
>>> import numpy as np
>>> from sklearn.base import BaseEstimator, OutlierMixin
>>> class MyEstimator(OutlierMixin):
... def fit(self, X, y=None):
... self.is_fitted_ = True
... return self
... def predict(self, X):
... return np.ones(shape=len(X))
>>> estimator = MyEstimator()
>>> X = np.array([[1, 2], [2, 3], [3, 4]])
>>> estimator.fit_predict(X)
array([1., 1., 1.])
"""
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.estimator_type = "outlier_detector"
return tags
def fit_predict(self, X, y=None, **kwargs):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
"""
# we do not route parameters here, since consumers don't route. But
# since it's possible for a `predict` method to also consume
# metadata, we check if that's the case, and we raise a warning telling
# users that they should implement a custom `fit_predict` method
# to forward metadata to `predict` as well.
#
# For that, we calculate routing and check if anything would be routed
# to `predict` if we were to route them.
if _routing_enabled():
transform_params = self.get_metadata_routing().consumes(
method="predict", params=kwargs.keys()
)
if transform_params:
warnings.warn(
(
f"This object ({self.__class__.__name__}) has a `predict` "
"method which consumes metadata, but `fit_predict` does not "
"forward metadata to `predict`. Please implement a custom "
"`fit_predict` method to forward metadata to `predict` as well."
"Alternatively, you can explicitly do `set_predict_request`"
"and set all values to `False` to disable metadata routed to "
"`predict`, if that's an option."
),
UserWarning,
)
# override for transductive outlier detectors like LocalOulierFactor
return self.fit(X, **kwargs).predict(X)
| OutlierMixin |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_standalone_command.py | {
"start": 1131,
"end": 1806
} | class ____:
@pytest.mark.parametrize(
"conf_executor_name",
[LOCAL_EXECUTOR, CELERY_EXECUTOR, KUBERNETES_EXECUTOR],
)
def test_calculate_env(self, conf_executor_name):
"""Should always force a local executor compatible with the db."""
with mock.patch.dict(
"os.environ",
{
"AIRFLOW__CORE__EXECUTOR": conf_executor_name,
},
):
reload(executor_loader)
env = StandaloneCommand().calculate_env()
# all non local executors will fall back to localesecutor
assert env["AIRFLOW__CORE__EXECUTOR"] == LOCAL_EXECUTOR
| TestStandaloneCommand |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 23972,
"end": 24846
} | class ____(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
| SVDHermitianCases |
python | bokeh__bokeh | src/bokeh/application/application.py | {
"start": 9376,
"end": 11511
} | class ____(metaclass=ABCMeta):
''' A harness for server-specific information and tasks related to
Bokeh sessions.
*This base class is probably not of interest to general users.*
'''
_server_context: ServerContext
_id: ID
def __init__(self, server_context: ServerContext, session_id: ID) -> None:
'''
'''
self._server_context = server_context
self._id = session_id
# Properties --------------------------------------------------------------
@property
@abstractmethod
def destroyed(self) -> bool:
''' If ``True``, the session has been discarded and cannot be used.
A new session with the same ID could be created later but this instance
will not come back to life.
'''
pass
@property
def id(self) -> ID:
''' The unique ID for the session associated with this context.
'''
return self._id
@property
def server_context(self) -> ServerContext:
''' The server context for this session context
'''
return self._server_context
# Public methods ----------------------------------------------------------
@abstractmethod
def with_locked_document(self, func: Callable[[Document], Awaitable[None]]) -> Awaitable[None]:
''' Runs a function with the document lock held, passing the
document to the function.
*Subclasses must implement this method.*
Args:
func (callable): function that takes a single parameter (the Document)
and returns ``None`` or a ``Future``
Returns:
a ``Future`` containing the result of the function
'''
pass
SessionDestroyedCallback = Callable[[SessionContext], None]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| SessionContext |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_lambda_function.py | {
"start": 1111,
"end": 3594
} | class ____:
def test_init(self):
op = LambdaFunctionStateSensor(
task_id="task_test",
function_name=FUNCTION_NAME,
aws_conn_id="aws_conn_test",
region_name="foo-bar-1",
verify="/spam/egg.pem",
botocore_config={"baz": "qux"},
)
assert op.function_name == FUNCTION_NAME
assert op.aws_conn_id == "aws_conn_test"
assert op.region_name == "foo-bar-1"
assert op.verify == "/spam/egg.pem"
assert op.botocore_config == {"baz": "qux"}
@pytest.mark.parametrize(
("get_function_output", "expect_failure", "expected"),
[
pytest.param(
{"Configuration": {"State": "Active"}},
False,
True,
id="Active state",
),
pytest.param(
{"Configuration": {"State": "Pending"}},
False,
False,
id="Pending state",
),
pytest.param(
{"Configuration": {"State": "Failed"}},
True,
None,
id="Failed state",
),
],
)
def test_poke(self, get_function_output, expect_failure, expected):
with mock.patch.object(LambdaHook, "conn") as mock_conn:
mock_conn.get_function.return_value = get_function_output
sensor = LambdaFunctionStateSensor(
task_id="test_sensor",
function_name=FUNCTION_NAME,
)
if expect_failure:
with pytest.raises(AirflowException):
sensor.poke({})
else:
result = sensor.poke({})
assert result == expected
mock_conn.get_function.assert_called_once_with(
FunctionName=FUNCTION_NAME,
)
def test_fail_poke(self):
sensor = LambdaFunctionStateSensor(
task_id="test_sensor",
function_name=FUNCTION_NAME,
)
message = "Lambda function state sensor failed because the Lambda is in a failed state"
with mock.patch("airflow.providers.amazon.aws.hooks.lambda_function.LambdaHook.conn") as conn:
conn.get_function.return_value = {"Configuration": {"State": "Failed"}}
with pytest.raises(AirflowException, match=message):
sensor.poke(context={})
| TestLambdaFunctionStateSensor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 552187,
"end": 552638
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeclineTopicSuggestion"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "topic")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
topic = sgqlc.types.Field("Topic", graphql_name="topic")
"""The declined topic."""
| DeclineTopicSuggestionPayload |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/distributions.py | {
"start": 1252,
"end": 1488
} | class ____(DistInstance):
@abc.abstractmethod
def all_log_prob(self) -> torch.Tensor:
"""
Returns the log probabilities of all actions represented by this distribution.
"""
pass
| DiscreteDistInstance |
python | walkccc__LeetCode | solutions/494. Target Sum/494.py | {
"start": 0,
"end": 471
} | class ____:
def findTargetSumWays(self, nums: list[int], target: int) -> int:
summ = sum(nums)
if summ < abs(target) or (summ + target) % 2 == 1:
return 0
def knapsack(target: int) -> int:
# dp[i] := the number of ways to sum to i by nums so far
dp = [1] + [0] * summ
for num in nums:
for j in range(summ, num - 1, -1):
dp[j] += dp[j - num]
return dp[target]
return knapsack((summ + target) // 2)
| Solution |
python | pytorch__pytorch | torch/_higher_order_ops/hints_wrap.py | {
"start": 472,
"end": 4775
} | class ____(HigherOrderOperator):
def __init__(self):
super().__init__("hints_wrapper")
def __call__(self, body_fn, args, kwargs, hints):
r"""
Call implementation of hints_wrapper
Args:
body_fn (Callable): A callable function that is within the scope
that is being traced.
args (Tuple of torch.Tensor/int/float/bool): A tuple of inputs to
body_fn.
kwargs (dict): Keyword argument to the body_fn.
hints (dict): A dict of context hints which could be passed to
backend compiler.
"""
if not isinstance(args, tuple):
args = tuple(args)
if not all(isinstance(t, (torch.Tensor, int, float, bool)) for t in args):
raise RuntimeError(
f"args must be a tuple of tensors, ints, floats, or bools, got {args}"
)
if not isinstance(kwargs, dict):
raise RuntimeError(f"kwargs must be a dict, got {type(kwargs)}")
if len(kwargs) > 0:
raise RuntimeError(
f"kwargs except for hints are not supported, got {kwargs}"
)
if not isinstance(hints, dict):
raise RuntimeError(f"hints must be a dict, got {type(hints)}")
for k, v in hints.items():
if not isinstance(k, str):
raise RuntimeError(f"hints key must be a str, got {k}.")
if not isinstance(v, (int, float, bool, str)):
raise RuntimeError(
"hints must be a dict containing int, float, bool or str "
f"value, got value {v} for key {k}."
)
return super().__call__(body_fn, args, kwargs, hints)
hints_wrapper = HintsWrapper()
@hints_wrapper.py_impl(DispatchKey.CompositeExplicitAutograd)
def hints_wrapper_dense(body_fn, args, kwargs, hints):
return body_fn(*args, **kwargs)
hints_wrapper.py_autograd_impl(
autograd_not_implemented(hints_wrapper, deferred_error=True)
)
@hints_wrapper.py_impl(FakeTensorMode)
def hints_wrapper_fake_tensor_mode(mode, body_func, args, kwargs, hints):
flat_args = pytree.tree_leaves(args)
with mode:
return body_func(*flat_args, **kwargs)
@hints_wrapper.py_functionalize_impl
def hints_wrapper_functionalize(ctx, body_fn, args, kwargs, hints):
from torch._higher_order_ops.utils import _check_alias_and_mutation
unwrapped_args = ctx.unwrap_tensors(args)
unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
unwrapped_hints = ctx.unwrap_tensors(hints)
with ctx.redispatch_to_next():
functional_body_fn = ctx.functionalize(body_fn)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
_check_alias_and_mutation(
body_fn, unwrapped_args, "hints_wrapper", pre_dispatch
)
outputs = hints_wrapper(
functional_body_fn,
unwrapped_args,
unwrapped_kwargs,
unwrapped_hints,
)
return ctx.wrap_tensors(outputs)
def trace_hints_wrapper(proxy_mode, hints_wrapper, body_fn, args, kwargs, hints):
flat_args = tuple(pytree.tree_leaves(args))
body_graph = reenter_make_fx(body_fn)(*flat_args, **kwargs)
_, body_graph_name = unique_graph_id(proxy_mode, prefix="hints_wrapper_body_graph")
proxy_mode.tracer.root.register_module(body_graph_name, body_graph)
new_args: tuple = (body_graph, flat_args, {})
# merge hints into kwargs
new_kwargs = {}
new_kwargs["hints"] = hints
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, new_args)
proxy_kwargs = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, new_kwargs)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", hints_wrapper, proxy_args, proxy_kwargs, name="hints_wrapper"
)
out = body_fn(*flat_args, **kwargs)
return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
@hints_wrapper.py_impl(ProxyTorchDispatchMode)
def inner(proxy_mode, body_fn, args, kwargs, hints):
if proxy_mode.enable_tracing:
return trace_hints_wrapper(
proxy_mode, hints_wrapper, body_fn, args, kwargs, hints
)
else:
return hints_wrapper(body_fn, args, kwargs, hints)
| HintsWrapper |
python | python-pillow__Pillow | src/PIL/FtexImagePlugin.py | {
"start": 1961,
"end": 2020
} | class ____(IntEnum):
DXT1 = 0
UNCOMPRESSED = 1
| Format |
python | pikepdf__pikepdf | src/pikepdf/_xml.py | {
"start": 295,
"end": 917
} | class ____(_UnsafeXMLParser):
def __init__(self, *args: Any, **kwargs: Any):
# Prevent XXE attacks
# https://rules.sonarsource.com/python/type/Vulnerability/RSPEC-2755
kwargs['resolve_entities'] = False
kwargs['no_network'] = True
super().__init__(*args, **kwargs)
def parse_xml(source: AnyStr | IO[Any], recover: bool = False) -> _ElementTree:
"""Wrap lxml's parse to provide protection against XXE attacks."""
parser = _XMLParser(recover=recover, remove_pis=False)
return _parse(source, parser=parser)
__all__ = ['parse_xml', '_ElementTree', '_Element']
| _XMLParser |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qembedding_bag_lookups_test.py | {
"start": 5942,
"end": 10065
} | class ____(op_bench.TorchBenchmarkBase):
def init(
self,
num_embeddings: int,
embedding_dim: int,
num_offsets: int,
enable_per_sample_weights: bool,
include_last_offset: bool,
is_pruned_weights: bool,
use_32bit_indices: bool,
use_32bit_offsets: bool,
op_func,
):
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.num_offsets = num_offsets
self.enable_per_sample_weights = enable_per_sample_weights
self.include_last_offset = include_last_offset
self.max_segment_length = 20
self.num_lengths = np.random.randint(1, num_offsets + 1)
self.lengths = np.random.randint(
0, self.max_segment_length + 1, size=self.num_lengths
).astype(np.int32)
self.is_pruned_weights = is_pruned_weights
self.use_32bit_indices = use_32bit_indices
self.use_32bit_offsets = use_32bit_offsets
self.num_indices = np.sum(self.lengths)
self.offsets = lengths_to_offsets(self.lengths)
self.indices = torch.from_numpy(
np.random.randint(
low=0, high=num_embeddings, size=self.num_indices, dtype=np.int64
)
)
self.indices = self.indices.int() if self.use_32bit_indices else self.indices
self.offsets = self.offsets.int() if self.use_32bit_offsets else self.offsets
if include_last_offset:
self.offsets = torch.cat(
(self.offsets, torch.tensor([self.indices.size(0)], dtype=torch.long)),
0,
)
self.weights = torch.from_numpy(
(
np.random.random_sample((self.num_embeddings, self.embedding_dim)) + 1
).astype(np.float32)
)
self.indices = torch.from_numpy(
np.random.randint(
low=0, high=self.num_embeddings, size=self.num_indices, dtype=np.int64
)
)
self.prepack_func = torch.ops.quantized.embedding_bag_byte_prepack
self.prepacked_weights = self.prepack_func(self.weights)
self.per_sample_weights = (
torch.from_numpy(
np.random.uniform(low=0.01, high=0.5, size=[len(self.indices)]).astype(
np.float32
)
)
if self.enable_per_sample_weights
else None
)
self.compressed_indices = None
if self.is_pruned_weights:
(
self.prepacked_weights,
self.compressed_indices,
) = get_pruned_weights_and_mapping(self.prepacked_weights)
self.inputs = {
"prepacked_weights": self.prepacked_weights,
"indices": self.indices,
"offsets": self.offsets,
"mode": 0,
"per_sample_weights": self.per_sample_weights,
"include_last_offset": self.include_last_offset,
"is_pruned_weights": self.is_pruned_weights,
"compressed_indices": self.compressed_indices,
}
self.op_func = op_func
def forward(
self,
prepacked_weights,
indices,
offsets,
mode: int,
per_sample_weights: Optional[torch.Tensor],
include_last_offset: bool,
is_pruned_weights: bool,
compressed_indices: Optional[torch.Tensor],
):
return self.op_func(
prepacked_weights,
indices,
offsets,
mode=0,
per_sample_weights=per_sample_weights,
include_last_offset=self.include_last_offset,
pruned_weights=self.is_pruned_weights,
compressed_indices_mapping=self.compressed_indices,
)
op_bench.generate_pt_tests_from_op_list(
four_bit_rowwise_ops, full_configs, EmbedddingBag4BitRowwiseOffsetsTest
)
op_bench.generate_pt_tests_from_op_list(
byte_rowwise_ops, full_configs, EmbedddingBagByteRowwiseOffsetsTest
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| EmbedddingBagByteRowwiseOffsetsTest |
python | PrefectHQ__prefect | tests/_internal/compatibility/test_async_dispatch.py | {
"start": 2587,
"end": 3368
} | class ____:
def test_async_compatible_requires_async_implementation(self):
"""Verify we properly reject non-async implementations"""
def not_async() -> None:
pass
with pytest.raises(TypeError, match="async_impl must be an async function"):
@async_dispatch(not_async)
def my_function() -> None:
pass
def test_async_compatible_requires_implementation(self):
"""Verify we properly reject missing implementations"""
with pytest.raises(
TypeError,
match=r"async_dispatch\(\) missing 1 required positional argument: 'async_impl'",
):
@async_dispatch()
def my_function() -> None:
pass
| TestAsyncDispatchValidation |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/endpoints/pre_configured.py | {
"start": 3536,
"end": 5740
} | class ____(
AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint, ResourceEndpoint, RevocationEndpoint
):
"""An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""
def __init__(
self,
request_validator,
token_generator=None,
token_expires_in=None,
refresh_token_generator=None,
**kwargs,
):
"""Construct a new web application server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.auth_grant = AuthorizationCodeGrant(request_validator)
self.refresh_grant = RefreshTokenGrant(request_validator)
self.bearer = BearerToken(
request_validator, token_generator, token_expires_in, refresh_token_generator
)
AuthorizationEndpoint.__init__(
self,
default_response_type="code",
response_types={"code": self.auth_grant},
default_token_type=self.bearer,
)
TokenEndpoint.__init__(
self,
default_grant_type="authorization_code",
grant_types={
"authorization_code": self.auth_grant,
"refresh_token": self.refresh_grant,
},
default_token_type=self.bearer,
)
ResourceEndpoint.__init__(
self, default_token="Bearer", token_types={"Bearer": self.bearer}
)
RevocationEndpoint.__init__(self, request_validator)
IntrospectEndpoint.__init__(self, request_validator)
| WebApplicationServer |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_emr_serverless.py | {
"start": 49853,
"end": 52988
} | class ____:
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_stop(self, mock_conn: MagicMock, mock_get_waiter: MagicMock):
mock_get_waiter().wait.return_value = True
operator = EmrServerlessStopApplicationOperator(task_id=task_id, application_id="test")
operator.execute({})
mock_get_waiter().wait.assert_called_once()
mock_conn.stop_application.assert_called_once()
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_stop_no_wait(self, mock_conn: MagicMock, mock_get_waiter: MagicMock):
operator = EmrServerlessStopApplicationOperator(
task_id=task_id, application_id="test", wait_for_completion=False
)
operator.execute({})
mock_get_waiter().wait.assert_not_called()
mock_conn.stop_application.assert_called_once()
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
@mock.patch.object(EmrServerlessHook, "cancel_running_jobs")
def test_force_stop(self, mock_cancel_running_jobs, mock_conn, mock_get_waiter):
mock_cancel_running_jobs.return_value = 0
mock_conn.stop_application.return_value = {}
mock_get_waiter().wait.return_value = True
operator = EmrServerlessStopApplicationOperator(
task_id=task_id, application_id="test", force_stop=True
)
operator.execute({})
mock_cancel_running_jobs.assert_called_once()
mock_conn.stop_application.assert_called_once()
mock_get_waiter().wait.assert_called_once()
@mock.patch.object(EmrServerlessHook, "cancel_running_jobs")
def test_stop_application_deferrable_with_force_stop(self, mock_cancel_running_jobs, caplog):
mock_cancel_running_jobs.return_value = 2
operator = EmrServerlessStopApplicationOperator(
task_id=task_id, application_id="test", deferrable=True, force_stop=True
)
with pytest.raises(TaskDeferred):
operator.execute({})
assert "now waiting for the 2 cancelled job(s) to terminate" in caplog.messages
@mock.patch.object(EmrServerlessHook, "conn")
@mock.patch.object(EmrServerlessHook, "cancel_running_jobs")
def test_stop_application_deferrable_without_force_stop(
self, mock_cancel_running_jobs, mock_conn, caplog
):
mock_conn.stop_application.return_value = {}
mock_cancel_running_jobs.return_value = 0
operator = EmrServerlessStopApplicationOperator(
task_id=task_id, application_id="test", deferrable=True, force_stop=True
)
with pytest.raises(TaskDeferred):
operator.execute({})
assert "no running jobs found with application ID test" in caplog.messages
def test_template_fields(self):
operator = EmrServerlessStopApplicationOperator(
task_id=task_id, application_id="test", deferrable=True, force_stop=True
)
validate_template_fields(operator)
| TestEmrServerlessStopOperator |
python | RaRe-Technologies__gensim | gensim/models/fasttext.py | {
"start": 29172,
"end": 38583
} | class ____(utils.SaveLoad):
"""Obsolete class retained for backward-compatible load()s"""
def _pad_ones(m, new_len):
"""Pad array with additional entries filled with ones."""
if len(m) > new_len:
raise ValueError('the new number of rows %i must be greater than old %i' % (new_len, len(m)))
new_arr = np.ones(new_len, dtype=REAL)
new_arr[:len(m)] = m
return new_arr
def load_facebook_model(path, encoding='utf-8'):
"""Load the model from Facebook's native fasttext `.bin` output file.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed (i.e. end in `.bin.gz` or `.bin.bz2`).
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
model_file : str
Path to the FastText output files.
FastText outputs two model files - `/path/to/model.vec` and `/path/to/model.bin`
Expected value for this example: `/path/to/model` or `/path/to/model.bin`,
as Gensim requires only `.bin` file to the load entire fastText model.
encoding : str, optional
Specifies the file encoding.
Examples
--------
Load, infer, continue training:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fb_model = load_facebook_model(cap_path)
>>>
>>> 'landlord' in fb_model.wv.key_to_index # Word is out of vocabulary
False
>>> oov_term = fb_model.wv['landlord']
>>>
>>> 'landlady' in fb_model.wv.key_to_index # Word is in the vocabulary
True
>>> iv_term = fb_model.wv['landlady']
>>>
>>> new_sent = [['lord', 'of', 'the', 'rings'], ['lord', 'of', 'the', 'flies']]
>>> fb_model.build_vocab(new_sent, update=True)
>>> fb_model.train(sentences=new_sent, total_examples=len(new_sent), epochs=5)
Returns
-------
gensim.models.fasttext.FastText
The loaded model.
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_vectors` loads
the word embeddings only. Its faster, but does not enable you to continue
training.
"""
return _load_fasttext_format(path, encoding=encoding, full_model=True)
def load_facebook_vectors(path, encoding='utf-8'):
"""Load word embeddings from a model saved in Facebook's native fasttext `.bin` format.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
This function requires you to **provide the full path to the .bin file**.
It effectively ignores the `.vec` output file, since it is redundant.
This function uses the smart_open library to open the path.
The path may be on a remote host (e.g. HTTP, S3, etc).
It may also be gzip or bz2 compressed.
For details, see `<https://github.com/RaRe-Technologies/smart_open>`__.
Parameters
----------
path : str
The location of the model file.
encoding : str, optional
Specifies the file encoding.
Returns
-------
gensim.models.fasttext.FastTextKeyedVectors
The word embeddings.
Examples
--------
Load and infer:
>>> from gensim.test.utils import datapath
>>>
>>> cap_path = datapath("crime-and-punishment.bin")
>>> fbkv = load_facebook_vectors(cap_path)
>>>
>>> 'landlord' in fbkv.key_to_index # Word is out of vocabulary
False
>>> oov_vector = fbkv['landlord']
>>>
>>> 'landlady' in fbkv.key_to_index # Word is in the vocabulary
True
>>> iv_vector = fbkv['landlady']
See Also
--------
:func:`~gensim.models.fasttext.load_facebook_model` loads
the full model, not just word embeddings, and enables you to continue
model training.
"""
full_model = _load_fasttext_format(path, encoding=encoding, full_model=False)
return full_model.wv
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
def _check_model(m):
"""Model sanity checks. Run after everything has been completely initialized."""
if m.wv.vector_size != m.wv.vectors_ngrams.shape[1]:
raise ValueError(
'mismatch between vector size in model params (%s) and model vectors (%s)' % (
m.wv.vector_size, m.wv.vectors_ngrams,
)
)
if hasattr(m, 'syn1neg') and m.syn1neg is not None:
if m.wv.vector_size != m.syn1neg.shape[1]:
raise ValueError(
'mismatch between vector size in model params (%s) and trainables (%s)' % (
m.wv.vector_size, m.wv.vectors_ngrams,
)
)
if len(m.wv) != m.nwords:
raise ValueError(
'mismatch between final vocab size (%s words), and expected number of words (%s words)' % (
len(m.wv), m.nwords,
)
)
if len(m.wv) != m.vocab_size:
# expecting to log this warning only for pretrained french vector, wiki.fr
logger.warning(
"mismatch between final vocab size (%s words), and expected vocab size (%s words)",
len(m.wv), m.vocab_size,
)
def save_facebook_model(model, path, encoding="utf-8", lr_update_rate=100, word_ngrams=1):
"""Saves word embeddings to the Facebook's native fasttext `.bin` format.
Notes
------
Facebook provides both `.vec` and `.bin` files with their modules.
The former contains human-readable vectors.
The latter contains machine-readable vectors along with other model parameters.
**This function saves only the .bin file**.
Parameters
----------
model : gensim.models.fasttext.FastText
FastText model to be saved.
path : str
Output path and filename (including `.bin` extension)
encoding : str, optional
Specifies the file encoding. Defaults to utf-8.
lr_update_rate : int
This parameter is used by Facebook fasttext tool, unused by Gensim.
It defaults to Facebook fasttext default value `100`.
In very rare circumstances you might wish to fiddle with it.
word_ngrams : int
This parameter is used by Facebook fasttext tool, unused by Gensim.
It defaults to Facebook fasttext default value `1`.
In very rare circumstances you might wish to fiddle with it.
Returns
-------
None
"""
fb_fasttext_parameters = {"lr_update_rate": lr_update_rate, "word_ngrams": word_ngrams}
gensim.models._fasttext_bin.save(model, path, fb_fasttext_parameters, encoding)
| FastTextTrainables |
python | doocs__leetcode | solution/0900-0999/0907.Sum of Subarray Minimums/Solution.py | {
"start": 0,
"end": 668
} | class ____:
def sumSubarrayMins(self, arr: List[int]) -> int:
n = len(arr)
left = [-1] * n
right = [n] * n
stk = []
for i, v in enumerate(arr):
while stk and arr[stk[-1]] >= v:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and arr[stk[-1]] > arr[i]:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
mod = 10**9 + 7
return sum((i - left[i]) * (right[i] - i) * v for i, v in enumerate(arr)) % mod
| Solution |
python | django__django | django/template/base.py | {
"start": 4354,
"end": 9475
} | class ____:
def __init__(self, template_string, origin=None, name=None, engine=None):
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = str(template_string) # May be lazy.
self.nodelist = self.compile_nodelist()
def __repr__(self):
return '<%s template_string="%s...">' % (
self.__class__.__qualname__,
self.source[:20].replace("\n", ""),
)
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
is True and an exception occurs during parsing, the exception is
annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens,
self.engine.template_libraries,
self.engine.template_builtins,
self.origin,
)
try:
nodelist = parser.parse()
self.extra_data = parser.extra_data
return nodelist
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
if (
isinstance(e, TemplateSyntaxError)
and self.origin.name != UNKNOWN_SOURCE
and e.args
):
raw_message = e.args[0]
e.raw_error_message = raw_message
e.args = (f"Template: {self.origin.name}, {raw_message}", *e.args[1:])
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = self.source[upto:start]
during = self.source[start:end]
after = self.source[end:next]
source_lines.append((num, self.source[upto:next]))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = "(Could not get exception message)"
return {
"message": message,
"source_lines": source_lines[top:bottom],
"before": before,
"during": during,
"after": after,
"top": top,
"bottom": bottom,
"total": total,
"line": line,
"name": self.origin.name,
"start": start,
"end": end,
}
| Template |
python | Netflix__metaflow | metaflow/runner/nbdeploy.py | {
"start": 293,
"end": 4166
} | class ____(object):
"""
A wrapper over `Deployer` for deploying flows defined in a Jupyter
notebook cell.
Instantiate this class on the last line of a notebook cell where
a `flow` is defined. In contrast to `Deployer`, this class is not
meant to be used in a context manager.
```python
deployer = NBDeployer(FlowName)
ar = deployer.argo_workflows(name="madhur")
ar_obj = ar.create()
result = ar_obj.trigger(alpha=300)
print(result.status)
print(result.run)
result.terminate()
```
Parameters
----------
flow : FlowSpec
Flow defined in the same cell
show_output : bool, default True
Show the 'stdout' and 'stderr' to the console by default,
profile : str, optional, default None
Metaflow profile to use to deploy this run. If not specified, the default
profile is used (or the one already set using `METAFLOW_PROFILE`)
env : Dict[str, str], optional, default None
Additional environment variables to set. This overrides the
environment set for this process.
base_dir : str, optional, default None
The directory to run the subprocess in; if not specified, the current
working directory is used.
file_read_timeout : int, default 3600
The timeout until which we try to read the deployer attribute file (in seconds).
**kwargs : Any
Additional arguments that you would pass to `python myflow.py` i.e. options
listed in `python myflow.py --help`
"""
def __init__(
self,
flow,
show_output: bool = True,
profile: Optional[str] = None,
env: Optional[Dict] = None,
base_dir: Optional[str] = None,
file_read_timeout: int = 3600,
**kwargs,
):
try:
from IPython import get_ipython
ipython = get_ipython()
except ModuleNotFoundError as e:
raise NBDeployerInitializationError(
"'NBDeployer' requires an interactive Python environment "
"(such as Jupyter)"
) from e
self.cell = get_current_cell(ipython)
self.flow = flow
self.show_output = show_output
self.profile = profile
self.env = env
self.cwd = base_dir if base_dir is not None else os.getcwd()
self.file_read_timeout = file_read_timeout
self.top_level_kwargs = kwargs
self.env_vars = os.environ.copy()
self.env_vars.update(env or {})
# clears the Jupyter parent process ID environment variable
# prevents server from interfering with Metaflow
self.env_vars.update({"JPY_PARENT_PID": ""})
if self.profile:
self.env_vars["METAFLOW_PROFILE"] = self.profile
if not self.cell:
raise ValueError("Couldn't find a cell.")
self.tmp_flow_file = tempfile.NamedTemporaryFile(
prefix=self.flow.__name__,
suffix=".py",
mode="w",
dir=self.cwd,
delete=False,
)
self.tmp_flow_file.write(format_flowfile(self.cell))
self.tmp_flow_file.flush()
self.tmp_flow_file.close()
self.flow_file = self.tmp_flow_file.name
self.deployer = Deployer(
flow_file=self.flow_file,
show_output=self.show_output,
profile=self.profile,
env=self.env_vars,
cwd=self.cwd,
file_read_timeout=self.file_read_timeout,
**kwargs,
)
def __getattr__(self, name):
"""
Forward all attribute access to the underlying `Deployer` instance.
"""
return getattr(self.deployer, name)
def cleanup(self):
"""
Delete any temporary files created during execution.
"""
os.remove(self.flow_file)
| NBDeployer |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 5146,
"end": 5714
} | class ____(_ForkSafeThreadSyncObject):
def __init__(self) -> None:
super(ForkEvent, self).__init__(TrEvent)
def set(self) -> bool:
self._create()
return self._sync.set()
def clear(self) -> None:
if self._sync is None:
return None
self._create()
return self._sync.clear()
def is_set(self) -> bool:
self._create()
return self._sync.is_set()
def wait(self, *args: Any, **kwargs: Any) -> bool:
self._create()
return self._sync.wait(*args, **kwargs)
| ForkEvent |
python | pytorch__pytorch | torch/_inductor/codegen/triton.py | {
"start": 71445,
"end": 79143
} | class ____:
"""
Checks if the TMA API can be used for load / store triton operations.
"""
kernel: TritonKernel
dtype: torch.dtype
for_store: bool
force: bool
def __post_init__(self):
self.failed_debug_prefix = "Cannot use TMA descriptor for load / store since: "
# Also see Note: TMA API Restrictions for the below
def can_use_tma(
self,
) -> bool:
if self.force:
return True
if not (
V.graph.get_current_device_or_throw().type == "cuda"
and torch.cuda.get_device_capability()[0] >= 9
and config.triton.use_tensor_descriptor
and config.assume_aligned_inputs
and has_triton_stable_tma_api()
# For CUDA The base ptr needs to be aligned
):
log.debug(
(
"%s Requires triton>=3.4.0, a CUDA device with cc>=9.0 and"
" `use_tensor_descriptor` and `assume_aligned_inputs` options enabled"
),
self.failed_debug_prefix,
)
return False
# `no_x_dim` => XBLOCK=1, and for reductions this means only one element
# is to be stored . However the TMA API requires that
# the store will be 16 byte aligned, which is not attainable with a single
# element
if self.for_store and self.kernel.no_x_dim:
log.debug(
"%s stores with `no_x_dim` cannot load 16 bytes.",
self.failed_debug_prefix,
)
return False
return True
def are_block_parameters_compatible(
self,
block_params: BlockParameters,
) -> bool:
"""
Check if the block parameters are valid for TMA.
If force, we allow relying on symbolic hints equivalent
to what we check for Triton templates.
"""
if self.force:
strides = [
V.graph.sizevars.symbolic_hint(st) for st in block_params.strides
]
else:
strides = block_params.strides
# The TMA API requires that the innermost stride is 1
# and that the outer strides are 16 byte aligned
if not V.graph.sizevars.statically_known_equals(strides[-1], sympy.Integer(1)):
log.debug(
"%s TMA API requires innermost stride to be 1.",
self.failed_debug_prefix,
)
return False
element_size = self.dtype.itemsize
for stride in strides[:-1]:
if not V.graph.sizevars.statically_known_equals(
ModularIndexing(stride * element_size, 1, sympy.Integer(16)),
sympy.Integer(0),
):
log.debug(
"%s TMA API requires outer strides to be 16 byte aligned.",
self.failed_debug_prefix,
)
return False
# Now compute the minimum value of the block type that is used
# in the innermost block size that can guarantee that 16 bytes of data
# can be loaded / stored.
# Start with finding the innermost block type
innermost_block_shape = block_params.block_shape[-1]
innermost_block_type = None
innermost_block_symt = None
for block_type_str in innermost_block_shape.free_symbols:
for block_symt in TritonSymbols.block_types:
if symbol_is_type(block_type_str, block_symt):
innermost_block_type = block_type_str
innermost_block_symt = block_symt
break
assert innermost_block_type and innermost_block_symt, (
f"{innermost_block_shape} expr must contain a single block type from {TritonSymbols.block_types}"
)
# For persistent reductions, the reduction block sizes are fixed at compile time
if self.kernel.persistent_reduction and not self.for_store:
# For a discontiguous tensor, a 1D block will be split across several
# dimensions, e.g. R0_BLOCK:
# block_shape=[XBLOCK, ((R0_BLOCK + 31)//32), Min(1, ((R0_BLOCK + 31)//32)), Min(32, R0_BLOCK)]
# The persistent R0_BLOCK will be a power of 2 that is at least r0_numel So it
# should be guaranteed that Min(32, R0_BLOCK) * element_size >= 16
innermost_tree_prefix = prefix_str[innermost_block_symt]
tree_numel = None
for t in self.kernel.range_trees:
if t.is_reduction:
if t.prefix == innermost_tree_prefix:
tree_numel = t.numel
break
assert tree_numel is not None
persistent_rblock = self.kernel._get_persistent_RBLOCK(tree_numel)
innermost_block_bytes = (
innermost_block_shape.subs({innermost_block_type: persistent_rblock})
* element_size
)
if not V.graph.sizevars.statically_known_geq(
innermost_block_bytes, sympy.Integer(16)
):
log.debug(
"%s persistent reduction innermost block shape cannot load 16 bytes.",
self.failed_debug_prefix,
)
return False
else:
# E.g. if the innermost block shape is Min(2, XBLOCK)
# then the TMA API can only be used if the dtype has an 8 byte element
# size so that 16 bytes of data can be loaded in the innermost dimension
try:
min_block_size = next_power_of_2(
int(
sympy.nsolve(
innermost_block_shape * element_size - 16,
innermost_block_type,
1,
)
)
)
block_type_str = V.kernel.index_to_str(innermost_block_type)
# Check block sizes if the user has provided a fixed triton config
if self.kernel.fixed_config:
if min_block_size > self.kernel.fixed_config[block_type_str]:
log.debug(
"%s For block %s, fixed config block size %d is smaller "
"than the minimum required: %d",
self.failed_debug_prefix,
block_type_str,
self.kernel.fixed_config[block_type_str],
min_block_size,
)
return False
else:
# Update the minimum block sizes that are passed to triton
# heuristics
self.kernel.tma_min_block_sizes[block_type_str] = max(
min_block_size,
self.kernel.tma_min_block_sizes.get(block_type_str, 1),
)
except ValueError:
log.debug(
"%s innermost block shape cannot load 16 bytes.",
self.failed_debug_prefix,
)
return False
return True
def can_lift(self) -> bool:
"""
Can you lift the make_tensor_descriptor
call to the top of the kernel? This requires
being certain that all of the shape, stride,
and block_shape information is handled in arguments
or top level definitions.
Right now we assume this is always possible if you force TMA.
"""
return self.force
| TMACompatibilityChecker |
python | milvus-io__pymilvus | pymilvus/orm/role.py | {
"start": 718,
"end": 13437
} | class ____:
"""Role, can be granted privileges which are allowed to execute some objects' apis."""
def __init__(self, name: str, using: str = "default", **kwargs) -> None:
"""Constructs a role by name
:param name: role name.
:type name: str
"""
self._name = name
self._using = using
self._kwargs = kwargs
def _get_connection(self):
return connections._fetch_handler(self._using)
@property
def name(self):
return self._name
def create(self):
"""Create a role
It will success if the role isn't existed, otherwise fail.
:example:
>>> from pymilvus import connections, utility
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> role.create()
>>> roles = utility.list_roles()
>>> print(f"roles in Milvus: {roles}")
"""
return self._get_connection().create_role(self._name)
def drop(self):
"""Drop a role
It will success if the role is existed, otherwise fail.
:example:
>>> from pymilvus import connections, utility
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> role.drop()
>>> roles = utility.list_roles()
>>> print(f"roles in Milvus: {roles}")
"""
return self._get_connection().drop_role(self._name)
def add_user(self, username: str):
"""Add user to role
The user will get permissions that the role are allowed to perform operations.
:param username: user name.
:type username: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> role.add_user(username)
>>> users = role.get_users()
>>> print(f"users added to the role: {users}")
"""
return self._get_connection().add_user_to_role(username, self._name)
def remove_user(self, username: str):
"""Remove user from role
The user will remove permissions that the role are allowed to perform operations.
:param username: user name.
:type username: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> role.remove_user(username)
>>> users = role.get_users()
>>> print(f"users added to the role: {users}")
"""
return self._get_connection().remove_user_from_role(username, self._name)
def get_users(self):
"""Get all users who are added to the role.
:return a RoleInfo object which contains a RoleItem group
According to the RoleItem, you can get a list of usernames.
RoleInfo groups:
- UserItem: <role_name:admin>, <users:('root',)>
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> users = role.get_users()
>>> print(f"users added to the role: {users}")
"""
roles = self._get_connection().select_one_role(self._name, INCLUDE_USER_INFO)
if len(roles.groups) == 0:
return []
return roles.groups[0].users
def is_exist(self):
"""Check whether the role is existed.
:return a bool value
It will be True if the role is existed, otherwise False.
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(name=role_name)
>>> is_exist = role.is_exist()
>>> print(f"the role: {is_exist}")
"""
roles = self._get_connection().select_one_role(self._name, NOT_INCLUDE_USER_INFO)
return len(roles.groups) != 0
def grant(self, object: str, object_name: str, privilege: str, db_name: str = ""):
"""Grant a privilege for the role
:param object: object type.
:type object: str
:param object_name: identifies a specific object name.
:type object_name: str
:param privilege: privilege name.
:type privilege: str
:param db_name: db name.
:type db_name: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.grant("Collection", collection_name, "Insert")
"""
return self._get_connection().grant_privilege(
self._name, object, object_name, privilege, db_name
)
def revoke(self, object: str, object_name: str, privilege: str, db_name: str = ""):
"""Revoke a privilege for the role
Args:
object(str): object type.
object_name(str): identifies a specific object name.
privilege(str): privilege name.
db_name(str): db name.
Examples:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.revoke("Collection", collection_name, "Insert")
"""
return self._get_connection().revoke_privilege(
self._name, object, object_name, privilege, db_name
)
def grant_v2(self, privilege: str, collection_name: str, db_name: Optional[str] = None):
"""Grant a privilege for the role
:param privilege: privilege name.
:type privilege: str
:param collection_name: collection name.
:type collection_name: str
:param db_name: db name. Optional. If None, use the default db.
:type db_name: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.grant_v2("Insert", collection_name, db_name=db_name)
"""
return self._get_connection().grant_privilege_v2(
self._name,
privilege,
collection_name,
db_name=db_name,
)
def revoke_v2(self, privilege: str, collection_name: str, db_name: Optional[str] = None):
"""Revoke a privilege for the role
:param privilege: privilege name.
:type privilege: str
:param collection_name: collection name.
:type collection_name: str
:param db_name: db name. Optional. If None, use the default db.
:type db_name: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.revoke_v2("Insert", collection_name, db_name=db_name)
"""
return self._get_connection().revoke_privilege_v2(
self._name,
privilege,
collection_name,
db_name=db_name,
)
def list_grant(self, object: str, object_name: str, db_name: str = ""):
"""List a grant info for the role and the specific object
:param object: object type.
:type object: str
:param object_name: identifies a specific object name.
:type object_name: str
:param db_name: db name.
:type db_name: str
:return a GrantInfo object
:rtype GrantInfo
GrantInfo groups:
- GrantItem: <object:Collection>, <object_name:foo>, <role_name:x>,
<grantor_name:root>, <privilege:Load>
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.list_grant("Collection", collection_name)
"""
return self._get_connection().select_grant_for_role_and_object(
self._name, object, object_name, db_name
)
def list_grants(self, db_name: str = ""):
"""List a grant info for the role
:param db_name: db name.
:type db_name: str
:return a GrantInfo object
:rtype GrantInfo
GrantInfo groups:
- GrantItem: <object:Collection>, <object_name:foo>, <role_name:x>,
<grantor_name:root>, <privilege:Load>
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.list_grants()
"""
return self._get_connection().select_grant_for_one_role(self._name, db_name)
def create_privilege_group(self, privilege_group: str):
"""Create a privilege group for the role
:param privilege_group: privilege group name.
:type privilege_group: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.create_privilege_group(privilege_group="privilege_group")
"""
return self._get_connection().create_privilege_group(privilege_group)
def drop_privilege_group(self, privilege_group: str):
"""Drop a privilege group for the role
:param privilege_group: privilege group name.
:type privilege_group: str
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.drop_privilege_group(privilege_group="privilege_group")
"""
return self._get_connection().drop_privilege_group(privilege_group)
def list_privilege_groups(self):
"""List all privilege groups for the role
:return a PrivilegeGroupInfo object
:rtype PrivilegeGroupInfo
PrivilegeGroupInfo groups:
- PrivilegeGroupItem: <group_name:group1>, <privileges:['Insert', 'Release']>
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.list_privilege_groups()
"""
return self._get_connection().list_privilege_groups()
def add_privileges_to_group(self, privilege_group: str, privileges: list):
"""Add privileges to a privilege group for the role
:param privilege_group: privilege group name.
:type privilege_group: str
:param privileges: a list of privilege names.
:type privileges: list
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.add_privileges_to_group(privilege_group="privilege_group",
>>> privileges=["Insert","Release"])
"""
return self._get_connection().add_privileges_to_group(privilege_group, privileges)
def remove_privileges_from_group(self, privilege_group: str, privileges: list):
"""Remove privileges from a privilege group for the role
:param privilege_group: privilege group name.
:type privilege_group: str
:param privileges: a list of privilege names.
:type privileges: list
:example:
>>> from pymilvus import connections
>>> from pymilvus.orm.role import Role
>>> connections.connect()
>>> role = Role(role_name)
>>> role.remove_privileges_from_group(privilege_group="privilege_group",
>>> privileges=["Insert","Release"])
"""
return self._get_connection().remove_privileges_from_group(privilege_group, privileges)
| Role |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_organization_seer_explorer_runs.py | {
"start": 431,
"end": 8751
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-seer-explorer-runs"
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.url = reverse(self.endpoint, args=[self.organization.slug])
self.login_as(user=self.user)
self.seer_access_patcher = patch(
"sentry.seer.explorer.client_utils.has_seer_explorer_access_with_detail",
return_value=(True, None),
)
self.seer_access_patcher.start()
self.client_patcher = patch(
"sentry.seer.endpoints.organization_seer_explorer_runs.SeerExplorerClient"
)
self.mock_client_class = self.client_patcher.start()
self.mock_client = MagicMock()
self.mock_client_class.return_value = self.mock_client
def tearDown(self) -> None:
self.seer_access_patcher.stop()
self.client_patcher.stop()
super().tearDown()
def test_get_simple(self) -> None:
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=1,
title="Run 1",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
ExplorerRun(
run_id=2,
title="Run 2",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
]
response = self.client.get(self.url)
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 2
assert data[0]["run_id"] == 1
assert data[1]["run_id"] == 2
self.mock_client_class.assert_called_once_with(self.organization, ANY)
self.mock_client.get_runs.assert_called_once_with(
category_key=None,
category_value=None,
offset=0,
limit=101, # Default per_page of 100 + 1 for has_more
)
def test_get_cursor_pagination(self) -> None:
# Mock seer response for offset 0, limit 3.
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=1,
title="Run 1",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
ExplorerRun(
run_id=2,
title="Run 2",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
ExplorerRun(
run_id=3,
title="Run 3",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
]
cursor = str(Cursor(0, 0))
response = self.client.get(self.url + f"?per_page=2&cursor={cursor}")
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 2
assert data[0]["run_id"] == 1
assert data[1]["run_id"] == 2
assert 'rel="next"; results="true"' in response.headers["Link"]
self.mock_client.get_runs.assert_called_once_with(
category_key=None, category_value=None, offset=0, limit=3
)
# Second page - mock seer response for offset 2, limit 3.
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=3,
title="Run 3",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
ExplorerRun(
run_id=4,
title="Run 4",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
),
]
cursor = str(Cursor(0, 2))
response = self.client.get(self.url + f"?per_page=2&cursor={cursor}")
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 2
assert data[0]["run_id"] == 3
assert data[1]["run_id"] == 4
assert 'rel="next"; results="false"' in response.headers["Link"]
# Verify second call
assert self.mock_client.get_runs.call_count == 2
call_args = self.mock_client.get_runs.call_args
assert call_args.kwargs["offset"] == 2
assert call_args.kwargs["limit"] == 3
def test_get_with_seer_error(self) -> None:
self.mock_client.get_runs.side_effect = requests.HTTPError("API Error")
response = self.client.get(self.url)
assert response.status_code == 500
def test_get_with_category_key_filter(self) -> None:
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=1,
title="Run 1",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
category_key="bug-fixer",
category_value=None,
),
]
response = self.client.get(self.url + "?category_key=bug-fixer")
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 1
assert data[0]["run_id"] == 1
call_args = self.mock_client.get_runs.call_args
assert call_args.kwargs["category_key"] == "bug-fixer"
assert call_args.kwargs["category_value"] is None
def test_get_with_category_value_filter(self) -> None:
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=2,
title="Run 2",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
category_key=None,
category_value="issue-123",
),
]
response = self.client.get(self.url + "?category_value=issue-123")
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 1
assert data[0]["run_id"] == 2
call_args = self.mock_client.get_runs.call_args
assert call_args.kwargs["category_key"] is None
assert call_args.kwargs["category_value"] == "issue-123"
def test_get_with_both_category_filters(self) -> None:
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=3,
title="Run 3",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
category_key="bug-fixer",
category_value="issue-123",
),
]
response = self.client.get(self.url + "?category_key=bug-fixer&category_value=issue-123")
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 1
assert data[0]["run_id"] == 3
call_args = self.mock_client.get_runs.call_args
assert call_args.kwargs["category_key"] == "bug-fixer"
assert call_args.kwargs["category_value"] == "issue-123"
def test_get_with_category_filters_and_pagination(self) -> None:
self.mock_client.get_runs.return_value = [
ExplorerRun(
run_id=1,
title="Run 1",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
category_key="bug-fixer",
category_value="issue-123",
),
ExplorerRun(
run_id=2,
title="Run 2",
last_triggered_at=datetime.now(),
created_at=datetime.now(),
category_key="bug-fixer",
category_value="issue-123",
),
]
cursor = str(Cursor(0, 0))
response = self.client.get(
self.url
+ "?category_key=bug-fixer&category_value=issue-123&per_page=2&cursor="
+ cursor
)
assert response.status_code == 200
data = response.json()["data"]
assert len(data) == 2
call_args = self.mock_client.get_runs.call_args
assert call_args.kwargs["category_key"] == "bug-fixer"
assert call_args.kwargs["category_value"] == "issue-123"
assert call_args.kwargs["limit"] == 3 # +1 for has_more
assert call_args.kwargs["offset"] == 0
| TestOrganizationSeerExplorerRunsEndpoint |
python | keras-team__keras | keras/src/metrics/f_score_metrics_test.py | {
"start": 137,
"end": 14009
} | class ____(testing.TestCase):
def _run_test(
self,
y_true,
y_pred,
sample_weights,
average,
beta,
threshold,
reference_result,
):
fbeta = f_score_metrics.FBetaScore(
average, beta, threshold, dtype="float32"
)
fbeta.update_state(y_true, y_pred, sample_weights)
result = fbeta.result()
self.assertAllClose(result, reference_result, atol=1e-6)
def test_config(self):
fbeta_obj = f_score_metrics.FBetaScore(
beta=0.5, threshold=0.3, average=None, dtype="float32"
)
self.assertEqual(fbeta_obj.beta, 0.5)
self.assertEqual(fbeta_obj.average, None)
self.assertEqual(fbeta_obj.threshold, 0.3)
self.assertEqual(fbeta_obj.dtype, "float32")
# Check save and restore config
fbeta_obj2 = f_score_metrics.FBetaScore.from_config(
fbeta_obj.get_config()
)
self.assertEqual(fbeta_obj2.beta, 0.5)
self.assertEqual(fbeta_obj2.average, None)
self.assertEqual(fbeta_obj2.threshold, 0.3)
self.assertEqual(fbeta_obj2.dtype, "float32")
@parameterized.parameters(
("micro", 0.5),
("micro", 1.0),
("micro", 2.0),
("macro", 0.5),
("macro", 1.0),
("macro", 2.0),
("weighted", 0.5),
("weighted", 1.0),
("weighted", 2.0),
)
def test_fbeta_perfect_score(self, average, beta):
y_true = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]
y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
self._run_test(
y_true,
y_pred,
None,
average=average,
beta=beta,
threshold=0.66,
reference_result=1.0,
)
@parameterized.parameters(
("micro", 0.5),
("micro", 1.0),
("micro", 2.0),
("macro", 0.5),
("macro", 1.0),
("macro", 2.0),
("weighted", 0.5),
("weighted", 1.0),
("weighted", 2.0),
)
def test_fbeta_worst_score(self, average, beta):
y_true = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
self._run_test(
y_true,
y_pred,
None,
average=average,
beta=beta,
threshold=0.66,
reference_result=0.0,
)
@parameterized.parameters(
# average, beta, result
(None, 0.5, [0.71428573, 0.5, 0.833334]),
(None, 1.0, [0.8, 0.5, 0.6666667]),
(None, 2.0, [0.9090904, 0.5, 0.555556]),
("micro", 0.5, 0.6666667),
("micro", 1.0, 0.6666667),
("micro", 2.0, 0.6666667),
("macro", 0.5, 0.6825397),
("macro", 1.0, 0.6555555),
("macro", 2.0, 0.6548822),
("weighted", 0.5, 0.6825397),
("weighted", 1.0, 0.6555555),
("weighted", 2.0, 0.6548822),
)
def test_fbeta_random_score(self, average, beta, result):
y_pred = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
y_true = [[0, 0, 1], [1, 1, 0], [1, 1, 1]]
self._run_test(
y_true,
y_pred,
None,
average=average,
beta=beta,
threshold=0.66,
reference_result=result,
)
@parameterized.parameters(
# average, beta, result
(None, 0.5, [0.9090904, 0.555556, 1.0]),
(None, 1.0, [0.8, 0.6666667, 1.0]),
(None, 2.0, [0.71428573, 0.833334, 1.0]),
("micro", 0.5, 0.833334),
("micro", 1.0, 0.833334),
("micro", 2.0, 0.833334),
("macro", 0.5, 0.821549),
("macro", 1.0, 0.822222),
("macro", 2.0, 0.849206),
("weighted", 0.5, 0.880471),
("weighted", 1.0, 0.844445),
("weighted", 2.0, 0.829365),
)
def test_fbeta_random_score_none(self, average, beta, result):
y_true = [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
]
y_pred = [
[0.9, 0.1, 0],
[0.2, 0.6, 0.2],
[0, 0, 1],
[0.4, 0.3, 0.3],
[0, 0.9, 0.1],
[0, 0, 1],
]
self._run_test(
y_true,
y_pred,
None,
average=average,
beta=beta,
threshold=None,
reference_result=result,
)
@parameterized.parameters(
# average, beta, sample_weights, result
(None, 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.909091, 0.555556, 1.0]),
(None, 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
(None, 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.9375, 0.714286, 1.0]),
(None, 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.8, 0.666667, 1.0]),
(None, 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
(None, 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.857143, 0.8, 1.0]),
(None, 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.714286, 0.833333, 1.0]),
(None, 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
(None, 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.789474, 0.909091, 1.0]),
("micro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
("micro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("micro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
("micro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
("micro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("micro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
("micro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
("micro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("micro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
("macro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.821549),
("macro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
("macro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.883929),
("macro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.822222),
("macro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
("macro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.885714),
("macro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.849206),
("macro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
("macro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.899522),
("weighted", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.880471),
("weighted", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("weighted", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.917857),
("weighted", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.844444),
("weighted", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("weighted", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.902857),
("weighted", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.829365),
("weighted", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
("weighted", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.897608),
)
def test_fbeta_weighted_random_score_none(
self, average, beta, sample_weights, result
):
y_true = [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
]
y_pred = [
[0.9, 0.1, 0],
[0.2, 0.6, 0.2],
[0, 0, 1],
[0.4, 0.3, 0.3],
[0, 0.9, 0.1],
[0, 0, 1],
]
self._run_test(
y_true,
y_pred,
sample_weights,
average=average,
beta=beta,
threshold=None,
reference_result=result,
)
def test_invalid_average_raises_value_error(self):
expected_message = (
"Invalid `average` argument value. Expected one of: "
r"\{None, 'micro', 'macro', 'weighted'\}. "
"Received: average=invalid_average"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="invalid_average",
beta=1.0,
threshold=None,
dtype="float32",
)
def test_beta_integer_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta=1, threshold=None, dtype="float32"
)
def test_beta_string_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta="1.0", threshold=None, dtype="float32"
)
def test_beta_none_type_raises_value_error(self):
with self.assertRaisesRegex(
ValueError,
"Invalid `beta` argument value. It should be a Python float.",
):
f_score_metrics.FBetaScore(
average="macro", beta=None, threshold=None, dtype="float32"
)
def test_beta_zero_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=0.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=0.0, threshold=None, dtype="float32"
)
def test_beta_negative_one_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=-1.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=-1.0, threshold=None, dtype="float32"
)
def test_beta_negative_half_raises_value_error(self):
expected_message = (
"Invalid `beta` argument value. It should be > 0. "
"Received: beta=-0.5"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=-0.5, threshold=None, dtype="float32"
)
def test_threshold_not_float_raises_value_error(self):
expected_message_pattern = (
"Invalid `threshold` argument value. "
"It should be a Python float. "
"Received: threshold=1 of type '<class 'int'>'"
)
with self.assertRaisesRegex(ValueError, expected_message_pattern):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=1, dtype="float32"
)
def test_threshold_string_raises_value_error(self):
expected_message_pattern = (
"Invalid `threshold` argument value. "
"It should be a Python float. "
"Received: threshold=0.5 of type '<class 'str'>'"
)
with self.assertRaisesRegex(ValueError, expected_message_pattern):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold="0.5", dtype="float32"
)
def test_threshold_above_one_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=1.1"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=1.1, dtype="float32"
)
def test_threshold_zero_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=0.0"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=0.0, dtype="float32"
)
def test_threshold_negative_raises_value_error(self):
expected_message = (
"Invalid `threshold` argument value. "
"It should verify 0 < threshold <= 1. "
"Received: threshold=-0.5"
)
with self.assertRaisesRegex(ValueError, expected_message):
f_score_metrics.FBetaScore(
average="macro", beta=1.0, threshold=-0.5, dtype="float32"
)
def test_non_2d_input_shapes_raises_value_error(self):
fbeta = f_score_metrics.FBetaScore(beta=1.0, dtype="float32")
y_true_shape = (2, 3, 4)
y_pred_shape = (2, 3, 4)
expected_error_message = (
"FBetaScore expects 2D inputs with shape "
r"\(batch_size, output_dim\)\. Received input "
r"shapes: y_pred\.shape=\(2, 3, 4\) and "
r"y_true\.shape=\(2, 3, 4\)\."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
fbeta._build(y_true_shape, y_pred_shape)
def test_undefined_output_dim_raises_value_error(self):
fbeta = f_score_metrics.FBetaScore(beta=1.0, dtype="float32")
y_true_shape = (2, None)
y_pred_shape = (2, None)
expected_error_message = (
"FBetaScore expects 2D inputs with shape "
r"\(batch_size, output_dim\), with output_dim fully "
r"defined \(not None\)\. Received input "
r"shapes: y_pred\.shape=\(2, None\) and "
r"y_true\.shape=\(2, None\)\."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
fbeta._build(y_true_shape, y_pred_shape)
| FBetaScoreTest |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 96522,
"end": 97570
class ____(TreeTestCase):
    """Ordered insertion into an MPTT tree when the ordering field is nullable.

    The expected-tree strings are columns: pk, parent_id, tree_id, level,
    lft, rght — the layout assertTreeEqual parses.
    """
    def test_nullable_ordered_insertion(self):
        # Ascending insertion order: the NULL-named child sorts before "A1".
        genreA = NullableOrderedInsertionModel.objects.create(name="A", parent=None)
        NullableOrderedInsertionModel.objects.create(name="A1", parent=genreA)
        NullableOrderedInsertionModel.objects.create(name=None, parent=genreA)
        self.assertTreeEqual(
            NullableOrderedInsertionModel.objects.all(),
            """
            1 - 1 0 1 6
            3 1 1 1 2 3
            2 1 1 1 4 5
        """,
        )
    def test_nullable_ordered_insertion_desc(self):
        # Descending insertion order: "A1" sorts before the NULL-named child.
        genreA = NullableDescOrderedInsertionModel.objects.create(name="A", parent=None)
        NullableDescOrderedInsertionModel.objects.create(name="A1", parent=genreA)
        NullableDescOrderedInsertionModel.objects.create(name=None, parent=genreA)
        self.assertTreeEqual(
            NullableDescOrderedInsertionModel.objects.all(),
            """
            1 - 1 0 1 6
            2 1 1 1 2 3
            3 1 1 1 4 5
        """,
        )
| NullableOrderedInsertion |
python | pytest-dev__pytest | src/_pytest/runner.py | {
"start": 14823,
"end": 19922
class ____:
    """Shared state for setting up/tearing down test items or collectors
    in a session.
    Suppose we have a collection tree as follows:
    <Session session>
        <Module mod1>
            <Function item1>
        <Module mod2>
            <Function item2>
    The SetupState maintains a stack. The stack starts out empty:
        []
    During the setup phase of item1, setup(item1) is called. What it does
    is:
        push session to stack, run session.setup()
        push mod1 to stack, run mod1.setup()
        push item1 to stack, run item1.setup()
    The stack is:
        [session, mod1, item1]
    While the stack is in this shape, it is allowed to add finalizers to
    each of session, mod1, item1 using addfinalizer().
    During the teardown phase of item1, teardown_exact(item2) is called,
    where item2 is the next item to item1. What it does is:
        pop item1 from stack, run its teardowns
        pop mod1 from stack, run its teardowns
    mod1 was popped because it ended its purpose with item1. The stack is:
        [session]
    During the setup phase of item2, setup(item2) is called. What it does
    is:
        push mod2 to stack, run mod2.setup()
        push item2 to stack, run item2.setup()
    Stack:
        [session, mod2, item2]
    During the teardown phase of item2, teardown_exact(None) is called,
    because item2 is the last item. What it does is:
        pop item2 from stack, run its teardowns
        pop mod2 from stack, run its teardowns
        pop session from stack, run its teardowns
    Stack:
        []
    The end!
    """
    def __init__(self) -> None:
        # The stack is in the dict insertion order.
        self.stack: dict[
            Node,
            tuple[
                # Node's finalizers.
                list[Callable[[], object]],
                # Node's exception and original traceback, if its setup raised.
                tuple[OutcomeException | Exception, types.TracebackType | None] | None,
            ],
        ] = {}
    def setup(self, item: Item) -> None:
        """Setup objects along the collector chain to the item."""
        needed_collectors = item.listchain()
        # If a collector fails its setup, fail its entire subtree of items.
        # The setup is not retried for each item - the same exception is used.
        for col, (finalizers, exc) in self.stack.items():
            assert col in needed_collectors, "previous item was not torn down properly"
            if exc:
                raise exc[0].with_traceback(exc[1])
        # Set up only the collectors not already on the stack (stack is a
        # prefix of the needed chain at this point).
        for col in needed_collectors[len(self.stack) :]:
            assert col not in self.stack
            # Push onto the stack.
            self.stack[col] = ([col.teardown], None)
            try:
                col.setup()
            except TEST_OUTCOME as exc:
                # Remember the failure so sibling items reuse it (see above).
                self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__))
                raise
    def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None:
        """Attach a finalizer to the given node.
        The node must be currently active in the stack.
        """
        assert node and not isinstance(node, tuple)
        assert callable(finalizer)
        assert node in self.stack, (node, self.stack)
        self.stack[node][0].append(finalizer)
    def teardown_exact(self, nextitem: Item | None) -> None:
        """Teardown the current stack up until reaching nodes that nextitem
        also descends from.
        When nextitem is None (meaning we're at the last item), the entire
        stack is torn down.
        """
        needed_collectors = (nextitem and nextitem.listchain()) or []
        exceptions: list[BaseException] = []
        while self.stack:
            # Stop once the remaining stack is a prefix of nextitem's chain:
            # those collectors stay alive for the next item.
            if list(self.stack.keys()) == needed_collectors[: len(self.stack)]:
                break
            node, (finalizers, _) = self.stack.popitem()
            these_exceptions = []
            while finalizers:
                # Run finalizers in LIFO order; collect rather than abort so
                # every finalizer gets a chance to run.
                fin = finalizers.pop()
                try:
                    fin()
                except TEST_OUTCOME as e:
                    these_exceptions.append(e)
            if len(these_exceptions) == 1:
                exceptions.extend(these_exceptions)
            elif these_exceptions:
                msg = f"errors while tearing down {node!r}"
                exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1]))
        if len(exceptions) == 1:
            raise exceptions[0]
        elif exceptions:
            raise BaseExceptionGroup("errors during test teardown", exceptions[::-1])
        if nextitem is None:
            assert not self.stack
def collect_one_node(collector: Collector) -> CollectReport:
    """Run the collection protocol for a single collector and return its report.

    Fires pytest_collectstart, builds the report via the
    pytest_make_collect_report hook, and — if the hook stashed a CallInfo
    with an interactive-worthy exception — notifies pytest_exception_interact.
    """
    ihook = collector.ihook
    ihook.pytest_collectstart(collector=collector)
    rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
    # "call" is smuggled on the report's __dict__ by the hook; remove it so it
    # doesn't leak into the serialized report.
    call = rep.__dict__.pop("call", None)
    if call and check_interactive_exception(call, rep):
        ihook.pytest_exception_interact(node=collector, call=call, report=rep)
    return rep
| SetupState |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 13836,
"end": 13911
} | class ____():
def __init__(self, thing: object) -> None:
pass
| Egg |
python | kamyu104__LeetCode-Solutions | Python/winner-of-the-linked-list-game.py | {
"start": 43,
"end": 367
} | class ____(object):
def gameResult(self, head):
"""
:type head: Optional[ListNode]
:rtype: str
"""
cnt = 0
while head:
cnt += cmp(head.val, head.next.val)
head = head.next.next
return "Tie" if cnt == 0 else "Odd" if cnt < 0 else "Even"
| Solution |
python | sympy__sympy | sympy/printing/pycode.py | {
"start": 18084,
"end": 21267
class ____(AbstractPythonCodePrinter):
    """Prints SymPy expressions as plain Python using the ``math`` module."""
    def _print_sign(self, e):
        # math has no sign(); emulate via copysign with a special case for 0.
        return '(0.0 if {e} == 0 else {f}(1, {e}))'.format(
            f=self._module_format('math.copysign'), e=self._print(e.args[0]))
    def _print_Not(self, expr):
        PREC = precedence(expr)
        return self._operators['not'] + ' ' + self.parenthesize(expr.args[0], PREC)
    def _print_IndexedBase(self, expr):
        return expr.name
    def _print_Indexed(self, expr):
        # Render Indexed(base, i, j, ...) as base[i, j, ...].
        base = expr.args[0]
        index = expr.args[1:]
        return "{}[{}]".format(self._print(base), ", ".join([self._print(ind) for ind in index]))
    def _print_Pow(self, expr, rational=False):
        return self._hprint_Pow(expr, rational=rational)
    def _print_Rational(self, expr):
        # True division, so 1/2 evaluates to 0.5 under Python 3 semantics.
        return '{}/{}'.format(expr.p, expr.q)
    def _print_Half(self, expr):
        return self._print_Rational(expr)
    def _print_frac(self, expr):
        # frac(x) == Mod(x, 1); reuse the Mod printer.
        return self._print_Mod(Mod(expr.args[0], 1))
    def _print_Symbol(self, expr):
        name = super()._print_Symbol(expr)
        if name in self.reserved_words:
            if self._settings['error_on_reserved']:
                msg = ('This expression includes the symbol "{}" which is a '
                    'reserved keyword in this language.')
                raise ValueError(msg.format(name))
            return name + self._settings['reserved_word_suffix']
        elif '{' in name: # Remove curly braces from subscripted variables
            return name.replace('{', '').replace('}', '')
        else:
            return name
    # No clean math-module counterpart exists for these; raise "not supported".
    _print_lowergamma = CodePrinter._print_not_supported
    _print_uppergamma = CodePrinter._print_not_supported
    _print_fresnelc = CodePrinter._print_not_supported
    _print_fresnels = CodePrinter._print_not_supported
# Bulk-register printer methods: every known math-module function and
# constant gets a generated _print_<name> method instead of hand-written ones.
for k in PythonCodePrinter._kf:
    setattr(PythonCodePrinter, '_print_%s' % k, _print_known_func)
for k in _known_constants_math:
    setattr(PythonCodePrinter, '_print_%s' % k, _print_known_const)
def pycode(expr, **settings):
    """Convert a SymPy expression to an equivalent string of Python code.

    Parameters
    ==========
    expr : Expr
        A SymPy expression.
    fully_qualified_modules : bool
        Whether or not to write out full module names of functions
        (``math.sin`` vs. ``sin``). default: ``True``.
    standard : str or None, optional
        Only 'python3' (default) is supported.
        This parameter may be removed in the future.

    Examples
    ========

    >>> from sympy import pycode, tan, Symbol
    >>> pycode(tan(Symbol('x')) + 1)
    'math.tan(x) + 1'
    """
    printer = PythonCodePrinter(settings)
    return printer.doprint(expr)
from itertools import chain
from sympy.printing.pycode import PythonCodePrinter
# SymPy function name -> cmath function name; drives auto-generated printer
# methods for the complex-math code printer.
_known_functions_cmath = {
    'exp': 'exp',
    'sqrt': 'sqrt',
    'log': 'log',
    'cos': 'cos',
    'sin': 'sin',
    'tan': 'tan',
    'acos': 'acos',
    'asin': 'asin',
    'atan': 'atan',
    'cosh': 'cosh',
    'sinh': 'sinh',
    'tanh': 'tanh',
    'acosh': 'acosh',
    'asinh': 'asinh',
    'atanh': 'atanh',
}
# SymPy constants with direct cmath counterparts.
_known_constants_cmath = {
    'Pi': 'pi',
    'E': 'e',
    'Infinity': 'inf',
    'NegativeInfinity': '-inf',
}
| PythonCodePrinter |
python | RaRe-Technologies__gensim | gensim/models/fasttext.py | {
"start": 9741,
"end": 29013
class ____(Word2Vec):
    def __init__(self, sentences=None, corpus_file=None, sg=0, hs=0, vector_size=100, alpha=0.025,
                 window=5, min_count=5,
                 max_vocab_size=None, word_ngrams=1, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
                 negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0, min_n=3, max_n=6,
                 sorted_vocab=1, bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH, callbacks=(),
                 max_final_vocab=None, shrink_windows=True,):
        """Train, use and evaluate word representations learned using the method
        described in `Enriching Word Vectors with Subword Information <https://arxiv.org/abs/1607.04606>`_,
        aka FastText.
        The model can be stored/loaded via its :meth:`~gensim.models.fasttext.FastText.save` and
        :meth:`~gensim.models.fasttext.FastText.load` methods, or loaded from a format compatible with the
        original Fasttext implementation via :func:`~gensim.models.fasttext.load_facebook_model`.
        Parameters
        ----------
        sentences : iterable of list of str, optional
            Can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus'
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such
            examples. If you don't supply `sentences`, the model is left uninitialized -- use if you plan to
            initialize it in some other way.
        corpus_file : str, optional
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
            `corpus_file` arguments need to be passed (or none of them, in that case, the model is left
            uninitialized).
        min_count : int, optional
            The model ignores all words with total frequency lower than this.
        vector_size : int, optional
            Dimensionality of the word vectors.
        window : int, optional
            The maximum distance between the current and predicted word within a sentence.
        workers : int, optional
            Use these many worker threads to train the model (=faster training with multicore machines).
        alpha : float, optional
            The initial learning rate.
        min_alpha : float, optional
            Learning rate will linearly drop to `min_alpha` as training progresses.
        sg : {1, 0}, optional
            Training algorithm: skip-gram if `sg=1`, otherwise CBOW.
        hs : {1,0}, optional
            If 1, hierarchical softmax will be used for model training.
            If set to 0, and `negative` is non-zero, negative sampling will be used.
        seed : int, optional
            Seed for the random number generator. Initial vectors for each word are seeded with a hash of
            the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
            you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
            from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
            use of the `PYTHONHASHSEED` environment variable to control hash randomization).
        max_vocab_size : int, optional
            Limits the RAM during vocabulary building; if there are more unique
            words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
            Set to `None` for no limit.
        sample : float, optional
            The threshold for configuring which higher-frequency words are randomly downsampled,
            useful range is (0, 1e-5).
        negative : int, optional
            If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
            should be drawn (usually between 5-20).
            If set to 0, no negative sampling is used.
        ns_exponent : float, optional
            The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
            to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
            than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
            More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
            other values may perform better for recommendation applications.
        cbow_mean : {1,0}, optional
            If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
        hashfxn : function, optional
            Hash function to use to randomly initialize weights, for increased training reproducibility.
        iter : int, optional
            Number of iterations (epochs) over the corpus.
        trim_rule : function, optional
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            The rule, if given, is only used to prune vocabulary during
            :meth:`~gensim.models.fasttext.FastText.build_vocab` and is not stored as part of themodel.
            The input parameters are of the following types:
                * `word` (str) - the word we are examining
                * `count` (int) - the word's frequency count in the corpus
                * `min_count` (int) - the minimum count threshold.
        sorted_vocab : {1,0}, optional
            If 1, sort the vocabulary by descending frequency before assigning word indices.
        batch_words : int, optional
            Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines).(Larger batches will be passed if individual
            texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
        min_n : int, optional
            Minimum length of char n-grams to be used for training word representations.
        max_n : int, optional
            Max length of char ngrams to be used for training word representations. Set `max_n` to be
            lesser than `min_n` to avoid char ngrams being used.
        word_ngrams : int, optional
            In Facebook's FastText, "max length of word ngram" - but gensim only supports the
            default of 1 (regular unigram word handling).
        bucket : int, optional
            Character ngrams are hashed into a fixed number of buckets, in order to limit the
            memory usage of the model. This option specifies the number of buckets used by the model.
            The default value of 2000000 consumes as much memory as having 2000000 more in-vocabulary
            words in your model.
        callbacks : :obj: `list` of :obj: `~gensim.models.callbacks.CallbackAny2Vec`, optional
            List of callbacks that need to be executed/run at specific stages during training.
        max_final_vocab : int, optional
            Limits the vocab to a target vocab size by automatically selecting
            ``min_count```. If the specified ``min_count`` is more than the
            automatically calculated ``min_count``, the former will be used.
            Set to ``None`` if not required.
        shrink_windows : bool, optional
            New in 4.1. Experimental.
            If True, the effective window size is uniformly sampled from [1, `window`]
            for each target word during training, to match the original word2vec algorithm's
            approximate weighting of context words by distance. Otherwise, the effective
            window size is always fixed to `window` words to either side.
        Examples
        --------
        Initialize and train a `FastText` model:
        .. sourcecode:: pycon
            >>> from gensim.models import FastText
            >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
            >>>
            >>> model = FastText(sentences, min_count=1)
            >>> say_vector = model.wv['say']  # get vector for word
            >>> of_vector = model.wv['of']  # get vector for out-of-vocab word
        Attributes
        ----------
        wv : :class:`~gensim.models.fasttext.FastTextKeyedVectors`
            This object essentially contains the mapping between words and embeddings. These are similar to
            the embedding computed in the :class:`~gensim.models.word2vec.Word2Vec`, however here we also
            include vectors for n-grams. This allows the model to compute embeddings even for **unseen**
            words (that do not exist in the vocabulary), as the aggregate of the n-grams included in the word.
            After training the model, this attribute can be used directly to query those embeddings in various
            ways. Check the module level docstring for some examples.
        """
        # These classmethod-style entry points must not be called on an instance.
        self.load = utils.call_on_class_only
        self.load_fasttext_format = utils.call_on_class_only
        self.callbacks = callbacks
        if word_ngrams != 1:
            raise NotImplementedError("Gensim's FastText implementation does not yet support word_ngrams != 1.")
        self.word_ngrams = word_ngrams
        if max_n < min_n:
            # with no eligible char-ngram lengths, no buckets need be allocated
            bucket = 0
        self.wv = FastTextKeyedVectors(vector_size, min_n, max_n, bucket)
        # EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
        # advanced users should directly resize/adjust as desired after any vocab growth
        self.wv.vectors_vocab_lockf = ones(1, dtype=REAL)
        self.wv.vectors_ngrams_lockf = ones(1, dtype=REAL)
        super(FastText, self).__init__(
            sentences=sentences, corpus_file=corpus_file, workers=workers, vector_size=vector_size, epochs=epochs,
            callbacks=callbacks, batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window,
            max_vocab_size=max_vocab_size, max_final_vocab=max_final_vocab,
            min_count=min_count, sample=sample, sorted_vocab=sorted_vocab,
            null_word=null_word, ns_exponent=ns_exponent, hashfxn=hashfxn,
            seed=seed, hs=hs, negative=negative, cbow_mean=cbow_mean,
            min_alpha=min_alpha, shrink_windows=shrink_windows)
    def _init_post_load(self, hidden_output):
        """Finish wiring training state after loading a native (Facebook) model.

        Installs no-op lockf arrays and points the output layer (`syn1` /
        `syn1neg`) at the loaded `hidden_output` weights.
        """
        num_vectors = len(self.wv.vectors)
        vocab_size = len(self.wv)
        vector_size = self.wv.vector_size
        assert num_vectors > 0, 'expected num_vectors to be initialized already'
        assert vocab_size > 0, 'expected vocab_size to be initialized already'
        # EXPERIMENTAL lockf feature; create minimal no-op lockf arrays (1 element of 1.0)
        # advanced users should directly resize/adjust as necessary
        self.wv.vectors_ngrams_lockf = ones(1, dtype=REAL)
        self.wv.vectors_vocab_lockf = ones(1, dtype=REAL)
        if self.hs:
            self.syn1 = hidden_output
        if self.negative:
            self.syn1neg = hidden_output
        self.layer1_size = vector_size
    def _clear_post_train(self):
        """Clear any cached values that training may have invalidated."""
        super(FastText, self)._clear_post_train()
        self.wv.adjust_vectors()  # ensure composite-word vecs reflect latest training
    def estimate_memory(self, vocab_size=None, report=None):
        """Estimate memory that will be needed to train a model, and print the estimates to log."""
        vocab_size = vocab_size or len(self.wv)
        vec_size = self.vector_size * np.dtype(np.float32).itemsize
        l1_size = self.layer1_size * np.dtype(np.float32).itemsize
        report = report or {}
        report['vocab'] = len(self.wv) * (700 if self.hs else 500)
        report['syn0_vocab'] = len(self.wv) * vec_size
        num_buckets = self.wv.bucket
        if self.hs:
            report['syn1'] = len(self.wv) * l1_size
        if self.negative:
            report['syn1neg'] = len(self.wv) * l1_size
        if self.wv.bucket:
            report['syn0_ngrams'] = self.wv.bucket * vec_size
            num_ngrams = 0
            for word in self.wv.key_to_index:
                hashes = ft_ngram_hashes(word, self.wv.min_n, self.wv.max_n, self.wv.bucket)
                num_ngrams += len(hashes)
            # A list (64 bytes) with one np.array (100 bytes) per key, with a total of
            # num_ngrams uint32s (4 bytes) amongst them.
            # Only used during training, not stored with the model.
            report['buckets_word'] = 64 + (100 * len(self.wv)) + (4 * num_ngrams)  # TODO: caching & calc sensible?
        report['total'] = sum(report.values())
        logger.info(
            "estimated required memory for %i words, %i buckets and %i dimensions: %i bytes",
            len(self.wv), num_buckets, self.vector_size, report['total'],
        )
        return report
    def _do_train_epoch(
        self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
        total_examples=None, total_words=None, **kwargs,
    ):
        """Train one epoch from `corpus_file` on a single worker thread,
        dispatching to the cython skip-gram or CBOW routine."""
        work, neu1 = thread_private_mem
        if self.sg:
            examples, tally, raw_tally = train_epoch_sg(
                self, corpus_file, offset, cython_vocab, cur_epoch, total_examples, total_words, work, neu1,
            )
        else:
            examples, tally, raw_tally = train_epoch_cbow(
                self, corpus_file, offset, cython_vocab, cur_epoch, total_examples, total_words, work, neu1,
            )
        return examples, tally, raw_tally
    def _do_train_job(self, sentences, alpha, inits):
        """Train a single batch of sentences. Return 2-tuple `(effective word count after
        ignoring unknown words and sentence length trimming, total word count)`.
        Parameters
        ----------
        sentences : iterable of list of str
            Can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
        alpha : float
            The current learning rate.
        inits : tuple of (:class:`numpy.ndarray`, :class:`numpy.ndarray`)
            Each worker's private work memory.
        Returns
        -------
        (int, int)
            Tuple of (effective word count after ignoring unknown words and sentence length trimming, total word count)
        """
        work, neu1 = inits
        tally = train_batch_any(self, sentences, alpha, work, neu1)
        return tally, self._raw_word_count(sentences)
    @deprecated(
        "Gensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. "
        "init_sims() is now obsoleted and will be completely removed in future versions. "
        "See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4"
    )
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors. Obsoleted.
        If you need a single unit-normalized vector for some key, call
        :meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
        ``fasttext_model.wv.get_vector(key, norm=True)``.
        To refresh norms after you performed some atypical out-of-band vector tampering,
        call `:meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms()` instead.
        Parameters
        ----------
        replace : bool
            If True, forget the original trained vectors and only keep the normalized ones.
            You lose information if you do this.
        """
        self.wv.init_sims(replace=replace)
    @classmethod
    @utils.deprecated(
        'use load_facebook_vectors (to use pretrained embeddings) or load_facebook_model '
        '(to continue training with the loaded full model, more RAM) instead'
    )
    def load_fasttext_format(cls, model_file, encoding='utf8'):
        """Deprecated.
        Use :func:`gensim.models.fasttext.load_facebook_model` or
        :func:`gensim.models.fasttext.load_facebook_vectors` instead.
        """
        return load_facebook_model(model_file, encoding=encoding)
    @utils.deprecated(
        'use load_facebook_vectors (to use pretrained embeddings) or load_facebook_model '
        '(to continue training with the loaded full model, more RAM) instead'
    )
    def load_binary_data(self, encoding='utf8'):
        """Load data from a binary file created by Facebook's native FastText.
        Parameters
        ----------
        encoding : str, optional
            Specifies the encoding.
        """
        m = _load_fasttext_format(self.file_name, encoding=encoding)
        for attr, val in m.__dict__.items():
            setattr(self, attr, val)
    def save(self, *args, **kwargs):
        """Save the Fasttext model. This saved model can be loaded again using
        :meth:`~gensim.models.fasttext.FastText.load`, which supports incremental training
        and getting vectors for out-of-vocabulary words.
        Parameters
        ----------
        fname : str
            Store the model to this file.
        See Also
        --------
        :meth:`~gensim.models.fasttext.FastText.load`
            Load :class:`~gensim.models.fasttext.FastText` model.
        """
        super(FastText, self).save(*args, **kwargs)
    @classmethod
    def load(cls, *args, **kwargs):
        """Load a previously saved `FastText` model.
        Parameters
        ----------
        fname : str
            Path to the saved file.
        Returns
        -------
        :class:`~gensim.models.fasttext.FastText`
            Loaded model.
        See Also
        --------
        :meth:`~gensim.models.fasttext.FastText.save`
            Save :class:`~gensim.models.fasttext.FastText` model.
        """
        return super(FastText, cls).load(*args, rethrow=True, **kwargs)
    def _load_specials(self, *args, **kwargs):
        """Handle special requirements of `.load()` protocol, usually up-converting older versions."""
        super(FastText, self)._load_specials(*args, **kwargs)
        if hasattr(self, 'bucket'):
            # should only exist in one place: the wv subcomponent
            self.wv.bucket = self.bucket
            del self.bucket
| FastText |
python | FactoryBoy__factory_boy | tests/djapp/models.py | {
"start": 262,
"end": 341
class ____(models.Model):
    # Minimal Django model fixture: a single free-text field is all the
    # factory tests need to exercise.
    foo = models.CharField(max_length=20)
python | walkccc__LeetCode | solutions/2219. Maximum Sum Score of Array/2219.py | {
"start": 0,
"end": 233
} | class ____:
def maximumSumScore(self, nums: list[int]) -> int:
ans = -math.inf
prefix = 0
summ = sum(nums)
for num in nums:
prefix += num
ans = max(ans, prefix, summ - prefix + num)
return ans
| Solution |
python | neetcode-gh__leetcode | python/0225-implement-stack-using-queues.py | {
"start": 0,
"end": 393
} | class ____:
def __init__(self):
self.q = deque()
def push(self, x: int) -> None:
self.q.append(x)
for _ in range(len(self.q) - 1):
self.q.append(self.q.popleft())
def pop(self) -> int:
return self.q.popleft()
def top(self) -> int:
return self.q[0]
def empty(self) -> bool:
return len(self.q) == 0
| MyStack |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataprep.py | {
"start": 4782,
"end": 8309
class ____:
    """Unit tests for DataprepCopyFlowOperator (hook wiring and link persistence)."""
    @mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
    def test_execute_with_default_params(self, hook_mock):
        """Defaults propagate as empty name/description and no datasource copy."""
        op = DataprepCopyFlowOperator(
            task_id=TASK_ID,
            dataprep_conn_id=DATAPREP_CONN_ID,
            flow_id=FLOW_ID,
        )
        op.execute(context=mock.MagicMock())
        hook_mock.assert_called_once_with(dataprep_conn_id="dataprep_default")
        hook_mock.return_value.copy_flow.assert_called_once_with(
            flow_id=FLOW_ID,
            name="",
            description="",
            copy_datasources=False,
        )
    @mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
    def test_execute_with_specified_params(self, hook_mock):
        """Explicit constructor args are forwarded verbatim to the hook."""
        op = DataprepCopyFlowOperator(
            task_id=TASK_ID,
            dataprep_conn_id=DATAPREP_CONN_ID,
            flow_id=FLOW_ID,
            name="specified name",
            description="specified description",
            copy_datasources=True,
        )
        op.execute(context=mock.MagicMock())
        hook_mock.assert_called_once_with(dataprep_conn_id="dataprep_default")
        hook_mock.return_value.copy_flow.assert_called_once_with(
            flow_id=FLOW_ID, name="specified name", description="specified description", copy_datasources=True
        )
    @pytest.mark.db_test
    @mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
    def test_execute_with_templated_params(self, _, create_task_instance_of_operator, session):
        """Jinja-templated fields render from the DAG context before execution."""
        dag_id = "test_execute_with_templated_params"
        ti = create_task_instance_of_operator(
            DataprepCopyFlowOperator,
            dag_id=dag_id,
            project_id="{{ dag.dag_id }}",
            task_id=TASK_ID,
            flow_id="{{ dag.dag_id }}",
            name="{{ dag.dag_id }}",
            description="{{ dag.dag_id }}",
        )
        session.add(ti)
        session.commit()
        ti.render_templates()
        assert dag_id == ti.task.project_id
        assert dag_id == ti.task.flow_id
        assert dag_id == ti.task.name
        assert dag_id == ti.task.description
    @mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
    @mock.patch("airflow.providers.google.cloud.operators.dataprep.DataprepFlowLink")
    @pytest.mark.parametrize(
        ("provide_project_id", "expected_call_count"),
        [
            (True, 1),
            (False, 0),
        ],
    )
    def test_execute_with_project_id_will_persist_link_to_flow(
        self,
        link_mock,
        hook_mock,
        provide_project_id,
        expected_call_count,
    ):
        """The flow link is persisted only when a project_id is provided."""
        hook_mock.return_value.copy_flow.return_value = {"id": NEW_FLOW_ID}
        context = mock.MagicMock()
        project_id = GCP_PROJECT_ID if provide_project_id else None
        op = DataprepCopyFlowOperator(
            task_id=TASK_ID,
            project_id=project_id,
            dataprep_conn_id=DATAPREP_CONN_ID,
            flow_id=FLOW_ID,
            name="specified name",
            description="specified description",
            copy_datasources=True,
        )
        op.execute(context=context)
        assert link_mock.persist.call_count == expected_call_count
        if provide_project_id:
            # The link must point at the NEW flow returned by copy_flow,
            # not the source flow.
            link_mock.persist.assert_called_with(
                context=context,
                project_id=project_id,
                flow_id=NEW_FLOW_ID,
            )
| TestDataprepCopyFlowOperatorTest |
python | qdrant__qdrant-client | qdrant_client/http/api/aliases_api.py | {
"start": 3934,
"end": 4786
class ____(_AliasesApi):
    """Synchronous (blocking) client for the collection-alias endpoints;
    each method delegates to the request builders on _AliasesApi."""
    def get_collection_aliases(
        self,
        collection_name: str,
    ) -> m.InlineResponse2008:
        """
        Get list of all aliases for a collection
        """
        return self._build_for_get_collection_aliases(
            collection_name=collection_name,
        )
    def get_collections_aliases(
        self,
    ) -> m.InlineResponse2008:
        """
        Get list of all existing collections aliases
        """
        return self._build_for_get_collections_aliases()
    def update_aliases(
        self,
        timeout: int = None,
        change_aliases_operation: m.ChangeAliasesOperation = None,
    ) -> m.InlineResponse200:
        """Apply a batch of alias create/delete/rename operations,
        optionally bounded by a server-side timeout (seconds)."""
        return self._build_for_update_aliases(
            timeout=timeout,
            change_aliases_operation=change_aliases_operation,
        )
| SyncAliasesApi |
python | huggingface__transformers | tests/models/smollm3/test_modeling_smollm3.py | {
"start": 2048,
"end": 2476
class ____(CausalLMModelTest, unittest.TestCase):
    """Model-level test suite for SmolLM3, driven by the shared CausalLM harness."""
    model_tester_class = SmolLM3ModelTester
    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    @is_flaky()
    def test_eager_matches_sdpa_inference(self, *args):
        # flaky test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions
        # Delegate to the mixin implementation of the currently running
        # parameterized case rather than re-implementing it here.
        return getattr(ModelTesterMixin, self._testMethodName)(self)
@require_torch
| SmolLM3ModelTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 227526,
"end": 230434
class ____(Request):
    """
    Publish a draft version.
    :param version: Draft version ID
    :type version: str
    :param force: Ignore ongoing annotation tasks with this version as input
    :type force: bool
    :param publishing_task: ID of an in-progress annotation task calling this
        endpoint. Versions which are used as input of in-progress annotation tasks can
        only be published if there is only one such task and its ID is sent in this
        field. This is required if one exists.
    :type publishing_task: str
    """
    _service = "datasets"
    _action = "publish_version"
    _version = "2.23"
    # JSON schema describing the request payload; validated by the base Request.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "Ignore ongoing annotation tasks with this version as input",
                "type": "boolean",
            },
            "publishing_task": {
                "description": (
                    "ID of an in-progress annotation task calling this endpoint.\n                Versions which"
                    " are used as input of in-progress annotation tasks can only be published\n                if"
                    " there is only one such task and its ID is sent in this field.\n                This is"
                    " required if one exists."
                ),
                "type": "string",
            },
            "version": {"description": "Draft version ID", "type": "string"},
        },
        "required": ["version"],
        "type": "object",
    }
    def __init__(self, version, force=False, publishing_task=None, **kwargs):
        super(PublishVersionRequest, self).__init__(**kwargs)
        self.version = version
        self.force = force
        self.publishing_task = publishing_task
    @schema_property("version")
    def version(self):
        return self._property_version
    @version.setter
    def version(self, value):
        # None clears the property; otherwise enforce the schema's string type.
        if value is None:
            self._property_version = None
            return
        self.assert_isinstance(value, "version", six.string_types)
        self._property_version = value
    @schema_property("force")
    def force(self):
        return self._property_force
    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
    @schema_property("publishing_task")
    def publishing_task(self):
        return self._property_publishing_task
    @publishing_task.setter
    def publishing_task(self, value):
        if value is None:
            self._property_publishing_task = None
            return
        self.assert_isinstance(value, "publishing_task", six.string_types)
        self._property_publishing_task = value
| PublishVersionRequest |
python | doocs__leetcode | solution/3100-3199/3175.Find The First Player to win K Games in a Row/Solution.py | {
"start": 0,
"end": 374
} | class ____:
def findWinningPlayer(self, skills: List[int], k: int) -> int:
n = len(skills)
k = min(k, n - 1)
i = cnt = 0
for j in range(1, n):
if skills[i] < skills[j]:
i = j
cnt = 1
else:
cnt += 1
if cnt == k:
break
return i
| Solution |
python | sqlalchemy__sqlalchemy | examples/generic_associations/discriminator_on_association.py | {
"start": 1145,
"end": 1415
} | class ____(DeclarativeBase):
"""Base class which provides automated table name
and surrogate primary key column.
"""
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id: Mapped[int] = mapped_column(primary_key=True)
| Base |
python | sympy__sympy | sympy/polys/domains/powerseriesring.py | {
"start": 606,
"end": 2029
} | class ____(Protocol[Er]):
"""Generic protocol for power series rings."""
ring: PowerSeriesRingProto[TSeriesElement[Er], Er]
dtype: type[PowerSeriesElement[Er]]
domain: Domain[Er]
symbol: Expr
prec: int
one: PowerSeriesElement[Er]
zero: PowerSeriesElement[Er]
gen: PowerSeriesElement[Er]
def __init__(self, domain: Domain[Er], symbol: str | Expr = "x", prec: int = 6): ...
def __repr__(self) -> str: ...
def __eq__(self, other) -> bool: ...
def __hash__(self) -> int: ...
def is_element(self, element: object) -> TypeIs[PowerSeriesElement[Er]]: ...
def order_term(self) -> PowerSeriesElement[Er]: ...
def from_expr(self, expr: Expr) -> PowerSeriesElement[Er]: ...
def from_list(
self, lst: list[Er], prec: int | None = None
) -> PowerSeriesElement[Er]: ...
def from_element(self, element: TSeriesElement[Er]) -> PowerSeriesElement[Er]: ...
def from_int(self, arg: int) -> PowerSeriesElement[Er]: ...
def from_ground(self, arg: Er) -> PowerSeriesElement[Er]: ...
def to_expr(self, element: PowerSeriesElement[Er]) -> Expr: ...
def to_list(self, element: PowerSeriesElement[Er]) -> list[Er]: ...
def to_dense(self, element: PowerSeriesElement[Er]) -> dup[Er]: ...
def domain_new(self, arg: Er | int) -> Er: ...
def ring_new(self, arg: Expr | Er | int) -> PowerSeriesElement[Er]: ...
| SeriesRingProto |
python | apache__airflow | providers/mysql/tests/unit/mysql/hooks/test_mysql.py | {
"start": 18620,
"end": 19188
} | class ____:
def __init__(self, client):
self.client = client
self.connection = MySqlHook.get_connection(MySqlHook.default_conn_name)
self.init_client = self.connection.extra_dejson.get("client", "mysqlclient")
def __enter__(self):
self.connection.set_extra(f'{{"client": "{self.client}"}}')
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.set_extra(f'{{"client": "{self.init_client}"}}')
@pytest.mark.backend("mysql")
@pytest.mark.skipif(not MYSQL_AVAILABLE, reason="MySQL not available")
| MySqlContext |
python | ansible__ansible | test/units/module_utils/facts/test_collector.py | {
"start": 16083,
"end": 17445
} | class ____(unittest.TestCase):
def test_no_resolution(self):
unresolved = ['required_thing1', 'required_thing2']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
self.assertRaisesRegex(collector.UnresolvedFactDep,
'unresolved fact dep.*required_thing2',
collector.resolve_requires,
unresolved, all_fact_subsets)
def test(self):
unresolved = ['env', 'network']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.resolve_requires(unresolved, all_fact_subsets)
for goal in unresolved:
self.assertIn(goal, res)
def test_exception(self):
unresolved = ['required_thing1']
all_fact_subsets = {}
try:
collector.resolve_requires(unresolved, all_fact_subsets)
except collector.UnresolvedFactDep as exc:
self.assertIn(unresolved[0], '%s' % exc)
| TestResolveRequires |
python | getsentry__sentry | src/sentry/api/endpoints/internal_ea_features.py | {
"start": 477,
"end": 1426
} | class ____(Endpoint):
permission_classes = (OrganizationAdminPermission,)
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request) -> Response:
features_dict = features.all()
ea_org = Organization()
ea_org.flags.early_adopter = True
features_batch = features.batch_has(list(features_dict.keys()), organization=ea_org)
all_features_dict = (
features_batch.get(f"organization:{ea_org.id}", {}) if features_batch else {}
)
ea_features = list(filter(lambda key: all_features_dict[key], all_features_dict))
missing_from_self_hosted = [
feature for feature in ea_features if feature not in SENTRY_EARLY_FEATURES
]
return Response(
{"ea_features": ea_features, "missing_from_self_hosted": missing_from_self_hosted}
)
| InternalEAFeaturesEndpoint |
python | cython__cython | tests/run/for_in_iter.py | {
"start": 2153,
"end": 2527
} | class ____(object):
"""
>>> for_in_pyiter(Iterable(5))
[0, 1, 2, 3, 4]
"""
def __init__(self, N):
self.N = N
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i < self.N:
i = self.i
self.i += 1
return i
raise StopIteration
next = __next__
| Iterable |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/utils/config.py | {
"start": 5580,
"end": 7099
} | class ____(str, enum.Enum):
"""
The method to use for shuffling data between workers with the streaming executor.
* ``ShuffleMethod.TASKS`` : Use the task-based shuffler.
* ``ShuffleMethod.RAPIDSMPF`` : Use the rapidsmpf shuffler.
* ``ShuffleMethod._RAPIDSMPF_SINGLE`` : Use the single-process rapidsmpf shuffler.
With :class:`cudf_polars.utils.config.StreamingExecutor`, the default of ``None``
will attempt to use ``ShuffleMethod.RAPIDSMPF`` for a distributed cluster,
but will fall back to ``ShuffleMethod.TASKS`` if rapidsmpf is not installed.
The user should **not** specify ``ShuffleMethod._RAPIDSMPF_SINGLE`` directly.
A setting of ``ShuffleMethod.RAPIDSMPF`` will be converted to the single-process
shuffler automatically when using single-GPU execution.
"""
TASKS = "tasks"
RAPIDSMPF = "rapidsmpf"
_RAPIDSMPF_SINGLE = "rapidsmpf-single"
T = TypeVar("T")
def _make_default_factory(
key: str, converter: Callable[[str], T], *, default: T
) -> Callable[[], T]:
def default_factory() -> T:
v = os.environ.get(key)
if v is None:
return default
return converter(v)
return default_factory
def _bool_converter(v: str) -> bool:
lowered = v.lower()
if lowered in {"1", "true", "yes", "y"}:
return True
elif lowered in {"0", "false", "no", "n"}:
return False
else:
raise ValueError(f"Invalid boolean value: '{v}'")
@dataclasses.dataclass(frozen=True)
| ShuffleMethod |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/gcs/gcs_fake_resource.py | {
"start": 987,
"end": 1570
} | class ____:
def __init__(self, name: str):
from unittest import mock
self.name = name
self.blobs: dict[str, FakeGCSBlob] = {}
self.mock_extras = mock.MagicMock()
def blob(self, blob_name: str, *args, **kwargs):
self.mock_extras.blob(*args, **kwargs)
if blob_name not in self.blobs.keys():
self.blobs[blob_name] = FakeGCSBlob(name=blob_name, bucket=self)
return self.blobs[blob_name]
def exists(self, *args, **kwargs):
self.mock_extras.exists(*args, **kwargs)
return True
| FakeGCSBucket |
python | django__django | django/urls/exceptions.py | {
"start": 34,
"end": 73
} | class ____(Http404):
pass
| Resolver404 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple23.py | {
"start": 822,
"end": 1473
} | class ____(Generic[X, Y]):
left: X
right: Y
def lift(
f: Callable[[*Xs], tuple[*Ys]],
) -> Callable[[Tree[Z, tuple[*Xs]]], Tree[Z, tuple[*Ys]]]: ...
def test(
f: Callable[[X], Y],
) -> Callable[[Tree[Z, tuple[X, ...]]], Tree[Z, tuple[Y, ...]]]:
return lift(star(f))
def parallel(
f: Callable[[X], Y],
g: Callable[[*Xs], tuple[*Ys]],
) -> Callable[[X, *Xs], tuple[Y, *Ys]]:
def wrapped(a: X, *bs: *Xs):
return f(a), *g(*bs)
return wrapped
def identity(x: X) -> X:
return x
def parallel_identity(*xs: *Xs) -> tuple[*Xs]:
return xs
Shape = TypeVarTuple("Shape")
DType = TypeVar("DType")
| Tree |
python | getsentry__sentry | src/sentry/api/endpoints/organization_relay_usage.py | {
"start": 892,
"end": 2135
} | class ____(OrganizationEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
permission_classes = (OrganizationPermission,)
@extend_schema(
operation_id="List an Organization's trusted Relays",
parameters=[GlobalParams.ORG_ID_OR_SLUG],
request=None,
responses={
200: inline_sentry_response_serializer(
"OrganizationRelayResponse", list[OrganizationRelayResponse]
),
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationExamples.LIST_RELAYS,
)
def get(self, request: Request, organization: Organization) -> Response:
"""
Return a list of trusted relays bound to an organization.
"""
option_key = "sentry:trusted-relays"
trusted_relays = organization.get_option(option_key)
if trusted_relays is None or len(trusted_relays) == 0:
return Response([], status=200)
keys = [val.get("public_key") for val in trusted_relays]
relay_history = list(RelayUsage.objects.filter(public_key__in=keys).order_by("-last_seen"))
return Response(serialize(relay_history, request.user))
| OrganizationRelayUsage |
python | gevent__gevent | src/greentest/3.11/test_threading.py | {
"start": 40279,
"end": 45983
} | class ____(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@skip_unless_reliable_fork
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@skip_unless_reliable_fork
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
import test.test_threading as mod
while True:
with open(mod.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@skip_unless_reliable_fork
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@skip_unless_reliable_fork
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
| ThreadJoinOnShutdown |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py | {
"start": 54541,
"end": 62559
} | class ____(TestCase):
def setUp(self) -> None:
self._store = HashStore()
self._handlers = []
self._backend = _InMemoryRendezvousBackend()
def tearDown(self) -> None:
for handler in self._handlers:
handler._stop_heartbeats()
def _create_handler(self, **kwargs) -> DynamicRendezvousHandler:
params = {
"backend": self._backend.name,
"endpoint": "dummy_endpoint",
"run_id": "dummy_run_id",
"min_nodes": 2,
"max_nodes": 2,
"join_timeout": "5",
"local_addr": f"127.0.0.{len(self._handlers)}",
}
params.update(**kwargs)
rzdv_params = RendezvousParameters(**params)
handler = create_handler(self._store, self._backend, rzdv_params)
self._handlers.append(handler)
return handler
def test_all_nodes_join_rendezvous(self) -> None:
handler1 = self._create_handler(min_nodes=2, max_nodes=2)
handler2 = self._create_handler(min_nodes=2, max_nodes=2)
handler1_thread = _CapturingThread(target=handler1.next_rendezvous)
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler1_thread.start()
handler2_thread.start()
rdzv_info1: RendezvousInfo = handler1_thread.join()
rdzv_info2: RendezvousInfo = handler2_thread.join()
self.assertEqual(rdzv_info1.store.underlying_store, self._store)
self.assertEqual(rdzv_info2.store.underlying_store, self._store)
self.assertNotEqual(rdzv_info1.rank, rdzv_info2.rank)
self.assertEqual(rdzv_info1.world_size, 2)
self.assertEqual(rdzv_info2.world_size, 2)
def test_redundancy_list(self) -> None:
handler1 = self._create_handler(min_nodes=2, max_nodes=2)
handler2 = self._create_handler(min_nodes=2, max_nodes=2)
handler3 = self._create_handler(min_nodes=2, max_nodes=2)
handler1_thread = _CapturingThread(target=handler1.next_rendezvous)
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler3_thread = _CapturingThread(
target=_ignore_exception,
args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
)
handler1_thread.start()
handler2_thread.start()
# establish successful rendezvous
handler1_thread.join()
handler2_thread.join()
# expect to register in redundancy list
handler3_thread.start()
# wait until the handler3 is registered in the redundancy list
_wait_for(lambda: pickle.loads(self._backend.get_state()[0]).redundancy_list)
state_and_token = self._backend.get_state()
state = pickle.loads(state_and_token[0])
addresses = [node.addr for node in state.redundancy_list]
self.assertListEqual(addresses, ["127.0.0.2"])
def test_redundancy_transition_to_wait_list_then_join_rendezvous(self) -> None:
handler1 = self._create_handler(
min_nodes=1,
max_nodes=2,
)
handler2 = self._create_handler(
min_nodes=1,
max_nodes=2,
keep_alive_interval=timedelta(seconds=1),
)
handler3 = self._create_handler(
min_nodes=1,
max_nodes=2,
)
handler1_thread = _CapturingThread(target=handler1.next_rendezvous)
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler3_thread = _CapturingThread(
target=_ignore_exception,
args=(RendezvousTimeoutError, lambda: handler3.next_rendezvous()),
)
handler1_thread.start()
handler2_thread.start()
# establish successful rendezvous
handler1_thread.join()
handler2_thread.join()
handler3_thread.start()
_wait_for(lambda: pickle.loads(self._backend.get_state()[0]).redundancy_list)
handler2._stop_heartbeats()
_wait_for(
lambda: len(pickle.loads(self._backend.get_state()[0]).participants) == 1
)
_wait_for(
lambda: len(pickle.loads(self._backend.get_state()[0]).wait_list) == 1
)
def test_use_agent_store_is_true_by_default(self):
handler = self._create_handler(
min_nodes=1,
max_nodes=2,
)
self.assertTrue(handler.use_agent_store)
@patch.dict(os.environ, {"TORCH_DISABLE_SHARE_RDZV_TCP_STORE": "1"})
def test_use_agent_store_is_disabled(self):
handler = self._create_handler(
min_nodes=1,
max_nodes=2,
)
self.assertFalse(handler.use_agent_store)
@patch.object(dist, "PrefixStore")
def test_share_tcp_store_from_backend(self, prefix_store_class_mock):
expected_addr = "expected_address"
expected_port = 54231
class CustomPrefixStore(Mock):
def get(self, key):
return (
expected_addr.encode("utf-8")
if key == "MASTER_ADDR"
else bytes(str(expected_port), "utf-8")
)
def set(self, key, value):
pass
prefix_store = CustomPrefixStore(spec=dist.PrefixStore)
prefix_store_class_mock.return_value = prefix_store
tcp_store = Mock(spec=dist.TCPStore)
original_addr = "original_addr"
original_port = TEST_PORT
type(tcp_store).host = PropertyMock(return_value=original_addr)
type(tcp_store).port = PropertyMock(return_value=original_port)
# this will be injected
self._store = tcp_store
handler1 = self._create_handler(min_nodes=2, max_nodes=2)
handler2 = self._create_handler(min_nodes=2, max_nodes=2)
handler1_thread = _CapturingThread(target=handler1.next_rendezvous)
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler1_thread.start()
handler2_thread.start()
rdzv_info1: RendezvousInfo = handler1_thread.join()
rdzv_info2: RendezvousInfo = handler2_thread.join()
self.assertEqual(rdzv_info1.store, prefix_store)
self.assertEqual(rdzv_info2.store, prefix_store)
prefix_store_class_mock.assert_called_with(
"torch.rendezvous.dummy_run_id.0", tcp_store
)
self.assertEqual(
rdzv_info1.bootstrap_store_info, rdzv_info2.bootstrap_store_info
)
self.assertEqual(rdzv_info1.bootstrap_store_info.master_addr, expected_addr)
self.assertEqual(rdzv_info1.bootstrap_store_info.master_port, expected_port)
@patch.dict(os.environ, {"TORCH_DISABLE_SHARE_RDZV_TCP_STORE": "1"})
@patch.object(dist, "PrefixStore")
def test_share_tcp_store_is_disabled(self, prefix_store_class_mock):
prefix_store = Mock()
prefix_store_class_mock.return_value = prefix_store
prefix_store.set.return_value = None
prefix_store.get.return_value = b"123"
tcp_store = Mock(spec=dist.TCPStore)
# this will be injected
self._store = tcp_store
handler1 = self._create_handler(min_nodes=2, max_nodes=2)
handler2 = self._create_handler(min_nodes=2, max_nodes=2)
handler1_thread = _CapturingThread(target=handler1.next_rendezvous)
handler2_thread = _CapturingThread(target=handler2.next_rendezvous)
handler1_thread.start()
handler2_thread.start()
rdzv_info1: RendezvousInfo = handler1_thread.join()
rdzv_info2: RendezvousInfo = handler2_thread.join()
self.assertEqual(rdzv_info1.store, prefix_store)
self.assertEqual(rdzv_info2.store, prefix_store)
prefix_store_class_mock.assert_called_with(
"torch.rendezvous.dummy_run_id.0", self._store
)
self.assertEqual(rdzv_info1.bootstrap_store_info.master_port, 123)
self.assertEqual(rdzv_info2.bootstrap_store_info.master_port, 123)
| IntegrationTest |
python | py-pdf__pypdf | pypdf/_codecs/_codecs.py | {
"start": 293,
"end": 787
} | class ____(ABC):
"""Abstract base class for all codecs."""
@abstractmethod
def encode(self, data: bytes) -> bytes:
"""
Encode the input data.
Args:
data: Data to encode.
Returns:
Encoded data.
"""
@abstractmethod
def decode(self, data: bytes) -> bytes:
"""
Decode the input data.
Args:
data: Data to decode.
Returns:
Decoded data.
"""
| Codec |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 5842,
"end": 10443
} | class ____(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters)
# are in the right unit system
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
| UnsupportedConstraintError |
python | getsentry__sentry | src/sentry/migrations/0949_add_dashboard_widget_snapshot_model.py | {
"start": 302,
"end": 2201
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0948_ds_waiver_org_fk_not_db_constr"),
]
operations = [
migrations.CreateModel(
name="DashboardWidgetSnapshot",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("data", sentry.db.models.fields.jsonfield.JSONField(default=dict)),
(
"widget",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.dashboardwidget"
),
),
],
options={
"abstract": False,
},
),
]
| Migration |
python | realpython__materials | queue/src/thread_safe_queues.py | {
"start": 793,
"end": 916
} | class ____:
priority: int
label: str = field(compare=False)
def __str__(self):
return self.label
| Product |
python | bokeh__bokeh | tests/unit/bokeh/plotting/test_figure.py | {
"start": 13527,
"end": 23391
} | class ____:
def test_returns_renderers(self) -> None:
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ["2015", "2016", "2017"]
colors = ["#c9d9d3", "#718dbf", "#e84d60"]
data = {'fruits' : fruits,
'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 4, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
source = ColumnDataSource(data=data)
p = bpf.figure()
renderers = p.vbar_stack(years, x='fruits', width=0.9, color=colors, source=source,
legend_label=years, name=years)
assert len(renderers) == 3
assert renderers[0].name == "2015"
assert renderers[1].name == "2016"
assert renderers[2].name == "2017"
def Test_figure_legends_DEPRECATED(object):
def test_glyph_label_is_legend_if_column_in_datasource_is_added_as_legend(self, p, source) -> None:
p.scatter(x='x', y='y', legend='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'field': 'label'}
def test_glyph_label_is_value_if_column_not_in_datasource_is_added_as_legend(self, p, source) -> None:
p.scatter(x='x', y='y', legend='milk', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'value': 'milk'}
def test_glyph_label_is_legend_if_column_in_df_datasource_is_added_as_legend(self, p) -> None:
pd = pytest.importorskip("pandas")
source = pd.DataFrame(data=dict(x=[1, 2, 3], y=[1, 2, 3], label=['a', 'b', 'c']))
p.scatter(x='x', y='y', legend='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'field': 'label'}
def test_glyph_label_is_value_if_column_not_in_df_datasource_is_added_as_legend(self, p) -> None:
pd = pytest.importorskip("pandas")
source = pd.DataFrame(data=dict(x=[1, 2, 3], y=[1, 2, 3], label=['a', 'b', 'c']))
p.scatter(x='x', y='y', legend='milk', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'value': 'milk'}
def test_glyph_label_is_just_added_directly_if_not_string(self, p, source) -> None:
p.scatter(x='x', y='y', legend={'field': 'milk'}, source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'field': 'milk'}
def test_no_legend_if_legend_is_none(self, p, source) -> None:
p.scatter(x='x', y='y', legend=None, source=source)
legends = p.select(Legend)
assert len(legends) == 0
def test_legend_added_when_legend_set(self, p, source) -> None:
renderer = p.scatter(x='x', y='y', legend='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [renderer]
def test_legend_not_added_when_no_legend(self, p, source) -> None:
p.scatter(x='x', y='y', source=source)
legends = p.select(Legend)
assert len(legends) == 0
def test_adding_legend_doesnt_work_when_legends_already_added(self, p, source) -> None:
p.add_layout(Legend())
p.add_layout(Legend())
with pytest.raises(RuntimeError):
p.scatter(x='x', y='y', legend='label', source=source)
def test_multiple_renderers_correctly_added_to_legend(self, p, source) -> None:
square = p.scatter(x='x', y='y', marker="square", legend='square', source=source)
circle = p.scatter(x='x', y='y', marker="circle", legend='circle', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square]
assert legends[0].items[0].label == value('square')
assert legends[0].items[1].renderers == [circle]
assert legends[0].items[1].label == value('circle')
def test_compound_legend_behavior_initiated_if_labels_are_same_on_multiple_renderers(self, p, source) -> None:
# 'compound legend string' is just a value
square = p.scatter(x='x', y='y', marker="square", legend='compound legend string')
circle = p.scatter(x='x', y='y', marker="circle", legend='compound legend string')
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square, circle]
assert legends[0].items[0].label == value('compound legend string')
def test_compound_legend_behavior_initiated_if_labels_are_same_on_multiple_renderers_and_are_field(self, p, source) -> None:
# label is a field
square = p.scatter(x='x', y='y', marker="square", legend='label', source=source)
circle = p.scatter(x='x', y='y', marker="circle", legend='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square, circle]
assert legends[0].items[0].label == {'field': 'label'}
def Test_figure_legends(object):
def test_glyph_legend_field(self, p, source) -> None:
p.scatter(x='x', y='y', legend_field='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].label == {'field': 'label'}
def test_no_legend_if_legend_is_none(self, p, source) -> None:
p.scatter(x='x', y='y', legend_label=None, source=source)
legends = p.select(Legend)
assert len(legends) == 0
def test_legend_added_when_legend_set(self, p, source) -> None:
renderer = p.scatter(x='x', y='y', legend_label='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [renderer]
def test_legend_not_added_when_no_legend(self, p, source) -> None:
p.scatter(x='x', y='y', source=source)
legends = p.select(Legend)
assert len(legends) == 0
def test_adding_legend_doesnt_work_when_legends_already_added(self, p, source) -> None:
p.add_layout(Legend())
p.add_layout(Legend())
with pytest.raises(RuntimeError):
p.scatter(x='x', y='y', legend_label='label', source=source)
with pytest.raises(RuntimeError):
p.scatter(x='x', y='y', legend_field='label', source=source)
with pytest.raises(RuntimeError):
p.scatter(x='x', y='y', legend_group='label', source=source)
def test_multiple_renderers_correctly_added_to_legend(self, p, source) -> None:
square = p.scatter(x='x', y='y', marker="square", legend_label='square', source=source)
circle = p.scatter(x='x', y='y', marker="circle", legend_label='circle', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square]
assert legends[0].items[0].label == value('square')
assert legends[0].items[1].renderers == [circle]
assert legends[0].items[1].label == value('circle')
def test_compound_legend_behavior_initiated_if_labels_are_same_on_multiple_renderers(self, p, source) -> None:
# 'compound legend string' is just a value
square = p.scatter(x='x', y='y', marker="square", legend_label='compound legend string')
circle = p.scatter(x='x', y='y', marker="circle", legend_label='compound legend string')
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square, circle]
assert legends[0].items[0].label == value('compound legend string')
def test_compound_legend_behavior_initiated_if_labels_are_same_on_multiple_renderers_and_are_field(self, p, source) -> None:
# label is a field
square = p.scatter(x='x', y='y', marker="square", legend_field='label', source=source)
circle = p.scatter(x='x', y='y', marker="circle", legend_field='label', source=source)
legends = p.select(Legend)
assert len(legends) == 1
assert legends[0].items[0].renderers == [square, circle]
assert legends[0].items[0].label == {'field': 'label'}
# XXX (bev) this doesn't work yet because compound behaviour depends on renderer sources
# matching, but passing a df means every renderer gets its own new source
# def test_compound_legend_behavior_initiated_if_labels_are_same_on_multiple_renderers_and_are_field_with_df_source(self, p) -> None:
# source = pd.DataFrame(data=dict(x=[1, 2, 3], y=[1, 2, 3], label=['a', 'b', 'c']))
# # label is a field
# square = p.scatter(x='x', y='y', marker="square", legend_label='label', source=source)
# circle = p.scatter(x='x', y='y', marker="circle", legend_label='label', source=source)
# legends = p.select(Legend)
# assert len(legends) == 1
# print(legends[0].items[0].renderers)
# assert legends[0].items[0].renderers == [square, circle]
# assert legends[0].items[0].label == {'field': 'label'}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@pytest.fixture
def source():
return ColumnDataSource(dict(x=[1, 2, 3], y=[1, 2, 3], label=['a', 'b', 'c']))
@pytest.fixture
def p():
return bpf.figure()
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_vbar_stack |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition_group.py | {
"start": 9539,
"end": 11712
} | class ____(TestEvaluationConditionCase):
def setUp(self) -> None:
super().setUp()
self.data_condition_group.logic_type = DataConditionGroup.Type.ALL
def test_evaluate_data_conditions__passes_all(self) -> None:
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(10), self.data_condition_group.logic_type
)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition,
result=DetectorPriorityLevel.HIGH,
),
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition_two,
result=DetectorPriorityLevel.LOW,
),
],
)
assert result == expected_result
def test_evaluate_data_conditions__passes_one(self) -> None:
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(4), self.data_condition_group.logic_type
)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.FALSE,
condition_results=[],
)
assert result == expected_result
def test_evaluate_data_conditions__fails_all(self) -> None:
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(1), self.data_condition_group.logic_type
)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.FALSE,
condition_results=[],
)
assert result == expected_result
def test_evaluate_data_conditions__passes_without_conditions(self) -> None:
result = evaluate_data_conditions([], self.data_condition_group.logic_type)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[],
)
assert result == expected_result
| TestEvaluateConditionGroupTypeAll |
python | doocs__leetcode | solution/3000-3099/3074.Apple Redistribution into Boxes/Solution.py | {
"start": 0,
"end": 260
} | class ____:
def minimumBoxes(self, apple: List[int], capacity: List[int]) -> int:
capacity.sort(reverse=True)
s = sum(apple)
for i, c in enumerate(capacity, 1):
s -= c
if s <= 0:
return i
| Solution |
python | chroma-core__chroma | chromadb/types.py | {
"start": 7521,
"end": 7628
} | class ____(Enum):
ADD = "ADD"
UPDATE = "UPDATE"
UPSERT = "UPSERT"
DELETE = "DELETE"
| Operation |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/asb.py | {
"start": 9402,
"end": 15589
} | class ____(BaseOperator):
"""
Create an Azure Service Bus Topic under a Service Bus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusTopicCreateOperator`
:param topic_name: Name of the topic.
:param default_message_time_to_live: ISO 8601 default message time span to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
:param requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:param duplicate_detection_history_time_window: ISO 8601 time span structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:param size_in_bytes: The size of the topic, in bytes.
:param filtering_messages_before_publishing: Filter messages before publishing.
:param authorization_rules: List of Authorization rules for resource.
:param support_ordering: A value that indicates whether the topic supports ordering.
:param auto_delete_on_idle: ISO 8601 time span idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:param enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:param user_metadata: Metadata associated with the topic.
:param max_message_size_in_kilobytes: The maximum size in kilobytes of message payload that
can be accepted by the queue. This feature is only available when using a Premium namespace
and Service Bus API version "2021-05" or higher.
The minimum allowed value is 1024 while the maximum allowed value is 102400. Default value is 1024.
"""
template_fields: Sequence[str] = ("topic_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
default_message_time_to_live: datetime.timedelta | str | None = None,
max_size_in_megabytes: int | None = None,
requires_duplicate_detection: bool | None = None,
duplicate_detection_history_time_window: datetime.timedelta | str | None = None,
enable_batched_operations: bool | None = None,
size_in_bytes: int | None = None,
filtering_messages_before_publishing: bool | None = None,
authorization_rules: list[AuthorizationRule] | None = None,
support_ordering: bool | None = None,
auto_delete_on_idle: datetime.timedelta | str | None = None,
enable_partitioning: bool | None = None,
enable_express: bool | None = None,
user_metadata: str | None = None,
max_message_size_in_kilobytes: int | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
self.default_message_time_to_live = default_message_time_to_live
self.max_size_in_megabytes = max_size_in_megabytes
self.requires_duplicate_detection = requires_duplicate_detection
self.duplicate_detection_history_time_window = duplicate_detection_history_time_window
self.enable_batched_operations = enable_batched_operations
self.size_in_bytes = size_in_bytes
self.filtering_messages_before_publishing = filtering_messages_before_publishing
self.authorization_rules = authorization_rules
self.support_ordering = support_ordering
self.auto_delete_on_idle = auto_delete_on_idle
self.enable_partitioning = enable_partitioning
self.enable_express = enable_express
self.user_metadata = user_metadata
self.max_message_size_in_kilobytes = max_message_size_in_kilobytes
def execute(self, context: Context) -> str:
"""Create Topic in Service Bus namespace, by connecting to Service Bus Admin client."""
if self.topic_name is None:
raise TypeError("Topic name cannot be None.")
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
return hook.create_topic(
topic_name=self.topic_name,
default_message_time_to_live=self.default_message_time_to_live,
max_size_in_megabytes=self.max_size_in_megabytes,
requires_duplicate_detection=self.requires_duplicate_detection,
duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,
enable_batched_operations=self.enable_batched_operations,
size_in_bytes=self.size_in_bytes,
filtering_messages_before_publishing=self.filtering_messages_before_publishing,
authorization_rules=self.authorization_rules,
support_ordering=self.support_ordering,
auto_delete_on_idle=self.auto_delete_on_idle,
enable_partitioning=self.enable_partitioning,
enable_express=self.enable_express,
user_metadata=self.user_metadata,
max_message_size_in_kilobytes=self.max_message_size_in_kilobytes,
)
| AzureServiceBusTopicCreateOperator |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 137548,
"end": 141135
} | class ____(CythonTransform):
# In addition to marking closures this is also responsible to finding parts of the
# generator iterable and marking them
def visit_ModuleNode(self, node):
self.needs_closure = False
self.excludes = []
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
collector = YieldNodeCollector(self.excludes)
collector.visitchildren(node)
if node.is_async_def:
coroutine_type = Nodes.AsyncDefNode
if collector.has_yield:
coroutine_type = Nodes.AsyncGenNode
for yield_expr in collector.yields + collector.returns:
yield_expr.in_async_gen = True
elif self.current_directives['iterable_coroutine']:
coroutine_type = Nodes.IterableAsyncDefNode
elif collector.has_await:
found = next(y for y in collector.yields if y.is_await)
error(found.pos, "'await' not allowed in generators (use 'yield')")
return node
elif collector.has_yield:
coroutine_type = Nodes.GeneratorDefNode
else:
return node
for i, yield_expr in enumerate(collector.yields, 1):
yield_expr.label_num = i
for retnode in collector.returns + collector.finallys + collector.excepts:
retnode.in_generator = True
gbody = Nodes.GeneratorBodyDefNode(
pos=node.pos, name=node.name, body=node.body,
is_coroutine_body=node.is_async_def,
is_async_gen_body=node.is_async_def and collector.has_yield)
coroutine = coroutine_type(
pos=node.pos, name=node.name, args=node.args,
star_arg=node.star_arg, starstar_arg=node.starstar_arg,
doc=node.doc, decorators=node.decorators,
gbody=gbody, lambda_name=node.lambda_name,
return_type_annotation=node.return_type_annotation,
is_generator_expression=node.is_generator_expression)
return coroutine
def visit_CFuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
if node.needs_closure and node.overridable:
error(node.pos, "closures inside cpdef functions not yet supported")
return node
def visit_LambdaNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
def visit_ClassDefNode(self, node):
self.visitchildren(node)
self.needs_closure = True
return node
def visit_GeneratorExpressionNode(self, node):
excludes = self.excludes
if isinstance(node.loop, Nodes._ForInStatNode):
self.excludes = [node.loop.iterator]
node = self.visit_LambdaNode(node)
self.excludes = excludes
if not isinstance(node.loop, Nodes._ForInStatNode):
# Possibly should handle ForFromStatNode
# but for now do nothing
return node
itseq = node.loop.iterator.sequence
# literals do not need replacing with an argument
if itseq.is_literal:
return node
_GeneratorExpressionArgumentsMarker(node).visit(itseq)
return node
| MarkClosureVisitor |
python | neetcode-gh__leetcode | python/0028-find-the-index-of-the-first-occurrence-in-a-string.py | {
"start": 0,
"end": 881
} | class ____:
def strStr(self, haystack: str, needle: str) -> int:
if needle == "":
return 0
lps = [0] * len(needle)
prevLPS, i = 0, 1
while i < len(needle):
if needle[i] == needle[prevLPS]:
lps[i] = prevLPS + 1
prevLPS += 1
i += 1
elif prevLPS == 0:
lps[i] = 0
i += 1
else:
prevLPS = lps[prevLPS - 1]
i = 0 # ptr for haystack
j = 0 # ptr for needle
while i < len(haystack):
if haystack[i] == needle[j]:
i, j = i + 1, j + 1
else:
if j == 0:
i += 1
else:
j = lps[j - 1]
if j == len(needle):
return i - len(needle)
return -1
| Solution |
python | huggingface__transformers | src/transformers/models/flaubert/modeling_flaubert.py | {
"start": 31776,
"end": 43602
} | class ____(FlaubertPreTrainedModel):
def __init__(self, config): # , dico, is_encoder, with_output):
super().__init__(config)
# encoder / decoder, output layer
self.is_encoder = config.is_encoder
self.is_decoder = not config.is_encoder
if self.is_decoder:
raise NotImplementedError("Currently Flaubert can only be used as an encoder")
# self.with_output = with_output
self.causal = config.causal
# dictionary / languages
self.n_langs = config.n_langs
self.use_lang_emb = config.use_lang_emb
self.n_words = config.n_words
self.eos_index = config.eos_index
self.pad_index = config.pad_index
# self.dico = dico
# self.id2lang = config.id2lang
# self.lang2id = config.lang2id
# assert len(self.dico) == self.n_words
# assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = config.emb_dim # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = config.n_heads # 8 by default
self.n_layers = config.n_layers
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
# embeddings
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
if config.n_langs > 1 and config.use_lang_emb:
self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# transformer layers
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
# if self.is_decoder:
# self.layer_norm15 = nn.ModuleList()
# self.encoder_attn = nn.ModuleList()
for i in range(self.n_layers):
self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config, layer_idx=i))
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# if self.is_decoder:
# self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# Initialize weights and apply final processing
self.post_init()
self.layerdrop = getattr(config, "layerdrop", 0.0)
self.pre_norm = getattr(config, "pre_norm", False)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
# Copied from transformers.models.xlm.modeling_xlm.XLMModel.get_input_embeddings
def get_input_embeddings(self):
return self.embeddings
# Copied from transformers.models.xlm.modeling_xlm.XLMModel.set_input_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
lengths: Optional[torch.LongTensor] = None,
cache: Optional[dict[str, torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# removed: src_enc=None, src_len=None
if input_ids is not None:
bs, slen = input_ids.size()
else:
bs, slen = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if cache is None:
cache = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if lengths is None:
if input_ids is not None:
lengths = (input_ids != self.pad_index).sum(dim=1).long()
else:
lengths = torch.tensor([slen] * bs, device=device)
# mask = input_ids != self.pad_index
# check inputs
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
# Setting the position-ids to the registered buffer in constructor, it helps
# when tracing the model without passing position-ids, solves
# issues similar to issue #5664
if position_ids is None:
if hasattr(self, "position_ids"):
position_ids = self.position_ids[:, :slen]
position_ids = position_ids.expand((bs, slen))
else:
position_ids = torch.arange(slen, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand((bs, slen))
else:
assert position_ids.size() == (bs, slen) # (slen, bs)
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (bs, slen) # (slen, bs)
# langs = langs.transpose(0, 1)
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache.get_seq_length()
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
if langs is not None and self.use_lang_emb and self.config.n_langs > 1:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# transformer layers
hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
for i in range(self.n_layers):
# LayerDrop
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
if not self.pre_norm:
attn_outputs = self.attentions[i](
tensor,
attn_mask,
cache=cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
attn = attn_outputs[0]
if output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
else:
tensor_normalized = self.layer_norm1[i](tensor)
attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache[i])
attn = attn_outputs[0]
if output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
# FFN
if not self.pre_norm:
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
else:
tensor_normalized = self.layer_norm2[i](tensor)
tensor = tensor + self.ffns[i](tensor_normalized)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# Add last hidden state
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
if not return_dict:
return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
@auto_docstring(
custom_intro="""
The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
| FlaubertModel |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 33448,
"end": 33717
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"COLLABORATORS_ONLY",
"CONTRIBUTORS_ONLY",
"EXISTING_USERS",
"NO_LIMIT",
)
| RepositoryInteractionLimit |
python | kamyu104__LeetCode-Solutions | Python/maximize-the-minimum-game-score.py | {
"start": 66,
"end": 1071
} | class ____(object):
def maxScore(self, points, m):
"""
:type points: List[int]
:type m: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def binary_search_right(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def check(x):
cnt = prev = 0
for i in xrange(len(points)):
remain = ceil_divide(x, points[i])-prev
if remain >= 1:
prev = remain-1
cnt += 2*remain-1
elif i != len(points)-1:
prev = 0
cnt += 1
if cnt > m:
return False
return True
return binary_search_right(1, max(points)*m, check)
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_legend05.py | {
"start": 315,
"end": 1419
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_legend05.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with legend options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [79973376, 84140800]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_legend({"border": {"color": "#4F81BD"}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 989329,
"end": 989741
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("StatusCheckRollupContext", graphql_name="node")
"""The item at the end of the edge."""
| StatusCheckRollupContextEdge |
python | astropy__astropy | astropy/coordinates/angles/core.py | {
"start": 24468,
"end": 28366
} | class ____(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
The angle value(s). If a tuple, will be interpreted as ``(h, m s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude) or (
isinstance(angle, str) and angle.endswith(("N", "S"))
):
raise TypeError(
"A Longitude angle cannot be created from a Latitude angle."
)
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, "wrap_angle", self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, "_wrap_angle", self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
| Longitude |
python | scikit-learn__scikit-learn | sklearn/covariance/_graph_lasso.py | {
"start": 25102,
"end": 40906
} | class ____(BaseGraphicalLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
.. versionchanged:: v0.20
GraphLassoCV has been renamed to GraphicalLassoCV
Parameters
----------
alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details. Range is [1, inf) for an integer.
Range is (0, inf] for an array-like of floats.
n_refinements : int, default=4
The number of times the grid is refined. Not used if explicit
values of alphas are passed. Range is [1, inf).
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.20
``cv`` default value if None changed from 3-fold to 5-fold.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. Range is (0, inf].
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. Range is (0, inf].
max_iter : int, default=100
Maximum number of iterations.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
verbose : bool, default=False
If verbose is True, the objective function and duality gap are
printed at each iteration.
eps : float, default=eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Default is `np.finfo(np.float64).eps`.
.. versionadded:: 1.3
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
costs_ : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
.. versionadded:: 1.3
alpha_ : float
Penalization parameter selected.
cv_results_ : dict of ndarrays
A dict with keys:
alphas : ndarray of shape (n_alphas,)
All penalization parameters explored.
split(k)_test_score : ndarray of shape (n_alphas,)
Log-likelihood score on left-out data across (k)th fold.
.. versionadded:: 1.0
mean_test_score : ndarray of shape (n_alphas,)
Mean of scores over the folds.
.. versionadded:: 1.0
std_test_score : ndarray of shape (n_alphas,)
Standard deviation of scores over the folds.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run for the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
graphical_lasso : L1-penalized covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
Notes
-----
The search for the optimal penalization parameter (`alpha`) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of `alpha` then come out as missing values, but the optimum may
be close to these missing values.
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import GraphicalLassoCV
>>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.2, 0.0, 0.3, 0.1],
... [0.0, 0.0, 0.1, 0.7]])
>>> np.random.seed(0)
>>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
... cov=true_cov,
... size=200)
>>> cov = GraphicalLassoCV().fit(X)
>>> np.around(cov.covariance_, decimals=3)
array([[0.816, 0.051, 0.22 , 0.017],
[0.051, 0.364, 0.018, 0.036],
[0.22 , 0.018, 0.322, 0.094],
[0.017, 0.036, 0.094, 0.69 ]])
>>> np.around(cov.location_, decimals=3)
array([0.073, 0.04 , 0.038, 0.143])
For an example comparing :class:`sklearn.covariance.GraphicalLassoCV`,
:func:`sklearn.covariance.ledoit_wolf` shrinkage and the empirical covariance
on high-dimensional gaussian data, see
:ref:`sphx_glr_auto_examples_covariance_plot_sparse_cov.py`.
"""
_parameter_constraints: dict = {
**BaseGraphicalLasso._parameter_constraints,
"alphas": [Interval(Integral, 0, None, closed="left"), "array-like"],
"n_refinements": [Interval(Integral, 1, None, closed="left")],
"cv": ["cv_object"],
"n_jobs": [Integral, None],
}
def __init__(
self,
*,
alphas=4,
n_refinements=4,
cv=None,
tol=1e-4,
enet_tol=1e-4,
max_iter=100,
mode="cd",
n_jobs=None,
verbose=False,
eps=np.finfo(np.float64).eps,
assume_centered=False,
):
super().__init__(
tol=tol,
enet_tol=enet_tol,
max_iter=max_iter,
mode=mode,
verbose=verbose,
eps=eps,
assume_centered=assume_centered,
)
self.alphas = alphas
self.n_refinements = n_refinements
self.cv = cv
self.n_jobs = n_jobs
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None, **params):
"""Fit the GraphicalLasso covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
y : Ignored
Not used, present for API consistency by convention.
**params : dict, default=None
Parameters to be passed to the CV splitter and the
cross_val_score function.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
# Covariance does not make sense for a single feature
_raise_for_params(params, self, "fit")
X = validate_data(self, X, ensure_min_features=2)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if _is_arraylike_not_scalar(n_alphas):
for alpha in self.alphas:
check_scalar(
alpha,
"alpha",
Real,
min_val=0,
max_val=np.inf,
include_boundaries="right",
)
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(splitter=Bunch(split={}))
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter("ignore", ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graphical_lasso_path has been tried,
# and this did not allow to gain anything
# (same execution time with or without).
this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(graphical_lasso_path)(
X[train],
alphas=alphas,
X_test=X[test],
mode=self.mode,
tol=self.tol,
enet_tol=self.enet_tol,
max_iter=int(0.1 * self.max_iter),
verbose=inner_verbose,
eps=self.eps,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
# Little danse to transform the list in what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= 0.1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif best_index == last_finite_idx and not best_index == len(path) - 1:
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not _is_arraylike_not_scalar(n_alphas):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print(
"[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
% (i + 1, n_refinements, time.time() - t0)
)
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(
cross_val_score(
EmpiricalCovariance(),
X,
cv=cv,
n_jobs=self.n_jobs,
verbose=inner_verbose,
params=params,
)
)
grid_scores = np.array(grid_scores)
self.cv_results_ = {"alphas": np.array(alphas)}
for i in range(grid_scores.shape[1]):
self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]
self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
emp_cov,
alpha=best_alpha,
mode=self.mode,
tol=self.tol,
enet_tol=self.enet_tol,
max_iter=self.max_iter,
verbose=inner_verbose,
eps=self.eps,
)
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
splitter=check_cv(self.cv),
method_mapping=MethodMapping().add(callee="split", caller="fit"),
)
return router
| GraphicalLassoCV |
python | Textualize__textual | src/textual/widgets/_tabbed_content.py | {
"start": 4630,
"end": 6939
} | class ____(Widget):
"""A container for switchable content, with additional title.
This widget is intended to be used with [TabbedContent][textual.widgets.TabbedContent].
"""
DEFAULT_CSS = """
TabPane {
height: auto;
}
"""
@dataclass
class TabPaneMessage(Message):
"""Base class for `TabPane` messages."""
tab_pane: TabPane
"""The `TabPane` that is he object of this message."""
@property
def control(self) -> TabPane:
"""The tab pane that is the object of this message.
This is an alias for the attribute `tab_pane` and is used by the
[`on`][textual.on] decorator.
"""
return self.tab_pane
@dataclass
class Disabled(TabPaneMessage):
"""Sent when a tab pane is disabled via its reactive `disabled`."""
@dataclass
class Enabled(TabPaneMessage):
"""Sent when a tab pane is enabled via its reactive `disabled`."""
@dataclass
class Focused(TabPaneMessage):
"""Sent when a child widget is focused."""
def __init__(
self,
title: ContentType,
*children: Widget,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
):
"""Initialize a TabPane.
Args:
title: Title of the TabPane (will be displayed in a tab label).
*children: Widget to go inside the TabPane.
name: Optional name for the TabPane.
id: Optional ID for the TabPane.
classes: Optional initial classes for the widget.
disabled: Whether the TabPane is disabled or not.
"""
self._title = self.render_str(title)
super().__init__(
*children, name=name, id=id, classes=classes, disabled=disabled
)
def _watch_disabled(self, disabled: bool) -> None:
"""Notify the parent `TabbedContent` that a tab pane was enabled/disabled."""
self.post_message(self.Disabled(self) if disabled else self.Enabled(self))
def _on_descendant_focus(self, event: events.DescendantFocus):
"""Tell TabbedContent parent something is focused in this pane."""
self.post_message(self.Focused(self))
| TabPane |
python | networkx__networkx | networkx/classes/tests/dispatch_interface.py | {
"start": 1039,
"end": 1610
} | class ____(PlanarEmbedding):
__networkx_backend__ = "nx_loopback"
def convert(graph):
if isinstance(graph, PlanarEmbedding):
return LoopbackPlanarEmbedding(graph)
if isinstance(graph, MultiDiGraph):
return LoopbackMultiDiGraph(graph)
if isinstance(graph, MultiGraph):
return LoopbackMultiGraph(graph)
if isinstance(graph, DiGraph):
return LoopbackDiGraph(graph)
if isinstance(graph, Graph):
return LoopbackGraph(graph)
raise TypeError(f"Unsupported type of graph: {type(graph)}")
| LoopbackPlanarEmbedding |
python | Textualize__textual | src/textual/widgets/_select.py | {
"start": 707,
"end": 934
} | class ____:
"""Used by the `Select` widget to flag the unselected state. See [`Select.BLANK`][textual.widgets.Select.BLANK]."""
def __repr__(self) -> str:
return "Select.BLANK"
BLANK = NoSelection()
| NoSelection |
python | coleifer__peewee | peewee.py | {
"start": 90286,
"end": 90712
} | class ____(_WriteQuery):
def __sql__(self, ctx):
super(Delete, self).__sql__(ctx)
with ctx.scope_values(subquery=True):
ctx.literal('DELETE FROM ').sql(self.table)
if self._where is not None:
with ctx.scope_normal():
ctx.literal(' WHERE ').sql(self._where)
self._apply_ordering(ctx)
return self.apply_returning(ctx)
| Delete |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 2108,
"end": 2222
} | class ____(Greeter, ToUpperCase, enum.Enum):
"""this is enum class"""
x = 'x'
| EnumClassWithMixinTypeInherit |
python | getsentry__sentry | tests/sentry/ratelimits/utils/test_enforce_rate_limit.py | {
"start": 1169,
"end": 1628
} | class ____(APITestCase):
endpoint = "enforced-endpoint"
@override_settings(SENTRY_SELF_HOSTED=False)
def test_enforced_rate_limit(self) -> None:
"""Endpoints with enforce_rate_limit enabled should result in 429s"""
with freeze_time("2000-01-01"):
self.get_success_response()
self.get_error_response(status_code=status.HTTP_429_TOO_MANY_REQUESTS)
@override_settings(ROOT_URLCONF=__name__)
| EnforceRateLimitTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/types.py | {
"start": 4591,
"end": 6519
} | class ____(DispatcherSpanMixin):
@property
@abstractmethod
def metadata(self) -> ToolMetadata:
pass
@abstractmethod
def __call__(self, input: Any) -> ToolOutput:
pass
def _process_langchain_tool_kwargs(
self,
langchain_tool_kwargs: Any,
) -> Dict[str, Any]:
"""Process langchain tool kwargs."""
if "name" not in langchain_tool_kwargs:
langchain_tool_kwargs["name"] = self.metadata.name or ""
if "description" not in langchain_tool_kwargs:
langchain_tool_kwargs["description"] = self.metadata.description
if "fn_schema" not in langchain_tool_kwargs:
langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema
# Callback dont exist on langchain
if "_callback" in langchain_tool_kwargs:
del langchain_tool_kwargs["_callback"]
if "_async_callback" in langchain_tool_kwargs:
del langchain_tool_kwargs["_async_callback"]
return langchain_tool_kwargs
def to_langchain_tool(
self,
**langchain_tool_kwargs: Any,
) -> "Tool":
"""To langchain tool."""
from llama_index.core.bridge.langchain import Tool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return Tool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
def to_langchain_structured_tool(
self,
**langchain_tool_kwargs: Any,
) -> "StructuredTool":
"""To langchain structured tool."""
from llama_index.core.bridge.langchain import StructuredTool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return StructuredTool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
| BaseTool |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 220305,
"end": 220570
} | class ____(VegaLiteSchema):
"""ConditionalAxisLabelFontWeight schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisLabelFontWeight"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalAxisLabelFontWeight |
python | pytorch__pytorch | tools/experimental/torchfuzz/ops_fuzzer.py | {
"start": 3098,
"end": 4300
} | class ____:
"""
Represents a node in the operation graph.
Attributes:
node_id: Unique identifier for this node
op_name: Name of the operation (e.g., 'torch.ops.aten.add', 'scalar_add', 'arg')
input_specs: List of input specifications required by this operation
output_spec: Output specification produced by this operation
input_nodes: List of node IDs that provide inputs to this operation
depth: Depth level of this node in the generation tree
"""
node_id: str
op_name: str
input_specs: list[Spec]
output_spec: Spec
input_nodes: list[str]
depth: int
def __str__(self) -> str:
"""String representation for debugging."""
return (
f"{self.node_id}: {self.op_name} -> {self.output_spec} (depth {self.depth})"
)
def __repr__(self) -> str:
"""Detailed representation for debugging."""
return (
f"OperationNode(node_id='{self.node_id}', op_name='{self.op_name}', "
f"input_specs={self.input_specs}, output_spec={self.output_spec}, "
f"input_nodes={self.input_nodes}, depth={self.depth})"
)
@dataclass
| OperationNode |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/execute_step.py | {
"start": 30676,
"end": 42551
} | class ____(TypedDict):
data_version: DataVersion
storage_id: Optional[int]
def _get_input_provenance_data(
asset_key: AssetKey, step_context: StepExecutionContext
) -> Mapping[AssetKey, _InputProvenanceData]:
input_provenance: dict[AssetKey, _InputProvenanceData] = {}
deps = step_context.job_def.asset_layer.get(asset_key).parent_keys
for key in deps:
# For deps external to this step, this will retrieve the cached record that was stored prior
# to step execution. For inputs internal to this step, it may trigger a query to retrieve
# the most recent materialization record (it will retrieve a cached record if it's already
# been asked for). For this to be correct, the output materializations for the step must be
# generated in topological order -- we assume this.
version_info = step_context.maybe_fetch_and_get_input_asset_version_info(key)
# This can only happen for source assets that have never been observed.
if version_info is None:
storage_id = None
data_version = DEFAULT_DATA_VERSION
else:
storage_id = version_info.storage_id
data_version = version_info.data_version or DEFAULT_DATA_VERSION
input_provenance[key] = {
"data_version": data_version,
"storage_id": storage_id,
}
return input_provenance
def _build_data_version_tags(
data_version: DataVersion,
code_version: str,
input_provenance_data: Mapping[AssetKey, _InputProvenanceData],
data_version_is_user_provided: bool,
) -> dict[str, str]:
tags: dict[str, str] = {}
tags[CODE_VERSION_TAG] = code_version
for key, meta in input_provenance_data.items():
tags[get_input_data_version_tag(key)] = meta["data_version"].value
tags[get_input_event_pointer_tag(key)] = (
str(meta["storage_id"]) if meta["storage_id"] else NULL_EVENT_POINTER
)
tags[DATA_VERSION_TAG] = data_version.value
if data_version_is_user_provided:
tags[DATA_VERSION_IS_USER_PROVIDED_TAG] = "true"
return tags
def _build_data_version_observation_tags(data_version: DataVersion) -> dict[str, str]:
return {
DATA_VERSION_TAG: data_version.value,
DATA_VERSION_IS_USER_PROVIDED_TAG: "true",
}
def _store_output(
step_context: StepExecutionContext,
step_output_handle: StepOutputHandle,
output: Union[Output, DynamicOutput],
) -> Iterator[DagsterEvent]:
output_def = step_context.op_def.output_def_named(step_output_handle.output_name)
output_manager = step_context.get_io_manager(step_output_handle)
output_context = step_context.get_output_context(step_output_handle, output.metadata)
manager_materializations = []
manager_metadata: dict[str, MetadataValue] = {}
# don't store asset check outputs, asset observation outputs, asset result outputs, or Nothing
# type outputs
step_output = step_context.step.step_output_named(step_output_handle.output_name)
if (
step_output.properties.asset_check_key
or (step_context.output_observes_source_asset(step_output_handle.output_name))
or output_context.dagster_type.is_nothing
or (
# FIXME: currently, when an output type is unset, this quickly gets coerced to the Any type,
# making it impossible to distinguish between a user declaring that they expect an output
# of any type, and a user not declaring any expectation at all.
#
# For now, we assume that if the output type is Any AND the user has not explicitly set a
# value for their materialize result, that they do not expect the IO manager to be invoked.
# In contrast, if the user does explicitly set the output value to any value (including None),
# the IO manager *will* be invoked.
output_context.dagster_type.is_any
and isinstance(output, AssetResultOutput)
and output.value is NoValueSentinel
)
):
yield from _log_materialization_or_observation_events_for_asset(
step_context=step_context,
output_context=output_context,
output=output,
output_def=output_def,
manager_metadata={},
)
# otherwise invoke the I/O manager
else:
# output_manager.handle_output is either a generator function, or a normal function with or
# without a return value. In the case that handle_output is a normal function, we need to
# catch errors should they be raised before a return value. We can do this by wrapping
# handle_output in a generator so that errors will be caught within iterate_with_context.
if not inspect.isgeneratorfunction(output_manager.handle_output):
def _gen_fn():
gen_output = output_manager.handle_output(output_context, output.value)
yield from output_context.consume_events()
if gen_output:
yield gen_output
handle_output_gen = _gen_fn()
else:
handle_output_gen = output_manager.handle_output(output_context, output.value)
for elt in iterate_with_context(
lambda: op_execution_error_boundary(
DagsterExecutionHandleOutputError,
msg_fn=lambda: f'Error occurred while handling output "{output_context.name}" of step "{step_context.step.key}":',
step_context=step_context,
step_key=step_context.step.key,
output_name=output_context.name,
),
handle_output_gen,
):
for event in output_context.consume_events():
yield event
manager_metadata = {**manager_metadata, **output_context.consume_logged_metadata()}
if isinstance(elt, DagsterEvent):
yield elt
elif isinstance(elt, AssetMaterialization):
manager_materializations.append(elt)
elif isinstance(elt, dict): # should remove this?
beta_warning("Yielding metadata from an IOManager's handle_output() function")
manager_metadata = {**manager_metadata, **normalize_metadata(elt)}
else:
raise DagsterInvariantViolationError(
f"IO manager on output {output_def.name} has returned "
f"value {elt} of type {type(elt).__name__}. The return type can only be "
"one of AssetMaterialization, Dict[str, MetadataValue]."
)
for event in output_context.consume_events():
yield event
manager_metadata = {**manager_metadata, **output_context.consume_logged_metadata()}
# do not alter explicitly created AssetMaterializations
for mgr_materialization in manager_materializations:
if mgr_materialization.metadata and manager_metadata:
raise DagsterInvariantViolationError(
f"When handling output '{output_context.name}' of"
f" {output_context.op_def.node_type_str} '{output_context.op_def.name}', received a"
" materialization with metadata, while context.add_output_metadata was used within"
" the same call to handle_output. Due to potential conflicts, this is not allowed."
" Please specify metadata in one place within the `handle_output` function."
)
if manager_metadata:
with disable_dagster_warnings():
materialization = AssetMaterialization(
asset_key=mgr_materialization.asset_key,
description=mgr_materialization.description,
metadata=manager_metadata,
partition=mgr_materialization.partition,
)
else:
materialization = mgr_materialization
yield DagsterEvent.asset_materialization(step_context, materialization)
yield from _log_materialization_or_observation_events_for_asset(
step_context=step_context,
output_context=output_context,
output=output,
output_def=output_def,
manager_metadata=manager_metadata,
)
yield DagsterEvent.handled_output(
step_context,
output_name=step_output_handle.output_name,
manager_key=output_def.io_manager_key,
metadata=manager_metadata,
)
def _log_materialization_or_observation_events_for_asset(
step_context: StepExecutionContext,
output_context: OutputContext,
output: Union[Output, DynamicOutput],
output_def: OutputDefinition,
manager_metadata: Mapping[str, MetadataValue],
) -> Iterable[DagsterEvent]:
# This is a temporary workaround to prevent duplicate observation events from external
# observable assets that were auto-converted from source assets. These assets yield
# observation events through the context in their body, and will continue to do so until we
# can convert them to using ObserveResult, which requires a solution to partition-scoped
# metadata and data version on output. We identify these auto-converted assets by looking
# for OBSERVATION-type asset that have this special metadata key (added in
# `wrap_source_asset_observe_fn_in_op_compute_fn`), which should only occur for these
# auto-converted source assets. This can be removed when source asset observation functions
# are converted to use ObserveResult.
if SYSTEM_METADATA_KEY_SOURCE_ASSET_OBSERVATION in output.metadata:
return
asset_key = output_context.asset_key if output_context.has_asset_key else None
partitions = output_context.asset_partition_keys if output_context.has_asset_partitions else []
if asset_key:
asset_layer = step_context.job_def.asset_layer
assets_def = asset_layer.get_assets_def_for_node(step_context.node_handle)
execution_type = check.not_none(assets_def).execution_type
check.invariant(
execution_type != AssetExecutionType.UNEXECUTABLE,
"There should never be unexecutable assets here",
)
check.invariant(
execution_type in {AssetExecutionType.MATERIALIZATION, AssetExecutionType.OBSERVATION},
f"Unexpected asset execution type {execution_type}",
)
if assets_are_externally_managed(step_context.dagster_run):
asset_events = []
else:
asset_events = list(
_get_output_asset_events(
asset_key,
partitions,
output,
output_def,
manager_metadata,
step_context,
execution_type,
)
)
batch_id = generate_event_batch_id()
last_index = len(asset_events) - 1
for i, asset_event in enumerate(asset_events):
batch_metadata = (
DagsterEventBatchMetadata(batch_id, i == last_index) if partitions else None
)
yield _dagster_event_for_asset_event(step_context, asset_event, batch_metadata)
def _dagster_event_for_asset_event(
step_context: StepExecutionContext,
asset_event: Union[AssetMaterialization, AssetObservation],
batch_metadata: Optional[DagsterEventBatchMetadata],
) -> DagsterEvent:
if isinstance(asset_event, AssetMaterialization):
return DagsterEvent.asset_materialization(step_context, asset_event, batch_metadata)
else: # observation
return DagsterEvent.asset_observation(step_context, asset_event, batch_metadata)
| _InputProvenanceData |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 9099,
"end": 9174
} | class ____(NumericType):
"""Base class for float data types."""
| FloatType |
python | openai__openai-python | src/openai/types/responses/response_output_text_param.py | {
"start": 2570,
"end": 2759
} | class ____(TypedDict, total=False):
token: Required[str]
bytes: Required[Iterable[int]]
logprob: Required[float]
top_logprobs: Required[Iterable[LogprobTopLogprob]]
| Logprob |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/utils.py | {
"start": 6883,
"end": 7506
} | class ____(Provider):
max_tokens_key = "maxTokens"
def get_text_from_response(self, response: dict) -> str:
return response["completions"][0]["data"]["text"]
def completion_to_anthopic_prompt(completion: str) -> str:
messages, _ = messages_to_anthropic_messages(prompt_to_messages(completion))
return messages
def _messages_to_anthropic_messages(messages: Sequence[ChatMessage]) -> List[dict]:
messages, system_prompt = messages_to_anthropic_messages(messages)
if system_prompt:
messages = [{"role": "system", "content": system_prompt}, *messages]
return messages
| Ai21Provider |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 76103,
"end": 78681
} | class ____(fc_types.FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def variable_shape(self):
"""`TensorShape` of `get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`.
The output of this function will be used by model-builder-functions. For
example the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
`Tensor` of shape [batch_size] + `variable_shape`.
"""
pass
def is_feature_column_v2(feature_columns):
"""Returns True if all feature columns are V2."""
for feature_column in feature_columns:
if not isinstance(feature_column, fc_types.FeatureColumn):
return False
if not feature_column._is_v2_column: # pylint: disable=protected-access
return False
return True
def _create_weighted_sum(column, transformation_cache, state_manager,
sparse_combiner, weight_var):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
sparse_combiner=sparse_combiner,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
state_manager, weight_var):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column.get_dense_tensor(transformation_cache, state_manager)
num_elements = column.variable_shape.num_elements()
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
return math_ops.matmul(tensor, weight_var, name='weighted_sum')
| DenseColumn |
python | pypa__pipenv | pipenv/patched/pip/_internal/build_env.py | {
"start": 2736,
"end": 10411
} | class ____:
"""Creates and manages an isolated environment to install build deps"""
def __init__(self) -> None:
temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
self._prefixes = OrderedDict(
(name, _Prefix(os.path.join(temp_dir.path, name)))
for name in ("normal", "overlay")
)
self._bin_dirs: List[str] = []
self._lib_dirs: List[str] = []
for prefix in reversed(list(self._prefixes.values())):
self._bin_dirs.append(prefix.bin_dir)
self._lib_dirs.extend(prefix.lib_dirs)
# Customize site to:
# - ensure .pth files are honored
# - prevent access to system site packages
system_sites = _get_system_sitepackages()
self._site_dir = os.path.join(temp_dir.path, "site")
if not os.path.exists(self._site_dir):
os.mkdir(self._site_dir)
with open(
os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
) as fp:
fp.write(
textwrap.dedent(
"""
import os, site, sys
# First, drop system-sites related paths.
original_sys_path = sys.path[:]
known_paths = set()
for path in {system_sites!r}:
site.addsitedir(path, known_paths=known_paths)
system_paths = set(
os.path.normcase(path)
for path in sys.path[len(original_sys_path):]
)
original_sys_path = [
path for path in original_sys_path
if os.path.normcase(path) not in system_paths
]
sys.path = original_sys_path
# Second, add lib directories.
# ensuring .pth file are processed.
for path in {lib_dirs!r}:
assert not path in sys.path
site.addsitedir(path)
"""
).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
)
def __enter__(self) -> None:
self._save_env = {
name: os.environ.get(name, None)
for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
}
path = self._bin_dirs[:]
old_path = self._save_env["PATH"]
if old_path:
path.extend(old_path.split(os.pathsep))
pythonpath = [self._site_dir]
os.environ.update(
{
"PATH": os.pathsep.join(path),
"PYTHONNOUSERSITE": "1",
"PYTHONPATH": os.pathsep.join(pythonpath),
}
)
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
for varname, old_value in self._save_env.items():
if old_value is None:
os.environ.pop(varname, None)
else:
os.environ[varname] = old_value
def check_requirements(
self, reqs: Iterable[str]
) -> Tuple[Set[Tuple[str, str]], Set[str]]:
"""Return 2 sets:
- conflicting requirements: set of (installed, wanted) reqs tuples
- missing requirements: set of reqs
"""
missing = set()
conflicting = set()
if reqs:
env = (
get_environment(self._lib_dirs)
if hasattr(self, "_lib_dirs")
else get_default_environment()
)
for req_str in reqs:
req = get_requirement(req_str)
# We're explicitly evaluating with an empty extra value, since build
# environments are not provided any mechanism to select specific extras.
if req.marker is not None and not req.marker.evaluate({"extra": ""}):
continue
dist = env.get_distribution(req.name)
if not dist:
missing.add(req_str)
continue
if isinstance(dist.version, Version):
installed_req_str = f"{req.name}=={dist.version}"
else:
installed_req_str = f"{req.name}==={dist.version}"
if not req.specifier.contains(dist.version, prereleases=True):
conflicting.add((installed_req_str, req_str))
# FIXME: Consider direct URL?
return conflicting, missing
def install_requirements(
self,
finder: "PackageFinder",
requirements: Iterable[str],
prefix_as_string: str,
*,
kind: str,
) -> None:
prefix = self._prefixes[prefix_as_string]
assert not prefix.setup
prefix.setup = True
if not requirements:
return
self._install_requirements(
get_runnable_pip(),
finder,
requirements,
prefix,
kind=kind,
)
@staticmethod
def _install_requirements(
pip_runnable: str,
finder: "PackageFinder",
requirements: Iterable[str],
prefix: _Prefix,
*,
kind: str,
) -> None:
args: List[str] = [
sys.executable,
pip_runnable,
"install",
"--ignore-installed",
"--no-user",
"--prefix",
prefix.path,
"--no-warn-script-location",
"--disable-pip-version-check",
# As the build environment is ephemeral, it's wasteful to
# pre-compile everything, especially as not every Python
# module will be used/compiled in most cases.
"--no-compile",
# The prefix specified two lines above, thus
# target from config file or env var should be ignored
"--target",
"",
]
if logger.getEffectiveLevel() <= logging.DEBUG:
args.append("-vv")
elif logger.getEffectiveLevel() <= VERBOSE:
args.append("-v")
for format_control in ("no_binary", "only_binary"):
formats = getattr(finder.format_control, format_control)
args.extend(
(
"--" + format_control.replace("_", "-"),
",".join(sorted(formats or {":none:"})),
)
)
index_urls = finder.index_urls
if index_urls:
args.extend(["-i", index_urls[0]])
for extra_index in index_urls[1:]:
args.extend(["--extra-index-url", extra_index])
else:
args.append("--no-index")
for link in finder.find_links:
args.extend(["--find-links", link])
if finder.proxy:
args.extend(["--proxy", finder.proxy])
for host in finder.trusted_hosts:
args.extend(["--trusted-host", host])
if finder.custom_cert:
args.extend(["--cert", finder.custom_cert])
if finder.client_cert:
args.extend(["--client-cert", finder.client_cert])
if finder.allow_all_prereleases:
args.append("--pre")
if finder.prefer_binary:
args.append("--prefer-binary")
args.append("--")
args.extend(requirements)
with open_spinner(f"Installing {kind}") as spinner:
call_subprocess(
args,
command_desc=f"pip subprocess to install {kind}",
spinner=spinner,
)
| BuildEnvironment |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 367597,
"end": 369533
} | class ____(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _shape_info(self):
return [_ShapeInfo("c", False, (0, 1), (False, False))]
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
def f1(x, cr):
# CDF for 0 <= x < pi
return 1/np.pi * np.arctan(cr*np.tan(x/2))
def f2(x, cr):
# CDF for pi <= x <= 2*pi
return 1 - 1/np.pi * np.arctan(cr*np.tan((2*np.pi - x)/2))
cr = (1 + c)/(1 - c)
return xpx.apply_where(x < np.pi, (x, cr), f1, f2)
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
def _fitstart(self, data):
# Use 0.5 as the initial guess of the shape parameter.
# For the location and scale, use the minimum and
# peak-to-peak/(2*pi), respectively.
if isinstance(data, CensoredData):
data = data._uncensor()
return 0.5, np.min(data), np.ptp(data)/(2*np.pi)
@inherit_docstring_from(rv_continuous)
def rvs(self, *args, **kwds):
rvs = super().rvs(*args, **kwds)
return np.mod(rvs, 2*np.pi)
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
| wrapcauchy_gen |
python | has2k1__plotnine | plotnine/scales/scale_manual.py | {
"start": 1400,
"end": 1834
} | class ____(_scale_manual):
"""
Custom discrete color scale
"""
_aesthetics = ["color"]
values: InitVar[Sequence[Any] | dict[Any, Any]]
"""
Colors that make up the palette. The values will be matched with
the `limits` of the scale or the `breaks` if provided.
If it is a dict then it should map data values to colors.
"""
_: KW_ONLY
na_value: str = "#7F7F7F"
@dataclass
| scale_color_manual |
python | pyqtgraph__pyqtgraph | pyqtgraph/console/repl_widget.py | {
"start": 161,
"end": 5997
} | class ____(QtWidgets.QWidget):
sigCommandEntered = QtCore.Signal(object, object) # self, command
sigCommandRaisedException = QtCore.Signal(object, object) # self, exc
def __init__(self, globals, locals, parent=None, allowNonGuiExecution=False):
self._lastCommandRow = None
QtWidgets.QWidget.__init__(self, parent=parent)
self._allowNonGuiExecution = allowNonGuiExecution
self._thread = ReplThread(self, globals, locals, parent=self)
self._thread.sigCommandEntered.connect(self.sigCommandEntered)
self._thread.sigCommandRaisedException.connect(self.handleException)
self._thread.sigCommandExecuted.connect(self.handleCommandExecuted)
if allowNonGuiExecution:
self._thread.start()
self._setupUi()
# define text styles
isDark = self.output.palette().color(QtGui.QPalette.ColorRole.Base).value() < 128
outputBlockFormat = QtGui.QTextBlockFormat()
outputFirstLineBlockFormat = QtGui.QTextBlockFormat(outputBlockFormat)
outputFirstLineBlockFormat.setTopMargin(5)
outputCharFormat = QtGui.QTextCharFormat()
outputCharFormat.setFont(self.output.font())
outputCharFormat.setFontWeight(QtGui.QFont.Weight.Normal)
cmdBlockFormat = QtGui.QTextBlockFormat()
cmdBlockFormat.setBackground(mkBrush("#335" if isDark else "#CCF"))
cmdCharFormat = QtGui.QTextCharFormat()
cmdCharFormat.setFont(self.output.font())
cmdCharFormat.setFontWeight(QtGui.QFont.Weight.Bold)
self.textStyles = {
'command': (cmdCharFormat, cmdBlockFormat),
'output': (outputCharFormat, outputBlockFormat),
'output_first_line': (outputCharFormat, outputFirstLineBlockFormat),
}
self.input.ps1 = self._thread.ps1
self.input.ps2 = self._thread.ps2
def _setupUi(self):
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.setLayout(self.layout)
self.output = QtWidgets.QTextEdit(self)
font = QtGui.QFont("monospace")
self.output.setFont(font)
self.output.setReadOnly(True)
self.layout.addWidget(self.output)
# put input box in a horizontal layout so we can easily place buttons at the end
self.inputWidget = QtWidgets.QWidget(self)
self.layout.addWidget(self.inputWidget)
self.inputLayout = QtWidgets.QHBoxLayout()
self.inputWidget.setLayout(self.inputLayout)
self.inputLayout.setContentsMargins(0, 0, 0, 0)
self.input = CmdInput(parent=self)
self.input.setFont(font)
self.inputLayout.addWidget(self.input)
if self._allowNonGuiExecution:
self.guiCheckbox = QtWidgets.QCheckBox("Exec in GUI", self)
self.guiCheckbox.setChecked(True)
self.guiCheckbox.setToolTip(
"If your command is long-running and does not require GUI interaction,"
" uncheck this box to run it in a separate thread."
)
self.inputLayout.addWidget(self.guiCheckbox)
self.input.sigExecuteCmd.connect(self.handleCommand)
self._thread.sigInputGenerated.connect(self.write)
self._thread.sigMultilineChanged.connect(self._setMultiline)
def handleCommand(self, cmd):
self.input.setEnabled(False)
if self._allowNonGuiExecution and not self.guiCheckbox.isChecked():
self._thread.queueCommand(cmd)
else:
self._thread.runCmd(cmd)
def handleCommandExecuted(self):
self.input.setEnabled(True)
self.input.setFocus()
def handleException(self, exc):
self.input.setEnabled(True)
self.input.setFocus()
self.sigCommandRaisedException.emit(self, exc)
def write(self, strn, style='output', scrollToBottom='auto'):
"""Write a string into the console.
If scrollToBottom is 'auto', then the console is automatically scrolled
to fit the new text only if it was already at the bottom.
"""
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if not isGuiThread:
sys.__stdout__.write(strn)
return
cursor = self.output.textCursor()
cursor.movePosition(QtGui.QTextCursor.MoveOperation.End)
sb = self.output.verticalScrollBar()
scroll = sb.value()
if scrollToBottom == 'auto':
atBottom = scroll == sb.maximum()
scrollToBottom = atBottom
row = cursor.blockNumber()
if style == 'command':
self._lastCommandRow = row
if style == 'output' and row == self._lastCommandRow + 1:
# adjust style for first line of output
firstLine, endl, strn = strn.partition('\n')
self._setTextStyle('output_first_line')
self.output.insertPlainText(firstLine + endl)
if len(strn) > 0:
self._setTextStyle(style)
self.output.insertPlainText(strn)
# return to output style immediately to avoid seeing an extra line of command style
if style != 'output':
self._setTextStyle('output')
if scrollToBottom:
sb.setValue(sb.maximum())
else:
sb.setValue(scroll)
def _setMultiline(self, enable):
self.input.setMultiline(enable)
if enable:
self.input.setEnabled(True)
self.input.setFocus()
def _setTextStyle(self, style):
charFormat, blockFormat = self.textStyles[style]
cursor = self.output.textCursor()
cursor.setBlockFormat(blockFormat)
self.output.setCurrentCharFormat(charFormat)
| ReplWidget |
python | django__django | tests/admin_inlines/admin.py | {
"start": 1411,
"end": 1522
} | class ____(admin.StackedInline):
model = NonAutoPKBook
classes = ("collapse",)
| NonAutoPKBookStackedInline |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/search.py | {
"start": 685,
"end": 766
} | class ____(Enum):
FORWARD = "FORWARD"
BACKWARD = "BACKWARD"
| SearchDirection |
python | mlflow__mlflow | tests/models/test_pyfunc.py | {
"start": 87,
"end": 473
} | class ____:
def __init__(self, check_version=True):
self._check_version = check_version
def predict(self, df):
from mlflow.version import VERSION
if self._check_version:
assert VERSION == MLFLOW_VERSION
mu = df.mean().mean()
return [mu for _ in range(len(df))]
def _load_pyfunc(_):
return PyFuncTestModel()
| PyFuncTestModel |
python | pallets__flask | src/flask/views.py | {
"start": 305,
"end": 5146
} | class ____:
"""Subclass this class and override :meth:`dispatch_request` to
create a generic class-based view. Call :meth:`as_view` to create a
view function that creates an instance of the class with the given
arguments and calls its ``dispatch_request`` method with any URL
variables.
See :doc:`views` for a detailed guide.
.. code-block:: python
class Hello(View):
init_every_request = False
def dispatch_request(self, name):
return f"Hello, {name}!"
app.add_url_rule(
"/hello/<name>", view_func=Hello.as_view("hello")
)
Set :attr:`methods` on the class to change what methods the view
accepts.
Set :attr:`decorators` on the class to apply a list of decorators to
the generated view function. Decorators applied to the class itself
will not be applied to the generated view function!
Set :attr:`init_every_request` to ``False`` for efficiency, unless
you need to store request-global data on ``self``.
"""
#: The methods this view is registered for. Uses the same default
#: (``["GET", "HEAD", "OPTIONS"]``) as ``route`` and
#: ``add_url_rule`` by default.
methods: t.ClassVar[t.Collection[str] | None] = None
#: Control whether the ``OPTIONS`` method is handled automatically.
#: Uses the same default (``True``) as ``route`` and
#: ``add_url_rule`` by default.
provide_automatic_options: t.ClassVar[bool | None] = None
#: A list of decorators to apply, in order, to the generated view
#: function. Remember that ``@decorator`` syntax is applied bottom
#: to top, so the first decorator in the list would be the bottom
#: decorator.
#:
#: .. versionadded:: 0.8
decorators: t.ClassVar[list[t.Callable[..., t.Any]]] = []
#: Create a new instance of this view class for every request by
#: default. If a view subclass sets this to ``False``, the same
#: instance is used for every request.
#:
#: A single instance is more efficient, especially if complex setup
#: is done during init. However, storing data on ``self`` is no
#: longer safe across requests, and :data:`~flask.g` should be used
#: instead.
#:
#: .. versionadded:: 2.2
init_every_request: t.ClassVar[bool] = True
def dispatch_request(self) -> ft.ResponseReturnValue:
"""The actual view function behavior. Subclasses must override
this and return a valid response. Any variables from the URL
rule are passed as keyword arguments.
"""
raise NotImplementedError()
@classmethod
def as_view(
cls, name: str, *class_args: t.Any, **class_kwargs: t.Any
) -> ft.RouteCallable:
"""Convert the class into a view function that can be registered
for a route.
By default, the generated view will create a new instance of the
view class for every request and call its
:meth:`dispatch_request` method. If the view class sets
:attr:`init_every_request` to ``False``, the same instance will
be used for every request.
Except for ``name``, all other arguments passed to this method
are forwarded to the view class ``__init__`` method.
.. versionchanged:: 2.2
Added the ``init_every_request`` class attribute.
"""
if cls.init_every_request:
def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
self = view.view_class( # type: ignore[attr-defined]
*class_args, **class_kwargs
)
return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
else:
self = cls(*class_args, **class_kwargs) # pyright: ignore
def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls # type: ignore
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods # type: ignore
view.provide_automatic_options = cls.provide_automatic_options # type: ignore
return view
| View |
python | pytorch__pytorch | torch/_python_dispatcher.py | {
"start": 2763,
"end": 7137
} | class ____:
namespace = "__test__"
name = "foo"
# fmt: off
runtime_keys = [
"CPU", "AutogradCPU",
"FPGA", "AutogradOther",
"XLA", "AutogradXLA",
"Lazy", "AutogradLazy",
]
# fmt: on
alias_keys = [
"CompositeExplicitAutograd",
"Autograd",
"CompositeImplicitAutograd",
]
supported_keys = runtime_keys + alias_keys
def __init__(self) -> None:
C._dispatch_check_invariants(self.name) # type: ignore[attr-defined]
self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
self.ref.def_("foo(Tensor x) -> Tensor")
"""
Returns a list of dispatch keys supported by PythonDispatcher.
You can register kernels to these keys.
"""
def keys(self):
return self.supported_keys
"""
Register kernels to the target dispatchKeys.
dispatchKeys(list[str]): a list of dispatch keys that you want to register
your own kernel. Note that you don't need to write the kernel yourself in
this PythonDispatcher.E.g. for CPU key, a kernel(e.g fn_CPU for CPU) is
automatically generated and registered.
"""
def register(self, dispatchKeys):
# Overridden is not supported and triggers a warning in C++ dispatcher.
if len(set(dispatchKeys)) != len(dispatchKeys):
raise RuntimeError(
f"Overridden is not allowed but found duplicates in {dispatchKeys}."
)
# We currently forbid this in codegen instead of C++ dispatcher.
if (
"CompositeImplicitAutograd" in dispatchKeys
and "CompositeExplicitAutograd" in dispatchKeys
):
raise RuntimeError(
"Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
)
for key in dispatchKeys:
if key not in self.supported_keys:
raise RuntimeError(
f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
)
self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)
"""
Helper function to format (key, kernel).
"""
def _format_line(self, key, kernel):
return f"{key:<15} {kernel}\n"
"""
Helper function to print a table header.
"""
def _format_header(self, header):
s = f"""
{header}
"""
s += self._format_line("key", "kernel")
s += "---------------------------\n"
return s
"""
Returns raw output of all registration info for debugging only.
Use registrations() for a simplified version.
"""
def rawRegistrations(self):
return C._dispatch_dump(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
"""
Returns raw output of computed dispatch table for debugging only.
Use dispatchTable() for a simplified version.
"""
def rawDispatchTable(self):
return C._dispatch_dump_table(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
"""
Returns a table(str) including all the registrations from users.
Note this includes registrations to both runtime keys and alias keys.
"""
def registrations(self):
output = self._format_header("Registered Kernels")
state = self.rawRegistrations()
state_entries = state.split("\n")
for line in state_entries:
first = line.split(":")[0]
if any(first.startswith(k) for k in self.supported_keys):
kernel = line.split("::")[0].split(" ")[1]
output += self._format_line(first, kernel)
return output
"""
Returns the computed dispatch table(str). Note this only include
runtime keys, registrations to alias keys have been decoded to their
mapped runtime keys.
"""
def dispatchTable(self):
output = self._format_header("Computed Dispatch Table")
table = self.rawDispatchTable()
table_entries = table.split("\n")
regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
for line in table_entries:
k = line.split(":")[0]
if k in self.runtime_keys:
entry = regex.sub("[", line)
output += self._format_line(k, entry.split(": ")[1])
return output
| PythonDispatcher |
python | tornadoweb__tornado | tornado/routing.py | {
"start": 6268,
"end": 7247
} | class ____(httputil.HTTPServerConnectionDelegate):
"""Abstract router interface."""
def find_handler(
self, request: httputil.HTTPServerRequest, **kwargs: Any
) -> Optional[httputil.HTTPMessageDelegate]:
"""Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
that can serve the request.
Routing implementations may pass additional kwargs to extend the routing logic.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg kwargs: additional keyword arguments passed by routing implementation.
:returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
process the request.
"""
raise NotImplementedError()
def start_request(
self, server_conn: object, request_conn: httputil.HTTPConnection
) -> httputil.HTTPMessageDelegate:
return _RoutingDelegate(self, server_conn, request_conn)
| Router |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 127084,
"end": 137882
} | class ____(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
pmf(x, n, p)
Probability mass function.
logpmf(x, n, p)
Log of the probability mass function.
rvs(n, p, size=1, random_state=None)
Draw random samples from a multinomial distribution.
entropy(n, p)
Compute the entropy of the multinomial distribution.
cov(n, p)
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
>>> rv = multinomial(n=7, p=[.3, .7])
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
eps = np.finfo(np.result_type(np.asarray(p), np.float32)).eps * 10
p = np.array(p, dtype=np.float64, copy=True)
p_adjusted = 1. - p[..., :-1].sum(axis=-1)
# only make adjustment when it's significant
i_adjusted = np.abs(1 - p.sum(axis=-1)) > eps
p[i_adjusted, -1] = p_adjusted[i_adjusted]
if np.any(i_adjusted):
message = ("Some rows of `p` do not sum to 1.0 within tolerance of "
f"{eps=}. Currently, the last element of these rows is adjusted "
"to compensate, but this condition will produce NaNs "
"beginning in SciPy 1.18.0. Please ensure that rows of `p` sum "
"to 1.0 to avoid futher disruption.")
warnings.warn(message, FutureWarning, stacklevel=3)
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=int, copy=True)
# true for bad n
ncond = n < 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=int)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError(f"Size of each quantile should be size of p: "
f"received {xx.shape[-1]}, but expected "
f"{p.shape[-1]}.")
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, -np.inf)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.nan)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.nan)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
new_shape = x.shape + (1,)*new_axes_needed
x = x.reshape(new_shape)
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
| multinomial_gen |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.