language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | lib/ansible/executor/powershell/module_manifest.py | {
"start": 1453,
"end": 1643
} | class ____:
name: str
params: dict[str, object] = dataclasses.field(default_factory=dict)
secure_params: dict[str, object] = dataclasses.field(default_factory=dict)
| _ManifestAction |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 25109,
"end": 26665
} | class ____(PostGenerationDeclaration):
"""Calls a method of the generated object.
Attributes:
method_name (str): the method to call
method_args (list): arguments to pass to the method
method_kwargs (dict): keyword arguments to pass to the method
Example:
class UserFactory(factory.Factory):
...
password = factory.PostGenerationMethodCall('set_pass', password='')
"""
def __init__(self, method_name, *args, **kwargs):
super().__init__()
if len(args) > 1:
raise errors.InvalidDeclarationError(
"A PostGenerationMethodCall can only handle 1 positional argument; "
"please provide other parameters through keyword arguments."
)
self.method_name = method_name
self.method_arg = args[0] if args else NotProvided
self.method_kwargs = kwargs
def call(self, instance, step, context):
if not context.value_provided:
if self.method_arg is NotProvided:
args = ()
else:
args = (self.method_arg,)
else:
args = (context.value,)
kwargs = dict(self.method_kwargs)
kwargs.update(context.extra)
method = getattr(instance, self.method_name)
logger.debug(
"PostGenerationMethodCall: Calling %r.%s(%s)",
instance,
self.method_name,
utils.log_pprint(args, kwargs),
)
return method(*args, **kwargs)
| PostGenerationMethodCall |
python | gevent__gevent | src/greentest/3.12/test_interpreters.py | {
"start": 2011,
"end": 4011
} | class ____(TestBase):
def test_in_main(self):
interp = interpreters.create()
self.assertIsInstance(interp, interpreters.Interpreter)
self.assertIn(interp, interpreters.list_all())
def test_in_thread(self):
lock = threading.Lock()
interp = None
def f():
nonlocal interp
interp = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(interp, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
from test.support import interpreters
interp = interpreters.create()
print(interp.id)
"""))
interp2 = interpreters.Interpreter(int(out))
self.assertEqual(interpreters.list_all(), [main, interp, interp2])
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
interp_lst = []
for _ in range(3):
interps = interpreters.create()
interp_lst.append(interps)
# Now destroy them.
for interp in interp_lst:
interp.close()
# Finally, create another.
interp = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {interp})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
interp1 = interpreters.create()
interp2 = interpreters.create()
interp3 = interpreters.create()
# Now destroy 2 of them.
interp1.close()
interp2.close()
# Finally, create another.
interp = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {interp3, interp})
| CreateTests |
python | pytorch__pytorch | test/test_indexing.py | {
"start": 87998,
"end": 98299
} | class ____(TestCase):
def test_index_no_floats(self, device):
a = torch.tensor([[[5.0]]], device=device)
self.assertRaises(IndexError, lambda: a[0.0])
self.assertRaises(IndexError, lambda: a[0, 0.0])
self.assertRaises(IndexError, lambda: a[0.0, 0])
self.assertRaises(IndexError, lambda: a[0.0, :])
self.assertRaises(IndexError, lambda: a[:, 0.0])
self.assertRaises(IndexError, lambda: a[:, 0.0, :])
self.assertRaises(IndexError, lambda: a[0.0, :, :])
self.assertRaises(IndexError, lambda: a[0, 0, 0.0])
self.assertRaises(IndexError, lambda: a[0.0, 0, 0])
self.assertRaises(IndexError, lambda: a[0, 0.0, 0])
self.assertRaises(IndexError, lambda: a[-1.4])
self.assertRaises(IndexError, lambda: a[0, -1.4])
self.assertRaises(IndexError, lambda: a[-1.4, 0])
self.assertRaises(IndexError, lambda: a[-1.4, :])
self.assertRaises(IndexError, lambda: a[:, -1.4])
self.assertRaises(IndexError, lambda: a[:, -1.4, :])
self.assertRaises(IndexError, lambda: a[-1.4, :, :])
self.assertRaises(IndexError, lambda: a[0, 0, -1.4])
self.assertRaises(IndexError, lambda: a[-1.4, 0, 0])
self.assertRaises(IndexError, lambda: a[0, -1.4, 0])
# self.assertRaises(IndexError, lambda: a[0.0:, 0.0])
# self.assertRaises(IndexError, lambda: a[0.0:, 0.0,:])
def test_none_index(self, device):
# `None` index adds newaxis
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[None].dim(), a.dim() + 1)
def test_empty_tuple_index(self, device):
# Empty tuple index creates a view
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[()], a)
self.assertEqual(a[()].data_ptr(), a.data_ptr())
def test_empty_fancy_index(self, device):
# Empty list index creates an empty array
a = tensor([1, 2, 3], device=device)
self.assertEqual(a[[]], torch.tensor([], dtype=torch.long, device=device))
b = tensor([], device=device).long()
self.assertEqual(a[[]], torch.tensor([], dtype=torch.long, device=device))
b = tensor([], device=device).float()
self.assertRaises(IndexError, lambda: a[b])
def test_ellipsis_index(self, device):
a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertIsNot(a[...], a)
self.assertEqual(a[...], a)
# `a[...]` was `a` in numpy <1.9.
self.assertEqual(a[...].data_ptr(), a.data_ptr())
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
self.assertEqual(a[0, ...], a[0])
self.assertEqual(a[0, ...], a[0, :])
self.assertEqual(a[..., 0], a[:, 0])
# In NumPy, slicing with ellipsis results in a 0-dim array. In PyTorch
# we don't have separate 0-dim arrays and scalars.
self.assertEqual(a[0, ..., 1], torch.tensor(2, device=device))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = torch.tensor(1)
b[(Ellipsis,)] = 2
self.assertEqual(b, 2)
def test_single_int_index(self, device):
# Single integer index selects one row
a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertEqual(a[0], [1, 2, 3])
self.assertEqual(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
self.assertRaises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces Exception NB: different exception type
self.assertRaises(Exception, a.__getitem__, 1 << 64)
def test_single_bool_index(self, device):
# Single boolean index
a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertEqual(a[True], a[None])
self.assertEqual(a[False], a[None][0:0])
def test_boolean_shape_mismatch(self, device):
arr = torch.ones((5, 4, 3), device=device)
index = tensor([True], device=device)
self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
index = tensor([False] * 6, device=device)
self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
index = torch.ByteTensor(4, 4).to(device).zero_()
self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
self.assertRaisesRegex(IndexError, "mask", lambda: arr[(slice(None), index)])
def test_boolean_indexing_onedim(self, device):
# Indexing a 2-dimensional array with
# boolean array of length one
a = tensor([[0.0, 0.0, 0.0]], device=device)
b = tensor([True], device=device)
self.assertEqual(a[b], a)
# boolean assignment
a[b] = 1.0
self.assertEqual(a, tensor([[1.0, 1.0, 1.0]], device=device))
# https://github.com/pytorch/pytorch/issues/127003
@xfailIfTorchDynamo
def test_boolean_assignment_value_mismatch(self, device):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = torch.arange(0, 4, device=device)
def f(a, v):
a[a > -1] = tensor(v).to(device)
self.assertRaisesRegex(Exception, "shape mismatch", f, a, [])
self.assertRaisesRegex(Exception, "shape mismatch", f, a, [1, 2, 3])
self.assertRaisesRegex(Exception, "shape mismatch", f, a[:1], [1, 2, 3])
def test_boolean_indexing_twodim(self, device):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
b = tensor(
[[True, False, True], [False, True, False], [True, False, True]],
device=device,
)
self.assertEqual(a[b], tensor([1, 3, 5, 7, 9], device=device))
self.assertEqual(a[b[1]], tensor([[4, 5, 6]], device=device))
self.assertEqual(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
self.assertEqual(a, tensor([[0, 2, 0], [4, 0, 6], [0, 8, 0]], device=device))
def test_boolean_indexing_weirdness(self, device):
# Weird boolean indexing things
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(
torch.ones(1, 2, device=device), a[True, [0, 1], True, True, [1], [[2]]]
)
self.assertRaises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_weirdness_tensors(self, device):
# Weird boolean indexing things
false = torch.tensor(False, device=device)
true = torch.tensor(True, device=device)
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(
torch.ones(1, 2, device=device), a[true, [0, 1], true, true, [1], [[2]]]
)
self.assertRaises(IndexError, lambda: a[false, [0, 1], ...])
def test_boolean_indexing_alldims(self, device):
true = torch.tensor(True, device=device)
a = torch.ones((2, 3), device=device)
self.assertEqual((1, 2, 3), a[True, True].shape)
self.assertEqual((1, 2, 3), a[true, true].shape)
def test_boolean_list_indexing(self, device):
# Indexing a 2-dimensional array with
# boolean lists
a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
b = [True, False, False]
c = [True, True, False]
self.assertEqual(a[b], tensor([[1, 2, 3]], device=device))
self.assertEqual(a[b, b], tensor([1], device=device))
self.assertEqual(a[c], tensor([[1, 2, 3], [4, 5, 6]], device=device))
self.assertEqual(a[c, c], tensor([1, 5], device=device))
def test_everything_returns_views(self, device):
# Before `...` would return a itself.
a = tensor([5], device=device)
self.assertIsNot(a, a[()])
self.assertIsNot(a, a[...])
self.assertIsNot(a, a[:])
def test_broaderrors_indexing(self, device):
a = torch.zeros(5, 5, device=device)
self.assertRaisesRegex(
IndexError, "shape mismatch", a.__getitem__, ([0, 1], [0, 1, 2])
)
self.assertRaisesRegex(
IndexError, "shape mismatch", a.__setitem__, ([0, 1], [0, 1, 2]), 0
)
def test_trivial_fancy_out_of_bounds(self, device):
a = torch.zeros(5, device=device)
ind = torch.ones(20, dtype=torch.int64, device=device)
if a.device.type in ["cuda", "xpu"]:
raise unittest.SkipTest("CUDA/XPU asserts instead of raising an exception")
ind[-1] = 10
self.assertRaises(IndexError, a.__getitem__, ind)
self.assertRaises(IndexError, a.__setitem__, ind, 0)
ind = torch.ones(20, dtype=torch.int64, device=device)
ind[0] = 11
self.assertRaises(IndexError, a.__getitem__, ind)
self.assertRaises(IndexError, a.__setitem__, ind, 0)
def test_index_is_larger(self, device):
# Simple case of fancy index broadcasting of the index.
a = torch.zeros((5, 5), device=device)
a[[[0], [1], [2]], [0, 1, 2]] = tensor([2.0, 3.0, 4.0], device=device)
self.assertTrue((a[:3, :3] == tensor([2.0, 3.0, 4.0], device=device)).all())
def test_broadcast_subspace(self, device):
a = torch.zeros((100, 100), device=device)
v = torch.arange(0.0, 100, device=device)[:, None]
b = torch.arange(99, -1, -1, device=device).long()
a[b] = v
expected = b.float().unsqueeze(1).expand(100, 100)
self.assertEqual(a, expected)
def test_truncate_leading_1s(self, device):
col_max = torch.randn(1, 4)
kernel = col_max.T * col_max # [4, 4] tensor
kernel2 = kernel.clone()
# Set the diagonal
kernel[range(len(kernel)), range(len(kernel))] = torch.square(col_max)
torch.diagonal(kernel2).copy_(torch.square(col_max.view(4)))
self.assertEqual(kernel, kernel2)
instantiate_device_type_tests(
TestIndexing, globals(), except_for="meta", allow_mps=True, allow_xpu=True
)
instantiate_device_type_tests(NumpyTests, globals(), except_for="meta", allow_xpu=True)
if __name__ == "__main__":
run_tests()
| NumpyTests |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/_textfont.py | {
"start": 233,
"end": 11099
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.textfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"size",
"sizesrc",
"style",
"stylesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | ray-project__ray | python/ray/train/v2/_internal/execution/worker_group/worker_group.py | {
"start": 3177,
"end": 32622
} | class ____(BaseWorkerGroup):
_worker_cls = RayTrainWorker
@classmethod
def create(
cls,
train_run_context: TrainRunContext,
worker_group_context: WorkerGroupContext,
callbacks: Optional[
List[Union[WorkerGroupCallback, WorkerCallback, TrainContextCallback]]
] = None,
) -> "WorkerGroup":
"""Create and start a new worker group.
Args:
train_run_context: The training run context.
worker_group_context: The worker group context.
callbacks: Optional callbacks to attach.
Returns:
An active WorkerGroup instance.
Raises:
WorkerGroupStartupTimeoutError: If worker group startup times out.
WorkerGroupStartupFailedError: If worker group fails to start.
"""
worker_group = cls(train_run_context, worker_group_context, callbacks)
worker_group._start()
return worker_group
def __init__(
self,
train_run_context: TrainRunContext,
worker_group_context: WorkerGroupContext,
callbacks: Optional[
List[Union[WorkerGroupCallback, WorkerCallback, TrainContextCallback]]
] = None,
):
"""Initialize a WorkerGroup instance.
Note: This should not be called directly. Use WorkerGroup.create() instead.
"""
self._train_run_context = train_run_context
run_config = self._train_run_context.run_config
self._storage_context = run_config.storage_context
self._worker_group_context: WorkerGroupContext = worker_group_context
callbacks = callbacks or []
# Group of callbacks that are specific to worker group itself.
self._callbacks = [c for c in callbacks if isinstance(c, WorkerGroupCallback)]
# Group of callbacks that will be propagated and called on the worker actors.
self._worker_callbacks_to_propagate = [
c
for c in callbacks
if isinstance(c, (WorkerCallback, TrainContextCallback))
]
self._worker_group_state: Optional[WorkerGroupState] = None
# Maps world rank to the ongoing poll task.
self._world_rank_to_ongoing_poll: Dict[int, PollTask] = {}
self._latest_poll_status: Optional[WorkerGroupPollStatus] = None
# Environment variables
self._worker_group_start_timeout_s = float(
os.environ.get(
WORKER_GROUP_START_TIMEOUT_S_ENV_VAR,
DEFAULT_WORKER_GROUP_START_TIMEOUT_S,
)
)
self._worker_health_check_timeout_s = float(
os.getenv(
WORKER_HEALTH_CHECK_TIMEOUT_S_ENV_VAR,
DEFAULT_WORKER_HEALTH_CHECK_TIMEOUT_S,
)
)
self._collective_timeout_s = env_float(
COLLECTIVE_TIMEOUT_S_ENV_VAR, DEFAULT_COLLECTIVE_TIMEOUT_S
)
self._collective_warn_interval_s = env_float(
COLLECTIVE_WARN_INTERVAL_S_ENV_VAR,
DEFAULT_COLLECTIVE_WARN_INTERVAL_S,
)
################################################################################
# Start Worker Group
################################################################################
def _start(
self,
):
"""Internal method to start the worker group."""
worker_group_state_builder = WorkerGroupStateBuilder()
try:
self._start_impl(
worker_group_state_builder,
)
except Exception as e:
if not self.has_started():
# Clean up partial worker group state.
worker_group_state_builder.shutdown()
raise e
assert self.has_started(), "Worker group failed to start."
@staticmethod
def _check_cluster_resources_and_raise_if_insufficient(
resources_per_worker: Dict[str, float], num_workers: int
) -> None:
"""Check if the cluster has enough resources before waiting for placement group.
Args:
resources_per_worker: The resources per worker.
num_workers: The number of workers.
"""
max_cluster_resources = ray_state.get_max_resources_from_cluster_config()
if not max_cluster_resources:
return
for (
resource_name,
required_amount,
) in resources_per_worker.items():
total_required_amount = required_amount * num_workers
available_amount = max_cluster_resources.get(resource_name, 0)
if total_required_amount > available_amount:
error_msg = (
"Insufficient cluster resources to launch training workers.\n"
f'The worker group requires {{"{resource_name}": {total_required_amount}}} but the cluster only has a maximum of {{"{resource_name}": {available_amount}}} resources.\n'
"Please reduce `num_workers`, lower resource requirements, or increase the cluster size."
)
raise InsufficientClusterResourcesError(error_msg)
def _start_impl(
self,
worker_group_state_builder: WorkerGroupStateBuilder,
):
"""Implementation of worker group startup.
Args:
worker_group_state_builder: Builder for constructing worker group state.
Raises:
ValueError: If workers are already started.
WorkerGroupStartupTimeoutError: If startup times out requesting resources.
WorkerGroupStartupFailedError: If workers fail during initialization.
"""
self._assert_inactive()
worker_group_context = self._worker_group_context
WorkerGroup._check_cluster_resources_and_raise_if_insufficient(
worker_group_context.resources_per_worker,
worker_group_context.num_workers,
)
# TODO: Review the order of `on_xyz_start` and `after_xyz_start` callbacks.
# The current execution order is as follows:`on_worker_group_start` callbacks
# are triggered before the `after_worker_group_start` callbacks.
with invoke_context_managers(
[callback.on_worker_group_start for callback in self._callbacks]
):
for callback in self._callbacks:
callback.before_worker_group_start(worker_group_context)
bundle_label_selector = (
[worker_group_context.bundle_label_selector.copy()]
* worker_group_context.num_workers
if worker_group_context.bundle_label_selector
else None
)
pg = placement_group(
bundles=[worker_group_context.resources_per_worker]
* worker_group_context.num_workers,
strategy=worker_group_context.placement_strategy,
bundle_label_selector=bundle_label_selector,
)
logger.info(
f"Attempting to start training worker group of size {worker_group_context.num_workers} with "
f"the following resources: [{worker_group_context.resources_per_worker}] * {worker_group_context.num_workers}"
)
# Wait for the placement group to be ready before proceeding
# to create actors.
# This could hang if the resources are not available, so we should
# time out if this hangs for a while to try again with a different size.
# For example, the controller may try to set a worker group size
# based on stale information about cluster resources.
try:
ray.get(pg.ready(), timeout=self._worker_group_start_timeout_s)
except GetTimeoutError as timeout_exc:
remove_placement_group(pg)
raise WorkerGroupStartupTimeoutError(
num_workers=worker_group_context.num_workers
) from timeout_exc
# TODO: Figure out ordering between these different calls/callbacks.
worker_group_state_builder.with_placement_group(pg)
# Initialize the synchronization actor on the driver node
sync_actor = SynchronizationActor.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
node_id=ray.get_runtime_context().get_node_id(),
soft=False,
)
).remote(
timeout_s=self._collective_timeout_s,
warn_interval_s=self._collective_warn_interval_s,
)
worker_group_state_builder.with_sync_actor(sync_actor)
workers = self._create_workers(
worker_group_context.num_workers,
pg,
worker_group_context.resources_per_worker,
)
worker_group_state_builder.with_workers(workers)
# All the ray.get calls in this try block can possibly error if the
# worker actors die during initialization.
# To prevent the driver from crashing, catch all `RayActorError`s and
# raise a specially handled error to the controller.
try:
train_context_args = {}
for callable in self._callbacks:
args = callable.before_init_train_context(workers)
for arg, arg_values in args.items():
assert len(arg_values) == worker_group_context.num_workers, (
f"Callback {callable} returned {arg} with "
f"{len(arg_values)} values, expected {worker_group_context.num_workers}."
)
assert (
arg not in train_context_args
), f"Callback {callable} returned {arg} which is already set."
train_context_args[arg] = arg_values
self._init_train_context_on_workers(
workers, sync_actor, train_context_args
)
self._worker_group_state = worker_group_state_builder.build()
for callback in self._callbacks:
callback.after_worker_group_start(self)
except RayActorError as actor_error:
error_msg = "At least one of the worker actors failed to initialize."
raise WorkerGroupStartupFailedError(error_msg) from actor_error
# Launch the training function on each worker.
# This task should start a worker thread and return immediately.
ray_get_safe(
[
worker.actor.run_train_fn.remote(worker_group_context.train_fn_ref)
for worker in workers
]
)
workers_info = "\n".join(
[
f"- (ip={w.metadata.node_ip}, pid={w.metadata.pid}) "
f"world_rank={w.distributed_context.world_rank}, "
f"local_rank={w.distributed_context.local_rank}, "
f"node_rank={w.distributed_context.node_rank}"
for w in workers
]
)
logger.info(
f"Started training worker group of size {len(workers)}: \n{workers_info}"
)
for callback in self._callbacks:
callback.after_worker_group_training_start(self)
def _create_workers(
self,
num_workers: int,
placement_group: PlacementGroup,
resources_per_worker: Dict[str, float],
) -> List[Worker]:
runtime_env = self._get_worker_runtime_env(
custom_runtime_env=self._train_run_context.run_config.worker_runtime_env
)
worker_actor_cls = ray.remote(
runtime_env=runtime_env,
**bundle_to_remote_args(resources_per_worker),
)(self._worker_cls)
actors = [
worker_actor_cls.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=i
),
).remote()
for i in range(num_workers)
]
try:
actor_metadatas = ray_get_safe(
[actor.get_metadata.remote() for actor in actors]
)
except RayActorError as actor_error:
for actor in actors:
ray.kill(actor)
error_msg = (
"One of the worker actors failed to initialize due to error:\n"
f"{traceback.format_exc()}"
)
raise WorkerGroupStartupFailedError(error_msg) from actor_error
workers = [
Worker(actor, meta, resources_per_worker)
for actor, meta in zip(actors, actor_metadatas)
]
return WorkerGroup._assign_worker_ranks(workers)
def _init_train_context_on_workers(
self,
workers: List[Worker],
sync_actor: ActorHandle,
train_context_args: Dict[str, List[Any]],
) -> None:
context_init_tasks = [
worker.actor.init_train_context.remote(
train_run_context=self._train_run_context,
distributed_context=worker.distributed_context,
synchronization_actor=sync_actor,
storage_context=self._storage_context,
worker_callbacks=self._worker_callbacks_to_propagate,
controller_actor=ray.get_runtime_context().current_actor,
**{
arg: arg_values[i] for arg, arg_values in train_context_args.items()
},
)
for i, worker in enumerate(workers)
]
ray_get_safe(context_init_tasks)
self._decorate_worker_log_file_paths(workers)
#####################################################################################
# Shutdown Worker Group
#####################################################################################
def shutdown(self):
"""Shutdown all the workers in this worker group."""
self._assert_active()
with invoke_context_managers(
[callback.on_worker_group_shutdown for callback in self._callbacks]
):
if self.has_started():
for callback in self._callbacks:
callback.before_worker_group_shutdown(self)
self._worker_group_state.shutdown()
self._clear_state()
logger.debug("Worker group shutdown successful.")
for callback in self._callbacks:
callback.after_worker_group_shutdown(self._worker_group_context)
def _clear_state(self):
self._worker_group_state = None
self._world_rank_to_ongoing_poll = {}
def abort(self):
"""Abort the worker group."""
self._assert_active()
for callback in self._callbacks:
callback.before_worker_group_abort(self._worker_group_context)
# TODO: Add shutdown callback hooks
self._worker_group_state.shutdown()
self._clear_state()
for callback in self._callbacks:
callback.after_worker_group_abort(self._worker_group_context)
#####################################################################################
# Polling Worker Group
#####################################################################################
def poll_status(self, timeout: Optional[float] = None) -> WorkerGroupPollStatus:
"""Poll the status of all workers in the worker group.
Args:
timeout: The maximum time to wait for the poll tasks to complete.
"""
self._assert_active()
poll_results = self._poll_workers_and_collect_errors(timeout)
worker_group_poll_status = WorkerGroupPollStatus(
worker_statuses=dict(enumerate(poll_results)),
)
for callback in self._callbacks:
callback.after_worker_group_poll_status(worker_group_poll_status)
self._latest_poll_status = worker_group_poll_status
return worker_group_poll_status
def _poll_workers_and_collect_errors(
self, timeout: Optional[float]
) -> List[WorkerStatus]:
"""Launch poll tasks on each worker and collect the results.
The poll task should involve very little computation and should
return almost immediately.
If a worker does not return the result of the poll task within
the timeout, it is considered as a missed health check.
The timeout is set to ~seconds, so a missed health check usually
means that something is wrong with the worker.
Subsequent calls to poll the worker will continue waiting on the
hanging poll task.
If a worker's health check hangs for too long, it is marked as dead
and a WorkerHealthCheckTimeoutError is propagated as the error in the
worker status for the controller to handle.
If a worker's poll task fails, a WorkerHealthCheckFailedError is similarly
propagated in the worker status.
Returns:
poll_results: A list of WorkerStatus objects.
If polling a certain worker hangs or fails, the corresponding
WorkerStatus object will include a system error mentioned above.
"""
workers = self.get_workers()
start_time = time_monotonic()
poll_tasks = self._get_poll_tasks()
poll_task_to_world_rank = {
poll_task: i for i, poll_task in enumerate(poll_tasks)
}
done_polls, hanging_polls = ray.wait(
list(poll_task_to_world_rank),
num_returns=len(poll_task_to_world_rank),
timeout=timeout,
)
poll_task_to_result = {}
for hanging_poll in hanging_polls:
hanging_rank = poll_task_to_world_rank[hanging_poll]
# The hanging poll task should be saved and awaited in the next round.
# Save the start time of the poll task to check for timeouts.
# Don't overwrite the ongoing poll task if it already exists.
ongoing_poll = self._world_rank_to_ongoing_poll.setdefault(
hanging_rank, PollTask(start_time, hanging_poll)
)
error = None
elapsed_time_s = time_monotonic() - ongoing_poll.start_time
if elapsed_time_s > self._worker_health_check_timeout_s:
error_msg = (
f"A worker health check has been hanging for {elapsed_time_s:.2f} "
"seconds. Marking the worker as dead.\n"
f"Worker info: {workers[hanging_rank]}"
)
error = WorkerHealthCheckTimeoutError(error_msg)
poll_task_to_result[hanging_poll] = WorkerStatus(
running=True, error=error, training_report=None
)
for done_poll in done_polls:
done_rank = poll_task_to_world_rank[done_poll]
# Remove the ongoing poll task for the worker.
self._world_rank_to_ongoing_poll.pop(done_rank, None)
try:
poll_result: WorkerStatus = ray.get(done_poll)
except Exception as e:
error_msg = (
"A worker health check failed.\n"
f"Worker info: {workers[done_rank]}"
)
poll_result = WorkerStatus(
running=False,
error=WorkerHealthCheckFailedError(error_msg, failure=e),
training_report=None,
)
poll_task_to_result[done_poll] = poll_result
# Collect the results and errors in the order of the workers.
results = [
poll_task_to_result.get(poll_task) for poll_task in poll_task_to_world_rank
]
return results
def _get_poll_tasks(self) -> List[ObjectRef]:
"""Get the poll tasks for each worker.
If there is an ongoing poll task for a worker that did not finish
in the timeout on the previous round, return that task instead of
queueing up a new one.
Spawns a new poll task for the worker if there is no ongoing poll task.
"""
workers = self.get_workers()
poll_tasks = []
for i, worker in enumerate(workers):
if i in self._world_rank_to_ongoing_poll:
ongoing_poll = self._world_rank_to_ongoing_poll[i]
poll_tasks.append(ongoing_poll.task)
else:
poll_tasks.append(worker.actor.poll_status.remote())
return poll_tasks
#####################################################################################
# Execution Methods
#####################################################################################
def execute_async(self, fn: Callable, *fn_args, **fn_kwargs) -> List[ObjectRef]:
"""Execute ``func`` on each worker and return the futures.
Returns:
(List[ObjectRef]) A list of ``ObjectRef`` representing the
output of ``func`` from each worker. The order is the same
as ``self.workers``.
"""
self._assert_active()
workers = self.get_workers()
return [worker.execute_async(fn, *fn_args, **fn_kwargs) for worker in workers]
def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> List[T]:
"""Execute ``func`` on each worker and return the outputs of ``func``.
Returns:
(List[T]) A list containing the output of ``func`` from each
worker. The order is the same as ``self.workers``.
"""
return ray_get_safe(self.execute_async(fn, *fn_args, **fn_kwargs))
def execute_single_async(
self, rank: int, fn: Callable[..., T], *fn_args, **fn_kwargs
) -> ObjectRef:
"""Execute ``func`` on worker with ``rank`` and return futures.
Returns:
(ObjectRef) An ObjectRef representing the output of func.
"""
self._assert_active()
workers = self.get_workers()
if rank >= len(workers):
raise ValueError(
f"The provided {rank=} is " f"not valid for {len(workers)} workers."
)
return workers[rank].execute_async(fn, *fn_args, **fn_kwargs)
def execute_single(
self, rank: int, fn: Callable[..., T], *fn_args, **fn_kwargs
) -> T:
"""Execute ``func`` on worker with ``rank``.
Returns:
(T) The output of func.
"""
return ray.get(self.execute_single_async(rank, fn, *fn_args, **fn_kwargs))
#####################################################################################
# Utility Methods
#####################################################################################
def has_started(self) -> bool:
return self._worker_group_state is not None
def _assert_active(self):
"""Assert that the worker group is active (not shut down)."""
if not self.has_started():
raise ValueError(
"Worker group is not active. "
"Call WorkerGroup.create() to create a new worker group."
)
def _assert_inactive(self):
"""Assert that the worker group is inactive (shut down)."""
if self.has_started():
raise ValueError(
"Worker group is active. "
"Call WorkerGroup.shutdown() to shut down the worker group."
)
def get_workers(self) -> List[Worker]:
self._assert_active()
return self._worker_group_state.workers
def get_worker_group_context(self) -> WorkerGroupContext:
return self._worker_group_context
def get_worker_group_state(self) -> WorkerGroupState:
self._assert_active()
return self._worker_group_state
def get_latest_poll_status(self) -> Optional[WorkerGroupPollStatus]:
self._assert_active()
return self._latest_poll_status
def __len__(self) -> int:
self._assert_active()
return len(self.get_workers())
def get_resources_per_worker(self) -> dict:
"""Get the resources allocated per worker."""
return copy.deepcopy(self._worker_group_context.resources_per_worker)
#########################################################################################
# Static Utility Methods
#########################################################################################
@staticmethod
def _assign_worker_ranks(workers: List[Worker]) -> List[Worker]:
"""Assign world ranks to workers by increasing node id and GPU id.
Initializes the `DistributedContext` for each worker.
Returns:
workers: Workers sorted by increasing world rank,
with the `DistributedContext` set.
"""
workers = WorkerGroup._sort_workers_by_node_id_and_gpu_id(workers)
node_ip_to_workers = collections.defaultdict(list)
for worker in workers:
node_ip_to_workers[worker.metadata.node_ip].append(worker)
node_ips = list(node_ip_to_workers.keys())
for world_rank, worker in enumerate(workers):
distributed_context = DistributedContext(
local_rank=node_ip_to_workers[worker.metadata.node_ip].index(worker),
local_world_size=len(node_ip_to_workers[worker.metadata.node_ip]),
world_rank=world_rank,
world_size=len(workers),
node_rank=node_ips.index(worker.metadata.node_ip),
)
worker.distributed_context = distributed_context
return workers
@staticmethod
def _decorate_worker_log_file_paths(workers: List[Worker]) -> List[Worker]:
"""Decorate worker log file paths.
Returns:
workers: Workers with log file paths set.
"""
# Execute all tasks in parallel and then get results
log_path_refs = [
worker.execute_async(get_train_application_worker_log_path)
for worker in workers
]
log_paths = ray_get_safe(log_path_refs)
# Assign log paths to workers
for worker, log_path in zip(workers, log_paths):
worker.log_file_path = log_path
return workers
@staticmethod
def _sort_workers_by_node_id_and_gpu_id(
workers: List[Worker], _first_id: Optional[str] = None
) -> List[Worker]:
"""Reorder the workers by their node id and the lowest GPU id.
Example:
Given workers with the following attributes:
worker_0: id=1, gpu_ids=[1]
worker_1: id=0, gpu_ids=[0]
worker_2: id=1, gpu_ids=[0]
worker_3: id=0, gpu_ids=[1]
The function will perform the following steps:
1. Group by node IP:
id=0: worker_1, worker_3
id=1: worker_0, worker_2
2. Sort each group by GPU ID:
id=0: worker_1 (gpu_id=0), worker_3 (gpu_id=1)
id=1: worker_2 (gpu_id=0), worker_0 (gpu_id=1)
Resulting in the order: [worker_1, worker_3, worker_2, worker_0]
Args:
_first_id: The first node id to group by.
"""
node_id_to_workers = collections.defaultdict(list)
if _first_id is not None:
node_id_to_workers[_first_id] = []
for worker in workers:
node_id_to_workers[worker.metadata.node_id].append(worker)
# Sort workers on the same node by the lowest GPU id
# More details: https://github.com/ray-project/ray/issues/40803
def get_lowest_gpu_id(worker) -> int:
gpu_ids = worker.metadata.accelerator_ids.get("GPU", [])
# If there are no GPU IDs, return 0 as a default
if not gpu_ids:
return 0
# Attempt to convert GPU IDs to integers and find the minimum ID.
# Fallback to return the minimum string-based ID
try:
return min(int(gpu_id) for gpu_id in gpu_ids)
except ValueError:
return min(gpu_ids)
for node_id in node_id_to_workers:
node_id_to_workers[node_id].sort(key=get_lowest_gpu_id)
sorted_workers = []
for workers in node_id_to_workers.values():
sorted_workers.extend(workers)
return sorted_workers
@staticmethod
def _get_worker_runtime_env(
custom_runtime_env: Union[Dict, RuntimeEnv],
) -> Union[Dict, RuntimeEnv]:
"""Update custom runtime env with internal Ray Train env vars
that should be propagated from the driver to worker processes.
Args:
custom_runtime_env: The custom runtime env dict passed in by the user.
Returns:
A copy of the custom runtime env dict updated with internal
Ray Train environment variables to propagate to worker processes.
"""
merged_env_vars = get_env_vars_to_propagate()
merged_env_vars.update(custom_runtime_env.get("env_vars", {}))
runtime_env = dict(custom_runtime_env)
runtime_env["env_vars"] = merged_env_vars
return runtime_env
| WorkerGroup |
python | neetcode-gh__leetcode | python/0064-minimum-path-sum.py | {
"start": 0,
"end": 490
} | class ____:
def minPathSum(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
prev = [float("inf")] * n
prev[-1] = 0
for row in range(m - 1, -1, -1):
dp = [float("inf")] * n
for col in range(n - 1, -1, -1):
if col < n - 1:
dp[col] = min(dp[col], dp[col + 1])
dp[col] = min(dp[col], prev[col]) + grid[row][col]
prev = dp
return prev[0]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/test_secret_manager.py | {
"start": 2125,
"end": 2939
} | class ____:
@pytest.fixture
def gcp_secret(self, gcp_credentials):
_gcp_secret = GcpSecret(
gcp_credentials=gcp_credentials, secret_name="my_secret_name"
)
return _gcp_secret
def test_write_secret(self, gcp_secret):
expected = "projects/gcp_credentials_project/secrets/my_secret_name"
actual = gcp_secret.write_secret(secret_data=b"my_secret_data")
assert actual == expected
def test_read_secret(self, gcp_secret):
expected = b"secret_data"
actual = gcp_secret.read_secret()
assert actual == expected
def test_delete_secret(self, gcp_secret):
expected = "projects/gcp_credentials_project/secrets/my_secret_name"
actual = gcp_secret.delete_secret()
assert actual == expected
| TestGcpSecret |
python | dask__distributed | distributed/client.py | {
"start": 3782,
"end": 4341
} | class ____(CancelledError):
key: str
reason: str
msg: str | None
def __init__(self, key: str, reason: str | None, msg: str | None = None):
self.key = key
self.reason = reason if reason else "unknown"
self.msg = msg
def __str__(self) -> str:
result = f"{self.key} cancelled for reason: {self.reason}."
if self.msg:
result = "\n".join([result, self.msg])
return result
def __reduce__(self):
return self.__class__, (self.key, self.reason, self.msg)
| FutureCancelledError |
python | scipy__scipy | scipy/stats/tests/test_sampling.py | {
"start": 50070,
"end": 51908
} | class ____:
# pdf with piecewise linear function as transformed density
# with T = -1/sqrt with shift. Taken from UNU.RAN test suite
# (from file t_srou.c)
class dist:
def __init__(self, shift):
self.shift = shift
self.mode = shift
def pdf(self, x):
x -= self.shift
y = 1. / (abs(x) + 1.)
return 0.5 * y * y
def cdf(self, x):
x -= self.shift
if x <= 0.:
return 0.5 / (1. - x)
else:
return 1. - 0.5 / (1. + x)
dists = [dist(0.), dist(10000.)]
# exact mean and variance of the distributions in the list dists
mv1 = [0., np.inf]
mv2 = [10000., np.inf]
mvs = [mv1, mv2]
@pytest.mark.parametrize("dist, mv_ex",
zip(dists, mvs))
@pytest.mark.thread_unsafe
def test_basic(self, dist, mv_ex):
rng = SimpleRatioUniforms(dist, mode=dist.mode, random_state=42)
check_cont_samples(rng, dist, mv_ex)
rng = SimpleRatioUniforms(dist, mode=dist.mode,
cdf_at_mode=dist.cdf(dist.mode),
random_state=42)
check_cont_samples(rng, dist, mv_ex)
# test domains with inf + nan in them. need to write a custom test for
# this because not all methods support infinite tails.
@pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
def test_inf_nan_domains(self, domain, err, msg):
with pytest.raises(err, match=msg):
SimpleRatioUniforms(StandardNormal(), domain=domain)
def test_bad_args(self):
# pdf_area < 0
with pytest.raises(ValueError, match=r"`pdf_area` must be > 0"):
SimpleRatioUniforms(StandardNormal(), mode=0, pdf_area=-1)
| TestSimpleRatioUniforms |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 25290,
"end": 28270
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"license-files": 9000, "dynamic": ["license-files"]}}
)
with pytest.raises(
ValueError,
match=(
"Metadata field `license-files` cannot be both statically defined and listed in field `project.dynamic`"
),
):
_ = metadata.core.license_files
def test_not_array(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license-files": 9000}})
with pytest.raises(TypeError, match="Field `project.license-files` must be an array"):
_ = metadata.core.license_files
def test_entry_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"license-files": [9000]}})
with pytest.raises(TypeError, match="Entry #1 of field `project.license-files` must be a string"):
_ = metadata.core.license_files
def test_default_globs_no_licenses(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.license_files == metadata.core.license_files == []
def test_default_globs_with_licenses(self, temp_dir):
metadata = ProjectMetadata(str(temp_dir), None, {"project": {}})
expected = []
(temp_dir / "foo").touch()
for name in ("LICENSE", "LICENCE", "COPYING", "NOTICE", "AUTHORS"):
(temp_dir / name).touch()
expected.append(name)
name_with_extension = f"{name}.txt"
(temp_dir / f"{name}.txt").touch()
expected.append(name_with_extension)
assert metadata.core.license_files == sorted(expected)
def test_globs_with_licenses(self, temp_dir):
metadata = ProjectMetadata(str(temp_dir), None, {"project": {"license-files": ["LICENSES/*"]}})
licenses_dir = temp_dir / "LICENSES"
licenses_dir.mkdir()
(licenses_dir / "MIT.txt").touch()
(licenses_dir / "Apache-2.0.txt").touch()
for name in ("LICENSE", "LICENCE", "COPYING", "NOTICE", "AUTHORS"):
(temp_dir / name).touch()
assert metadata.core.license_files == ["LICENSES/Apache-2.0.txt", "LICENSES/MIT.txt"]
def test_paths_with_licenses(self, temp_dir):
metadata = ProjectMetadata(
str(temp_dir),
None,
{"project": {"license-files": ["LICENSES/Apache-2.0.txt", "LICENSES/MIT.txt", "COPYING"]}},
)
licenses_dir = temp_dir / "LICENSES"
licenses_dir.mkdir()
(licenses_dir / "MIT.txt").touch()
(licenses_dir / "Apache-2.0.txt").touch()
for name in ("LICENSE", "LICENCE", "COPYING", "NOTICE", "AUTHORS"):
(temp_dir / name).touch()
assert metadata.core.license_files == ["COPYING", "LICENSES/Apache-2.0.txt", "LICENSES/MIT.txt"]
| TestLicenseFiles |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_test.py | {
"start": 25861,
"end": 26054
} | class ____(DescriptorTest):
"""Redo the same tests as above, but with a separate DescriptorPool."""
def GetDescriptorPool(self):
return descriptor_pool.DescriptorPool()
| NewDescriptorTest |
python | apache__airflow | providers/snowflake/tests/unit/snowflake/operators/test_snowpark.py | {
"start": 1402,
"end": 6659
} | class ____:
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_operator_no_param(self, mock_snowflake_hook, dag_maker):
number = 11
with dag_maker(dag_id=TEST_DAG_ID) as dag:
def func1(session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
def func2():
return number
_ = [
SnowparkOperator(
task_id=f"{TASK_ID}_{i}",
snowflake_conn_id=CONN_ID,
python_callable=func,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
dag=dag,
)
for i, func in enumerate([func1, func2])
]
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() == number
assert mock_snowflake_hook.call_count == 2
assert mock_snowflake_hook.return_value.get_snowpark_session.call_count == 2
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_operator_with_param(self, mock_snowflake_hook, dag_maker):
number = 11
with dag_maker(dag_id=TEST_DAG_ID) as dag:
def func1(session: Session, number: int):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
def func2(number: int, session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
return number
def func3(number: int):
return number
_ = [
SnowparkOperator(
task_id=f"{TASK_ID}_{i}",
snowflake_conn_id=CONN_ID,
python_callable=func,
op_kwargs={"number": number},
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
dag=dag,
)
for i, func in enumerate([func1, func2, func3])
]
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() == number
assert mock_snowflake_hook.call_count == 3
assert mock_snowflake_hook.return_value.get_snowpark_session.call_count == 3
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_operator_no_return(self, mock_snowflake_hook, dag_maker):
with dag_maker(dag_id=TEST_DAG_ID) as dag:
def func(session: Session):
assert session == mock_snowflake_hook.return_value.get_snowpark_session.return_value
SnowparkOperator(
task_id=TASK_ID,
snowflake_conn_id=CONN_ID,
python_callable=func,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
dag=dag,
)
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.run()
assert ti.xcom_pull() is None
mock_snowflake_hook.assert_called_once()
mock_snowflake_hook.return_value.get_snowpark_session.assert_called_once()
@mock.patch("airflow.providers.snowflake.operators.snowpark.SnowflakeHook")
def test_snowpark_operator_session_tag(self, mock_snowflake_hook, dag_maker):
mock_session = mock_snowflake_hook.return_value.get_snowpark_session.return_value
mock_session.query_tag = {}
# Mock the update_query_tag function to combine with another dict
def update_query_tag(new_tags):
mock_session.query_tag.update(new_tags)
mock_session.update_query_tag = mock.Mock(side_effect=update_query_tag)
with dag_maker(dag_id=TEST_DAG_ID) as dag:
def func(session: Session):
return session.query_tag
SnowparkOperator(
task_id=TASK_ID,
snowflake_conn_id=CONN_ID,
python_callable=func,
warehouse="test_warehouse",
database="test_database",
schema="test_schema",
role="test_role",
authenticator="externalbrowser",
dag=dag,
)
dr = dag_maker.create_dagrun()
ti = dr.get_task_instances()[0]
ti.run()
query_tag = ti.xcom_pull()
assert query_tag == {
"dag_id": TEST_DAG_ID,
"dag_run_id": dr.run_id,
"task_id": TASK_ID,
"operator": "SnowparkOperator",
}
| TestSnowparkOperator |
python | getsentry__sentry | tests/sentry/codecov/test_client.py | {
"start": 338,
"end": 4261
} | class ____(TestCase):
def setUp(self) -> None:
self.test_git_provider_org = "test-org"
self.test_secret = "test-secret-" + "a" * 20
self.test_timestamp = datetime.datetime.now(datetime.UTC)
self._mock_now = patch("datetime.datetime.now", return_value=self.test_timestamp)
with self.options(
{
"codecov.api-bridge-signing-secret": self.test_secret,
}
):
self.codecov_client = CodecovApiClient(self.test_git_provider_org)
def test_raises_configuration_error_without_signing_secret(self) -> None:
with self.options(
{
"codecov.api-bridge-signing-secret": None,
}
):
with pytest.raises(ConfigurationError):
CodecovApiClient(self.test_git_provider_org)
def test_creates_valid_jwt(self) -> None:
encoded_jwt = self.codecov_client._create_jwt()
header = jwt.peek_header(encoded_jwt)
assert header == {
"typ": "JWT",
"alg": "HS256",
}
# Ensure the claims are what we expect, separate from verifying the
# signature and standard claims
claims = jwt.peek_claims(encoded_jwt)
expected_iat = int(self.test_timestamp.timestamp())
expected_exp = expected_iat + 300
assert claims == {
"g_o": self.test_git_provider_org,
"g_p": GitProvider.GitHub.value,
"iss": "https://sentry.io",
"iat": expected_iat,
"exp": expected_exp,
}
# Ensure we can verify the signature and whatall
jwt.decode(encoded_jwt, self.test_secret)
@patch("requests.get")
def test_sends_get_request_with_jwt_auth_header(self, mock_get: MagicMock) -> None:
with patch.object(self.codecov_client, "_create_jwt", return_value="test"):
self.codecov_client.get(
"/example/endpoint", {"example-param": "foo"}, {"X_TEST_HEADER": "bar"}
)
mock_get.assert_called_once_with(
"http://example.com/example/endpoint",
params={"example-param": "foo"},
headers={
"Authorization": "Bearer test",
"X_TEST_HEADER": "bar",
},
timeout=10,
)
@patch("requests.post")
def test_sends_post_request_with_jwt_auth_header(self, mock_post: MagicMock) -> None:
with patch.object(self.codecov_client, "_create_jwt", return_value="test"):
self.codecov_client.post(
"/example/endpoint", data={"example-param": "foo"}, headers={"X_TEST_HEADER": "bar"}
)
mock_post.assert_called_once_with(
"http://example.com/example/endpoint",
data={"example-param": "foo"},
json=None,
headers={
"Authorization": "Bearer test",
"X_TEST_HEADER": "bar",
},
timeout=10,
)
@patch("requests.post")
def test_query_sends_post_request_with_jwt_auth_header(self, mock_post: MagicMock) -> None:
with patch.object(self.codecov_client, "_create_jwt", return_value="test"):
self.codecov_client.query("query { test }", {"test": "test"}, GitProvider.GitHub)
mock_post.assert_called_once_with(
"http://example.com/graphql/sentry/github",
data=None,
json={"query": "query { test }", "variables": {"test": "test"}},
headers={
"Content-Type": "application/json; charset=utf-8",
"Accept": "application/json",
"Token-Type": "github-token",
"Authorization": "Bearer test",
},
timeout=10,
)
| TestCodecovApiClient |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-slack/llama_index/readers/slack/base.py | {
"start": 375,
"end": 12412
} | class ____(BasePydanticReader):
"""
Slack reader.
Reads conversations from channels. If an earliest_date is provided, an
optional latest_date can also be provided. If no latest_date is provided,
we assume the latest date is the current timestamp.
Args:
slack_token (Optional[str]): Slack token. If not provided, we
assume the environment variable `SLACK_BOT_TOKEN` is set.
ssl (Optional[str]): Custom SSL context. If not provided, it is assumed
there is already an SSL context available.
earliest_date (Optional[datetime]): Earliest date from which
to read conversations. If not provided, we read all messages.
latest_date (Optional[datetime]): Latest date from which to
read conversations. If not provided, defaults to current timestamp
in combination with earliest_date.
"""
is_remote: bool = True
slack_token: str
earliest_date_timestamp: Optional[float]
latest_date_timestamp: float
channel_types: str
_client: Any = PrivateAttr()
def __init__(
self,
slack_token: Optional[str] = None,
ssl: Optional[SSLContext] = None,
earliest_date: Optional[datetime] = None,
latest_date: Optional[datetime] = None,
earliest_date_timestamp: Optional[float] = None,
latest_date_timestamp: Optional[float] = None,
channel_types: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
from slack_sdk import WebClient
if slack_token is None:
slack_token = os.environ["SLACK_BOT_TOKEN"]
if slack_token is None:
raise ValueError(
"Must specify `slack_token` or set environment "
"variable `SLACK_BOT_TOKEN`."
)
if ssl is None:
client = WebClient(token=slack_token)
else:
client = WebClient(token=slack_token, ssl=ssl)
if latest_date is not None and earliest_date is None:
raise ValueError(
"Must specify `earliest_date` if `latest_date` is specified."
)
if earliest_date is not None:
earliest_date_timestamp = earliest_date.timestamp()
else:
earliest_date_timestamp = None or earliest_date_timestamp
if latest_date is not None:
latest_date_timestamp = latest_date.timestamp()
else:
latest_date_timestamp = datetime.now().timestamp() or latest_date_timestamp
if channel_types is not None:
channel_types = channel_types
else:
channel_types = "public_channel,private_channel"
res = client.api_test()
if not res["ok"]:
raise ValueError(f"Error initializing Slack API: {res['error']}")
super().__init__(
slack_token=slack_token,
earliest_date_timestamp=earliest_date_timestamp,
latest_date_timestamp=latest_date_timestamp,
channel_types=channel_types,
)
self._client = client
@classmethod
def class_name(cls) -> str:
return "SlackReader"
def _read_message(self, channel_id: str, message_ts: str) -> str:
from slack_sdk.errors import SlackApiError
"""Read a message."""
messages_text: List[str] = []
next_cursor = None
while True:
try:
# https://slack.com/api/conversations.replies
# List all replies to a message, including the message itself.
if self.earliest_date_timestamp is None:
result = self._client.conversations_replies(
channel=channel_id, ts=message_ts, cursor=next_cursor
)
else:
conversations_replies_kwargs = {
"channel": channel_id,
"ts": message_ts,
"cursor": next_cursor,
"latest": str(self.latest_date_timestamp),
}
if self.earliest_date_timestamp is not None:
conversations_replies_kwargs["oldest"] = str(
self.earliest_date_timestamp
)
result = self._client.conversations_replies(
**conversations_replies_kwargs # type: ignore
)
messages = result["messages"]
messages_text.extend(message["text"] for message in messages)
if not result["has_more"]:
break
next_cursor = result["response_metadata"]["next_cursor"]
except SlackApiError as e:
error = e.response["error"]
if error == "ratelimited":
retry_after = int(e.response.headers.get("retry-after", 1))
logger.error(
f"Rate limit error reached, sleeping for: {retry_after} seconds"
)
time.sleep(retry_after)
elif error == "not_in_channel":
logger.error(
f"Error: Bot not in channel: {channel_id}, cannot read messages."
)
break
else:
logger.error(
f"Error parsing conversation replies for channel {channel_id}: {e}"
)
break
return "\n\n".join(messages_text)
def _read_channel(self, channel_id: str, reverse_chronological: bool) -> str:
from slack_sdk.errors import SlackApiError
"""Read a channel."""
result_messages: List[str] = []
next_cursor = None
while True:
try:
# Call the conversations.history method using the WebClient
# conversations.history returns the first 100 messages by default
# These results are paginated,
# see: https://api.slack.com/methods/conversations.history$pagination
conversations_history_kwargs = {
"channel": channel_id,
"cursor": next_cursor,
"latest": str(self.latest_date_timestamp),
}
if self.earliest_date_timestamp is not None:
conversations_history_kwargs["oldest"] = str(
self.earliest_date_timestamp
)
result = self._client.conversations_history(
**conversations_history_kwargs # type: ignore
)
conversation_history = result["messages"]
# Print results
logger.info(
f"{len(conversation_history)} messages found in {channel_id}"
)
result_messages.extend(
self._read_message(channel_id, message["ts"])
for message in conversation_history
)
if not result["has_more"]:
break
next_cursor = result["response_metadata"]["next_cursor"]
except SlackApiError as e:
error = e.response["error"]
if error == "ratelimited":
retry_after = int(e.response.headers.get("retry-after", 1))
logger.error(
f"Rate limit error reached, sleeping for: {retry_after} seconds"
)
time.sleep(retry_after)
elif error == "not_in_channel":
logger.error(
f"Error: Bot not in channel: {channel_id}, cannot read messages."
)
break
else:
logger.error(
f"Error parsing conversation replies for channel {channel_id}: {e}"
)
break
return (
"\n\n".join(result_messages)
if reverse_chronological
else "\n\n".join(result_messages[::-1])
)
def load_data(
self, channel_ids: List[str], reverse_chronological: bool = True
) -> List[Document]:
"""
Load data from the input slack channel ids.
Args:
channel_ids (List[str]): List of channel ids to read.
Returns:
List[Document]: List of documents.
"""
results = []
for channel_id in channel_ids:
channel_content = self._read_channel(
channel_id, reverse_chronological=reverse_chronological
)
results.append(
Document(
id_=channel_id,
text=channel_content,
metadata={"channel": channel_id},
)
)
return results
def _is_regex(self, pattern: str) -> bool:
"""Check if a string is a regex pattern."""
try:
re.compile(pattern)
return True
except re.error:
return False
def _list_channels(self) -> List[Dict[str, Any]]:
"""List channels based on the types."""
from slack_sdk.errors import SlackApiError
try:
result = self._client.conversations_list(types=self.channel_types)
return result["channels"]
except SlackApiError as e:
logger.error(f"Error fetching channels: {e.response['error']}")
raise
def _filter_channels(
self, channels: List[Dict[str, Any]], patterns: List[str]
) -> List[Dict[str, Any]]:
"""Filter channels based on the provided names and regex patterns."""
regex_patterns = [pattern for pattern in patterns if self._is_regex(pattern)]
exact_names = [pattern for pattern in patterns if not self._is_regex(pattern)]
# Match Exact Channel names
filtered_channels = [
channel for channel in channels if channel["name"] in exact_names
]
# Match Regex Patterns
for channel in channels:
for pattern in regex_patterns:
if re.match(pattern, channel["name"]):
filtered_channels.append(channel)
return filtered_channels
def get_channel_ids(self, channel_patterns: List[str]) -> List[str]:
"""
Get list of channel IDs based on names and regex patterns.
Args:
channel_patterns List[str]: List of channel name patterns (names or regex) to read.
Returns:
List[Document]: List of documents.
"""
if not channel_patterns:
raise ValueError("No channel patterns provided.")
channels = self._list_channels()
logger.info(f"Total channels fetched: {len(channels)}")
if not channels:
logger.info("No channels found in Slack.")
return []
filtered_channels = self._filter_channels(
channels=channels, patterns=channel_patterns
)
logger.info(f"Channels matching patterns: {len(filtered_channels)}")
if not filtered_channels:
logger.info(
"None of the channel names or pattern matched with Slack Channels."
)
return []
channel_ids = [channel["id"] for channel in filtered_channels]
return list(set(channel_ids))
if __name__ == "__main__":
reader = SlackReader()
# load data using only channel ids
logger.info(reader.load_data(channel_ids=["C079KD1M8J3", "C078YQP5B51"]))
# load data using exact channel names and regex patterns
# get the channel ids first
channel_ids = reader.get_channel_ids(
channel_patterns=["^dev.*", "^qa.*", "test_channel"]
)
# load data using above channel_ids
logger.info(reader.load_data(channel_ids=channel_ids))
| SlackReader |
python | pytorch__pytorch | torch/_jit_internal.py | {
"start": 51307,
"end": 53544
} | class ____(pickle.Pickler):
def __init__(self, *args, tensors: list[torch.Tensor], **kwargs):
super().__init__(*args, **kwargs)
self.tensors = tensors
def persistent_id(self, obj):
if isinstance(obj, torch.Tensor):
self.tensors.append(obj)
return ""
# Since we just want to extract tensors, we don't mind if an object is
# unpicklable if it doesn't contain tensors, as we can just ignore/skip
# it. To play it safe, we only do so for common objects that we're sure
# don't contain tensors. Feel free to add new types here. Note also that
# even if a type isn't listed here this won't block users, since they
# can just add a __getstate__ or __reduce__ method to their class.
if isinstance(obj, LockType):
return ""
# Futures and RRefs don't technically contain a value, they just offer
# the means to access a value.
if isinstance(obj, CFuture) or is_rref_instance(obj):
return ""
if isinstance(obj, CAwait):
return ""
if isinstance(obj, torch.cuda.Event):
return ""
if isinstance(obj, threading.Thread):
return ""
return None
def _extract_tensors(obj):
r"""
This function is exclusively called from C++.
See ``torch/csrc/jit/python/python_ivalue.h``.
It extracts the tensors contained in the given object, through pickling.
"""
tensors: list[torch.Tensor] = []
extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)
extractor.dump(obj)
return tensors
def _get_model_id(obj) -> Optional[str]:
if isinstance(obj, torch.jit.ScriptModule):
return str(obj._c._type())
elif isinstance(obj, torch.jit.ScriptFunction):
return obj.qualified_name
else:
return None
# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass
# that were previously dropped. To preserve the behavior, explicitly drop them there
if sys.version_info >= (3, 11):
_drop(enum.Enum.__new__)
_drop(enum.Enum.__format__)
_drop(enum.Enum.__repr__)
_drop(enum.Enum.__str__)
| _TensorExtractor |
python | pyparsing__pyparsing | examples/TAP.py | {
"start": 2055,
"end": 2540
} | class ____:
def __init__(self, results):
self.num = results.testNumber
self.passed = results.passed == "ok"
self.skipped = self.todo = False
if results.directive:
self.skipped = results.directive[0][0] == "SKIP"
self.todo = results.directive[0][0] == "TODO"
@classmethod
def bailedTest(cls, num):
ret = TAPTest(empty.parse_string(""))
ret.num = num
ret.skipped = True
return ret
| TAPTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_checkbox05.py | {
"start": 350,
"end": 5146
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("checkbox05.xlsx")
def test_create_file_with_insert_checkbox(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox("E9", False)
cell_format = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format,
"criteria": "equal to",
"value": "FALSE",
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_insert_checkbox_and_manual_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format1 = workbook.add_format({"checkbox": True})
worksheet.insert_checkbox("E9", False, cell_format1)
cell_format2 = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format2,
"criteria": "equal to",
"value": "FALSE",
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_boolean_and_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format1 = workbook.add_format({"checkbox": True})
worksheet.write("E9", False, cell_format1)
cell_format2 = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format2,
"criteria": "equal to",
"value": "FALSE",
},
)
workbook.close()
self.assertExcelEqual()
def test_conditional_format_with_boolean(self):
"""Sub-test for conditional format value as a Python boolean."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format1 = workbook.add_format({"checkbox": True})
worksheet.write("E9", False, cell_format1)
cell_format2 = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format2,
"criteria": "equal to",
"value": False,
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_color_type(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox("E9", False)
cell_format = workbook.add_format(
{
"font_color": Color("#9C0006"),
"bg_color": Color("#FFC7CE"),
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format,
"criteria": "equal to",
"value": "FALSE",
},
)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_color_methods(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox("E9", False)
cell_format = workbook.add_format(
{
"font_color": Color.rgb("#9C0006"),
"bg_color": Color.rgb_integer(0xFFC7CE),
}
)
worksheet.conditional_format(
"E9",
{
"type": "cell",
"format": cell_format,
"criteria": "equal to",
"value": "FALSE",
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | etianen__django-reversion | tests/test_app/models.py | {
"start": 1892,
"end": 2141
} | class ____(models.Model):
test_model_inline = models.ForeignKey(
TestModelInline,
on_delete=models.CASCADE,
)
nested_inline_name = models.CharField(
max_length=191,
default="v1",
)
| TestModelNestedInline |
python | facelessuser__soupsieve | tests/test_level4/test_paused.py | {
"start": 51,
"end": 931
} | class ____(util.TestCase):
"""Test paused selectors."""
MARKUP = """
<!DOCTYPE html>
<html>
<body>
<video id="vid" width="320" height="240" controls>
<source src="movie.mp4" type="video/mp4">
<source src="movie.ogg" type="video/ogg">
Your browser does not support the video tag.
</video>
</body>
</html>
"""
def test_paused(self):
"""Test paused (matches nothing)."""
# Not actually sure how this is used, but it won't match anything anyways
self.assert_selector(
self.MARKUP,
"video:paused",
[],
flags=util.HTML
)
def test_not_paused(self):
"""Test not paused."""
self.assert_selector(
self.MARKUP,
"video:not(:paused)",
["vid"],
flags=util.HTML
)
| TestPaused |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-looker/source_looker/components.py | {
"start": 289,
"end": 1910
} | class ____(NoAuth):
"""
Authenticator that sets the Authorization header on the HTTP requests sent using access token which is updated upon expiration.
The header is of the form:
`"Authorization": "token <access_token>"`
Attributes:
config (Config): The user-provided configuration as specified by the source's spec
"""
config: Mapping[str, Any]
def __post_init__(self, config):
self._access_token = None
self._token_expiry_date = pendulum.now()
def update_access_token(self) -> Optional[str]:
domain = self.config.get("domain")
client_id = self.config.get("client_id")
client_secret = self.config.get("client_secret")
headers = {"Content-Type": "application/x-www-form-urlencoded"}
url = f"https://{domain}/api/{API_VERSION}/login"
try:
resp = requests.post(url=url, headers=headers, data=f"client_id={client_id}&client_secret={client_secret}")
if resp.status_code != 200:
raise LookerException("auth error: Unable to connect to the Looker API. Please check your credentials.")
except ConnectionError as error:
raise LookerException(f"auth error: {str(error)}")
data = resp.json()
self._access_token = data["access_token"]
self._token_expiry_date = pendulum.now().add(seconds=data["expires_in"])
def get_auth_header(self) -> Mapping[str, Any]:
if self._token_expiry_date < pendulum.now():
self.update_access_token()
return {"Authorization": f"token {self._access_token}"}
| LookerAuthenticator |
python | django__django | tests/one_to_one/models.py | {
"start": 2089,
"end": 2209
} | class ____(models.Model):
other = models.OneToOneField(Target, models.CASCADE, related_name="second_pointer")
| Pointer2 |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 16610,
"end": 16819
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'virtual']
valid_subsets = ['virtual']
fact_namespace = 'ansible_virtual'
collector_class = VirtualCollector
| TestVirtualFacts |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 73558,
"end": 74902
} | class ____(PreTrainedModel):
config = Qwen3OmniMoeTextConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen3OmniMoeThinkerTextDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(Qwen3OmniMoeThinkerTextTopKRouter, layer_name="mlp.router", index=0),
"hidden_states": Qwen3OmniMoeThinkerTextDecoderLayer,
"attentions": Qwen3OmniMoeThinkerTextAttention,
}
config_class = Qwen3OmniMoeTextConfig
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
std = self.config.initializer_range
if isinstance(module, Qwen3OmniMoeThinkerTextExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=std)
init.normal_(module.down_proj, mean=0.0, std=std)
elif isinstance(module, Qwen3OmniMoeThinkerTextTopKRouter):
init.normal_(module.weight, mean=0.0, std=std)
@use_kernel_forward_from_hub("RMSNorm")
| Qwen3OmniMoeThinkerTextPreTrainedModel |
python | apache__airflow | airflow-core/tests/unit/models/test_deadline.py | {
"start": 25929,
"end": 30483
} | class ____:
def setup_method(self):
self.original_dagrun_created = DeadlineReference.TYPES.DAGRUN_CREATED
self.original_dagrun_queued = DeadlineReference.TYPES.DAGRUN_QUEUED
self.original_dagrun = DeadlineReference.TYPES.DAGRUN
self.original_attrs = set(dir(ReferenceModels))
def teardown_method(self):
DeadlineReference.TYPES.DAGRUN_CREATED = self.original_dagrun_created
DeadlineReference.TYPES.DAGRUN_QUEUED = self.original_dagrun_queued
DeadlineReference.TYPES.DAGRUN = self.original_dagrun
for attr in set(dir(ReferenceModels)):
if attr not in self.original_attrs:
delattr(ReferenceModels, attr)
@staticmethod
def create_decorated_custom_ref():
@deadline_reference()
class DecoratedCustomRef(ReferenceModels.BaseDeadlineReference):
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
return DecoratedCustomRef
@staticmethod
def create_decorated_custom_ref_with_kwargs():
@deadline_reference()
class DecoratedCustomRefWithKwargs(ReferenceModels.BaseDeadlineReference):
required_kwargs = {"custom_id"}
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
return DecoratedCustomRefWithKwargs
@staticmethod
def create_decorated_custom_ref_queued():
@deadline_reference(DeadlineReference.TYPES.DAGRUN_QUEUED)
class DecoratedCustomRefQueued(ReferenceModels.BaseDeadlineReference):
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
return DecoratedCustomRefQueued
@pytest.mark.parametrize(
("reference_factory", "expected_timing"),
[
pytest.param(
create_decorated_custom_ref,
DeadlineReference.TYPES.DAGRUN_CREATED,
id="basic_decorated_custom_ref",
),
pytest.param(
create_decorated_custom_ref_with_kwargs,
DeadlineReference.TYPES.DAGRUN_CREATED,
id="decorated_ref_with_kwargs",
),
pytest.param(
create_decorated_custom_ref_queued,
DeadlineReference.TYPES.DAGRUN_QUEUED,
id="decorated_ref_queued",
),
],
)
def test_deadline_reference_decorator(self, reference_factory, expected_timing):
reference = reference_factory()
assert getattr(ReferenceModels, reference.__name__) is reference
assert getattr(DeadlineReference, reference.__name__).__class__ is reference
assert_correct_timing(reference, expected_timing)
assert_builtin_types_unchanged(
DeadlineReference.TYPES.DAGRUN_QUEUED, DeadlineReference.TYPES.DAGRUN_CREATED
)
def test_deadline_reference_decorator_with_invalid_class(self):
"""Test that the decorator raises error for invalid classes."""
with pytest.raises(ValueError, match="InvalidDecoratedRef must inherit from BaseDeadlineReference"):
@deadline_reference()
class InvalidDecoratedRef:
pass
def test_deadline_reference_decorator_with_invalid_timing(self):
invalid_timing = ("not", "a", "valid", "timing")
with pytest.raises(
ValueError,
match=re.escape(
f"Invalid deadline reference type {invalid_timing}; "
f"must be a valid DeadlineReference.TYPES option."
),
):
@deadline_reference(invalid_timing)
class DecoratedCustomRef(ReferenceModels.BaseDeadlineReference):
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
@mock.patch.object(DeadlineReference, "register_custom_reference")
def test_deadline_reference_decorator_calls_register_method(self, mock_register):
timing = DeadlineReference.TYPES.DAGRUN_QUEUED
@deadline_reference(timing)
class DecoratedCustomRef(ReferenceModels.BaseDeadlineReference):
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
mock_register.assert_called_once_with(DecoratedCustomRef, timing)
| TestDeadlineReferenceDecorator |
python | huggingface__transformers | tests/models/resnet/test_modeling_resnet.py | {
"start": 5504,
"end": 9459
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
ResNetModel,
ResNetForImageClassification,
ResNetBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = ResNetModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=ResNetConfig,
has_text_modality=False,
common_properties=["num_channels", "hidden_sizes"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="ResNet does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ResNet does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="ResNet does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/resnet-50"
model = ResNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| ResNetModelTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/engine.py | {
"start": 6660,
"end": 34035
} | class ____( # type:ignore[misc]
ProxyComparable[Connection],
StartableContext["AsyncConnection"],
AsyncConnectable,
):
"""An asyncio proxy for a :class:`_engine.Connection`.
:class:`_asyncio.AsyncConnection` is acquired using the
:meth:`_asyncio.AsyncEngine.connect`
method of :class:`_asyncio.AsyncEngine`::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
async with engine.connect() as conn:
result = await conn.execute(select(table))
.. versionadded:: 1.4
""" # noqa
# AsyncConnection is a thin proxy; no state should be added here
# that is not retrievable from the "sync" engine / connection, e.g.
# current transaction, info, etc. It should be possible to
# create a new AsyncConnection that matches this one given only the
# "sync" elements.
__slots__ = (
"engine",
"sync_engine",
"sync_connection",
)
def __init__(
self,
async_engine: AsyncEngine,
sync_connection: Optional[Connection] = None,
):
self.engine = async_engine
self.sync_engine = async_engine.sync_engine
self.sync_connection = self._assign_proxied(sync_connection)
sync_connection: Optional[Connection]
"""Reference to the sync-style :class:`_engine.Connection` this
:class:`_asyncio.AsyncConnection` proxies requests towards.
This instance can be used as an event target.
.. seealso::
:ref:`asyncio_events`
"""
sync_engine: Engine
"""Reference to the sync-style :class:`_engine.Engine` this
:class:`_asyncio.AsyncConnection` is associated with via its underlying
:class:`_engine.Connection`.
This instance can be used as an event target.
.. seealso::
:ref:`asyncio_events`
"""
@classmethod
def _regenerate_proxy_for_target(
cls, target: Connection, **additional_kw: Any # noqa: U100
) -> AsyncConnection:
return AsyncConnection(
AsyncEngine._retrieve_proxy_for_target(target.engine), target
)
async def start(
self, is_ctxmanager: bool = False # noqa: U100
) -> AsyncConnection:
"""Start this :class:`_asyncio.AsyncConnection` object's context
outside of using a Python ``with:`` block.
"""
if self.sync_connection:
raise exc.InvalidRequestError("connection is already started")
self.sync_connection = self._assign_proxied(
await greenlet_spawn(self.sync_engine.connect)
)
return self
@property
def connection(self) -> NoReturn:
"""Not implemented for async; call
:meth:`_asyncio.AsyncConnection.get_raw_connection`.
"""
raise exc.InvalidRequestError(
"AsyncConnection.connection accessor is not implemented as the "
"attribute may need to reconnect on an invalidated connection. "
"Use the get_raw_connection() method."
)
async def get_raw_connection(self) -> PoolProxiedConnection:
"""Return the pooled DBAPI-level connection in use by this
:class:`_asyncio.AsyncConnection`.
This is a SQLAlchemy connection-pool proxied connection
which then has the attribute
:attr:`_pool._ConnectionFairy.driver_connection` that refers to the
actual driver connection. Its
:attr:`_pool._ConnectionFairy.dbapi_connection` refers instead
to an :class:`_engine.AdaptedConnection` instance that
adapts the driver connection to the DBAPI protocol.
"""
return await greenlet_spawn(getattr, self._proxied, "connection")
@util.ro_non_memoized_property
def info(self) -> _InfoType:
"""Return the :attr:`_engine.Connection.info` dictionary of the
underlying :class:`_engine.Connection`.
This dictionary is freely writable for user-defined state to be
associated with the database connection.
This attribute is only available if the :class:`.AsyncConnection` is
currently connected. If the :attr:`.AsyncConnection.closed` attribute
is ``True``, then accessing this attribute will raise
:class:`.ResourceClosedError`.
.. versionadded:: 1.4.0b2
"""
return self._proxied.info
@util.ro_non_memoized_property
def _proxied(self) -> Connection:
if not self.sync_connection:
self._raise_for_not_started()
return self.sync_connection
def begin(self) -> AsyncTransaction:
"""Begin a transaction prior to autobegin occurring."""
assert self._proxied
return AsyncTransaction(self)
def begin_nested(self) -> AsyncTransaction:
"""Begin a nested transaction and return a transaction handle."""
assert self._proxied
return AsyncTransaction(self, nested=True)
async def invalidate(
self, exception: Optional[BaseException] = None
) -> None:
"""Invalidate the underlying DBAPI connection associated with
this :class:`_engine.Connection`.
See the method :meth:`_engine.Connection.invalidate` for full
detail on this method.
"""
return await greenlet_spawn(
self._proxied.invalidate, exception=exception
)
async def get_isolation_level(self) -> IsolationLevel:
return await greenlet_spawn(self._proxied.get_isolation_level)
def in_transaction(self) -> bool:
"""Return True if a transaction is in progress."""
return self._proxied.in_transaction()
def in_nested_transaction(self) -> bool:
"""Return True if a transaction is in progress.
.. versionadded:: 1.4.0b2
"""
return self._proxied.in_nested_transaction()
def get_transaction(self) -> Optional[AsyncTransaction]:
"""Return an :class:`.AsyncTransaction` representing the current
transaction, if any.
This makes use of the underlying synchronous connection's
:meth:`_engine.Connection.get_transaction` method to get the current
:class:`_engine.Transaction`, which is then proxied in a new
:class:`.AsyncTransaction` object.
.. versionadded:: 1.4.0b2
"""
trans = self._proxied.get_transaction()
if trans is not None:
return AsyncTransaction._retrieve_proxy_for_target(trans)
else:
return None
def get_nested_transaction(self) -> Optional[AsyncTransaction]:
"""Return an :class:`.AsyncTransaction` representing the current
nested (savepoint) transaction, if any.
This makes use of the underlying synchronous connection's
:meth:`_engine.Connection.get_nested_transaction` method to get the
current :class:`_engine.Transaction`, which is then proxied in a new
:class:`.AsyncTransaction` object.
.. versionadded:: 1.4.0b2
"""
trans = self._proxied.get_nested_transaction()
if trans is not None:
return AsyncTransaction._retrieve_proxy_for_target(trans)
else:
return None
@overload
async def execution_options(
self,
*,
compiled_cache: Optional[CompiledCacheType] = ...,
logging_token: str = ...,
isolation_level: IsolationLevel = ...,
no_parameters: bool = False,
stream_results: bool = False,
max_row_buffer: int = ...,
yield_per: int = ...,
insertmanyvalues_page_size: int = ...,
schema_translate_map: Optional[SchemaTranslateMapType] = ...,
preserve_rowcount: bool = False,
driver_column_names: bool = False,
**opt: Any,
) -> AsyncConnection: ...
@overload
async def execution_options(self, **opt: Any) -> AsyncConnection: ...
async def execution_options(self, **opt: Any) -> AsyncConnection:
r"""Set non-SQL options for the connection which take effect
during execution.
This returns this :class:`_asyncio.AsyncConnection` object with
the new options added.
See :meth:`_engine.Connection.execution_options` for full details
on this method.
"""
conn = self._proxied
c2 = await greenlet_spawn(conn.execution_options, **opt)
assert c2 is conn
return self
async def commit(self) -> None:
"""Commit the transaction that is currently in progress.
This method commits the current transaction if one has been started.
If no transaction was started, the method has no effect, assuming
the connection is in a non-invalidated state.
A transaction is begun on a :class:`_engine.Connection` automatically
whenever a statement is first executed, or when the
:meth:`_engine.Connection.begin` method is called.
"""
await greenlet_spawn(self._proxied.commit)
async def rollback(self) -> None:
"""Roll back the transaction that is currently in progress.
This method rolls back the current transaction if one has been started.
If no transaction was started, the method has no effect. If a
transaction was started and the connection is in an invalidated state,
the transaction is cleared using this method.
A transaction is begun on a :class:`_engine.Connection` automatically
whenever a statement is first executed, or when the
:meth:`_engine.Connection.begin` method is called.
"""
await greenlet_spawn(self._proxied.rollback)
async def close(self) -> None:
"""Close this :class:`_asyncio.AsyncConnection`.
This has the effect of also rolling back the transaction if one
is in place.
"""
await greenlet_spawn(self._proxied.close)
async def aclose(self) -> None:
"""A synonym for :meth:`_asyncio.AsyncConnection.close`.
The :meth:`_asyncio.AsyncConnection.aclose` name is specifically
to support the Python standard library ``@contextlib.aclosing``
context manager function.
.. versionadded:: 2.0.20
"""
await self.close()
async def exec_driver_sql(
self,
statement: str,
parameters: Optional[_DBAPIAnyExecuteParams] = None,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> CursorResult[Any]:
r"""Executes a driver-level SQL string and return buffered
:class:`_engine.Result`.
"""
result = await greenlet_spawn(
self._proxied.exec_driver_sql,
statement,
parameters,
execution_options,
_require_await=True,
)
return await _ensure_sync_result(result, self.exec_driver_sql)
@overload
def stream(
self,
statement: TypedReturnsRows[Unpack[_Ts]],
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> GeneratorStartableContext[AsyncResult[Unpack[_Ts]]]: ...
@overload
def stream(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> GeneratorStartableContext[AsyncResult[Unpack[TupleAny]]]: ...
@asyncstartablecontext
async def stream(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> AsyncIterator[AsyncResult[Unpack[TupleAny]]]:
"""Execute a statement and return an awaitable yielding a
:class:`_asyncio.AsyncResult` object.
E.g.::
result = await conn.stream(stmt)
async for row in result:
print(f"{row}")
The :meth:`.AsyncConnection.stream`
method supports optional context manager use against the
:class:`.AsyncResult` object, as in::
async with conn.stream(stmt) as result:
async for row in result:
print(f"{row}")
In the above pattern, the :meth:`.AsyncResult.close` method is
invoked unconditionally, even if the iterator is interrupted by an
exception throw. Context manager use remains optional, however,
and the function may be called in either an ``async with fn():`` or
``await fn()`` style.
.. versionadded:: 2.0.0b3 added context manager support
:return: an awaitable object that will yield an
:class:`_asyncio.AsyncResult` object.
.. seealso::
:meth:`.AsyncConnection.stream_scalars`
"""
if not self.dialect.supports_server_side_cursors:
raise exc.InvalidRequestError(
"Cant use `stream` or `stream_scalars` with the current "
"dialect since it does not support server side cursors."
)
result = await greenlet_spawn(
self._proxied.execute,
statement,
parameters,
execution_options=util.EMPTY_DICT.merge_with(
execution_options, {"stream_results": True}
),
_require_await=True,
)
assert result.context._is_server_side
ar = AsyncResult(result)
try:
yield ar
except GeneratorExit:
pass
else:
task = asyncio.create_task(ar.close())
await asyncio.shield(task)
@overload
async def execute(
self,
statement: TypedReturnsRows[Unpack[_Ts]],
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> CursorResult[Unpack[_Ts]]: ...
@overload
async def execute(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> CursorResult[Unpack[TupleAny]]: ...
async def execute(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> CursorResult[Unpack[TupleAny]]:
r"""Executes a SQL statement construct and return a buffered
:class:`_engine.Result`.
:param object: The statement to be executed. This is always
an object that is in both the :class:`_expression.ClauseElement` and
:class:`_expression.Executable` hierarchies, including:
* :class:`_expression.Select`
* :class:`_expression.Insert`, :class:`_expression.Update`,
:class:`_expression.Delete`
* :class:`_expression.TextClause` and
:class:`_expression.TextualSelect`
* :class:`_schema.DDL` and objects which inherit from
:class:`_schema.ExecutableDDLElement`
:param parameters: parameters which will be bound into the statement.
This may be either a dictionary of parameter names to values,
or a mutable sequence (e.g. a list) of dictionaries. When a
list of dictionaries is passed, the underlying statement execution
will make use of the DBAPI ``cursor.executemany()`` method.
When a single dictionary is passed, the DBAPI ``cursor.execute()``
method will be used.
:param execution_options: optional dictionary of execution options,
which will be associated with the statement execution. This
dictionary can provide a subset of the options that are accepted
by :meth:`_engine.Connection.execution_options`.
:return: a :class:`_engine.Result` object.
"""
result = await greenlet_spawn(
self._proxied.execute,
statement,
parameters,
execution_options=execution_options,
_require_await=True,
)
return await _ensure_sync_result(result, self.execute)
@overload
async def scalar(
self,
statement: TypedReturnsRows[_T],
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> Optional[_T]: ...
@overload
async def scalar(
self,
statement: Executable,
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> Any: ...
async def scalar(
self,
statement: Executable,
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> Any:
r"""Executes a SQL statement construct and returns a scalar object.
This method is shorthand for invoking the
:meth:`_engine.Result.scalar` method after invoking the
:meth:`_engine.Connection.execute` method. Parameters are equivalent.
:return: a scalar Python value representing the first column of the
first row returned.
"""
result = await self.execute(
statement, parameters, execution_options=execution_options
)
return result.scalar()
@overload
async def scalars(
self,
statement: TypedReturnsRows[_T],
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[_T]: ...
@overload
async def scalars(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[Any]: ...
async def scalars(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[Any]:
r"""Executes a SQL statement construct and returns a scalar objects.
This method is shorthand for invoking the
:meth:`_engine.Result.scalars` method after invoking the
:meth:`_engine.Connection.execute` method. Parameters are equivalent.
:return: a :class:`_engine.ScalarResult` object.
.. versionadded:: 1.4.24
"""
result = await self.execute(
statement, parameters, execution_options=execution_options
)
return result.scalars()
@overload
def stream_scalars(
self,
statement: TypedReturnsRows[_T],
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> GeneratorStartableContext[AsyncScalarResult[_T]]: ...
@overload
def stream_scalars(
self,
statement: Executable,
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> GeneratorStartableContext[AsyncScalarResult[Any]]: ...
@asyncstartablecontext
async def stream_scalars(
self,
statement: Executable,
parameters: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> AsyncIterator[AsyncScalarResult[Any]]:
r"""Execute a statement and return an awaitable yielding a
:class:`_asyncio.AsyncScalarResult` object.
E.g.::
result = await conn.stream_scalars(stmt)
async for scalar in result:
print(f"{scalar}")
This method is shorthand for invoking the
:meth:`_engine.AsyncResult.scalars` method after invoking the
:meth:`_engine.Connection.stream` method. Parameters are equivalent.
The :meth:`.AsyncConnection.stream_scalars`
method supports optional context manager use against the
:class:`.AsyncScalarResult` object, as in::
async with conn.stream_scalars(stmt) as result:
async for scalar in result:
print(f"{scalar}")
In the above pattern, the :meth:`.AsyncScalarResult.close` method is
invoked unconditionally, even if the iterator is interrupted by an
exception throw. Context manager use remains optional, however,
and the function may be called in either an ``async with fn():`` or
``await fn()`` style.
.. versionadded:: 2.0.0b3 added context manager support
:return: an awaitable object that will yield an
:class:`_asyncio.AsyncScalarResult` object.
.. versionadded:: 1.4.24
.. seealso::
:meth:`.AsyncConnection.stream`
"""
async with self.stream(
statement, parameters, execution_options=execution_options
) as result:
yield result.scalars()
async def run_sync(
self,
fn: Callable[Concatenate[Connection, _P], _T],
*arg: _P.args,
**kw: _P.kwargs,
) -> _T:
'''Invoke the given synchronous (i.e. not async) callable,
passing a synchronous-style :class:`_engine.Connection` as the first
argument.
This method allows traditional synchronous SQLAlchemy functions to
run within the context of an asyncio application.
E.g.::
def do_something_with_core(conn: Connection, arg1: int, arg2: str) -> str:
"""A synchronous function that does not require awaiting
:param conn: a Core SQLAlchemy Connection, used synchronously
:return: an optional return value is supported
"""
conn.execute(some_table.insert().values(int_col=arg1, str_col=arg2))
return "success"
async def do_something_async(async_engine: AsyncEngine) -> None:
"""an async function that uses awaiting"""
async with async_engine.begin() as async_conn:
# run do_something_with_core() with a sync-style
# Connection, proxied into an awaitable
return_code = await async_conn.run_sync(
do_something_with_core, 5, "strval"
)
print(return_code)
This method maintains the asyncio event loop all the way through
to the database connection by running the given callable in a
specially instrumented greenlet.
The most rudimentary use of :meth:`.AsyncConnection.run_sync` is to
invoke methods such as :meth:`_schema.MetaData.create_all`, given
an :class:`.AsyncConnection` that needs to be provided to
:meth:`_schema.MetaData.create_all` as a :class:`_engine.Connection`
object::
# run metadata.create_all(conn) with a sync-style Connection,
# proxied into an awaitable
with async_engine.begin() as conn:
await conn.run_sync(metadata.create_all)
.. note::
The provided callable is invoked inline within the asyncio event
loop, and will block on traditional IO calls. IO within this
callable should only call into SQLAlchemy's asyncio database
APIs which will be properly adapted to the greenlet context.
.. seealso::
:meth:`.AsyncSession.run_sync`
:ref:`session_run_sync`
''' # noqa: E501
return await greenlet_spawn(
fn, self._proxied, *arg, _require_await=False, **kw
)
def __await__(self) -> Generator[Any, None, AsyncConnection]:
return self.start().__await__()
async def __aexit__(self, type_: Any, value: Any, traceback: Any) -> None:
task = asyncio.create_task(self.close())
await asyncio.shield(task)
# START PROXY METHODS AsyncConnection
# code within this block is **programmatically,
# statically generated** by tools/generate_proxy_methods.py
@property
def closed(self) -> Any:
r"""Return True if this connection is closed.
.. container:: class_bases
Proxied for the :class:`_engine.Connection` class
on behalf of the :class:`_asyncio.AsyncConnection` class.
""" # noqa: E501
return self._proxied.closed
@property
def invalidated(self) -> Any:
r"""Return True if this connection was invalidated.
.. container:: class_bases
Proxied for the :class:`_engine.Connection` class
on behalf of the :class:`_asyncio.AsyncConnection` class.
This does not indicate whether or not the connection was
invalidated at the pool level, however
""" # noqa: E501
return self._proxied.invalidated
@property
def dialect(self) -> Dialect:
r"""Proxy for the :attr:`_engine.Connection.dialect` attribute
on behalf of the :class:`_asyncio.AsyncConnection` class.
""" # noqa: E501
return self._proxied.dialect
@dialect.setter
def dialect(self, attr: Dialect) -> None:
self._proxied.dialect = attr
@property
def default_isolation_level(self) -> Any:
r"""The initial-connection time isolation level associated with the
:class:`_engine.Dialect` in use.
.. container:: class_bases
Proxied for the :class:`_engine.Connection` class
on behalf of the :class:`_asyncio.AsyncConnection` class.
This value is independent of the
:paramref:`.Connection.execution_options.isolation_level` and
:paramref:`.Engine.execution_options.isolation_level` execution
options, and is determined by the :class:`_engine.Dialect` when the
first connection is created, by performing a SQL query against the
database for the current isolation level before any additional commands
have been emitted.
Calling this accessor does not invoke any new SQL queries.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current actual isolation level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
""" # noqa: E501
return self._proxied.default_isolation_level
# END PROXY METHODS AsyncConnection
@util.create_proxy_methods(
Engine,
":class:`_engine.Engine`",
":class:`_asyncio.AsyncEngine`",
classmethods=[],
methods=[
"clear_compiled_cache",
"update_execution_options",
"get_execution_options",
],
attributes=["url", "pool", "dialect", "engine", "name", "driver", "echo"],
)
# "Class has incompatible disjoint bases" - no idea
| AsyncConnection |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/multi_client_input_util_test.py | {
"start": 10617,
"end": 19586
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
logging.info('Check per client log in Test artifacts.')
self.server_ports = [
multi_client_test_util.pick_unused_port() for _ in range(NUM_CLIENTS)
]
self.worker_ports = [
multi_client_test_util.pick_unused_port() for _ in range(NUM_CLIENTS)
]
worker_addresses = [f'localhost:{port}' for port in self.worker_ports]
self.cluster = TFDataServiceCluster(
test_name=self._testMethodName,
num_workers=0, # Co-located mode.
worker_addresses=worker_addresses)
def tearDown(self):
super().tearDown()
self.cluster.stop()
def write_dataset(self, dataset, num_files, num_elems):
"""Writes a dataset_ops.DatasetV2 to multiple files."""
dataset_paths = []
dataset_iter = iter(dataset)
for file_idx in range(num_files):
dataset_path = os.path.join(self.get_temp_dir(),
f'dataset-{file_idx}.tfrecords')
dataset_paths.append(dataset_path)
with tf_record.TFRecordWriter(dataset_path) as writer:
for _ in range(num_elems // num_files):
idx, elem = next(dataset_iter)
elem_bytes = example_pb2.Example(
features=feature_pb2.Features(
feature={
'idx': feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[idx])
),
'elem': feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[io_ops.serialize_tensor(elem).numpy()]
)
),
}
)
).SerializeToString()
writer.write(elem_bytes)
return dataset_paths
@parameterized.product(
(
{
# batch=4 x height=2 x width=2
# 1 replica per client.
'mesh_dims': [(MESH_DIM_BATCH, 4),
(MESH_DIM_HEIGHT, 2),
(MESH_DIM_WIDTH, 2)],
}, {
# batch=4 x height=2 x width=2 (transposed)
# 1 replica per client with reordered local partitions.
'mesh_dims': [(MESH_DIM_BATCH, 4),
(MESH_DIM_WIDTH, 2),
(MESH_DIM_HEIGHT, 2)],
}, {
# batch=8 x height=2 x width=1
# 2 replicas per client.
'mesh_dims': [(MESH_DIM_BATCH, 8),
(MESH_DIM_HEIGHT, 2),
(MESH_DIM_WIDTH, 1)],
}, {
# batch=8 x height=2 x width=1 (transposed)
# 2 replicas per client with reordered partitions.
'mesh_dims': [(MESH_DIM_BATCH, 8),
(MESH_DIM_WIDTH, 1),
(MESH_DIM_HEIGHT, 2)],
}, {
# batch=2 x height=4 x width=2
# 1 replica split over 2 clients.
'mesh_dims': [(MESH_DIM_BATCH, 2),
(MESH_DIM_HEIGHT, 4),
(MESH_DIM_WIDTH, 2)],
}, {
# batch=2 x height=4 x width=2 (transposed)
# 1 replica split over 2 clients with reordered partitions.
'mesh_dims': [(MESH_DIM_BATCH, 2),
(MESH_DIM_WIDTH, 2),
(MESH_DIM_HEIGHT, 4)],
},
),
(
{
# Replicated
'idx_sharding': [UNSHARDED],
'images_sharding': [UNSHARDED, UNSHARDED, UNSHARDED, UNSHARDED],
}, {
# Batch sharded
'idx_sharding': [MESH_DIM_BATCH],
'images_sharding':
[MESH_DIM_BATCH, UNSHARDED, UNSHARDED, UNSHARDED],
}, {
# Spatially sharded
'idx_sharding': [UNSHARDED],
'images_sharding':
[UNSHARDED, MESH_DIM_HEIGHT, MESH_DIM_WIDTH, UNSHARDED],
}, {
# Batch and spatially sharded
'idx_sharding': [MESH_DIM_BATCH],
'images_sharding':
[MESH_DIM_BATCH, MESH_DIM_HEIGHT, MESH_DIM_WIDTH, UNSHARDED],
}
))
def testMultiClientIter(self, mesh_dims, idx_sharding, images_sharding):
num_batches = 4
batch_size = 16
num_elems = num_batches * batch_size
images = stateless_random_ops.stateless_random_uniform(
[num_elems, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS],
seed=(1, 2),
minval=0,
maxval=255,
dtype=dtypes.int32,
)
dataset = dataset_ops.DatasetV2.from_tensor_slices(images)
# Enumerate the dataset elements to make it easier to identify the batches
# returned by the DTensorDataset.
dataset = dataset.enumerate()
# Store a mapping of index to dataset elements which can be looked up later
# to identify the batches returned by the DTensorDataset.
all_elems = {idx.numpy(): elem for idx, elem in dataset}
# Write the dataset and shard it among multiple files.
dataset_paths = self.write_dataset(
dataset, num_files=8, num_elems=num_elems)
# Construct args for starmap.
args = []
mesh_dim_names, mesh_dim_sizes = zip(*mesh_dims)
global_device_ids = test_util.create_device_ids_array(mesh_dim_sizes)
device_ids_split = np.split(np.ravel(global_device_ids), NUM_CLIENTS)
dtensor_jobs = [
f'localhost:{self.server_ports[i]}' for i in range(NUM_CLIENTS)
]
for client_id in range(NUM_CLIENTS):
# Manually specify DTensor environment variables since we are in a test
# environment.
env = {
config._DT_CLIENT_ID: str(client_id),
config._DT_JOB_NAME: str(JOB_NAME),
config._DT_JOBS: ','.join(dtensor_jobs)
}
local_device_ids = device_ids_split[client_id].tolist()
local_devices = [
device_spec.DeviceSpecV2( # pylint: disable=g-complex-comprehension
job=JOB_NAME,
replica=0,
task=client_id,
device_type='CPU',
device_index=i,
)
for i in range(len(local_device_ids))
]
mesh = Mesh(
dim_names=mesh_dim_names,
global_device_ids=global_device_ids,
local_device_ids=local_device_ids,
local_devices=local_devices,
)
idx_layout = Layout(idx_sharding, mesh)
images_layout = Layout(images_sharding, mesh)
batch_dim = MESH_DIM_BATCH if MESH_DIM_BATCH in images_sharding else None
args.append((client_id, self._testMethodName, env, NUM_DEVICES_PER_CLIENT,
self.cluster.dispatcher_address(),
self.worker_ports[client_id], batch_size, dataset_paths,
mesh, batch_dim, (idx_layout, images_layout)))
def get_results():
# Run the DTensor client processes and get the DTensor dataset components.
with mp_context.Pool(NUM_CLIENTS) as pool:
results = pool.starmap(run_client, args)
pool.close()
pool.join()
return results
# TODO(b/271162918): fix multi-client use case.
with self.assertRaises(NotImplementedError):
results = get_results()
return
# pylint: disable=unreachable
# Create a mesh on the main test process. The tensor components returned
# from each DTensor client subprocess will be packed onto this mesh to
# verify correctness.
test_mesh = mesh_util.create_mesh(
mesh_dims=mesh_dims,
devices=[
'CPU:%d' % i for i in range(NUM_CLIENTS * NUM_DEVICES_PER_CLIENT)
])
test_mesh = self.configTestMesh({'CPU': test_mesh})
idx_test_layout = Layout(idx_sharding, test_mesh)
images_test_layout = Layout(images_sharding, test_mesh)
for batch_elems in zip(*results):
# Collect the tensor components returned from each client.
idx_components = []
images_components = []
for client_id in range(NUM_CLIENTS):
local_idx, local_images = batch_elems[client_id]
idx_components.extend(local_idx)
images_components.extend(local_images)
# Pack the dataset elements into a DTensor on the test mesh.
d_idx = api.pack(idx_components, idx_test_layout)
d_images = api.pack(images_components, images_test_layout)
# Get the batch of elements from the original dataset using the element
# indices.
batch_stack = []
for elem_idx in d_idx:
batch_stack.append(all_elems.pop(elem_idx.numpy()))
batch = array_ops_stack.stack(batch_stack)
self.assertDTensorEqual(batch, images_test_layout, d_images)
self.assertEmpty(
all_elems, 'Not all batches were returned by DTensorDataset.')
if __name__ == '__main__':
test_backend_util.handle_test_main(test.main)
| MultiClientDTensorDatasetTest |
python | tox-dev__tox | src/tox/tox_env/errors.py | {
"start": 152,
"end": 228
} | class ____(Exception): # noqa: N818
"""Skip this tox environment."""
| Skip |
python | spyder-ide__spyder | external-deps/python-lsp-server/test/plugins/test_symbols.py | {
"start": 461,
"end": 4062
} | class ____:
def __init__(self):
x = 2
self.y = x
def main(x):
y = 2 * x
return y
"""
DOC_IMPORTS = """from . import something
from ..module import something
from module import (a, b)
def main():
# import ignored
print("from module import x") # string with import
return something
"""
def helper_check_symbols_all_scope(symbols):
# All eight symbols (import sys, a, B, __init__, x, y, main, y)
assert len(symbols) == 8
def sym(name):
return [s for s in symbols if s["name"] == name][0]
# Check we have some sane mappings to VSCode constants
assert sym("a")["kind"] == SymbolKind.Variable
assert sym("B")["kind"] == SymbolKind.Class
assert sym("__init__")["kind"] == SymbolKind.Method
assert sym("main")["kind"] == SymbolKind.Function
# Not going to get too in-depth here else we're just testing Jedi
assert sym("a")["location"]["range"]["start"] == {"line": 2, "character": 0}
def test_symbols(config, workspace):
doc = Document(DOC_URI, workspace, DOC)
config.update({"plugins": {"jedi_symbols": {"all_scopes": False}}})
symbols = pylsp_document_symbols(config, doc)
# All four symbols (import sys, a, B, main)
# y is not in the root scope, it shouldn't be returned
assert len(symbols) == 5
def sym(name):
return [s for s in symbols if s["name"] == name][0]
# Check we have some sane mappings to VSCode constants
assert sym("a")["kind"] == SymbolKind.Variable
assert sym("B")["kind"] == SymbolKind.Class
assert sym("main")["kind"] == SymbolKind.Function
# Not going to get too in-depth here else we're just testing Jedi
assert sym("a")["location"]["range"]["start"] == {"line": 2, "character": 0}
# Ensure that the symbol range spans the whole definition
assert sym("main")["location"]["range"]["start"] == {"line": 9, "character": 0}
assert sym("main")["location"]["range"]["end"] == {"line": 12, "character": 0}
def test_symbols_complex_imports(config, workspace):
doc = Document(DOC_URI, workspace, DOC_IMPORTS)
config.update({"plugins": {"jedi_symbols": {"all_scopes": False}}})
symbols = pylsp_document_symbols(config, doc)
import_symbols = [s for s in symbols if s["kind"] == SymbolKind.Module]
assert len(import_symbols) == 4
names = [s["name"] for s in import_symbols]
assert "something" in names
assert "a" in names or "b" in names
assert any(
s["name"] == "main" and s["kind"] == SymbolKind.Function for s in symbols
)
def test_symbols_all_scopes(config, workspace) -> None:
doc = Document(DOC_URI, workspace, DOC)
symbols = pylsp_document_symbols(config, doc)
helper_check_symbols_all_scope(symbols)
def test_symbols_non_existing_file(config, workspace, tmpdir) -> None:
path = tmpdir.join("foo.py")
# Check pre-condition: file must not exist
assert not path.check(exists=1)
doc = Document(uris.from_fs_path(str(path)), workspace, DOC)
symbols = pylsp_document_symbols(config, doc)
helper_check_symbols_all_scope(symbols)
@pytest.mark.skipif(
PY2 or not LINUX or not CI, reason="tested on linux and python 3 only"
)
def test_symbols_all_scopes_with_jedi_environment(workspace) -> None:
doc = Document(DOC_URI, workspace, DOC)
# Update config extra environment
env_path = "/tmp/pyenv/bin/python"
settings = {"pylsp": {"plugins": {"jedi": {"environment": env_path}}}}
doc.update_config(settings)
symbols = pylsp_document_symbols(doc._config, doc)
helper_check_symbols_all_scope(symbols)
| B |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 10457,
"end": 10687
} | class ____(_GenerativeProvider):
module_config: Optional[Dict[str, Any]]
def _to_dict(self) -> Dict[str, Any]:
if self.module_config is None:
return {}
return self.module_config
| _GenerativeCustom |
python | numba__numba | numba/cuda/tests/cudadrv/test_cuda_array_slicing.py | {
"start": 2821,
"end": 8015
} | class ____(CUDATestCase):
def test_prefix_1d(self):
arr = np.arange(5)
darr = cuda.to_device(arr)
for i in range(arr.size):
expect = arr[i:]
got = darr[i:].copy_to_host()
self.assertTrue(np.all(expect == got))
def test_prefix_2d(self):
arr = np.arange(3 ** 2).reshape(3, 3)
darr = cuda.to_device(arr)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
expect = arr[i:, j:]
sliced = darr[i:, j:]
self.assertEqual(expect.shape, sliced.shape)
self.assertEqual(expect.strides, sliced.strides)
got = sliced.copy_to_host()
self.assertTrue(np.all(expect == got))
def test_select_3d_first_two_dim(self):
arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
darr = cuda.to_device(arr)
# Select first dimension
for i in range(arr.shape[0]):
expect = arr[i]
sliced = darr[i]
self.assertEqual(expect.shape, sliced.shape)
self.assertEqual(expect.strides, sliced.strides)
got = sliced.copy_to_host()
self.assertTrue(np.all(expect == got))
# Select second dimension
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
expect = arr[i, j]
sliced = darr[i, j]
self.assertEqual(expect.shape, sliced.shape)
self.assertEqual(expect.strides, sliced.strides)
got = sliced.copy_to_host()
self.assertTrue(np.all(expect == got))
def test_select_f(self):
a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='F')
da = cuda.to_device(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
self.assertTrue(np.array_equal(da[i, j, :].copy_to_host(),
a[i, j, :]))
for j in range(a.shape[2]):
self.assertTrue(np.array_equal(da[i, :, j].copy_to_host(),
a[i, :, j]))
for i in range(a.shape[1]):
for j in range(a.shape[2]):
self.assertTrue(np.array_equal(da[:, i, j].copy_to_host(),
a[:, i, j]))
def test_select_c(self):
a = np.arange(5 * 6 * 7).reshape(5, 6, 7, order='C')
da = cuda.to_device(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
self.assertTrue(np.array_equal(da[i, j, :].copy_to_host(),
a[i, j, :]))
for j in range(a.shape[2]):
self.assertTrue(np.array_equal(da[i, :, j].copy_to_host(),
a[i, :, j]))
for i in range(a.shape[1]):
for j in range(a.shape[2]):
self.assertTrue(np.array_equal(da[:, i, j].copy_to_host(),
a[:, i, j]))
def test_prefix_select(self):
arr = np.arange(5 * 7).reshape(5, 7, order='F')
darr = cuda.to_device(arr)
self.assertTrue(np.all(darr[:1, 1].copy_to_host() == arr[:1, 1]))
def test_negative_slicing_1d(self):
arr = np.arange(10)
darr = cuda.to_device(arr)
for i, j in product(range(-10, 10), repeat=2):
np.testing.assert_array_equal(arr[i:j],
darr[i:j].copy_to_host())
def test_negative_slicing_2d(self):
arr = np.arange(12).reshape(3, 4)
darr = cuda.to_device(arr)
for x, y, w, s in product(range(-4, 4), repeat=4):
np.testing.assert_array_equal(arr[x:y, w:s],
darr[x:y, w:s].copy_to_host())
def test_empty_slice_1d(self):
arr = np.arange(5)
darr = cuda.to_device(arr)
for i in range(darr.shape[0]):
np.testing.assert_array_equal(darr[i:i].copy_to_host(), arr[i:i])
# empty slice of empty slice
np.testing.assert_array_equal(darr[:0][:0].copy_to_host(), np.empty(0))
# out-of-bound slice just produces empty slices
np.testing.assert_array_equal(darr[:0][:1].copy_to_host(),
arr[:0][:1])
np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
arr[:0][-1:])
def test_empty_slice_2d(self):
arr = np.arange(5 * 7).reshape(5, 7)
darr = cuda.to_device(arr)
np.testing.assert_array_equal(darr[:0].copy_to_host(), arr[:0])
np.testing.assert_array_equal(darr[3, :0].copy_to_host(), arr[3, :0])
# empty slice of empty slice
np.testing.assert_array_equal(darr[:0][:0].copy_to_host(),
np.empty((0, 7)))
# out-of-bound slice just produces empty slices
np.testing.assert_array_equal(darr[:0][:1].copy_to_host(), arr[:0][:1])
np.testing.assert_array_equal(darr[:0][-1:].copy_to_host(),
arr[:0][-1:])
| CudaArraySlicing |
python | sympy__sympy | sympy/physics/optics/medium.py | {
"start": 3869,
"end": 4484
} | class ____(Medium):
"""
Represents an optical medium for which only the refractive index is known.
Useful for simple ray optics.
This class should never be instantiated directly.
Instead it should be instantiated indirectly by instantiating Medium with
only n specified.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m', n=2)
>>> m
MediumN(Str('m'), 2)
"""
def __new__(cls, name, n):
obj = super(Medium, cls).__new__(cls, name, n)
return obj
@property
def n(self):
return self.args[1]
| MediumN |
python | PyCQA__pycodestyle | testing/data/W29.py | {
"start": 337,
"end": 403
} | class ____(object):
def __repr__(self):
return 'test'
| Test |
python | huggingface__transformers | tests/models/oneformer/test_modeling_oneformer.py | {
"start": 19400,
"end": 25602
} | class ____(unittest.TestCase):
@cached_property
def model_checkpoints(self):
return "shi-labs/oneformer_ade20k_swin_tiny"
@cached_property
def default_processor(self):
return OneFormerProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def test_inference_no_head(self):
model = OneFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
processor = self.default_processor
image = prepare_img()
inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size
self.assertEqual(inputs_shape, (1, 3, 512, 682))
task_inputs_shape = inputs["task_inputs"].shape
# check size
self.assertEqual(task_inputs_shape, (1, 77))
with torch.no_grad():
outputs = model(**inputs)
expected_slice_hidden_state = [[0.2723, 0.8280, 0.6026], [1.2699, 1.1257, 1.1444], [1.1344, 0.6153, 0.4177]]
expected_slice_hidden_state = torch.tensor(expected_slice_hidden_state).to(torch_device)
slice_hidden_state = outputs.encoder_hidden_states[-1][0, 0, :3, :3]
torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE)
expected_slice_hidden_state = [[1.0581, 1.2276, 1.2003], [1.1903, 1.2925, 1.2862], [1.158, 1.2559, 1.3216]]
expected_slice_hidden_state = torch.tensor(expected_slice_hidden_state).to(torch_device)
slice_hidden_state = outputs.pixel_decoder_hidden_states[0][0, 0, :3, :3]
torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE)
expectations = Expectations(
{
(None, None): [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]],
("cuda", 8): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]],
}
)
expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
slice_hidden_state = outputs.transformer_decoder_class_predictions[0, :3, :3]
torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE)
def test_inference_universal_segmentation_head(self):
model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
processor = self.default_processor
image = prepare_img()
inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size
self.assertEqual(inputs_shape, (1, 3, 512, 682))
with torch.no_grad():
outputs = model(**inputs)
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,
(1, model.config.num_queries, inputs_shape[-2] // 4, (inputs_shape[-1] + 2) // 4),
)
expectations = Expectations(
{
(None, None): [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]],
("cuda", 8): [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape,
(1, model.config.num_queries, model.config.num_labels + 1),
)
expectations = Expectations(
{
(None, None): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]],
("cuda", 8): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
model = (
OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints)
.to(torch_device, dtype=torch.float16)
.eval()
)
processor = self.default_processor
image = prepare_img()
inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
_ = model(**inputs)
def test_with_segmentation_maps_and_loss(self):
dummy_model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints)
processor = self.default_processor
processor.image_processor.num_text = dummy_model.config.num_queries - dummy_model.config.text_encoder_n_ctx
dummy_model.config.is_training = True
model = OneFormerForUniversalSegmentation(dummy_model.config).to(torch_device).eval()
del dummy_model
inputs = processor(
[np.zeros((3, 512, 640)), np.zeros((3, 512, 640))],
["semantic", "semantic"],
segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
return_tensors="pt",
)
inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
inputs["task_inputs"] = inputs["task_inputs"].to(torch_device)
inputs["text_inputs"] = inputs["text_inputs"].to(torch_device)
inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
with torch.no_grad():
outputs = model(**inputs)
self.assertTrue(outputs.loss is not None)
| OneFormerModelIntegrationTest |
python | lepture__authlib | authlib/oauth2/rfc6749/errors.py | {
"start": 6708,
"end": 6826
} | class ____(OAuth2Error):
error = "missing_code"
description = "Missing 'code' in response."
| MissingCodeException |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py | {
"start": 5094,
"end": 6211
} | class ____(CloudEnvironment):
"""Galaxy environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
pulp_user = str(self._get_cloud_config('PULP_USER'))
pulp_password = str(self._get_cloud_config('PULP_PASSWORD'))
pulp_host = self._get_cloud_config('PULP_HOST')
return CloudEnvironmentConfig(
ansible_vars=dict(
pulp_user=pulp_user,
pulp_password=pulp_password,
pulp_api=f'http://{pulp_host}',
pulp_server=f'http://{pulp_host}/pulp_ansible/galaxy/',
galaxy_ng_server=f'http://{pulp_host}/api/galaxy/',
),
env_vars=dict(
PULP_USER=pulp_user,
PULP_PASSWORD=pulp_password,
PULP_SERVER=f'http://{pulp_host}/pulp_ansible/galaxy/api/',
GALAXY_NG_SERVER=f'http://{pulp_host}/api/galaxy/',
),
)
| GalaxyEnvironment |
python | falconry__falcon | falcon/redirects.py | {
"start": 4820,
"end": 5564
} | class ____(HTTPStatus):
"""308 Permanent Redirect.
The 308 (Permanent Redirect) status code indicates that the target
resource has been assigned a new permanent URI.
Note:
This status code is similar to 301 (Moved Permanently), except
that it does not allow changing the request method from POST to
GET.
(See also: RFC 7238, Section 3)
Args:
location (str): URI to provide as the Location header in the
response.
"""
def __init__(self, location: str, headers: Headers | None = None) -> None:
if headers is None:
headers = {}
headers.setdefault('location', location)
super().__init__(falcon.HTTP_308, headers)
| HTTPPermanentRedirect |
python | sphinx-doc__sphinx | sphinx/errors.py | {
"start": 860,
"end": 977
} | class ____(SphinxError):
"""Warning, treated as error."""
category = 'Warning, treated as error'
| SphinxWarning |
python | huggingface__transformers | src/transformers/models/gpt_oss/modular_gpt_oss.py | {
"start": 13695,
"end": 15570
} | class ____(LlamaDecoderLayer):
def __init__(self, config: GptOssConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.hidden_size = config.hidden_size
self.self_attn = GptOssAttention(config=config, layer_idx=layer_idx)
self.mlp = GptOssMLP(config)
self.input_layernorm = GptOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = GptOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.attention_type = config.layer_types[layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, _ = self.mlp(hidden_states) # diff with llama: router scores
hidden_states = residual + hidden_states
return hidden_states
| GptOssDecoderLayer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 165901,
"end": 166494
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="enterpriseId"
)
setting_value = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseMembersCanMakePurchasesSettingValue),
graphql_name="settingValue",
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateEnterpriseMembersCanMakePurchasesSettingInput |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nn_functional.py | {
"start": 39031,
"end": 44749
} | class ____(Operator):
"""Operator for torch.nn.functional.multi_head_attention_forward."""
def __init__(self):
super().__init__("torch.nn.functional.multi_head_attention_forward")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.multi_head_attention_forward"
def can_produce(self, output_spec: Spec) -> bool:
"""Multi-head attention forward can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
# MHA needs at least 3 dimensions (seq_len, batch, embed_dim)
if len(output_spec.size) < 3:
return False
# MHA cannot handle 0-sized dimensions (seq_len, batch, or embed_dim must be > 0)
if any(dim == 0 for dim in output_spec.size):
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for multi_head_attention_forward.
MHA requires:
- query, key, value: (seq_len, batch, embed_dim)
- in_proj_weight: (3*embed_dim, embed_dim) for combined QKV projection
- in_proj_bias: (3*embed_dim,) optional
- out_proj_weight: (embed_dim, embed_dim)
- out_proj_bias: (embed_dim,) optional
For simplicity, we'll use the combined in_proj_weight path.
IMPORTANT: The order of optional parameters matters for codegen!
We must ensure that when we have 6 inputs, they are in the order:
query, key, value, in_proj_weight, in_proj_bias, out_proj_weight
NOT: query, key, value, in_proj_weight, out_proj_weight, out_proj_bias
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError(
"MultiHeadAttentionForwardOperator can only produce TensorSpec outputs"
)
if len(output_spec.size) < 3:
raise ValueError("MHA output must have at least 3 dimensions")
# Output shape: (seq_len, batch, embed_dim)
seq_len, batch, embed_dim = output_spec.size[:3]
# Query, key, value have the same shape as output
query_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
key_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
value_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
# in_proj_weight: (3*embed_dim, embed_dim)
in_proj_weight_spec = TensorSpec(
size=(3 * embed_dim, embed_dim),
stride=(embed_dim, 1),
dtype=output_spec.dtype,
)
# out_proj_weight: (embed_dim, embed_dim)
out_proj_weight_spec = TensorSpec(
size=(embed_dim, embed_dim),
stride=(embed_dim, 1),
dtype=output_spec.dtype,
)
# For simplicity and correctness, always generate all required tensors
# This avoids ambiguity in the codegen about which optional parameters are present
# We'll use a simplified signature: query, key, value, in_proj_weight, out_proj_weight only
specs = [
query_spec,
key_spec,
value_spec,
in_proj_weight_spec,
out_proj_weight_spec,
]
from typing import cast
return cast(list[Spec], specs)
def _calculate_stride(self, size):
"""Calculate stride for a given size."""
if not size:
return ()
stride = []
current_stride = 1
for dim_size in reversed(size):
stride.append(current_stride)
current_stride *= dim_size
return tuple(reversed(stride))
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for multi_head_attention_forward operation."""
if len(input_names) != 5:
raise ValueError(
"MHA requires exactly 5 inputs: query, key, value, in_proj_weight, out_proj_weight"
)
if not isinstance(output_spec, TensorSpec):
raise ValueError(
"MultiHeadAttentionForwardOperator can only produce TensorSpec outputs"
)
target_dtype = str(output_spec.dtype)
embed_dim = output_spec.size[-1]
# Determine number of heads (must divide embed_dim evenly)
# Common choices: 8, 4, 2, 1
possible_heads = [h for h in [8, 4, 2, 1] if embed_dim % h == 0]
num_heads = possible_heads[0] if possible_heads else 1
query_name = input_names[0]
key_name = input_names[1]
value_name = input_names[2]
in_proj_weight_name = input_names[3]
out_proj_weight_name = input_names[4]
# Build the function call without optional biases
code = f"""{output_name}, _ = torch.nn.functional.multi_head_attention_forward(
{query_name}.to({target_dtype}),
{key_name}.to({target_dtype}),
{value_name}.to({target_dtype}),
{embed_dim},
{num_heads},
{in_proj_weight_name}.to({target_dtype}),
None, # in_proj_bias
None, # bias_k
None, # bias_v
False, # add_zero_attn
0.0, # dropout_p (no dropout for testing)
{out_proj_weight_name}.to({target_dtype}),
None, # out_proj_bias
training=False, # Use eval mode for deterministic behavior
need_weights=False, # Don't compute attention weights for performance
)"""
return code
| MultiHeadAttentionForwardOperator |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 5900,
"end": 14967
} | class ____(TimeSeriesBaseModel, ObjectBaseModel, Generic[R]):
"""
The state of a run.
"""
type: StateType
name: Optional[str] = Field(default=None)
timestamp: datetime.datetime = Field(default_factory=lambda: now("UTC"))
message: Optional[str] = Field(default=None, examples=["Run started"])
state_details: StateDetails = Field(default_factory=StateDetails)
data: Annotated[
Union[
Annotated["ResultRecordMetadata", Tag("ResultRecordMetadata")],
Annotated[Any, Tag("Any")],
],
Discriminator(data_discriminator),
] = Field(default=None)
@overload
async def aresult(
self: "State[R]",
raise_on_failure: Literal[True] = ...,
retry_result_failure: bool = ...,
) -> R: ...
@overload
async def aresult(
self: "State[R]",
raise_on_failure: Literal[False] = False,
retry_result_failure: bool = ...,
) -> Union[R, Exception]: ...
@overload
async def aresult(
self: "State[R]",
raise_on_failure: bool = ...,
retry_result_failure: bool = ...,
) -> Union[R, Exception]: ...
async def aresult(
self,
raise_on_failure: bool = True,
retry_result_failure: bool = True,
) -> Union[R, Exception]:
"""
Retrieve the result attached to this state.
"""
from prefect.states import get_state_result
return await get_state_result(
self,
raise_on_failure=raise_on_failure,
retry_result_failure=retry_result_failure,
)
@overload
def result(
self: "State[R]",
raise_on_failure: Literal[True] = ...,
retry_result_failure: bool = ...,
) -> R: ...
@overload
def result(
self: "State[R]",
raise_on_failure: Literal[False] = False,
retry_result_failure: bool = ...,
) -> Union[R, Exception]: ...
@overload
def result(
self: "State[R]",
raise_on_failure: bool = ...,
retry_result_failure: bool = ...,
) -> Union[R, Exception]: ...
@async_dispatch(aresult)
def result(
self,
raise_on_failure: bool = True,
retry_result_failure: bool = True,
) -> Union[R, Exception]:
"""
Retrieve the result attached to this state.
Args:
raise_on_failure: a boolean specifying whether to raise an exception
if the state is of type `FAILED` and the underlying data is an exception. When flow
was run in a different memory space (using `run_deployment`), this will only raise
if `fetch` is `True`.
retry_result_failure: a boolean specifying whether to retry on failures to
load the result from result storage
Raises:
TypeError: If the state is failed but the result is not an exception.
Returns:
The result of the run
Examples:
Get the result from a flow state
```python
@flow
def my_flow():
return "hello"
my_flow(return_state=True).result()
# hello
```
Get the result from a failed state
```python
@flow
def my_flow():
raise ValueError("oh no!")
state = my_flow(return_state=True) # Error is wrapped in FAILED state
state.result() # Raises `ValueError`
```
Get the result from a failed state without erroring
```python
@flow
def my_flow():
raise ValueError("oh no!")
state = my_flow(return_state=True)
result = state.result(raise_on_failure=False)
print(result)
# ValueError("oh no!")
```
Get the result from a flow state in an async context
```python
@flow
async def my_flow():
return "hello"
state = await my_flow(return_state=True)
await state.result()
# hello
```
Get the result with `raise_on_failure` from a flow run in a different memory space
```python
@flow
async def my_flow():
raise ValueError("oh no!")
my_flow.deploy("my_deployment/my_flow")
flow_run = run_deployment("my_deployment/my_flow")
await flow_run.state.result(raise_on_failure=True) # Raises `ValueError("oh no!")`
```
"""
from prefect.states import get_state_result
return run_coro_as_sync(
get_state_result(
self,
raise_on_failure=raise_on_failure,
retry_result_failure=retry_result_failure,
)
)
@model_validator(mode="after")
def default_name_from_type(self) -> Self:
"""If a name is not provided, use the type"""
# if `type` is not in `values` it means the `type` didn't pass its own
# validation check and an error will be raised after this function is called
name = self.name
if name is None and self.type:
self.name = " ".join([v.capitalize() for v in self.type.split("_")])
return self
@model_validator(mode="after")
def default_scheduled_start_time(self) -> Self:
if self.type == StateType.SCHEDULED:
if not self.state_details.scheduled_time:
self.state_details.scheduled_time = now("UTC") # pyright: ignore[reportAttributeAccessIssue] DateTime is split into two types depending on Python version
return self
@model_validator(mode="after")
def set_unpersisted_results_to_none(self) -> Self:
if isinstance(self.data, dict) and self.data.get("type") == "unpersisted": # pyright: ignore[reportUnknownMemberType] unable to narrow dict type
self.data = None
return self
def is_scheduled(self) -> bool:
return self.type == StateType.SCHEDULED
def is_pending(self) -> bool:
return self.type == StateType.PENDING
def is_running(self) -> bool:
return self.type == StateType.RUNNING
def is_completed(self) -> bool:
return self.type == StateType.COMPLETED
def is_failed(self) -> bool:
return self.type == StateType.FAILED
def is_crashed(self) -> bool:
return self.type == StateType.CRASHED
def is_cancelled(self) -> bool:
return self.type == StateType.CANCELLED
def is_cancelling(self) -> bool:
return self.type == StateType.CANCELLING
def is_final(self) -> bool:
return self.type in TERMINAL_STATES
def is_paused(self) -> bool:
return self.type == StateType.PAUSED
def model_copy(
self, *, update: Optional[Mapping[str, Any]] = None, deep: bool = False
) -> Self:
"""
Copying API models should return an object that could be inserted into the
database again. The 'timestamp' is reset using the default factory.
"""
update = {
"timestamp": type(self).model_fields["timestamp"].get_default(),
**(update or {}),
}
return super().model_copy(update=update, deep=deep)
def fresh_copy(self, **kwargs: Any) -> Self:
"""
Return a fresh copy of the state with a new ID.
"""
return self.model_copy(
update={
"id": uuid7(),
"created": now("UTC"),
"updated": now("UTC"),
"timestamp": now("UTC"),
},
**kwargs,
)
def __repr__(self) -> str:
"""
Generates a complete state representation appropriate for introspection
and debugging, including the result:
`MyCompletedState(message="my message", type=COMPLETED, result=...)`
"""
result = self.data
display = dict(
message=repr(self.message),
type=str(self.type.value),
result=repr(result),
)
return f"{self.name}({', '.join(f'{k}={v}' for k, v in display.items())})"
def __str__(self) -> str:
"""
Generates a simple state representation appropriate for logging:
`MyCompletedState("my message", type=COMPLETED)`
"""
display: list[str] = []
if self.message:
display.append(repr(self.message))
if TYPE_CHECKING:
assert self.name is not None
if self.type.lower() != self.name.lower():
display.append(f"type={self.type.value}")
return f"{self.name}({', '.join(display)})"
def __hash__(self) -> int:
return hash(
(
getattr(self.state_details, "flow_run_id", None),
getattr(self.state_details, "task_run_id", None),
self.timestamp,
self.type,
)
)
| State |
python | pandas-dev__pandas | asv_bench/benchmarks/categoricals.py | {
"start": 7082,
"end": 7409
} | class ____:
def setup(self):
N = 10**5
self.ci = pd.CategoricalIndex(np.arange(N))
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
self.key in self.ci
def time_categorical_contains(self):
self.key in self.c
| Contains |
python | realpython__materials | python-unittest/test_prime_v1.py | {
"start": 49,
"end": 664
} | class ____(unittest.TestCase):
def test_prime_numbers(self):
for num in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]:
with self.subTest(num=num):
self.assertTrue(is_prime(num))
def test_non_prime_numbers(self):
for num in [-1, 0, 1, 4, 6, 8, 9, 10, 12, 15]:
with self.subTest(num=num):
self.assertFalse(is_prime(num))
# def test_prime_number(self):
# self.assertTrue(is_prime(17))
# def test_non_prime_number(self):
# self.assertFalse(is_prime(10))
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestIsPrime |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/config_types.py | {
"start": 3859,
"end": 4981
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneConfigType,)
description = "Regular is an odd name in this context. It really means Scalar or Any."
name = "RegularConfigType"
given_name = graphene.NonNull(graphene.String)
def __init__(
self,
get_config_type: Callable[[str], ConfigTypeSnap],
config_type_snap: ConfigTypeSnap,
):
self._config_type_snap = check.inst_param(
config_type_snap, "config_type_snap", ConfigTypeSnap
)
self._get_config_type = get_config_type
super().__init__(**_ctor_kwargs_for_snap(config_type_snap))
def resolve_recursive_config_types(
self, graphene_info: ResolveInfo
) -> list[GrapheneConfigTypeUnion]:
return [
to_config_type(self._get_config_type, config_type_key)
for config_type_key in _recursive_config_type_keys(
self._get_config_type, self._config_type_snap
)
]
def resolve_given_name(self, _):
return self._config_type_snap.given_name
| GrapheneRegularConfigType |
python | doocs__leetcode | solution/3300-3399/3307.Find the K-th Character in String Game II/Solution.py | {
"start": 0,
"end": 367
} | class ____:
def kthCharacter(self, k: int, operations: List[int]) -> str:
n, i = 1, 0
while n < k:
n *= 2
i += 1
d = 0
while n > 1:
if k > n // 2:
k -= n // 2
d += operations[i - 1]
n //= 2
i -= 1
return chr(d % 26 + ord("a"))
| Solution |
python | Netflix__metaflow | metaflow/plugins/cards/component_serializer.py | {
"start": 6184,
"end": 15873
} | class ____:
"""
This class manages the card's state for a single card.
- It uses the `ComponentStore` to manage the storage of the components
- It exposes methods to add, remove and access the components.
- It exposes a `refresh` method that will allow refreshing a card with new data
for realtime(ish) updates.
- The `CardComponentCollector` exposes convenience methods similar to this class for a default
editable card. These methods include :
- `append`
- `extend`
- `clear`
- `refresh`
- `components`
- `__iter__`
## Usage Patterns :
```python
current.card["mycardid"].append(component, id="comp123")
current.card["mycardid"].extend([component])
current.card["mycardid"].refresh(data) # refreshes the card with new data
current.card["mycardid"].components["comp123"] # returns the component with id "comp123"
current.card["mycardid"].components["comp123"].update()
current.card["mycardid"].components.clear() # Wipe all the components
del current.card["mycardid"].components["mycomponentid"] # Delete a component
```
"""
def __init__(
self,
card_uuid,
decorator_attributes,
card_creator,
components=None,
logger=None,
no_warnings=False,
user_set_card_id=None,
runtime_card=False,
card_options=None,
refresh_interval=5,
):
self._card_creator_args = dict(
card_uuid=card_uuid,
user_set_card_id=user_set_card_id,
runtime_card=runtime_card,
decorator_attributes=decorator_attributes,
card_options=card_options,
logger=logger,
)
self._card_creator = card_creator
self._refresh_interval = refresh_interval
self._last_layout_change = None
self._latest_user_data = None
self._last_refresh = 0
self._last_render = 0
self._render_seq = 0
self._logger = logger
self._no_warnings = no_warnings
self._warn_once = {
"update": {},
"not_implemented": {},
}
card_type = decorator_attributes["type"]
if components is None:
self._components = ComponentStore(
logger=self._logger,
card_type=card_type,
user_set_id=user_set_card_id,
components=None,
)
else:
self._components = ComponentStore(
logger=self._logger,
card_type=card_type,
user_set_id=user_set_card_id,
components=list(components),
)
def append(self, component, id=None):
self._components.append(component, id=id)
def extend(self, components):
self._components.extend(components)
def clear(self):
self._components.clear()
def _card_proc(self, mode, sync=False):
self._card_creator.create(**self._card_creator_args, mode=mode, sync=sync)
def refresh(self, data=None, force=False):
self._latest_user_data = data
nu = time.time()
first_render = True if self._last_render == 0 else False
if nu - self._last_refresh < self._refresh_interval:
# rate limit refreshes: silently ignore requests that
# happen too frequently
return
self._last_refresh = nu
# This block of code will render the card in `render_runtime` mode when:
# 1. refresh is called with `force=True`
# 2. Layout of the components in the card has changed. i.e. The actual elements in the component array have changed.
# 3. The last time the card was rendered was more the minimum interval after which they should be rendered.
last_rendered_before_minimum_interval = (
nu - self._last_refresh
) > RUNTIME_CARD_RENDER_INTERVAL
layout_has_changed = (
self._last_layout_change != self.components.layout_last_changed_on
or self._last_layout_change is None
)
if force or last_rendered_before_minimum_interval or layout_has_changed:
self._render_seq += 1
self._last_render = nu
self._card_proc("render_runtime")
# The below `if not first_render` condition is a special case for the following scenario:
# Lets assume the case that the user is only doing `current.card.append` followed by `refresh`.
# In this case, there will be no process executed in `refresh` mode since `layout_has_changed`
# will always be true and as a result there will be no data update that informs the UI of the RELOAD_TOKEN change.
# This will cause the UI to seek for the data update object but will constantly find None. So if it is not
# the first render then we should also have a `refresh` call followed by a `render_runtime` call so
# that the UI can always be updated with the latest data.
if not first_render:
# For the general case, the CardCreator's ProcessManager run's the `refresh` / `render_runtime` in a asynchronous manner.
# Due to this when the `render_runtime` call is happening, an immediately subsequent call to `refresh` will not be able to
# execute since the card-process manager will be busy executing the `render_runtime` call and ignore the `refresh` call.
# Hence we need to pass the `sync=True` argument to the `refresh` call so that the `refresh` call is executed synchronously and waits for the
# `render_runtime` call to finish.
self._card_proc("refresh", sync=True)
# We set self._last_layout_change so that when self._last_layout_change is not the same
# as `self.components.layout_last_changed_on`, then the component array itself
# has been modified. So we should force a re-render of the card.
self._last_layout_change = self.components.layout_last_changed_on
else:
self._card_proc("refresh")
@property
def components(self):
return self._components
def _warning(self, message):
msg = "[@card WARNING] %s" % message
self._logger(msg, timestamp=False, bad=True)
def _get_latest_data(self, final=False, mode=None):
"""
This function returns the data object that is passed down to :
- `MetaflowCard.render_runtime`
- `MetaflowCard.refresh`
- `MetaflowCard.reload_content_token`
The return value of this function contains all the necessary state information for Metaflow Cards to make decisions on the following:
1. What components are rendered
2. Should the card be reloaded on the UI
3. What data to pass down to the card.
Parameters
----------
final : bool, optional
If True, it implies that the final "rendering" sequence is taking place (which involves calling a `render` and a `refresh` function.)
When final is set the `render_seq` is set to "final" so that the reload token in the card is set to final
and the card is not reloaded again on the user interface.
mode : str
This parameter is passed down to the object returned by this function. Can be one of `render_runtime` / `refresh` / `render`
Returns
-------
dict
A dictionary of the form :
```python
{
"user": user_data, # any passed to `current.card.refresh` function
"components": component_dict, # all rendered REALTIME_UPDATABLE components
"render_seq": seq,
# `render_seq` is a counter that is incremented every time `render_runtime` is called.
# If a metaflow card has a RELOAD_POLICY_ALWAYS set then the reload token will be set to this value
# so that the card reload on the UI everytime `render_runtime` is called.
"component_update_ts": self.components.layout_last_changed_on,
# `component_update_ts` is the timestamp of the last time the component array was modified.
# `component_update_ts` can get used by the `reload_content_token` to make decisions on weather to
# reload the card on the UI when component array has changed.
"mode": mode,
}
```
"""
seq = "final" if final else self._render_seq
# Extract all the runtime-updatable components as a dictionary
component_dict = {}
for component in self._components._realtime_updateable_components():
rendered_comp = _render_card_component(component)
if rendered_comp is not None:
component_dict.update({component.component_id: rendered_comp})
# Verify _latest_user_data is json serializable
user_data = {}
if self._latest_user_data is not None and not _object_is_json_serializable(
self._latest_user_data
):
self._warning(
"Data provided to `refresh` is not JSON serializable. It will be ignored."
)
else:
user_data = self._latest_user_data
return {
"user": user_data,
"components": component_dict,
"render_seq": seq,
"component_update_ts": self.components.layout_last_changed_on,
"mode": mode,
}
def __iter__(self):
return iter(self._components)
| CardComponentManager |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 53088,
"end": 54543
} | class ____(nn.Module):
def __init__(self, in_features: int, out_features: int, kernel_size: int = 3, padding: int = 1):
"""
A basic module that executes conv - norm - in sequence used in MaskFormer.
Args:
in_features (`int`):
The number of input features (channels).
out_features (`int`):
The number of outputs features (channels).
"""
super().__init__()
self.layers = [
nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, bias=False),
nn.GroupNorm(32, out_features),
nn.ReLU(inplace=True),
]
for i, layer in enumerate(self.layers):
# Provide backwards compatibility from when the class inherited from nn.Sequential
# In nn.Sequential subclasses, the name given to the layer is its index in the sequence.
# In nn.Module subclasses they derived from the instance attribute they are assigned to e.g.
# self.my_layer_name = Layer()
# We can't give instance attributes integer names i.e. self.0 is not permitted and so need to register
# explicitly
self.add_module(str(i), layer)
def forward(self, input: Tensor) -> Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
| MaskFormerFPNConvLayer |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 9330,
"end": 13852
} | class ____:
KEY = OpenApiParameter(
name="key",
location=OpenApiParameter.PATH,
type=OpenApiTypes.STR,
description="The tag key to look the values up for.",
required=True,
)
ISSUES_OR_GROUPS = OpenApiParameter(
name="var",
location="path",
required=False,
type=str,
description="Issue URLs may be accessed with either `issues` or `groups`. This parameter is will be removed when building the API docs.",
)
ISSUE_ID = OpenApiParameter(
name="issue_id",
location="path",
required=True,
type=int,
description="The ID of the issue you'd like to query.",
)
SORT = OpenApiParameter(
name="sort",
location="query",
required=False,
type=str,
description="Sort order of the resulting tag values. Prefix with '-' for descending order. Default is '-id'.",
enum=["id", "date", "age", "count"],
)
GROUP_STATS_PERIOD = OpenApiParameter(
name="groupStatsPeriod",
description="The timeline on which stats for the groups should be presented.",
enum=["", "24h", "14d", "auto"],
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
)
SHORT_ID_LOOKUP = OpenApiParameter(
name="shortIdLookup",
description="If this is set to `1` then the query will be parsed for issue short IDs. These may ignore other filters (e.g. projects), which is why it is an opt-in.",
enum=["1", "0"],
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
)
DEFAULT_QUERY = OpenApiParameter(
name="query",
description="An optional search query for filtering issues. A default query will apply if no view/query is set. For all results use this parameter with an empty string.",
default="is:unresolved",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
)
VIEW_ID = OpenApiParameter(
name="viewId",
description="The ID of the view to use. If no query is present, the view's query and filters will be applied.",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
)
VIEW_SORT = OpenApiParameter(
name="sort",
description="The sort order of the view. Options include 'Last Seen' (`date`), 'First Seen' (`new`), 'Trends' (`trends`), 'Events' (`freq`), 'Users' (`user`), and 'Date Added' (`inbox`).",
default="date",
enum=["date", "new", "trends", "freq", "user", "inbox"],
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
)
LIMIT = OpenApiParameter(
name="limit",
description="The maximum number of issues to affect. The maximum is 100.",
default=100,
location=OpenApiParameter.QUERY,
type=OpenApiTypes.INT,
required=False,
)
GROUP_INDEX_EXPAND = OpenApiParameter(
name="expand",
description="Additional data to include in the response.",
enum=[
"inbox",
"owners",
"sessions",
"pluginActions",
"pluginIssues",
"integrationIssues",
"sentryAppIssues",
"latestEventHasAttachments",
],
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
many=True,
)
GROUP_INDEX_COLLAPSE = OpenApiParameter(
name="collapse",
description="Fields to remove from the response to improve query performance.",
enum=["stats", "lifetime", "base", "unhandled", "filtered"],
location=OpenApiParameter.QUERY,
type=OpenApiTypes.STR,
required=False,
many=True,
)
MUTATE_ISSUE_ID_LIST = OpenApiParameter(
name="id",
description="The list of issue IDs to mutate. It is optional for status updates, in which an implicit `update all` is assumed.",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.INT,
required=False,
many=True,
)
DELETE_ISSUE_ID_LIST = OpenApiParameter(
name="id",
description="The list of issue IDs to be removed. If not provided, it will attempt to remove the first 1000 issues.",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.INT,
required=False,
many=True,
)
| IssueParams |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_hash.py | {
"start": 606,
"end": 664
} | class ____:
def __hash__(self):
return 7741
| Hash |
python | falconry__falcon | falcon/util/sync.py | {
"start": 507,
"end": 1145
} | class ____:
def run(self, coro: Awaitable[Result]) -> Result: # pragma: nocover
# NOTE(vytas): Work around get_event_loop deprecation in 3.10 by going
# via get_event_loop_policy(). This should be equivalent for
# async_to_sync's use case as it is currently impossible to invoke
# run_until_complete() from a running loop anyway.
return self.get_loop().run_until_complete(coro)
def get_loop(self) -> asyncio.AbstractEventLoop: # pragma: nocover
return asyncio.get_event_loop_policy().get_event_loop()
def close(self) -> None: # pragma: nocover
pass
| _DummyRunner |
python | openai__openai-python | src/openai/types/responses/response_in_progress_event.py | {
"start": 231,
"end": 518
} | class ____(BaseModel):
response: Response
"""The response that is in progress."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.in_progress"]
"""The type of the event. Always `response.in_progress`."""
| ResponseInProgressEvent |
python | numpy__numpy | numpy/_core/tests/test_array_coercion.py | {
"start": 29462,
"end": 32504
} | class ____:
"""Test expected behaviors of ``asarray``."""
def test_dtype_identity(self):
"""Confirm the intended behavior for *dtype* kwarg.
The result of ``asarray()`` should have the dtype provided through the
keyword argument, when used. This forces unique array handles to be
produced for unique np.dtype objects, but (for equivalent dtypes), the
underlying data (the base object) is shared with the original array
object.
Ref https://github.com/numpy/numpy/issues/1468
"""
int_array = np.array([1, 2, 3], dtype='i')
assert np.asarray(int_array) is int_array
# The character code resolves to the singleton dtype object provided
# by the numpy package.
assert np.asarray(int_array, dtype='i') is int_array
# Derive a dtype from n.dtype('i'), but add a metadata object to force
# the dtype to be distinct.
unequal_type = np.dtype('i', metadata={'spam': True})
annotated_int_array = np.asarray(int_array, dtype=unequal_type)
assert annotated_int_array is not int_array
assert annotated_int_array.base is int_array
# Create an equivalent descriptor with a new and distinct dtype
# instance.
equivalent_requirement = np.dtype('i', metadata={'spam': True})
annotated_int_array_alt = np.asarray(annotated_int_array,
dtype=equivalent_requirement)
assert unequal_type == equivalent_requirement
assert unequal_type is not equivalent_requirement
assert annotated_int_array_alt is not annotated_int_array
assert annotated_int_array_alt.dtype is equivalent_requirement
# Check the same logic for a pair of C types whose equivalence may vary
# between computing environments.
# Find an equivalent pair.
integer_type_codes = ('i', 'l', 'q')
integer_dtypes = [np.dtype(code) for code in integer_type_codes]
typeA = None
typeB = None
for typeA, typeB in permutations(integer_dtypes, r=2):
if typeA == typeB:
assert typeA is not typeB
break
assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
# These ``asarray()`` calls may produce a new view or a copy,
# but never the same object.
long_int_array = np.asarray(int_array, dtype='l')
long_long_int_array = np.asarray(int_array, dtype='q')
assert long_int_array is not int_array
assert long_long_int_array is not int_array
assert np.asarray(long_int_array, dtype='q') is not long_int_array
array_a = np.asarray(int_array, dtype=typeA)
assert typeA == typeB
assert typeA is not typeB
assert array_a.dtype is typeA
assert array_a is not np.asarray(array_a, dtype=typeB)
assert np.asarray(array_a, dtype=typeB).dtype is typeB
assert array_a is np.asarray(array_a, dtype=typeB).base
| TestAsArray |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 2783,
"end": 3033
} | class ____(BaseStringType):
"""There's a regex in the spec this is supposed to meet...
but current assessment is that spending cycles on validating wouldn't be worth it
for the number of programming errors it would catch.
"""
| XsdAnyUri |
python | geekcomputers__Python | Sorting Algorithims/mergesort_linkedlist.py | {
"start": 144,
"end": 1770
} | class ____:
def __init__(self):
self.head = None
def insert(self, new_data: int) -> None:
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printLL(self) -> None:
temp = self.head
if temp == None:
return "Linked List is empty"
while temp.next:
print(temp.data, "->", end="")
temp = temp.next
print(temp.data)
return
# Merge two sorted linked lists
def merge(left, right):
if not left:
return right
if not right:
return left
if left.data < right.data:
result = left
result.next = merge(left.next, right)
else:
result = right
result.next = merge(left, right.next)
return result
# Merge sort for linked list
def merge_sort(head):
if not head or not head.next:
return head
# Find the middle of the list
slow = head
fast = head.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
left = head
right = slow.next
slow.next = None
left = merge_sort(left)
right = merge_sort(right)
return merge(left, right)
if __name__ == "__main__":
ll = LinkedList()
print(
"Enter the space-separated values of numbers to be inserted in the linked list prompted below:"
)
arr = list(map(int, input().split()))
for num in arr:
ll.insert(num)
print("Linked list before sorting:")
ll.printLL()
ll.head = merge_sort(ll.head)
print("Linked list after sorting:")
ll.printLL()
| LinkedList |
python | kamyu104__LeetCode-Solutions | Python/separate-black-and-white-balls.py | {
"start": 406,
"end": 907
} | class ____(object):
def minimumSteps(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
left, right = 0, len(s)-1
while left < right:
if left < len(s) and s[left] != '1':
left += 1
continue
if right >= 0 and s[right] != '0':
right -= 1
continue
result += right-left
left += 1
right -= 1
return result
| Solution2 |
python | coleifer__peewee | playhouse/sqlite_ext.py | {
"start": 9748,
"end": 11578
} | class ____(SchemaManager):
def _create_virtual_table(self, safe=True, **options):
options = self.model.clean_options(
merge_dict(self.model._meta.options, options))
# Structure:
# CREATE VIRTUAL TABLE <model>
# USING <extension_module>
# ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
ctx = self._create_context()
ctx.literal('CREATE VIRTUAL TABLE ')
if safe:
ctx.literal('IF NOT EXISTS ')
(ctx
.sql(self.model)
.literal(' USING '))
ext_module = self.model._meta.extension_module
if isinstance(ext_module, Node):
return ctx.sql(ext_module)
ctx.sql(SQL(ext_module)).literal(' ')
arguments = []
meta = self.model._meta
if meta.prefix_arguments:
arguments.extend([SQL(a) for a in meta.prefix_arguments])
# Constraints, data-types, foreign and primary keys are all omitted.
for field in meta.sorted_fields:
if isinstance(field, (RowIDField)) or field._hidden:
continue
field_def = [Entity(field.column_name)]
if field.unindexed:
field_def.append(SQL('UNINDEXED'))
arguments.append(NodeList(field_def))
if meta.arguments:
arguments.extend([SQL(a) for a in meta.arguments])
if options:
arguments.extend(self._create_table_option_sql(options))
return ctx.sql(EnclosedNodeList(arguments))
def _create_table(self, safe=True, **options):
if issubclass(self.model, VirtualModel):
return self._create_virtual_table(safe, **options)
return super(VirtualTableSchemaManager, self)._create_table(
safe, **options)
| VirtualTableSchemaManager |
python | readthedocs__readthedocs.org | readthedocs/redirects/migrations/0008_alter_redirect_position.py | {
"start": 148,
"end": 625
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("redirects", "0007_migrate_to_new_syntax"),
]
operations = [
migrations.AlterField(
model_name="redirect",
name="position",
field=models.PositiveIntegerField(
default=0,
help_text="Order of execution of the redirect.",
verbose_name="Position",
),
),
]
| Migration |
python | wandb__wandb | wandb/vendor/pygments/lexers/haskell.py | {
"start": 10763,
"end": 13366
} | class ____(RegexLexer):
"""
For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
dependently typed functional programming language and proof assistant.
.. versionadded:: 2.0
"""
name = 'Agda'
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',
'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
'infixl', 'infixr', 'instance', 'let', 'mutual', 'open',
'pattern', 'postulate', 'primitive', 'private',
'quote', 'quoteGoal', 'quoteTerm',
'record', 'renaming', 'rewrite', 'syntax', 'tactic',
'unquote', 'unquoteDecl', 'using', 'where', 'with']
tokens = {
'root': [
# Declaration
(r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
bygroups(Text, Name.Function, Text, Operator.Word, Text)),
# Comments
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
# Holes
(r'\{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r'\b(Set|Prop)\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
(u'(\\.{1,3}|\\||\u039B|\u2200|\u2192|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s(){}]+', Text),
(r'\s+?', Text), # Whitespace
],
'hole': [
# Holes
(r'[^!{}]+', Comment.Directive),
(r'\{!', Comment.Directive, '#push'),
(r'!\}', Comment.Directive, '#pop'),
(r'[!{}]', Comment.Directive),
],
'module': [
(r'\{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][\w.]*', Name, '#pop'),
(r'[\W0-9_]+', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
'string': HaskellLexer.tokens['string'],
'escape': HaskellLexer.tokens['escape']
}
| AgdaLexer |
python | weaviate__weaviate-python-client | weaviate/users/base.py | {
"start": 3866,
"end": 7165
} | class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
def get_my_user(self) -> executor.Result[OwnUser]:
"""Get the currently authenticated user.
Returns:
A user object.
"""
path = "/users/own-info"
def resp(res: Response) -> OwnUser:
parsed = _decode_json_response_dict(res, "Get current user")
assert parsed is not None
# The API returns "username" for 1.29 instead of "user_id"
user_id = parsed["username"] if "username" in parsed else parsed["user_id"]
return OwnUser(
user_id=user_id,
roles=(
{role["name"]: Role._from_weaviate_role(role) for role in parsed["roles"]}
if parsed["roles"] is not None
else {}
),
groups=parsed["groups"] if "groups" in parsed else [],
)
return executor.execute(
response_callback=resp,
method=self._connection.get,
path=path,
error_msg="Could not get roles",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get own roles"),
)
@deprecated(
"""This method is deprecated and will be removed in Q4 25.
Please use `users.db.get_assigned_roles` and/or `users.oidc.get_assigned_roles` instead."""
)
def get_assigned_roles(self, user_id: str) -> executor.Result[Dict[str, Role]]:
"""Get the roles assigned to a user.
Args:
user_id: The user ID to get the roles for.
Returns:
A dictionary with role names as keys and the `Role` objects as values.
"""
# cast here because the deprecated method is only used in the deprecated class and this type is known
return cast(Dict[str, Role], self._get_roles_of_user_deprecated(user_id))
@deprecated(
"""This method is deprecated and will be removed in Q4 25.
Please use `users.db.assign_roles` and/or `users.oidc.assign_roles` instead."""
)
def assign_roles(
self,
*,
user_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Assign roles to a user.
Args:
role_names: The names of the roles to assign to the user.
user_id: The user to assign the roles to.
"""
return self._assign_roles_to_user(
[role_names] if isinstance(role_names, str) else role_names,
user_id,
None,
)
@deprecated(
"""This method is deprecated and will be removed in Q4 25.
Please use `users.db.revoke_roles` and/or `users.oidc.revoke_roles` instead."""
)
def revoke_roles(
self,
*,
user_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Revoke roles from a user.
Args:
role_names: The names of the roles to revoke from the user.
user_id: The user to revoke the roles from.
"""
return self._revoke_roles_from_user(
[role_names] if isinstance(role_names, str) else role_names,
user_id,
None,
)
| _UsersExecutor |
python | spack__spack | lib/spack/spack/test/llnl/util/lock.py | {
"start": 9815,
"end": 10358
} | class ____:
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, start=self.start, length=self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
barrier.wait()
| TimeoutWrite |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/migration/incoming/main.py | {
"start": 2208,
"end": 2688
} | class ____(webapp2.RequestHandler):
allowed_app_ids = ["other-app-id", "other-app-id-2"]
def get(self):
incoming_app_id = get_app_id(self.request)
if incoming_app_id is None:
self.abort(403)
if incoming_app_id not in self.allowed_app_ids:
self.abort(403)
self.response.write("This is a protected page.")
app = webapp2.WSGIApplication([("/", MainPage)], debug=True)
# [END gae_python_app_identity_incoming]
| MainPage |
python | ansible__ansible | lib/ansible/plugins/filter/core.py | {
"start": 21841,
"end": 23773
} | class ____(t.NamedTuple):
"""
Custom named tuple for the groupby filter with a public interface; silently ignored by unknown type checks.
This matches the internal implementation of the _GroupTuple returned by Jinja's built-in groupby filter.
"""
grouper: t.Any
list: list[t.Any]
def __repr__(self) -> str:
return tuple.__repr__(self)
_lazy_containers.register_known_types(GroupTuple)
@pass_environment
def _cleansed_groupby(*args, **kwargs):
res = sync_do_groupby(*args, **kwargs)
res = [GroupTuple(grouper=g.grouper, list=g.list) for g in res]
return res
# DTFIX-FUTURE: make these dumb wrappers more dynamic
@accept_args_markers
def ansible_default(
value: t.Any,
default_value: t.Any = '',
boolean: bool = False,
) -> t.Any:
"""Updated `default` filter that only coalesces classic undefined objects; other Undefined-derived types (eg, ErrorMarker) pass through."""
validate_arg_type('boolean', boolean, bool)
if isinstance(value, UndefinedMarker):
return default_value
if boolean and not value:
return default_value
return value
@accept_lazy_markers
@functools.wraps(do_map)
def wrapped_map(*args, **kwargs) -> t.Any:
return do_map(*args, **kwargs)
@accept_lazy_markers
@functools.wraps(do_select)
def wrapped_select(*args, **kwargs) -> t.Any:
return do_select(*args, **kwargs)
@accept_lazy_markers
@functools.wraps(do_selectattr)
def wrapped_selectattr(*args, **kwargs) -> t.Any:
return do_selectattr(*args, **kwargs)
@accept_lazy_markers
@functools.wraps(do_reject)
def wrapped_reject(*args, **kwargs) -> t.Any:
return do_reject(*args, **kwargs)
@accept_lazy_markers
@functools.wraps(do_rejectattr)
def wrapped_rejectattr(*args, **kwargs) -> t.Any:
return do_rejectattr(*args, **kwargs)
@accept_args_markers
def type_debug(obj: object) -> str:
return native_type_name(obj)
| GroupTuple |
python | ray-project__ray | doc/source/ray-overview/examples/mcp-ray-serve/multi_mcp_ray_serve.py | {
"start": 3515,
"end": 5855
} | class ____:
def __init__(self, brave_search: DeploymentHandle, fetch: DeploymentHandle) -> None:
self._mcps = {"brave_search": brave_search, "fetch": fetch}
@api.get("/{mcp_name}/tools")
async def list_tools_http(self, mcp_name: str):
handle = self._mcps.get(mcp_name)
if not handle:
raise HTTPException(404, f"MCP {mcp_name} not found")
try:
return {"tools": await handle.list_tools.remote()}
except Exception as exc:
logger.exception("Listing tools failed")
raise HTTPException(500, str(exc))
@api.post("/{mcp_name}/call")
async def call_tool_http(self, mcp_name: str, request: Request):
handle = self._mcps.get(mcp_name)
if not handle:
raise HTTPException(404, f"MCP {mcp_name} not found")
body = await request.json()
tool_name = body.get("tool_name")
tool_args = body.get("tool_args")
if tool_name is None or tool_args is None:
raise HTTPException(400, "Missing 'tool_name' or 'tool_args'")
try:
result = await handle.call_tool.remote(tool_name, tool_args)
return {"result": result}
except Exception as exc:
logger.exception("Tool call failed")
raise HTTPException(500, str(exc))
# -------------------------
# Binding deployments
# -------------------------
if "BRAVE_API_KEY" not in os.environ:
raise RuntimeError("BRAVE_API_KEY must be set before `serve run`.")
# Example: autoscaling BraveSearch between 1 and 5 replicas,
# targeting ~10 concurrent requests per replica.
BraveSearch = build_mcp_deployment(
name="brave_search",
docker_image="docker.io/mcp/brave-search",
env={"BRAVE_API_KEY": os.environ["BRAVE_API_KEY"]},
num_cpus=0.2,
autoscaling_config={
"min_replicas": 1,
"max_replicas": 5,
"target_num_ongoing_requests": 10,
},
)
# Example: keep Fetch at a fixed 2 replicas.
Fetch = build_mcp_deployment(
name="fetch",
docker_image="docker.io/mcp/fetch",
num_replicas=2,
num_cpus=0.2,
)
# entry-point object for `serve run …`
brave_search_handle = BraveSearch.bind()
fetch_handle = Fetch.bind()
app = Router.bind(brave_search_handle, fetch_handle)
## Run in terminal.
# serve run multi_mcp_ray_serve:app
| Router |
python | pypa__warehouse | tests/common/db/packaging.py | {
"start": 4420,
"end": 4702
} | class ____(WarehouseFactory):
class Meta:
model = Dependency
release = factory.SubFactory(ReleaseFactory)
kind = factory.Faker(
"random_element", elements=[int(kind) for kind in DependencyKind]
)
specifier = factory.Faker("word")
| DependencyFactory |
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 2188,
"end": 6687
} | class ____(nn.Module):
def __init__(self, config: CpmAntConfig, layer_idx=None):
super().__init__()
self.dim_model = config.hidden_size
self.num_heads = config.num_attention_heads
self.dim_head = config.dim_head
self.layer_idx = layer_idx
self.project_q = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
self.project_k = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
self.project_v = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
self.attention_out = nn.Linear(self.num_heads * self.dim_head, self.dim_model, bias=False)
self.softmax = torch.nn.Softmax(dim=-1)
if config.dropout_p is not None:
self.dropout = torch.nn.Dropout(p=config.dropout_p)
else:
self.dropout = None
def forward(
self,
hidden_q: torch.Tensor,
hidden_kv: torch.Tensor,
attention_mask: torch.BoolTensor,
position_bias: torch.Tensor,
output_attentions: Optional[bool] = False,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
):
"""
Args:
hidden_q (`torch.Tensor`):
Input of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.
hidden_kv (`torch.Tensor` of shape `(batch, len_k, dim_model)`)):
Tensor *key_value* and *query* of shape `(batch, len_k, dim_model)`
attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
Avoid invalid areas to participate in the calculation of self-attention.
position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
Provide positional information to self-attention block.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
past_key_values (`Cache`, *optional*):
Cached past key and value projection states.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
"""
batch_size = hidden_q.size(0)
len_q = hidden_q.size(1)
len_k = hidden_kv.size(1)
query = self.project_q(hidden_q)
key = self.project_k(hidden_kv)
value = self.project_v(hidden_kv)
query = query.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
key = key.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
value = value.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
if past_key_values is not None:
key, value = past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position})
len_k = key.size(-2)
# (batch_size, num_heads, len_q, dim_head) @ (batch_size, num_heads, dim_head, len_k) -> (batch_size, num_heads, len_q, len_k)
score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(self.dim_head)
score = score + position_bias
score = torch.masked_fill(
score,
attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
)
score = self.softmax(score)
score = torch.masked_fill(
score,
attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
)
if output_attentions:
attn_weights = score
else:
attn_weights = None
if self.dropout is not None:
score = self.dropout(score)
# (batch_size, num_heads, len_q, len_k) @ (batch_size, num_heads, len_k, dim_head) -> (batch_size, num_heads, len_q, dim_head)
score = torch.matmul(score, value)
score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
score = self.attention_out(score)
return score, attn_weights
| CpmAntAttention |
python | kamyu104__LeetCode-Solutions | Python/minimum-subsequence-in-non-increasing-order.py | {
"start": 33,
"end": 393
} | class ____(object):
def minSubsequence(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
result, total, curr = [], sum(nums), 0
nums.sort(reverse=True)
for i, x in enumerate(nums):
curr += x
if curr > total-curr:
break
return nums[:i+1]
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/services/public/variables.py | {
"start": 3265,
"end": 7770
} | class ____(BulkService[VariableBody]):
"""Service for handling bulk operations on variables."""
def categorize_keys(self, keys: set) -> tuple[set, set]:
"""Categorize the given keys into matched_keys and not_found_keys based on existing keys."""
existing_keys = {variable for variable in self.session.execute(select(Variable.key)).scalars()}
matched_keys = existing_keys & keys
not_found_keys = keys - existing_keys
return matched_keys, not_found_keys
def handle_bulk_create(self, action: BulkCreateAction, results: BulkActionResponse) -> None:
"""Bulk create variables."""
to_create_keys = {variable.key for variable in action.entities}
matched_keys, not_found_keys = self.categorize_keys(to_create_keys)
try:
if action.action_on_existence == BulkActionOnExistence.FAIL and matched_keys:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=f"The variables with these keys: {matched_keys} already exist.",
)
if action.action_on_existence == BulkActionOnExistence.SKIP:
create_keys = not_found_keys
else:
create_keys = to_create_keys
for variable in action.entities:
if variable.key in create_keys:
should_serialize_json = isinstance(variable.value, (dict, list))
Variable.set(
key=variable.key,
value=variable.value,
description=variable.description,
session=self.session,
serialize_json=should_serialize_json,
)
results.success.append(variable.key)
except HTTPException as e:
results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})
def handle_bulk_update(self, action: BulkUpdateAction, results: BulkActionResponse) -> None:
"""Bulk Update variables."""
to_update_keys = {variable.key for variable in action.entities}
matched_keys, not_found_keys = self.categorize_keys(to_update_keys)
try:
if action.action_on_non_existence == BulkActionNotOnExistence.FAIL and not_found_keys:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"The variables with these keys: {not_found_keys} were not found.",
)
if action.action_on_non_existence == BulkActionNotOnExistence.SKIP:
update_keys = matched_keys
else:
update_keys = to_update_keys
for variable in action.entities:
if variable.key not in update_keys:
continue
updated_variable = update_orm_from_pydantic(
variable.key, variable, action.update_mask, self.session
)
results.success.append(updated_variable.key)
except HTTPException as e:
results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})
except ValidationError as e:
results.errors.append({"error": f"{e.errors()}"})
def handle_bulk_delete(self, action: BulkDeleteAction, results: BulkActionResponse) -> None:
"""Bulk delete variables."""
to_delete_keys = set(action.entities)
matched_keys, not_found_keys = self.categorize_keys(to_delete_keys)
try:
if action.action_on_non_existence == BulkActionNotOnExistence.FAIL and not_found_keys:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"The variables with these keys: {not_found_keys} were not found.",
)
if action.action_on_non_existence == BulkActionNotOnExistence.SKIP:
delete_keys = matched_keys
else:
delete_keys = to_delete_keys
for key in delete_keys:
existing_variable = self.session.scalar(select(Variable).where(Variable.key == key).limit(1))
if existing_variable:
self.session.delete(existing_variable)
results.success.append(key)
except HTTPException as e:
results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})
| BulkVariableService |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_daemon_health.py | {
"start": 1737,
"end": 6276
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_get_individual_daemons(self, graphql_context):
if graphql_context.instance.is_ephemeral:
pytest.skip("The daemon isn't compatible with an in-memory instance")
graphql_context.instance.add_daemon_heartbeat(
DaemonHeartbeat(
timestamp=100.0, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=None
)
)
results = execute_dagster_graphql(graphql_context, INDIVIDUAL_DAEMON_QUERY)
scheduler_required = isinstance(graphql_context.instance.scheduler, DagsterDaemonScheduler)
assert results.data == {
"instance": {
"daemonHealth": {
"id": "daemonHealth",
"sensor": {
"daemonType": "SENSOR",
"required": True,
"healthy": False,
"lastHeartbeatTime": 100.0,
},
"run_coordinator": {
"daemonType": "QUEUED_RUN_COORDINATOR",
"required": False,
"healthy": None,
"lastHeartbeatTime": None,
},
"scheduler": {
"daemonType": "SCHEDULER",
"required": scheduler_required,
"healthy": False if scheduler_required else None,
"lastHeartbeatTime": None,
},
}
}
}
def test_get_all_daemons(self, graphql_context):
if graphql_context.instance.is_ephemeral:
pytest.skip("The daemon isn't compatible with an in-memory instance")
results = execute_dagster_graphql(graphql_context, ALL_DAEMON_QUERY)
scheduler_required = isinstance(graphql_context.instance.scheduler, DagsterDaemonScheduler)
assert results.data == {
"instance": {
"daemonHealth": {
"id": "daemonHealth",
"allDaemonStatuses": [
{
"daemonType": "SENSOR",
"required": True,
"healthy": False,
"lastHeartbeatTime": None,
},
{
"daemonType": "BACKFILL",
"required": True,
"healthy": False,
"lastHeartbeatTime": None,
},
{
"daemonType": "ASSET",
"required": True,
"healthy": False,
"lastHeartbeatTime": None,
},
{
"daemonType": "FRESHNESS_DAEMON",
"required": True,
"healthy": False,
"lastHeartbeatTime": None,
},
]
+ (
[
{
"daemonType": "SCHEDULER",
"required": True,
"healthy": False if scheduler_required else None,
"lastHeartbeatTime": None,
}
]
if scheduler_required
else []
),
}
}
}
def test_get_daemon_error(self, graphql_context):
if graphql_context.instance.is_ephemeral:
pytest.skip("The daemon isn't compatible with an in-memory instance")
graphql_context.instance.add_daemon_heartbeat(
DaemonHeartbeat(
timestamp=time.time(),
daemon_type=SensorDaemon.daemon_type(),
daemon_id=None,
errors=[
SerializableErrorInfo(message="foobar", stack=[], cls_name=None, cause=None)
],
)
)
results = execute_dagster_graphql(graphql_context, DAEMON_HEALTH_QUERY)
assert results.data["instance"]["daemonHealth"]["sensor"] == {
"lastHeartbeatErrors": [{"message": "foobar"}],
"healthy": True,
}
| TestDaemonHealth |
python | Netflix__metaflow | test/unit/inheritance/flows/mutator_with_derived_config_flow.py | {
"start": 362,
"end": 2614
} | class ____(BaseC):
"""
Flow testing FlowMutator from base class using config from derived class.
Verifies:
- Base class mutator can access derived class config
- Parameters are injected based on derived config values
- All original parameters and configs remain accessible
"""
final_param = Parameter("final_param", help="Final parameter", default=999)
@step
def start(self):
"""Verify all parameters including those injected by base mutator"""
print("Starting MutatorWithDerivedConfigFlow")
# Original parameters
print(f"base_param: {self.base_param}")
print(f"middle_param: {self.middle_param}")
print(f"final_param: {self.final_param}")
# Configs
print(f"middle_config: {self.middle_config}")
print(f"runtime_config: {self.runtime_config}")
# Injected parameters (from derived config)
print(f"feature_logging (injected): {self.feature_logging}")
print(f"feature_metrics (injected): {self.feature_metrics}")
print(f"worker_count (injected): {self.worker_count}")
# Store for verification
self.result_base_param = self.base_param
self.result_middle_param = self.middle_param
self.result_final_param = self.final_param
self.result_middle_config = dict(self.middle_config)
self.result_runtime_config = dict(self.runtime_config)
# Injected params
self.result_feature_logging = self.feature_logging
self.result_feature_metrics = self.feature_metrics
self.result_worker_count = self.worker_count
# Compute using injected params
enabled_features = sum(
[self.feature_logging, self.feature_metrics]
) # Count of True values
self.result_computation = (
self.worker_count * enabled_features + self.final_param
)
self.next(self.end)
@step
def end(self):
"""End step"""
print(f"Computation result: {self.result_computation}")
print(f"Pathspec: {current.pathspec}")
print("MutatorWithDerivedConfigFlow completed successfully")
if __name__ == "__main__":
MutatorWithDerivedConfigFlow()
| MutatorWithDerivedConfigFlow |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typePrinter3.py | {
"start": 42,
"end": 290
} | class ____:
class Child1:
pass
class Child2:
pass
# This should generate an error that uses fully-qualified names.
v1: A.Child1 = B.Child1()
# This should generate an error that uses simple names.
v2: A.Child1 = B.Child2()
| B |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 2747,
"end": 30742
} | class ____(fixtures.TestBase, ComparesTables):
def test_metadata_contains(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t2", MetaData(), Column("x", Integer))
t4 = Table("t1", MetaData(), Column("x", Integer), schema="foo")
assert "t1" in metadata
assert "foo.t2" in metadata
assert "t2" not in metadata
assert "foo.t1" not in metadata
assert t1 in metadata
assert t2 in metadata
assert t3 not in metadata
assert t4 not in metadata
def test_uninitialized_column_copy(self):
for col in [
Column("foo", String(), nullable=False),
Column("baz", String(), unique=True),
Column(Integer(), primary_key=True),
Column(
"bar",
Integer(),
Sequence("foo_seq"),
primary_key=True,
key="bar",
autoincrement="ignore_fk",
),
Column(Integer(), ForeignKey("bat.blah"), doc="this is a col"),
Column(
"bar",
Integer(),
ForeignKey("bat.blah"),
primary_key=True,
comment="this is a comment",
key="bar",
),
Column("bar", Integer(), info={"foo": "bar"}),
]:
c2 = col._copy()
for attr in (
"name",
"type",
"nullable",
"primary_key",
"key",
"unique",
"info",
"doc",
"autoincrement",
):
eq_(getattr(col, attr), getattr(c2, attr))
eq_(len(col.foreign_keys), len(c2.foreign_keys))
if col.default:
eq_(c2.default.name, "foo_seq")
for a1, a2 in zip(col.foreign_keys, c2.foreign_keys):
assert a1 is not a2
eq_(a2._colspec, "bat.blah")
def test_col_subclass_copy(self):
class MyColumn(schema.Column):
def __init__(self, *args, **kw):
self.widget = kw.pop("widget", None)
super().__init__(*args, **kw)
def _copy(self, *arg, **kw):
c = super()._copy(*arg, **kw)
c.widget = self.widget
return c
c1 = MyColumn("foo", Integer, widget="x")
c2 = c1._copy()
assert isinstance(c2, MyColumn)
eq_(c2.widget, "x")
def test_uninitialized_column_copy_events(self):
msgs = []
def write(c, t):
msgs.append("attach %s.%s" % (t.name, c.name))
c1 = Column("foo", String())
m = MetaData()
for i in range(3):
cx = c1._copy()
# as of 0.7, these events no longer copy. its expected
# that listeners will be re-established from the
# natural construction of things.
cx._on_table_attach(write)
Table("foo%d" % i, m, cx)
eq_(msgs, ["attach foo0.foo", "attach foo1.foo", "attach foo2.foo"])
def test_schema_collection_add(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
Table("t3", metadata, Column("x", Integer))
eq_(metadata._schemas, {"foo", "bar"})
eq_(len(metadata.tables), 3)
def test_schema_collection_remove(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
metadata.remove(t3)
eq_(metadata._schemas, {"foo", "bar"})
eq_(len(metadata.tables), 2)
metadata.remove(t1)
eq_(metadata._schemas, {"bar"})
eq_(len(metadata.tables), 1)
def test_schema_collection_remove_all(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
metadata.clear()
eq_(metadata._schemas, set())
eq_(len(metadata.tables), 0)
def test_metadata_tables_immutable(self):
# this use case was added due to #1917.
metadata = MetaData()
Table("t1", metadata, Column("x", Integer))
assert "t1" in metadata.tables
assert_raises(TypeError, lambda: metadata.tables.pop("t1"))
@testing.provide_metadata
def test_dupe_tables(self):
metadata = self.metadata
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
metadata.create_all(testing.db)
Table("table1", metadata, autoload_with=testing.db)
def go():
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
assert_raises_message(
tsa.exc.InvalidRequestError,
"Table 'table1' is already defined for this "
"MetaData instance. Specify 'extend_existing=True' "
"to redefine options and columns on an existing "
"Table object.",
go,
)
def test_fk_copy(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
kw = dict(
onupdate="X",
ondelete="Y",
use_alter=True,
name="f1",
deferrable="Z",
initially="Q",
link_to_name=True,
comment="foo",
)
fk1 = ForeignKey(c1, **kw)
fk2 = ForeignKeyConstraint((c1,), (c2,), **kw)
t1.append_constraint(fk2)
fk1c = fk1._copy()
fk2c = fk2._copy()
for k in kw:
eq_(getattr(fk1c, k), kw[k])
eq_(getattr(fk2c, k), kw[k])
def test_check_constraint_copy(self):
def r(x):
return x
c = CheckConstraint(
"foo bar",
name="name",
initially=True,
deferrable=True,
comment="foo",
_create_rule=r,
)
c2 = c._copy()
eq_(c2.name, "name")
eq_(str(c2.sqltext), "foo bar")
eq_(c2.initially, True)
eq_(c2.deferrable, True)
eq_(c2.comment, "foo")
assert c2._create_rule is r
def test_col_replace_w_constraint(self):
m = MetaData()
a = Table("a", m, Column("id", Integer, primary_key=True))
aid = Column("a_id", ForeignKey("a.id"))
b = Table("b", m, aid)
b.append_column(aid)
assert b.c.a_id.references(a.c.id)
eq_(len(b.constraints), 2)
def test_fk_construct(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
fk1 = ForeignKeyConstraint(("foo",), ("bar",), table=t1)
assert fk1 in t1.constraints
def test_fk_constraint_col_collection_w_table(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
fk1 = ForeignKeyConstraint(("foo",), ("bar",), table=t1)
eq_(dict(fk1.columns), {"foo": c1})
def test_fk_constraint_col_collection_no_table(self):
fk1 = ForeignKeyConstraint(("foo", "bat"), ("bar", "hoho"))
eq_(dict(fk1.columns), {})
eq_(fk1.column_keys, ["foo", "bat"])
eq_(fk1._col_description, "foo, bat")
eq_(fk1._elements, {"foo": fk1.elements[0], "bat": fk1.elements[1]})
def test_fk_constraint_col_collection_no_table_real_cols(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
fk1 = ForeignKeyConstraint((c1,), (c2,))
eq_(dict(fk1.columns), {})
eq_(fk1.column_keys, ["foo"])
eq_(fk1._col_description, "foo")
eq_(fk1._elements, {"foo": fk1.elements[0]})
def test_fk_constraint_col_collection_added_to_table(self):
c1 = Column("foo", Integer)
m = MetaData()
fk1 = ForeignKeyConstraint(("foo",), ("bar",))
Table("t", m, c1, fk1)
eq_(dict(fk1.columns), {"foo": c1})
eq_(fk1._elements, {"foo": fk1.elements[0]})
def test_fk_constraint_col_collection_via_fk(self):
fk = ForeignKey("bar")
c1 = Column("foo", Integer, fk)
m = MetaData()
t1 = Table("t", m, c1)
fk1 = fk.constraint
eq_(fk1.column_keys, ["foo"])
assert fk1 in t1.constraints
eq_(fk1.column_keys, ["foo"])
eq_(dict(fk1.columns), {"foo": c1})
eq_(fk1._elements, {"foo": fk})
def test_fk_no_such_parent_col_error(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
Table("b", meta, Column("b", Integer))
def go():
a.append_constraint(ForeignKeyConstraint(["x"], ["b.b"]))
assert_raises_message(
exc.ArgumentError,
"Can't create ForeignKeyConstraint on "
"table 'a': no column named 'x' is present.",
go,
)
def test_fk_given_non_col(self):
not_a_col = bindparam("x")
assert_raises_message(
exc.ArgumentError,
"String column name or Column object for DDL foreign "
"key constraint expected, got .*Bind",
ForeignKey,
not_a_col,
)
def test_fk_given_non_col_clauseelem(self):
class Foo:
def __clause_element__(self):
return bindparam("x")
assert_raises_message(
exc.ArgumentError,
"String column name or Column object for DDL foreign "
"key constraint expected, got .*Foo",
ForeignKey,
Foo(),
)
def test_fk_given_col_non_table(self):
t = Table("t", MetaData(), Column("x", Integer))
xa = t.alias().c.x
assert_raises_message(
exc.ArgumentError,
"ForeignKey received Column not bound to a Table, got: .*Alias",
ForeignKey,
xa,
)
def test_fk_given_col_non_table_clauseelem(self):
t = Table("t", MetaData(), Column("x", Integer))
class Foo:
def __clause_element__(self):
return t.alias().c.x
assert_raises_message(
exc.ArgumentError,
"ForeignKey received Column not bound to a Table, got: .*Alias",
ForeignKey,
Foo(),
)
def test_fk_no_such_target_col_error_upfront(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
Table("b", meta, Column("b", Integer))
a.append_constraint(ForeignKeyConstraint(["a"], ["b.x"]))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey 'b.x' on "
"table 'a': table 'b' has no column named 'x'",
getattr,
list(a.foreign_keys)[0],
"column",
)
def test_fk_no_such_target_col_error_delayed(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
a.append_constraint(ForeignKeyConstraint(["a"], ["b.x"]))
Table("b", meta, Column("b", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey 'b.x' on "
"table 'a': table 'b' has no column named 'x'",
getattr,
list(a.foreign_keys)[0],
"column",
)
def test_fk_mismatched_local_remote_cols(self):
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint number of constrained columns must "
"match the number of referenced columns.",
ForeignKeyConstraint,
["a"],
["b.a", "b.b"],
)
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint number of constrained columns "
"must match the number of referenced columns.",
ForeignKeyConstraint,
["a", "b"],
["b.a"],
)
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint with duplicate source column "
"references are not supported.",
ForeignKeyConstraint,
["a", "a"],
["b.a", "b.b"],
)
def test_pickle_metadata_sequence_restated(self):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
)
m2 = pickle.loads(pickle.dumps(m1))
s2 = Sequence("x_seq")
t2 = Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, s2),
extend_existing=True,
)
assert m2._sequences["x_seq"] is t2.c.x.default
assert m2._sequences["x_seq"] is s2
def test_sequence_restated_replaced(self):
"""Test restatement of Sequence replaces."""
m1 = MetaData()
s1 = Sequence("x_seq")
t = Table("a", m1, Column("x", Integer, s1))
assert m1._sequences["x_seq"] is s1
s2 = Sequence("x_seq")
Table("a", m1, Column("x", Integer, s2), extend_existing=True)
assert t.c.x.default is s2
assert m1._sequences["x_seq"] is s2
def test_sequence_attach_to_table(self):
m1 = MetaData()
s1 = Sequence("s")
Table("a", m1, Column("x", Integer, s1))
assert s1.metadata is m1
def test_sequence_attach_to_existing_table(self):
m1 = MetaData()
s1 = Sequence("s")
t = Table("a", m1, Column("x", Integer))
t.c.x._init_items(s1)
assert s1.metadata is m1
def test_pickle_metadata_sequence_implicit(self, picklers):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
)
m2 = picklers.loads(picklers.dumps(m1))
t2 = Table("a", m2, extend_existing=True)
eq_(m2._sequences, {"x_seq": t2.c.x.default})
def test_pickle_metadata_schema(self, picklers):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
schema="y",
)
m2 = picklers.loads(picklers.dumps(m1))
Table("a", m2, schema="y", extend_existing=True)
eq_(m2._schemas, m1._schemas)
def test_metadata_schema_arg(self):
m1 = MetaData(schema="sch1")
m2 = MetaData(schema="sch1", quote_schema=True)
m3 = MetaData(schema="sch1", quote_schema=False)
m4 = MetaData()
for (
i,
(
name,
metadata,
schema_,
quote_schema,
exp_schema,
exp_quote_schema,
),
) in enumerate(
[
("t1", m1, None, None, "sch1", None),
("t2", m1, "sch2", None, "sch2", None),
("t3", m1, "sch2", True, "sch2", True),
("t4", m1, "sch1", None, "sch1", None),
("t5", m1, BLANK_SCHEMA, None, None, None),
("t1", m2, None, None, "sch1", True),
("t2", m2, "sch2", None, "sch2", None),
("t3", m2, "sch2", True, "sch2", True),
("t4", m2, "sch1", None, "sch1", None),
("t1", m3, None, None, "sch1", False),
("t2", m3, "sch2", None, "sch2", None),
("t3", m3, "sch2", True, "sch2", True),
("t4", m3, "sch1", None, "sch1", None),
("t1", m4, None, None, None, None),
("t2", m4, "sch2", None, "sch2", None),
("t3", m4, "sch2", True, "sch2", True),
("t4", m4, "sch1", None, "sch1", None),
("t5", m4, BLANK_SCHEMA, None, None, None),
]
):
kw = {}
if schema_ is not None:
kw["schema"] = schema_
if quote_schema is not None:
kw["quote_schema"] = quote_schema
t = Table(name, metadata, **kw)
eq_(t.schema, exp_schema, "test %d, table schema" % i)
eq_(
t.schema.quote if t.schema is not None else None,
exp_quote_schema,
"test %d, table quote_schema" % i,
)
seq = Sequence(name, metadata=metadata, **kw)
eq_(seq.schema, exp_schema, "test %d, seq schema" % i)
eq_(
seq.schema.quote if seq.schema is not None else None,
exp_quote_schema,
"test %d, seq quote_schema" % i,
)
def test_manual_dependencies(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer))
b = Table("b", meta, Column("foo", Integer))
c = Table("c", meta, Column("foo", Integer))
d = Table("d", meta, Column("foo", Integer))
e = Table("e", meta, Column("foo", Integer))
e.add_is_dependent_on(c)
a.add_is_dependent_on(b)
b.add_is_dependent_on(d)
e.add_is_dependent_on(b)
c.add_is_dependent_on(a)
eq_(meta.sorted_tables, [d, b, a, c, e])
def test_sort_by_parameter_order(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer))
b = Table("b", meta, Column("foo", Integer))
c = Table("c", meta, Column("foo", Integer))
d = Table("d", meta, Column("foo", Integer))
e = Table("e", meta, Column("foo", Integer))
e.add_is_dependent_on(c)
a.add_is_dependent_on(b)
eq_(meta.sorted_tables, [b, c, d, a, e])
def test_fks_sort_by_parameter_order(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
b = Table("b", meta, Column("foo", Integer))
c = Table("c", meta, Column("foo", Integer))
d = Table("d", meta, Column("foo", Integer))
e = Table("e", meta, Column("foo", Integer, ForeignKey("c.foo")))
eq_(meta.sorted_tables, [b, c, d, a, e])
def test_cycles_fks_warning_one(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
b = Table("b", meta, Column("foo", Integer, ForeignKey("d.foo")))
c = Table("c", meta, Column("foo", Integer, ForeignKey("b.foo")))
d = Table("d", meta, Column("foo", Integer, ForeignKey("c.foo")))
e = Table("e", meta, Column("foo", Integer))
with testing.expect_warnings(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "b, c, d", which is usually caused by mutually '
"dependent foreign key constraints. "
"Foreign key constraints involving these tables will not be "
"considered"
):
eq_(meta.sorted_tables, [b, c, d, e, a])
def test_cycles_fks_warning_two(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
b = Table("b", meta, Column("foo", Integer, ForeignKey("a.foo")))
c = Table("c", meta, Column("foo", Integer, ForeignKey("e.foo")))
d = Table("d", meta, Column("foo", Integer))
e = Table("e", meta, Column("foo", Integer, ForeignKey("d.foo")))
with testing.expect_warnings(
"Cannot correctly sort tables; there are unresolvable cycles "
'between tables "a, b", which is usually caused by mutually '
"dependent foreign key constraints. "
"Foreign key constraints involving these tables will not be "
"considered"
):
eq_(meta.sorted_tables, [a, b, d, e, c])
def test_cycles_fks_fks_delivered_separately(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
b = Table("b", meta, Column("foo", Integer, ForeignKey("a.foo")))
c = Table("c", meta, Column("foo", Integer, ForeignKey("e.foo")))
d = Table("d", meta, Column("foo", Integer))
e = Table("e", meta, Column("foo", Integer, ForeignKey("d.foo")))
results = schema.sort_tables_and_constraints(
sorted(meta.tables.values(), key=lambda t: t.key)
)
results[-1] = (None, set(results[-1][-1]))
eq_(
results,
[
(a, set()),
(b, set()),
(d, {fk.constraint for fk in d.foreign_keys}),
(e, {fk.constraint for fk in e.foreign_keys}),
(c, {fk.constraint for fk in c.foreign_keys}),
(
None,
{fk.constraint for fk in a.foreign_keys}.union(
fk.constraint for fk in b.foreign_keys
),
),
],
)
def test_cycles_fks_usealter(self):
meta = MetaData()
a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
b = Table(
"b",
meta,
Column("foo", Integer, ForeignKey("d.foo", use_alter=True)),
)
c = Table("c", meta, Column("foo", Integer, ForeignKey("b.foo")))
d = Table("d", meta, Column("foo", Integer, ForeignKey("c.foo")))
e = Table("e", meta, Column("foo", Integer))
eq_(meta.sorted_tables, [b, e, a, c, d])
def test_nonexistent(self):
assert_raises(
tsa.exc.NoSuchTableError,
Table,
"fake_table",
MetaData(),
autoload_with=testing.db,
)
def test_assorted_repr(self):
t1 = Table("foo", MetaData(), Column("x", Integer))
i1 = Index("bar", t1.c.x)
ck = schema.CheckConstraint("x > y", name="someconstraint")
for const, exp in (
(Sequence("my_seq"), "Sequence('my_seq')"),
(Sequence("my_seq", start=5), "Sequence('my_seq', start=5)"),
(Column("foo", Integer), "Column('foo', Integer(), table=None)"),
(
Column(
"foo",
Integer,
primary_key=True,
nullable=False,
onupdate=1,
default=42,
server_default="42",
comment="foo",
),
"Column('foo', Integer(), table=None, primary_key=True, "
f"nullable=False, onupdate={ColumnDefault(1)}, default="
f"{ColumnDefault(42)}, server_default={DefaultClause('42')}, "
"comment='foo')",
),
(
Column(
"foo",
Integer,
primary_key=True,
nullable=False,
onupdate=1,
insert_default=42,
server_default="42",
comment="foo",
),
"Column('foo', Integer(), table=None, primary_key=True, "
f"nullable=False, onupdate={ColumnDefault(1)}, default="
f"{ColumnDefault(42)}, server_default={DefaultClause('42')}, "
"comment='foo')",
),
(
Table("bar", MetaData(), Column("x", String)),
"Table('bar', MetaData(), "
"Column('x', String(), table=<bar>), schema=None)",
),
(
schema.DefaultGenerator(for_update=True),
"DefaultGenerator(for_update=True)",
),
(schema.Index("bar", "c"), "Index('bar', 'c')"),
(i1, "Index('bar', Column('x', Integer(), table=<foo>))"),
(schema.FetchedValue(), "FetchedValue()"),
(
ck,
"CheckConstraint("
"%s"
", name='someconstraint')" % repr(ck.sqltext),
),
(
ColumnDefault(("foo", "bar")),
"ScalarElementColumnDefault(('foo', 'bar'))",
),
):
eq_(repr(const), exp)
@testing.variation("kind", ["engine", "conn", "something"])
def test_metadata_bind(self, connection, kind):
with expect_raises_message(
exc.ArgumentError,
"expected schema argument to be a string, got",
):
if kind.engine:
MetaData(connection.engine)
elif kind.conn:
MetaData(connection)
else:
MetaData(42) # type: ignore
def test_metadata_schemas(self):
eq_(MetaData().schemas, ())
eq_(MetaData(schema="foo").schemas, ("foo",))
m1 = MetaData()
Table("t", m1, schema="x")
Table("t2", m1, schema="y")
eq_(sorted(m1.schemas), ["x", "y"])
m2 = MetaData(schema="w")
Table("t", m2, schema="x")
Table("t2", m2, schema="y")
eq_(sorted(m2.schemas), ["w", "x", "y"])
def test_get_schema_objects_empty(self):
eq_(MetaData().get_schema_objects(Enum), ())
def test_get_schema_objects(self):
class MyEnum(Enum):
pass
m1 = MetaData()
e = Enum("a", "b", name="foo")
e2 = MyEnum("a", "b", name="foo", schema="t", metadata=m1)
s = Sequence("s")
Table("t", m1, Column("c", e), Column("s", Integer, s))
eq_(m1.get_schema_objects(Enum), (e,))
eq_(m1.get_schema_objects(MyEnum), ())
eq_(m1.get_schema_objects(Enum, schema="t"), (e2,))
eq_(m1.get_schema_objects(MyEnum, schema="t"), (e2,))
eq_(m1.get_schema_objects(Sequence), (s,))
def test_get_schema_object_by_name(self):
class MyEnum(Enum):
pass
m1 = MetaData()
e = Enum("a", "b", name="foo")
e2 = Enum("x", "y", name="bar", metadata=m1)
e3 = MyEnum("x", "b", name="foo", schema="t", metadata=m1)
e4 = Enum("a", "y", name="baz", schema="t")
Table("t", m1, Column("c", e), Column("c2", e4))
eq_(m1.get_schema_object_by_name(Enum, "foo"), e)
eq_(m1.get_schema_object_by_name(Enum, "bar"), e2)
eq_(m1.get_schema_object_by_name(MyEnum, "bar"), None)
eq_(m1.get_schema_object_by_name(Enum, "baz"), None)
eq_(m1.get_schema_object_by_name(Enum, "foo", schema="t"), e3)
eq_(m1.get_schema_object_by_name(Enum, "foo", schema="t"), e3)
eq_(m1.get_schema_object_by_name(MyEnum, "foo", schema="t"), e3)
eq_(m1.get_schema_object_by_name(Enum, "baz", schema="t"), e4)
eq_(m1.get_schema_object_by_name(Enum, "bar", schema="t"), None)
def test_custom_schematype(self):
class FooType(sqltypes.SchemaType, sqltypes.TypeEngine):
pass
m1 = MetaData()
t1 = FooType(name="foo")
t2 = FooType(name="bar", metadata=m1)
Table("t", m1, Column("c", t1))
eq_(set(m1.get_schema_objects(FooType)), {t1, t2})
eq_(m1.get_schema_object_by_name(FooType, "foo"), t1)
eq_(m1.get_schema_object_by_name(FooType, "bar"), t2)
eq_(m1.get_schema_object_by_name(Enum, "bar"), None)
| MetaDataTest |
python | donnemartin__interactive-coding-challenges | sorting_searching/merge_into/test_merge_into.py | {
"start": 18,
"end": 788
} | class ____(unittest.TestCase):
def test_merge_into(self):
array = Array()
self.assertRaises(TypeError, array.merge_into, None, None, None, None)
self.assertRaises(ValueError, array.merge_into, [1], [2], -1, -1)
a = [1, 2, 3]
self.assertEqual(array.merge_into(a, [], len(a), 0), [1, 2, 3])
a = [1, 2, 3]
self.assertEqual(array.merge_into(a, [], len(a), 0), [1, 2, 3])
a = [1, 3, 5, 7, 9, None, None, None]
b = [4, 5, 6]
expected = [1, 3, 4, 5, 5, 6, 7, 9]
self.assertEqual(array.merge_into(a, b, 5, len(b)), expected)
print('Success: test_merge_into')
def main():
test = TestArray()
test.test_merge_into()
if __name__ == '__main__':
main()
| TestArray |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor_util_test.py | {
"start": 52542,
"end": 52946
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testConversion(self):
"""Make sure fully known TensorShape objects convert to Tensors."""
shape = tensor_shape.TensorShape([1, tensor_shape.Dimension(2)])
shape_tensor = shape_util.shape_tensor(shape)
self.assertAllEqual((1, 2), shape_tensor)
if __name__ == "__main__":
test.main()
| ShapeTensorTest |
python | kamyu104__LeetCode-Solutions | Python/longest-continuous-increasing-subsequence.py | {
"start": 29,
"end": 414
} | class ____(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result, count = 0, 0
for i in xrange(len(nums)):
if i == 0 or nums[i-1] < nums[i]:
count += 1
result = max(result, count)
else:
count = 1
return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-inorder-traversal.py | {
"start": 182,
"end": 956
} | class ____(object):
def inorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
result, curr = [], root
while curr:
if curr.left is None:
result.append(curr.val)
curr = curr.right
else:
node = curr.left
while node.right and node.right != curr:
node = node.right
if node.right is None:
node.right = curr
curr = curr.left
else:
result.append(curr.val)
node.right = None
curr = curr.right
return result
# Time: O(n)
# Space: O(h)
# Stack Solution
| Solution |
python | getsentry__sentry | tests/sentry/utils/test_services.py | {
"start": 477,
"end": 566
} | class ____(Operation):
def apply(self, x: int, y: int) -> int:
return x + y
| Add |
python | wandb__wandb | wandb/sdk/launch/agent/job_status_tracker.py | {
"start": 405,
"end": 1830
} | class ____:
run_queue_item_id: str
queue: str
saver: RunQueueItemFileSaver
run_id: Optional[str] = None
project: Optional[str] = None
entity: Optional[str] = None
run: Optional[AbstractRun] = None
failed_to_start: bool = False
completed_status: Optional[str] = None
is_scheduler: bool = False
err_stage: str = "agent"
@property
def job_completed(self) -> bool:
return self.failed_to_start or self.completed_status is not None
def update_run_info(self, launch_project: LaunchProject) -> None:
self.run_id = launch_project.run_id
self.project = launch_project.target_project
self.entity = launch_project.target_entity
def set_err_stage(self, stage: str) -> None:
self.err_stage = stage
async def check_wandb_run_stopped(self, api: Api) -> bool:
assert (
self.run_id is not None
and self.project is not None
and self.entity is not None
), (
"Job tracker does not contain run info. Update with run info before checking if run stopped"
)
check_stop = event_loop_thread_exec(api.api.check_stop_requested)
try:
return bool(await check_stop(self.project, self.entity, self.run_id))
except CommError:
_logger.exception("CommError when checking if wandb run stopped")
return False
| JobAndRunStatusTracker |
python | django-haystack__django-haystack | test_haystack/test_query.py | {
"start": 34489,
"end": 35627
} | class ____(SearchQuerySetTestCase):
def test_values_sqs(self):
sqs = self.msqs.auto_query("test").values("id")
self.assertIsInstance(sqs, ValuesSearchQuerySet)
# We'll do a basic test to confirm that slicing works as expected:
self.assertIsInstance(sqs[0], dict)
self.assertIsInstance(sqs[0:5][0], dict)
def test_valueslist_sqs(self):
sqs = self.msqs.auto_query("test").values_list("id")
self.assertIsInstance(sqs, ValuesListSearchQuerySet)
self.assertIsInstance(sqs[0], (list, tuple))
self.assertIsInstance(sqs[0:1][0], (list, tuple))
self.assertRaises(
TypeError,
self.msqs.auto_query("test").values_list,
"id",
"score",
flat=True,
)
flat_sqs = self.msqs.auto_query("test").values_list("id", flat=True)
self.assertIsInstance(sqs, ValuesListSearchQuerySet)
# Note that this will actually be None because a mocked sqs lacks
# anything else:
self.assertIsNone(flat_sqs[0])
self.assertIsNone(flat_sqs[0:1][0])
| ValuesQuerySetTestCase |
python | mozilla__bleach | tests/test_clean.py | {
"start": 39521,
"end": 40603
} | class ____:
def test_basics(self):
TAGS = {"span", "br"}
ATTRS = {"span": ["style"]}
cleaner = Cleaner(tags=TAGS, attributes=ATTRS)
assert (
cleaner.clean('a <br/><span style="color:red">test</span>')
== 'a <br><span style="">test</span>'
)
def test_filters(self):
# Create a Filter that changes all the attr values to "moo"
class MooFilter(Filter):
def __iter__(self):
for token in Filter.__iter__(self):
if token["type"] in ["StartTag", "EmptyTag"] and token["data"]:
for attr, value in token["data"].items():
token["data"][attr] = "moo"
yield token
ATTRS = {"img": ["rel", "src"]}
TAGS = {"img"}
cleaner = Cleaner(tags=TAGS, attributes=ATTRS, filters=[MooFilter])
dirty = 'this is cute! <img src="http://example.com/puppy.jpg" rel="nofollow">'
assert cleaner.clean(dirty) == 'this is cute! <img src="moo" rel="moo">'
| TestCleaner |
python | huggingface__transformers | src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py | {
"start": 25147,
"end": 31760
} | class ____(RobertaPreLayerNormPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = RobertaPreLayerNormEmbeddings(config)
self.encoder = RobertaPreLayerNormEncoder(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = RobertaPreLayerNormPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
"""
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs[0]
sequence_output = self.LayerNorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
# Copied from transformers.models.bert.modeling_bert.BertModel._create_attention_masks
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
@auto_docstring(
custom_intro="""
RoBERTa-PreLayerNorm Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm, RobertaPreLayerNormTokenizer->RobertaTokenizer
| RobertaPreLayerNormModel |
python | pytorch__pytorch | torch/ao/pruning/scheduler/lambda_scheduler.py | {
"start": 195,
"end": 2416
} | class ____(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
sl_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in sparsifier.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
Example:
>>> # Assuming sparsifier has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95**epoch
>>> # xdoctest: +SKIP
>>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(
self,
sparsifier: BaseSparsifier,
sl_lambda: Callable[[int], float] | list[Callable[[int], float]],
last_epoch: int = -1,
verbose: bool = False,
) -> None:
self.sparsifier = sparsifier
if not isinstance(sl_lambda, list) and not isinstance(sl_lambda, tuple):
self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)
else:
if len(sl_lambda) != len(sparsifier.groups):
raise ValueError(
f"Expected {len(sparsifier.groups)} lr_lambdas, but got {len(sl_lambda)}"
)
self.sl_lambdas = list(sl_lambda)
super().__init__(sparsifier, last_epoch, verbose) # type: ignore[no-untyped-call]
def get_sl(self) -> list[float]:
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`.",
stacklevel=2,
)
return [
base_sl * lmbda(self.last_epoch)
for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)
]
| LambdaSL |
python | doocs__leetcode | solution/0700-0799/0767.Reorganize String/Solution2.py | {
"start": 0,
"end": 579
} | class ____:
def reorganizeString(self, s: str) -> str:
return self.rearrangeString(s, 2)
def rearrangeString(self, s: str, k: int) -> str:
h = [(-v, c) for c, v in Counter(s).items()]
heapify(h)
q = deque()
ans = []
while h:
v, c = heappop(h)
v *= -1
ans.append(c)
q.append((v - 1, c))
if len(q) >= k:
w, c = q.popleft()
if w:
heappush(h, (-w, c))
return "" if len(ans) != len(s) else "".join(ans)
| Solution |
python | scikit-learn__scikit-learn | sklearn/neural_network/_stochastic_optimizers.py | {
"start": 6085,
"end": 8838
} | class ____(BaseOptimizer):
"""Stochastic gradient descent optimizer with Adam
Note: All default values are from the original Adam paper
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, default=0.001
The initial learning rate used. It controls the step-size in updating
the weights
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector, should be
in [0, 1)
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector, should be
in [0, 1)
epsilon : float, default=1e-8
Value for numerical stability
Attributes
----------
learning_rate : float
The current learning rate
t : int
Timestep
ms : list, length = len(params)
First moment vectors
vs : list, length = len(params)
Second moment vectors
References
----------
:arxiv:`Kingma, Diederik, and Jimmy Ba (2014) "Adam: A method for
stochastic optimization." <1412.6980>
"""
def __init__(
self, params, learning_rate_init=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8
):
super().__init__(learning_rate_init)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.t = 0
self.ms = [np.zeros_like(param) for param in params]
self.vs = [np.zeros_like(param) for param in params]
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
self.t += 1
self.ms = [
self.beta_1 * m + (1 - self.beta_1) * grad
for m, grad in zip(self.ms, grads)
]
self.vs = [
self.beta_2 * v + (1 - self.beta_2) * (grad**2)
for v, grad in zip(self.vs, grads)
]
self.learning_rate = (
self.learning_rate_init
* np.sqrt(1 - self.beta_2**self.t)
/ (1 - self.beta_1**self.t)
)
updates = [
-self.learning_rate * m / (np.sqrt(v) + self.epsilon)
for m, v in zip(self.ms, self.vs)
]
return updates
| AdamOptimizer |
python | jazzband__django-oauth-toolkit | oauth2_provider/exceptions.py | {
"start": 422,
"end": 792
} | class ____(Exception):
"""
General class to derive from for all OIDC related errors.
"""
status_code = 400
error = None
def __init__(self, description=None):
if description is not None:
self.description = description
message = "({}) {}".format(self.error, self.description)
super().__init__(message)
| OIDCError |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 56723,
"end": 56844
} | class ____(ReturnTypeFromArgs[_T]): # noqa: A001
"""The SQL SUM() aggregate function."""
inherit_cache = True
| sum |
python | PyCQA__bandit | tests/unit/core/test_config.py | {
"start": 8565,
"end": 9748
} | class ____(TestConfigCompat):
sample = textwrap.dedent(
"""
[tool.bandit.profiles.test_1]
include = [
"any_other_function_with_shell_equals_true",
"assert_used",
]
[tool.bandit.profiles.test_2]
include = ["blacklist_calls"]
[tool.bandit.profiles.test_3]
include = ["blacklist_imports"]
[tool.bandit.profiles.test_4]
exclude = ["assert_used"]
[tool.bandit.profiles.test_5]
exclude = ["blacklist_calls", "blacklist_imports"]
[tool.bandit.profiles.test_6]
include = ["blacklist_calls"]
exclude = ["blacklist_imports"]
[[tool.bandit.blacklist_calls.bad_name_sets]]
[tool.bandit.blacklist_calls.bad_name_sets.pickle]
qualnames = ["pickle.loads"]
message = "{func} library appears to be in use."
[[tool.bandit.blacklist_imports.bad_import_sets]]
[tool.bandit.blacklist_imports.bad_import_sets.telnet]
imports = ["telnetlib"]
level = "HIGH"
message = "{module} is considered insecure."
"""
)
suffix = ".toml"
| TestTomlConfig |
python | mlflow__mlflow | mlflow/tracing/client.py | {
"start": 2051,
"end": 30184
} | class ____:
"""
Client of an MLflow Tracking Server that creates and manages experiments and runs.
"""
def __init__(self, tracking_uri: str | None = None):
"""
Args:
tracking_uri: Address of local or remote tracking server.
"""
self.tracking_uri = _resolve_tracking_uri(tracking_uri)
# NB: Fetch the tracking store (`self.store`) upon client initialization to ensure that
# the tracking URI is valid and the store can be properly resolved. We define `store` as a
# property method to ensure that the client is serializable, even if the store is not
# self.store
self.store
@property
def store(self):
return _get_store(self.tracking_uri)
@record_usage_event(StartTraceEvent)
def start_trace(self, trace_info: TraceInfo) -> TraceInfo:
"""
Create a new trace in the backend.
Args:
trace_info: The TraceInfo object to record in the backend.
Returns:
The returned TraceInfoV3 object from the backend.
"""
return self.store.start_trace(trace_info=trace_info)
def log_spans(self, location: str, spans: list[Span]) -> list[Span]:
"""
Log spans to the backend.
Args:
location: The location to log spans to. It should either be an experiment ID or a
Unity Catalog table name.
spans: List of Span objects to log.
Returns:
List of logged Span objects from the backend.
"""
return self.store.log_spans(
location=location,
spans=spans,
tracking_uri=self.tracking_uri if is_databricks_uri(self.tracking_uri) else None,
)
def delete_traces(
self,
experiment_id: str,
max_timestamp_millis: int | None = None,
max_traces: int | None = None,
trace_ids: list[str] | None = None,
) -> int:
return self.store.delete_traces(
experiment_id=experiment_id,
max_timestamp_millis=max_timestamp_millis,
max_traces=max_traces,
trace_ids=trace_ids,
)
def get_trace_info(self, trace_id: str) -> TraceInfo:
"""
Get the trace info matching the ``trace_id``.
Args:
trace_id: String id of the trace to fetch.
Returns:
TraceInfo object, of type ``mlflow.entities.trace_info.TraceInfo``.
"""
with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace:
if trace is not None:
return trace.info
return self.store.get_trace_info(trace_id)
def get_trace(self, trace_id: str) -> Trace:
"""
Get the trace matching the ``trace_id``.
Args:
trace_id: String id of the trace to fetch.
Returns:
The fetched Trace object, of type ``mlflow.entities.Trace``.
"""
location, _ = parse_trace_id_v4(trace_id)
if location is not None:
start_time = time.time()
attempt = 0
while time.time() - start_time < GET_TRACE_V4_RETRY_TIMEOUT_SECONDS:
# For a V4 trace, load spans from the v4 BatchGetTraces endpoint.
# BatchGetTraces returns an empty list if the trace is not found, which will be
# retried up to GET_TRACE_V4_RETRY_TIMEOUT_SECONDS seconds.
if traces := self.store.batch_get_traces([trace_id], location):
return traces[0]
attempt += 1
interval = 2**attempt
_logger.debug(
f"Trace not found, retrying in {interval} seconds (attempt {attempt})"
)
time.sleep(interval)
raise MlflowException(
message=f"Trace with ID {trace_id} is not found.",
error_code=NOT_FOUND,
)
else:
try:
trace_info = self.get_trace_info(trace_id)
# if the trace is stored in the tracking store, load spans from the tracking store
# otherwise, load spans from the artifact repository
if trace_info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE:
try:
return self.store.get_trace(trace_id)
except MlflowNotImplementedException:
pass
if traces := self.store.batch_get_traces([trace_info.trace_id]):
return traces[0]
else:
raise MlflowException(
f"Trace with ID {trace_id} is not found.",
error_code=NOT_FOUND,
)
else:
trace_data = self._download_trace_data(trace_info)
except MlflowTraceDataNotFound:
raise MlflowException(
message=(
f"Trace with ID {trace_id} cannot be loaded because it is missing span "
"data. Please try creating or loading another trace."
),
error_code=BAD_REQUEST,
) from None # Ensure the original spammy exception is not included in the traceback
except MlflowTraceDataCorrupted:
raise MlflowException(
message=(
f"Trace with ID {trace_id} cannot be loaded because its span data"
" is corrupted. Please try creating or loading another trace."
),
error_code=BAD_REQUEST,
) from None # Ensure the original spammy exception is not included in the traceback
return Trace(trace_info, trace_data)
def get_online_trace_details(
self,
trace_id: str,
source_inference_table: str,
source_databricks_request_id: str,
) -> str:
return self.store.get_online_trace_details(
trace_id=trace_id,
source_inference_table=source_inference_table,
source_databricks_request_id=source_databricks_request_id,
)
def _search_traces(
self,
experiment_ids: list[str] | None = None,
filter_string: str | None = None,
max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by: list[str] | None = None,
page_token: str | None = None,
model_id: str | None = None,
locations: list[str] | None = None,
):
return self.store.search_traces(
experiment_ids=experiment_ids,
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
model_id=model_id,
locations=locations,
)
def search_traces(
self,
experiment_ids: list[str] | None = None,
filter_string: str | None = None,
max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by: list[str] | None = None,
page_token: str | None = None,
run_id: str | None = None,
include_spans: bool = True,
model_id: str | None = None,
locations: list[str] | None = None,
) -> PagedList[Trace]:
"""
Return traces that match the given list of search expressions within the experiments.
Args:
experiment_ids: List of experiment ids to scope the search. Deprecated,
use `locations` instead.
filter_string: A search filter string.
max_results: Maximum number of traces desired.
order_by: List of order_by clauses.
page_token: Token specifying the next page of results. It should be obtained from
a ``search_traces`` call.
run_id: A run id to scope the search. When a trace is created under an active run,
it will be associated with the run and you can filter on the run id to retrieve
the trace.
include_spans: If ``True``, include spans in the returned traces. Otherwise, only
the trace metadata is returned, e.g., trace ID, start time, end time, etc,
without any spans.
model_id: If specified, return traces associated with the model ID.
locations: A list of locations to search over. To search over experiments, provide
a list of experiment IDs. To search over UC tables on databricks, provide
a list of locations in the format `<catalog_name>.<schema_name>`.
Returns:
A :py:class:`PagedList <mlflow.store.entities.PagedList>` of
:py:class:`Trace <mlflow.entities.Trace>` objects that satisfy the search
expressions. If the underlying tracking store supports pagination, the token for the
next page may be obtained via the ``token`` attribute of the returned object; however,
some store implementations may not support pagination and thus the returned token would
not be meaningful in such cases.
"""
if model_id is not None:
if filter_string:
raise MlflowException(
message=(
"Cannot specify both `model_id` or `filter_string` in the search_traces "
"call."
),
error_code=INVALID_PARAMETER_VALUE,
)
# if sql_warehouse_id is not set then we convert model_id to filter_string,
# because `_search_unified_traces` requires sql warehouse id existing.
if MLFLOW_TRACING_SQL_WAREHOUSE_ID.get() is None:
filter_string = f"request_metadata.`mlflow.modelId` = '{model_id}'"
model_id = None
if run_id:
run = self.store.get_run(run_id)
if run.info.experiment_id not in locations:
raise MlflowException(
f"Run {run_id} belongs to experiment {run.info.experiment_id}, which is not "
f"in the list of locations provided: {locations}. Please include "
f"experiment {run.info.experiment_id} in the `locations` parameter to "
"search for traces from this run.",
error_code=INVALID_PARAMETER_VALUE,
)
additional_filter = f"attribute.run_id = '{run_id}'"
if filter_string:
if TraceMetadataKey.SOURCE_RUN in filter_string:
raise MlflowException(
"You cannot filter by run_id when it is already part of the filter string."
f"Please remove the {TraceMetadataKey.SOURCE_RUN} filter from the filter "
"string and try again.",
error_code=INVALID_PARAMETER_VALUE,
)
filter_string += f" AND {additional_filter}"
else:
filter_string = additional_filter
traces = []
next_max_results = max_results
next_token = page_token
max_workers = MLFLOW_SEARCH_TRACES_MAX_THREADS.get()
executor = (
ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="MlflowTracingSearch")
if include_spans
else nullcontext()
)
with executor:
while len(traces) < max_results:
trace_infos, next_token = self._search_traces(
experiment_ids=experiment_ids,
filter_string=filter_string,
max_results=next_max_results,
order_by=order_by,
page_token=next_token,
model_id=model_id,
locations=locations,
)
if include_spans:
trace_infos_by_location = self._group_trace_infos_by_location(trace_infos)
for (
location,
location_trace_infos,
) in trace_infos_by_location.items():
if location == SpansLocation.ARTIFACT_REPO:
# download traces from artifact repository if spans are
# stored in the artifact repository
traces.extend(
trace
for trace in executor.map(
self._download_spans_from_artifact_repo,
location_trace_infos,
)
if trace
)
else:
# Get full traces with BatchGetTraces, all traces in a single call
# must be located in the same table.
trace_ids = [t.trace_id for t in location_trace_infos]
traces.extend(
self._download_spans_from_batch_get_traces(
trace_ids, location, executor
)
)
else:
traces.extend(Trace(t, TraceData(spans=[])) for t in trace_infos)
if not next_token:
break
next_max_results = max_results - len(traces)
return PagedList(traces, next_token)
def _download_spans_from_batch_get_traces(
self, trace_ids: list[str], location: str, executor: ThreadPoolExecutor
) -> list[Trace]:
"""
Fetch full traces including spans from the BatchGetTrace v4 endpoint.
BatchGetTrace endpoint only support up to 10 traces in a single call.
"""
traces = []
def _fetch_minibatch(ids: list[str]) -> list[Trace]:
return self.store.batch_get_traces(ids, location) or []
batch_size = _MLFLOW_SEARCH_TRACES_MAX_BATCH_SIZE.get()
batches = [trace_ids[i : i + batch_size] for i in range(0, len(trace_ids), batch_size)]
for minibatch_traces in executor.map(_fetch_minibatch, batches):
traces.extend(minibatch_traces)
return traces
def _download_spans_from_artifact_repo(self, trace_info: TraceInfo) -> Trace | None:
"""
Download trace data for the given trace_info and returns a Trace object.
If the download fails (e.g., the trace data is missing or corrupted), returns None.
This is used for traces logged via v3 endpoint, where spans are stored in artifact store.
"""
is_online_trace = is_uuid(trace_info.trace_id)
is_databricks = is_databricks_uri(self.tracking_uri)
# For online traces in Databricks, we need to get trace data from a different endpoint
try:
if is_databricks and is_online_trace:
# For online traces, get data from the online API
trace_data = self.get_online_trace_details(
trace_id=trace_info.trace_id,
source_inference_table=trace_info.request_metadata.get("mlflow.sourceTable"),
source_databricks_request_id=trace_info.request_metadata.get(
"mlflow.databricksRequestId"
),
)
trace_data = TraceData.from_dict(json.loads(trace_data))
else:
# For offline traces, download data from artifact storage
trace_data = self._download_trace_data(trace_info)
except MlflowTraceDataException as e:
_logger.warning(
(
f"Failed to download trace data for trace {trace_info.trace_id!r} "
f"with {e.ctx}. For full traceback, set logging level to DEBUG."
),
exc_info=_logger.isEnabledFor(logging.DEBUG),
)
return None
else:
return Trace(trace_info, trace_data)
def _group_trace_infos_by_location(
self, trace_infos: list[TraceInfo]
) -> dict[str, list[TraceInfo]]:
"""
Group the trace infos based on where the trace data is stored.
Returns:
A dictionary mapping location to a list of trace infos.
"""
trace_infos_by_location = defaultdict(list)
for trace_info in trace_infos:
if uc_schema := trace_info.trace_location.uc_schema:
location = f"{uc_schema.catalog_name}.{uc_schema.schema_name}"
trace_infos_by_location[location].append(trace_info)
elif trace_info.trace_location.mlflow_experiment:
# New traces in SQL store store spans in the tracking store, while for old traces or
# traces with File store, spans are stored in artifact repository.
if trace_info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE:
# location is not used for traces with mlflow experiment location in tracking
# store, so we use None as the location
trace_infos_by_location[None].append(trace_info)
else:
trace_infos_by_location[SpansLocation.ARTIFACT_REPO].append(trace_info)
else:
_logger.warning(f"Unsupported location: {trace_info.trace_location}. Skipping.")
return trace_infos_by_location
def calculate_trace_filter_correlation(
self,
experiment_ids: list[str],
filter_string1: str,
filter_string2: str,
base_filter: str | None = None,
):
"""
Calculate the correlation (NPMI) between two trace filter conditions.
This method computes the Normalized Pointwise Mutual Information (NPMI)
between traces matching two different filter conditions, which measures
how much more (or less) likely traces are to satisfy both conditions
compared to if the conditions were independent.
Args:
experiment_ids: List of experiment IDs to search within.
filter_string1: First filter condition (e.g., "span.type = 'LLM'").
filter_string2: Second filter condition (e.g., "feedback.quality > 0.8").
base_filter: Optional base filter that both filter1 and filter2 are tested on top of
(e.g., 'request_time > ... and request_time < ...' for time windows).
Returns:
TraceFilterCorrelationResult containing:
- npmi: NPMI score from -1 (never co-occur) to 1 (always co-occur)
- npmi_smoothed: Smoothed NPMI value with Jeffreys prior for robustness
- filter1_count: Number of traces matching filter_string1
- filter2_count: Number of traces matching filter_string2
- joint_count: Number of traces matching both filters
- total_count: Total number of traces in the experiments
.. code-block:: python
from mlflow.tracing.client import TracingClient
client = TracingClient()
result = client.calculate_trace_filter_correlation(
experiment_ids=["123"],
filter_string1="span.type = 'LLM'",
filter_string2="feedback.quality > 0.8",
)
print(f"NPMI: {result.npmi:.3f}")
# Output: NPMI: 0.456
"""
return self.store.calculate_trace_filter_correlation(
experiment_ids=experiment_ids,
filter_string1=filter_string1,
filter_string2=filter_string2,
base_filter=base_filter,
)
def set_trace_tags(self, trace_id: str, tags: dict[str, str]):
"""
Set tags on the trace with the given trace_id.
Args:
trace_id: The ID of the trace.
tags: A dictionary of key-value pairs.
"""
tags = exclude_immutable_tags(tags)
for k, v in tags.items():
self.set_trace_tag(trace_id, k, v)
def set_trace_tag(self, trace_id: str, key: str, value: str):
"""
Set a tag on the trace with the given trace ID.
Args:
trace_id: The ID of the trace to set the tag on.
key: The string key of the tag. Must be at most 250 characters long, otherwise
it will be truncated when stored.
value: The string value of the tag. Must be at most 250 characters long, otherwise
it will be truncated when stored.
"""
if not isinstance(value, str):
_logger.warning(
"Received non-string value for trace tag. Please note that non-string tag values"
"will automatically be stringified when the trace is logged."
)
# Trying to set the tag on the active trace first
with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace:
if trace:
trace.info.tags[key] = str(value)
return
if key in IMMUTABLE_TAGS:
_logger.warning(f"Tag '{key}' is immutable and cannot be set on a trace.")
else:
self.store.set_trace_tag(trace_id, key, str(value))
def delete_trace_tag(self, trace_id: str, key: str):
"""
Delete a tag on the trace with the given trace ID.
Args:
trace_id: The ID of the trace to delete the tag from.
key: The string key of the tag. Must be at most 250 characters long, otherwise
it will be truncated when stored.
"""
# Trying to delete the tag on the active trace first
with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace:
if trace:
if key in trace.info.tags:
trace.info.tags.pop(key)
return
else:
raise MlflowException(
f"Tag with key {key} not found in trace with ID {trace_id}.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
if key in IMMUTABLE_TAGS:
_logger.warning(f"Tag '{key}' is immutable and cannot be deleted on a trace.")
else:
self.store.delete_trace_tag(trace_id, key)
def get_assessment(self, trace_id: str, assessment_id: str) -> Assessment:
"""
Get an assessment entity from the backend store.
Args:
trace_id: The ID of the trace.
assessment_id: The ID of the assessment to get.
Returns:
The Assessment object.
"""
return self.store.get_assessment(trace_id, assessment_id)
@record_usage_event(LogAssessmentEvent)
def log_assessment(self, trace_id: str, assessment: Assessment) -> Assessment:
"""
Log an assessment to a trace.
Args:
trace_id: The ID of the trace.
assessment: The assessment object to log.
Returns:
The logged Assessment object.
"""
assessment.trace_id = trace_id
if trace_id is None or trace_id == NO_OP_SPAN_TRACE_ID:
_logger.debug(
"Skipping assessment logging for NO_OP_SPAN_TRACE_ID. This is expected when "
"tracing is disabled."
)
return assessment
# If the trace is the active trace, add the assessment to it in-memory
if trace_id == mlflow.get_active_trace_id():
with InMemoryTraceManager.get_instance().get_trace(trace_id) as trace:
if trace is None:
_logger.debug(
f"Trace {trace_id} is active but not found in the in-memory buffer. "
"Something is wrong with trace handling. Skipping assessment logging."
)
trace.info.assessments.append(assessment)
return assessment
return self.store.create_assessment(assessment)
def update_assessment(
self,
trace_id: str,
assessment_id: str,
assessment: Assessment,
):
"""
Update an existing assessment entity in the backend store.
Args:
trace_id: The ID of the trace.
assessment_id: The ID of the feedback assessment to update.
assessment: The updated assessment.
"""
return self.store.update_assessment(
trace_id=trace_id,
assessment_id=assessment_id,
name=assessment.name,
expectation=assessment.expectation,
feedback=assessment.feedback,
rationale=assessment.rationale,
metadata=assessment.metadata,
)
def delete_assessment(self, trace_id: str, assessment_id: str):
"""
Delete an assessment associated with a trace.
Args:
trace_id: The ID of the trace.
assessment_id: The ID of the assessment to delete.
"""
self.store.delete_assessment(trace_id=trace_id, assessment_id=assessment_id)
def _get_artifact_repo_for_trace(self, trace_info: TraceInfo):
artifact_uri = get_artifact_uri_for_trace(trace_info)
artifact_uri = add_databricks_profile_info_to_artifact_uri(artifact_uri, self.tracking_uri)
return get_artifact_repository(artifact_uri)
def _download_trace_data(self, trace_info: TraceInfo) -> TraceData:
"""
Download trace data from artifact repository.
Args:
trace_info: Either a TraceInfo or TraceInfoV3 object containing trace metadata.
Returns:
TraceData object representing the downloaded trace data.
"""
artifact_repo = self._get_artifact_repo_for_trace(trace_info)
return TraceData.from_dict(artifact_repo.download_trace_data())
def _upload_trace_data(self, trace_info: TraceInfo, trace_data: TraceData) -> None:
artifact_repo = self._get_artifact_repo_for_trace(trace_info)
trace_data_json = json.dumps(trace_data.to_dict(), cls=TraceJSONEncoder, ensure_ascii=False)
return artifact_repo.upload_trace_data(trace_data_json)
# TODO: Migrate this to the new association table
def link_prompt_versions_to_trace(
self, trace_id: str, prompts: Sequence[PromptVersion]
) -> None:
"""
Link multiple prompt versions to a trace.
Args:
trace_id: The ID of the trace to link prompts to.
prompts: List of PromptVersion objects to link to the trace.
"""
from mlflow.tracking._model_registry.utils import _get_store as _get_model_registry_store
registry_store = _get_model_registry_store()
registry_store.link_prompts_to_trace(prompt_versions=prompts, trace_id=trace_id)
def _set_experiment_trace_location(
self,
location: UCSchemaLocation,
experiment_id: str,
sql_warehouse_id: str | None = None,
) -> UCSchemaLocation:
if is_databricks_uri(self.tracking_uri):
return self.store.set_experiment_trace_location(
experiment_id=str(experiment_id),
location=location,
sql_warehouse_id=sql_warehouse_id,
)
raise MlflowException(
"Setting storage location is not supported on non-Databricks backends."
)
def _unset_experiment_trace_location(
self, experiment_id: str, location: UCSchemaLocation
) -> None:
if is_databricks_uri(self.tracking_uri):
self.store.unset_experiment_trace_location(str(experiment_id), location)
else:
raise MlflowException(
"Clearing storage location is not supported on non-Databricks backends."
)
| TracingClient |
python | jazzband__django-polymorphic | src/polymorphic/formsets/generic.py | {
"start": 1754,
"end": 4184
} | class ____(BaseGenericInlineFormSet, BasePolymorphicModelFormSet):
"""
Polymorphic formset variation for inline generic formsets
"""
def generic_polymorphic_inlineformset_factory(
model,
formset_children,
form=ModelForm,
formset=BaseGenericPolymorphicInlineFormSet,
ct_field="content_type",
fk_field="object_id",
# Base form
# TODO: should these fields be removed in favor of creating
# the base form as a formset child too?
fields=None,
exclude=None,
extra=1,
can_order=False,
can_delete=True,
max_num=None,
formfield_callback=None,
validate_max=False,
for_concrete_model=True,
min_num=None,
validate_min=False,
child_form_kwargs=None,
):
"""
Construct the class for a generic inline polymorphic formset.
All arguments are identical to :func:`~django.contrib.contenttypes.forms.generic_inlineformset_factory`,
with the exception of the ``formset_children`` argument.
:param formset_children: A list of all child :class:`PolymorphicFormSetChild` objects
that tell the inline how to render the child model types.
:type formset_children: Iterable[PolymorphicFormSetChild]
:rtype: type
"""
kwargs = {
"model": model,
"form": form,
"formfield_callback": formfield_callback,
"formset": formset,
"ct_field": ct_field,
"fk_field": fk_field,
"extra": extra,
"can_delete": can_delete,
"can_order": can_order,
"fields": fields,
"exclude": exclude,
"min_num": min_num,
"max_num": max_num,
"validate_min": validate_min,
"validate_max": validate_max,
"for_concrete_model": for_concrete_model,
# 'localized_fields': localized_fields,
# 'labels': labels,
# 'help_texts': help_texts,
# 'error_messages': error_messages,
# 'field_classes': field_classes,
}
if child_form_kwargs is None:
child_form_kwargs = {}
child_kwargs = {
# 'exclude': exclude,
"ct_field": ct_field,
"fk_field": fk_field,
}
if child_form_kwargs:
child_kwargs.update(child_form_kwargs)
FormSet = generic_inlineformset_factory(**kwargs)
FormSet.child_forms = polymorphic_child_forms_factory(formset_children, **child_kwargs)
return FormSet
| BaseGenericPolymorphicInlineFormSet |
python | apache__airflow | providers/sqlite/tests/unit/sqlite/hooks/test_sqlite.py | {
"start": 3202,
"end": 7081
} | class ____:
def setup_method(self):
self.cur = mock.MagicMock(rowcount=0)
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestSqliteHook(SqliteHook):
conn_name_attr = "test_conn_id"
log = mock.MagicMock()
def get_conn(self):
return conn
self.db_hook = UnitTestSqliteHook()
def test_get_first_record(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook.get_first(statement)
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook.get_records(statement)
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_df_pandas(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_df(statement, df_type="pandas")
assert column == df.columns[0]
assert result_sets[0][0] == df.values.tolist()[0][0]
assert result_sets[1][0] == df.values.tolist()[1][0]
self.cur.execute.assert_called_once_with(statement)
def test_get_df_polars(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
mock_execute = mock.MagicMock()
mock_execute.description = [(column, None, None, None, None, None, None)]
mock_execute.fetchall.return_value = result_sets
self.cur.execute.return_value = mock_execute
df = self.db_hook.get_df(statement, df_type="polars")
self.cur.execute.assert_called_once_with(statement)
mock_execute.fetchall.assert_called_once_with()
assert column == df.columns[0]
assert result_sets[0][0] == df.row(0)[0]
assert result_sets[1][0] == df.row(1)[0]
def test_run_log(self):
statement = "SQL"
self.db_hook.run(statement)
assert self.db_hook.log.info.call_count == 2
@pytest.mark.db_test
def test_generate_insert_sql_replace_false(self):
expected_sql = "INSERT INTO Customer (first_name, last_name) VALUES (?,?)"
rows = ("James", "1")
target_fields = ["first_name", "last_name"]
sql = self.db_hook._generate_insert_sql(
table="Customer", values=rows, target_fields=target_fields, replace=False
)
assert sql == expected_sql
@pytest.mark.db_test
def test_generate_insert_sql_replace_true(self):
expected_sql = "REPLACE INTO Customer (first_name, last_name) VALUES (?,?)"
rows = ("James", "1")
target_fields = ["first_name", "last_name"]
sql = self.db_hook._generate_insert_sql(
table="Customer", values=rows, target_fields=target_fields, replace=True
)
assert sql == expected_sql
@pytest.mark.db_test
def test_sqlalchemy_engine(self):
"""Test that the sqlalchemy engine is initialized"""
conn_id = "sqlite_default"
hook = SqliteHook(sqlite_conn_id=conn_id)
engine = hook.get_sqlalchemy_engine()
assert isinstance(engine, sqlalchemy.engine.Engine)
assert engine.name == "sqlite"
# Assert filepath of the sqliate DB is correct
assert engine.url.database == hook.get_connection(conn_id).host
| TestSqliteHook |
python | kamyu104__LeetCode-Solutions | Python/rearrange-array-elements-by-sign.py | {
"start": 44,
"end": 480
} | class ____(object):
def rearrangeArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
pos, neg = 0, 1
result = [0]*len(nums)
for x in nums:
if x > 0:
result[pos] = x
pos += 2
else:
result[neg] = x
neg += 2
return result
# Time: O(n)
# Space: O(1)
# generator
| Solution |
python | getsentry__sentry | tests/sentry/utils/sdk_crashes/test_sdk_crash_detection.py | {
"start": 5374,
"end": 6506
} | class ____(BaseSDKCrashDetectionMixin, SnubaTestCase):
@django_db_all
def test_sdk_crash_event_stored_to_sdk_crash_project(self) -> None:
cocoa_sdk_crashes_project = self.create_project(
name="Cocoa SDK Crashes",
slug="cocoa-sdk-crashes",
teams=[self.team],
fire_project_created=True,
)
event = self.create_event(
data=get_crash_event(),
project_id=self.project.id,
)
configs = build_sdk_configs()
configs[0].project_id = cocoa_sdk_crashes_project.id
sdk_crash_event = sdk_crash_detection.detect_sdk_crash(event=event, configs=configs)
assert sdk_crash_event is not None
event_store = SnubaEventStorage()
fetched_sdk_crash_event = event_store.get_event_by_id(
cocoa_sdk_crashes_project.id, sdk_crash_event.event_id
)
assert fetched_sdk_crash_event is not None
assert cocoa_sdk_crashes_project.id == fetched_sdk_crash_event.project_id
assert sdk_crash_event.event_id == fetched_sdk_crash_event.event_id
| SDKCrashReportTestMixin |
python | ray-project__ray | python/ray/serve/_private/request_router/request_router.py | {
"start": 6753,
"end": 14413
} | class ____:
"""Mixin for multiplex routing.
This mixin is used to route requests to replicas that are multiplexed.
It adds necessary attributes and methods to keep track of multiplexed
model IDs and offer the helpers to apply multiplex routing and rank
replicas based on multiplexed model IDs.
"""
    def __init__(self, *args, **kwargs):
        """Initialize multiplex bookkeeping; forwards to the next class in the MRO."""
        super().__init__(*args, **kwargs)
        # Reverse index: multiplexed model ID -> IDs of replicas that
        # currently have that model loaded.
        self._multiplexed_model_id_to_replica_ids: DefaultDict[
            str, Set[ReplicaID]
        ] = defaultdict(set)
        # When there is no match for a multiplexed model id, we will try to fall back
        # to all replicas immediately. This set is used to make sure we only fall back
        # once for concurrent requests for the same model id.
        # Whenever there is a match, we will remove the model id from this set.
        self._multiplexed_model_id_fallback_match: Set[str] = set()
        self._replica_id_set: Set[ReplicaID] = set()
        self._replicas: Dict[ReplicaID, RunningReplica] = {}
def _get_pending_request_matching_multiplexed_model_id(
self,
request_metadata: Optional[RequestMetadata] = None,
) -> Optional[PendingRequest]:
"""Matching pending request based on the request metadata."""
if request_metadata is None or not request_metadata.multiplexed_model_id:
return None
for pr in self._pending_requests_to_fulfill:
if (
not pr.future.done()
and pr.metadata.multiplexed_model_id
== request_metadata.multiplexed_model_id
):
return pr
def _update_multiplexed_model_ids_with_replicas(
self, replicas: List[RunningReplica]
):
"""Update the multiplexed model IDs based on the replicas.
This should be called when the replicas are updated.
"""
new_multiplexed_model_id_to_replica_ids = defaultdict(set)
for r in replicas:
for model_id in r.multiplexed_model_ids:
new_multiplexed_model_id_to_replica_ids[model_id].add(r.replica_id)
self._multiplexed_model_id_to_replica_ids = (
new_multiplexed_model_id_to_replica_ids
)
def _get_replica_ids_with_fewest_multiplexed_models(self) -> Set[str]:
"""Get the set of replicas that have the fewest multiplexed models loaded."""
candidates = set()
sorted_replicas = sorted(
self._replicas.values(), key=lambda x: len(x.multiplexed_model_ids)
)
least_num_multiplexed_model_ids = math.inf
for replica in sorted_replicas:
if len(replica.multiplexed_model_ids) <= least_num_multiplexed_model_ids:
candidates.add(replica.replica_id)
least_num_multiplexed_model_ids = len(replica.multiplexed_model_ids)
else:
break
return candidates
    @property
    def _multiplexed_matching_timeout(self) -> float:
        # Jittered timeout in [T, 2T) so concurrent requests don't all give
        # up on model-ID matching at the same instant.
        return random.uniform(
            RAY_SERVE_MULTIPLEXED_MODEL_ID_MATCHING_TIMEOUT_S,
            RAY_SERVE_MULTIPLEXED_MODEL_ID_MATCHING_TIMEOUT_S * 2,
        )
    def apply_multiplex_routing(
        self,
        pending_request: Optional[PendingRequest] = None,
    ) -> Set[ReplicaID]:
        """Apply multiplex routing to the pending request.
        When the request is None, return all replicas. Each call will try to
        route the request to the replicas that have the multiplexed model ID
        to the hierarchy of first the replicas with the multiplexed model ID,
        then the replicas with the fewest multiplexed models, and finally all
        replicas.
        Args:
            pending_request: The pending request to be routed based on
                multiplexed model policy.
        Returns:
            A set of replica IDs that are candidates for the existing
            routing call.
        """
        if not pending_request:
            return self._replica_id_set
        # Record when matching started for this request (first call only), so
        # the timeout below is measured across retries rather than per call.
        if not pending_request.routing_context.multiplexed_start_matching_time:
            pending_request.routing_context.multiplexed_start_matching_time = (
                time.time()
            )
        multiplexed_start_matching_time = (
            pending_request.routing_context.multiplexed_start_matching_time
        )
        multiplexed_model_id = pending_request.metadata.multiplexed_model_id
        if (
            time.time() - multiplexed_start_matching_time
            < self._multiplexed_matching_timeout
        ):
            candidate_replica_ids = self._multiplexed_model_id_to_replica_ids.get(
                multiplexed_model_id, None
            )
            if (
                not candidate_replica_ids
                and multiplexed_model_id
                not in self._multiplexed_model_id_fallback_match
            ) or pending_request.routing_context.tried_first_multiplexed_models:
                # When there is no match for a multiplexed model id
                # or when the replica(s) with the matching model id is busy,
                # first try to fall back to replicas with the fewest models.
                candidate_replica_ids = (
                    self._get_replica_ids_with_fewest_multiplexed_models()
                )
                self._multiplexed_model_id_fallback_match.add(multiplexed_model_id)
            elif candidate_replica_ids:
                # A match exists again: clear the fallback marker so a future
                # miss for this model ID may fall back once more.
                self._multiplexed_model_id_fallback_match.discard(multiplexed_model_id)
            pending_request.routing_context.tried_first_multiplexed_models = True
        elif not pending_request.routing_context.tried_fewest_multiplexed_models:
            # After the `_multiplexed_matching_timeout` is up, first try
            # routing to replicas that have the fewest models loaded.
            # We only try this once to avoid deterministically retrying on
            # the same replicas repeatedly.
            candidate_replica_ids = (
                self._get_replica_ids_with_fewest_multiplexed_models()
            )
            pending_request.routing_context.tried_fewest_multiplexed_models = True
        else:
            # If the timeout is up, and we've already tried the candidates
            # with the fewest models loaded, fall back to all replicas.
            candidate_replica_ids = self._replica_id_set
            pending_request.routing_context.should_backoff = True
        return candidate_replica_ids
def rank_replicas_via_multiplex(
self,
replicas: List[RunningReplica],
multiplexed_model_id: str,
) -> List[List[RunningReplica]]:
"""Rank the replicas based on the multiplexed model ID.
Rank 0 is the list of replicas that have the multiplexed model ID.
Rank 1 is the list of replicas that have the fewest multiplexed models.
Rank 2 is the list of all other replicas.
"""
replica_ids_with_multiplexed_model = (
self._multiplexed_model_id_to_replica_ids.get(multiplexed_model_id, set())
)
replica_ids_with_fewest_multiplexed_models = (
self._get_replica_ids_with_fewest_multiplexed_models()
)
ranked_replicas = [[] for _ in range(3)]
for replica in replicas:
if replica.replica_id in replica_ids_with_multiplexed_model:
ranked_replicas[0].append(replica)
elif replica.replica_id in replica_ids_with_fewest_multiplexed_models:
ranked_replicas[1].append(replica)
else:
ranked_replicas[2].append(replica)
return ranked_replicas
@PublicAPI(stability="alpha")
| MultiplexMixin |
python | pytorch__pytorch | torch/ao/nn/quantized/reference/modules/linear.py | {
"start": 161,
"end": 2254
} | class ____(nn.Linear, ReferenceQuantizedModule):
"""A reference quantized linear module that fits into the FX
Graph Mode Quantization workflow
activation will be floating point Tensor, we will store floating
point weight as well in the module, but in forward we'll quantize
and dequantize the weight before running the floating point functional
linear operator.
"""
_IS_REFERENCE = True
    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias_: bool = True,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        weight_qparams: dict[str, Any] | None = None,
    ) -> None:
        """Construct the float linear layer, then attach the weight
        quantization parameters used by the quant-dequant in ``forward``."""
        super().__init__(in_features, out_features, bias_, device, dtype)
        # Provided by ReferenceQuantizedModule; presumably installs defaults
        # when weight_qparams is None -- confirm in ReferenceQuantizedModule.
        self._init_weight_qparams(weight_qparams, device)
    def _get_name(self) -> str:
        # Shown in module printouts to mark this as the reference pattern.
        return "QuantizedLinear(Reference)"
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.linear ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.linear --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized linear
"""
weight_quant_dequant = self.get_weight()
result = F.linear(x, weight_quant_dequant, self.bias)
return result
@classmethod
def from_float(
cls, float_linear: nn.Linear, weight_qparams: dict[str, Any]
) -> "Linear":
qref_linear = Linear(
float_linear.in_features,
float_linear.out_features,
float_linear.bias is not None,
device=float_linear.weight.device,
dtype=float_linear.weight.dtype,
weight_qparams=weight_qparams,
)
qref_linear.weight = torch.nn.Parameter(float_linear.weight.detach())
if float_linear.bias is not None:
qref_linear.bias = torch.nn.Parameter(float_linear.bias.detach())
return qref_linear
| Linear |
python | keras-team__keras | keras/src/quantizers/gptq_core_test.py | {
"start": 566,
"end": 831
} | class ____(layers.Layer):
"""A block that contains no quantizable layers."""
    def __init__(self, **kwargs):
        """Build the block; its only sublayer is a LayerNormalization."""
        super().__init__(**kwargs)
        # LayerNormalization is the sole sublayer -- per the class docstring,
        # this block intentionally contains nothing quantizable.
        self.ln = layers.LayerNormalization()
    def call(self, inputs):
        # Pure pass-through normalization.
        return self.ln(inputs)
| EmptyBlock |
python | scikit-learn__scikit-learn | sklearn/model_selection/tests/test_validation.py | {
"start": 3587,
"end": 4710
} | class ____(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
    def __init__(self, n_max_train_sizes, expected_fit_params=None):
        """Incrementally-fit mock; optionally validates fit params in partial_fit.

        Args:
            n_max_train_sizes: forwarded to MockImprovingEstimator.
            expected_fit_params: names of fit params partial_fit must receive.
        """
        super().__init__(n_max_train_sizes)
        # Sentinel sample from the most recent partial_fit batch; used by
        # _is_training_data to recognize the training split.
        self.x = None
        self.expected_fit_params = expected_fit_params
    def _is_training_data(self, X):
        # True when X contains the sample remembered from the last
        # partial_fit call (i.e. X looks like the training data).
        return self.x in X
    def partial_fit(self, X, y=None, **params):
        """Record the batch size and validate expected fit params, if configured.

        Raises:
            AssertionError: when a configured expected fit param is missing,
                or when a provided expected param's length differs from len(X).
        """
        self.train_sizes += X.shape[0]
        self.x = X[0]
        if self.expected_fit_params:
            missing = set(self.expected_fit_params) - set(params)
            if missing:
                raise AssertionError(
                    f"Expected fit parameter(s) {list(missing)} not seen."
                )
            # Only params listed in expected_fit_params are length-checked;
            # extra params are deliberately ignored here.
            for key, value in params.items():
                if key in self.expected_fit_params and _num_samples(
                    value
                ) != _num_samples(X):
                    raise AssertionError(
                        f"Fit parameter {key} has length {_num_samples(value)}"
                        f"; expected {_num_samples(X)}."
                    )
| MockIncrementalImprovingEstimator |
python | pytorch__pytorch | test/distributed/checkpoint/test_async_process_executor.py | {
"start": 7318,
"end": 10939
} | class ____(DTensorTestBase):
"""Test suite for _ProcessGroupInitInfo."""
@with_comms
def test_process_group_init_info_with_default_pg(self) -> None:
"""Test that ProcessGroupInitInfo correctly initializes."""
with patch.dict(os.environ, {}, clear=False):
os.environ.pop("DCP_USE_PREFIX_STORE", None)
pg_init_info = _ProcessGroupInitInfo()
self.assertEqual(pg_init_info.global_rank, dist.get_rank())
self.assertEqual(pg_init_info.world_size, dist.get_world_size())
self.assertIsNotNone(pg_init_info.tcp_store_master_addr)
self.assertGreater(pg_init_info.tcp_store_master_port, 0)
self.assertEqual(pg_init_info.use_prefix_store, False)
    @with_comms
    def test_process_group_init_info_with_prefix_store_env_var(self) -> None:
        """Test that ProcessGroupInitInfo handles DCP_USE_PREFIX_STORE environment variable."""
        # Incomplete or invalid prefix-store configs must fail loudly with
        # CheckpointException rather than silently falling back.
        # Flag enabled, addr/port correctly defined
        with patch.dict(
            os.environ,
            {
                "DCP_USE_PREFIX_STORE": "1",
                "MASTER_ADDR": "localhost",
                "MASTER_PORT": "12345",
            },
        ):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertTrue(pg_init_info.use_prefix_store)
        # Missing port
        with patch.dict(
            os.environ, {"DCP_USE_PREFIX_STORE": "1", "MASTER_ADDR": "localhost"}
        ):
            with self.assertRaises(CheckpointException):
                pg_init_info = _ProcessGroupInitInfo()
        # Missing addr
        with patch.dict(
            os.environ, {"DCP_USE_PREFIX_STORE": "1", "MASTER_PORT": "12345"}
        ):
            with self.assertRaises(CheckpointException):
                pg_init_info = _ProcessGroupInitInfo()
        # Invalid port
        with patch.dict(
            os.environ,
            {
                "DCP_USE_PREFIX_STORE": "1",
                "MASTER_ADDR": "localhost",
                "MASTER_PORT": "a",
            },
        ):
            with self.assertRaises(CheckpointException):
                pg_init_info = _ProcessGroupInitInfo()
    @with_comms
    def test_process_group_init_info_without_prefix_store_env_var(self) -> None:
        """Test that ProcessGroupInitInfo defaults to not using prefix store."""
        # These cases pin down that any value other than the exact string "1"
        # ("0", missing, "2", "true", "false", "") leaves the prefix store off.
        # Env var set to 0
        with patch.dict(os.environ, {"DCP_USE_PREFIX_STORE": "0"}):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
        # Missing env var
        with patch.dict(os.environ, {}, clear=False):
            os.environ.pop("DCP_USE_PREFIX_STORE", None)
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
        # Invalid env var
        with patch.dict(os.environ, {"DCP_USE_PREFIX_STORE": "2"}):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
        with patch.dict(os.environ, {"DCP_USE_PREFIX_STORE": "true"}):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
        with patch.dict(os.environ, {"DCP_USE_PREFIX_STORE": "false"}):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
        with patch.dict(os.environ, {"DCP_USE_PREFIX_STORE": ""}):
            pg_init_info = _ProcessGroupInitInfo()
            self.assertFalse(pg_init_info.use_prefix_store)
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    run_tests()
| TestProcessGroupInitInfo |
python | Textualize__textual | examples/color_command.py | {
"start": 1121,
"end": 1695
} | class ____(App):
"""Experiment with the command palette."""
COMMANDS = App.COMMANDS | {ColorCommands}
TITLE = "Press ctrl + p and type a color"
    def compose(self) -> ComposeResult:
        # Only a header initially; color blocks are mounted on demand.
        yield Header()
    @on(SwitchColor)
    def switch_color(self, event: SwitchColor) -> None:
        """Adds a color block on demand."""
        color_block = ColorBlock(event.color)
        # Paint the block's background with the requested color before
        # mounting, then keep the newest block scrolled into view.
        color_block.styles.background = event.color
        self.mount(color_block)
        self.screen.scroll_end()
# Run the demo app when executed directly.
if __name__ == "__main__":
    app = ColorApp()
    app.run()
| ColorApp |
python | prabhupant__python-ds | data_structures/deque/deque.py | {
"start": 0,
"end": 1435
} | class ____():
    def __init__(self):
        # Backing store; index 0 is the front of the deque.
        self.data = list()
def push_front(self, elem):
temp = list()
temp.append(elem)
for i in self.data:
temp.append(i)
self.data = temp
    def push_back(self, elem):
        # Append elem at the back of the deque (end of the list).
        self.data.append(elem)
def pop_front(self):
temp = list()
for i in range(0, len(self.data)):
if not i==0:
temp.append(self.data[i])
self.data = temp
def pop_back(self):
temp = list()
for i in range(0, len(self.data)):
if not i==len(self.data)-1:
temp.append(self.data[i])
self.data = temp
def get_first(self):
if(len(self.data)>0):
return self.data[0]
else:
return "Deque is empty"
def get_last(self):
if(len(self.data)>0):
return self.data[len(self.data)-1]
else:
return "Deque is empty"
    def size(self):
        # Number of elements currently stored.
        return len(self.data)
def is_empty(self):
if len(self.data) == 0:
return True
return False
def contains(self, elem):
for i in self.data:
if i==elem:
return True
return False
def print_elements(self):
result = ""
for i in self.data:
result += str(i) + " | "
print(result) | Deque |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.