language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydantic__pydantic | tests/mypy/modules/plugin_fail_baseConfig.py | {
"start": 1374,
"end": 1510
} | class ____(BaseModel):
class Config:
from_attributes = list # not sensible, but should still be handled gracefully
| BadConfig2 |
python | ipython__ipython | tests/test_zzz_autoreload.py | {
"start": 2985,
"end": 5387
} | class ____(TestCase):
"""Fixture for creating test module files"""
test_dir = None
old_sys_path = None
filename_chars = "abcdefghijklmopqrstuvwxyz0123456789"
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.old_sys_path = list(sys.path)
sys.path.insert(0, self.test_dir)
self.shell = FakeShell()
def tearDown(self):
shutil.rmtree(self.test_dir)
sys.path = self.old_sys_path
self.test_dir = None
self.old_sys_path = None
self.shell = None
def get_module(self):
module_name = "tmpmod_" + "".join(random.sample(self.filename_chars, 20))
if module_name in sys.modules:
del sys.modules[module_name]
file_name = os.path.join(self.test_dir, module_name + ".py")
return module_name, file_name
def write_file(self, filename, content):
"""
Write a file, and force a timestamp difference of at least one second
Notes
-----
Python's .pyc files record the timestamp of their compilation
with a time resolution of one second.
Therefore, we need to force a timestamp difference between .py
and .pyc, without having the .py file be timestamped in the
future, and without changing the timestamp of the .pyc file
(because that is stored in the file). The only reliable way
to achieve this seems to be to sleep.
"""
content = textwrap.dedent(content)
# Sleep one second + eps
time.sleep(1.05)
# Write
with open(filename, "w", encoding="utf-8") as f:
f.write(content)
def new_module(self, code):
code = textwrap.dedent(code)
mod_name, mod_fn = self.get_module()
with open(mod_fn, "w", encoding="utf-8") as f:
f.write(code)
return mod_name, mod_fn
# -----------------------------------------------------------------------------
# Test automatic reloading
# -----------------------------------------------------------------------------
def pickle_get_current_class(obj):
"""
Original issue comes from pickle; hence the name.
"""
name = obj.__class__.__name__
module_name = getattr(obj, "__module__", None)
obj2 = sys.modules[module_name]
for subpath in name.split("."):
obj2 = getattr(obj2, subpath)
return obj2
| Fixture |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 31931,
"end": 32344
} | class ____(unittest.TestCase):
""" Tests escape character append. """
def testAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.ESCAPED_CHARS.append('|')
self.assertEqual('|' in md.ESCAPED_CHARS, True)
md2 = markdown.Markdown()
self.assertEqual('|' not in md2.ESCAPED_CHARS, True)
| TestEscapeAppend |
python | conda__conda | conda/exceptions.py | {
"start": 36612,
"end": 37594
} | class ____(CondaError):
def __init__(self, environment_location: PathType, **kwargs):
kwargs.update(
{
"environment_location": environment_location,
}
)
if on_win:
message = dals(
"""
The current user does not have write permissions to the target environment.
environment location: %(environment_location)s
"""
)
else:
message = dals(
"""
The current user does not have write permissions to the target environment.
environment location: %(environment_location)s
uid: %(uid)s
gid: %(gid)s
"""
)
kwargs.update(
{
"uid": os.geteuid(),
"gid": os.getegid(),
}
)
super().__init__(message, **kwargs)
| EnvironmentNotWritableError |
python | pikepdf__pikepdf | tests/test_canvas.py | {
"start": 7835,
"end": 10443
} | class ____:
def test_basic(self):
canvas = Canvas(page_size=(100, 100))
assert canvas.page_size == (100, 100)
with canvas.do.save_state(cm=Matrix().scaled(2, 2)):
canvas.do.stroke_color(Color(1, 0, 0, 1)).line_width(2).dashes(1, 1).line(
0, 0, 10, 10
)
canvas.do.fill_color(Color(0, 1, 0, 1)).rect(
10, 10, 10, 10, fill=False
).rect(10, 10, 5, 5, fill=True)
pdf = canvas.to_pdf()
assert len(pdf.pages) == 1
pdf.check_pdf_syntax()
def test_image(self, resources):
canvas = Canvas(page_size=(400, 100))
canvas.do.draw_image(resources / 'pink-palette-icc.png', 0, 0, 100, 100)
im = Image.open(resources / 'pink-palette-icc.png')
canvas.do.draw_image(im.convert('1'), 100, 0, 100, 100)
canvas.do.draw_image(im.convert('L'), 200, 0, 100, 100)
canvas.do.draw_image(im.convert('RGB'), 300, 0, 100, 100)
pdf = canvas.to_pdf()
pdf.check_pdf_syntax()
def test_text(self):
hello_msg = 'Hello, World!'
hello_arabic = 'مرحبا بالعالم'
canvas = Canvas(page_size=(100, 100))
text = Text()
text.font(Name.Helvetica, 12).render_mode(1).text_transform(
Matrix().translated(10, 10)
).horiz_scale(110).move_cursor(10, 10).show(hello_msg)
# This is cheating! We're using one of the 14 base PDF fonts for a quick
# test. If the resulting PDF is viewed, the result will not be in Arabic.
# This does not properly register the font. The point of this test is
# to ensure that the content stream is properly encoded.
rtltext = Text(TextDirection.RTL)
rtltext.font(Name.Helvetica, 12).render_mode(0).text_transform(
Matrix().translated(10, 10)
).move_cursor(50, 50).show(hello_arabic)
canvas.do.stroke_color(BLACK).draw_text(text)
canvas.do.fill_color(BLACK).draw_text(rtltext)
canvas.add_font(Name.Helvetica, Helvetica())
pdf = canvas.to_pdf()
pdf.check_pdf_syntax()
for msg in [hello_msg, hello_arabic]:
# str -> UTF-16 big endian bytes -> hex encoded str -> hex bytes
hex_bytes = msg.encode('utf-16be').hex().encode('ascii')
assert hex_bytes in pdf.pages[0].Contents.read_bytes()
def test_stack_abuse(self, caplog):
canvas = Canvas(page_size=(100, 100))
canvas.do.pop().pop()
canvas.to_pdf()
assert "Graphics state stack is not empty when page saved" in caplog.text
| TestCanvas |
python | vyperlang__vyper | tests/functional/builtins/codegen/abi_decode.py | {
"start": 407,
"end": 4203
} | class ____(Exception):
pass
def _strict_slice(payload, start, length):
if start < 0:
raise DecodeError(f"OOB {start}")
end = start + length
if end > len(payload):
raise DecodeError(f"OOB {start} + {length} (=={end}) > {len(payload)}")
return payload[start:end]
def _read_int(payload, ofst):
return int.from_bytes(_strict_slice(payload, ofst, 32))
# vyper abi_decode spec implementation
def spec_decode(typ: "VyperType", payload: bytes):
abi_t = typ.abi_type
lo, hi = abi_t.static_size(), abi_t.size_bound()
if not (lo <= len(payload) <= hi):
raise DecodeError(f"bad payload size {lo}, {len(payload)}, {hi}")
return _decode_r(abi_t, 0, payload)
def _decode_r(abi_t: ABIType, current_offset: int, payload: bytes):
if isinstance(abi_t, ABI_Tuple):
return tuple(_decode_multi_r(abi_t.subtyps, current_offset, payload))
if isinstance(abi_t, ABI_StaticArray):
n = abi_t.m_elems
subtypes = [abi_t.subtyp] * n
return _decode_multi_r(subtypes, current_offset, payload)
if isinstance(abi_t, ABI_DynamicArray):
bound = abi_t.elems_bound
n = _read_int(payload, current_offset)
if n > bound:
raise DecodeError("Dynarray too large")
# offsets in dynarray start from after the length word
current_offset += 32
subtypes = [abi_t.subtyp] * n
return _decode_multi_r(subtypes, current_offset, payload)
# sanity check
assert not abi_t.is_complex_type()
if isinstance(abi_t, ABI_Bytes):
bound = abi_t.bytes_bound
length = _read_int(payload, current_offset)
if length > bound:
raise DecodeError("bytes too large")
current_offset += 32 # size of length word
ret = _strict_slice(payload, current_offset, length)
# abi string doesn't actually define string decoder, so we
# just bytecast the output
if isinstance(abi_t, ABI_String):
# match eth-stdlib, since that's what we check against
ret = ret.decode(errors="surrogateescape")
return ret
# sanity check
assert not abi_t.is_dynamic()
if isinstance(abi_t, ABI_GIntM):
ret = _read_int(payload, current_offset)
# handle signedness
if abi_t.signed:
ret = unsigned_to_signed(ret, 256, strict=True)
# bounds check
lo, hi = int_bounds(signed=abi_t.signed, bits=abi_t.m_bits)
if not (lo <= ret <= hi):
u = "" if abi_t.signed else "u"
raise DecodeError(f"invalid {u}int{abi_t.m_bits}")
if isinstance(abi_t, ABI_Address):
return to_checksum_address(ret.to_bytes(20, "big"))
if isinstance(abi_t, ABI_Bool):
if ret not in (0, 1):
raise DecodeError("invalid bool")
return ret
return ret
if isinstance(abi_t, ABI_BytesM):
ret = _strict_slice(payload, current_offset, 32)
m = abi_t.m_bytes
assert 1 <= m <= 32 # internal sanity check
# BytesM is right-padded with zeroes
if ret[m:] != b"\x00" * (32 - m):
raise DecodeError(f"invalid bytes{m}")
return ret[:m]
raise RuntimeError("unreachable")
def _decode_multi_r(types: Iterable[ABIType], outer_offset: int, payload: bytes) -> list:
ret = []
static_ofst = outer_offset
for sub_t in types:
if sub_t.is_dynamic():
# "head" terminology from abi spec
head = _read_int(payload, static_ofst)
ofst = outer_offset + head
else:
ofst = static_ofst
item = _decode_r(sub_t, ofst, payload)
ret.append(item)
static_ofst += sub_t.embedded_static_size()
return ret
| DecodeError |
python | Lightning-AI__lightning | src/lightning/pytorch/serve/servable_module.py | {
"start": 110,
"end": 3288
} | class ____(ABC, torch.nn.Module):
"""The ServableModule provides a simple API to make your model servable.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Here is an example of how to use the ``ServableModule`` module.
.. code-block:: python
from typing import Dict, Any, Callable
import torch
from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.serve.servable_module_validator import ServableModule, ServableModuleValidator
class ServableBoringModel(BoringModel, ServableModule):
def configure_payload(self) -> Dict[str, Any]:
return {"body": {"x": list(range(32))}}
def configure_serialization(self) -> Tuple[Dict[str, Callable], Dict[str, Callable]]:
def deserialize(x):
return torch.tensor(x, dtype=torch.float)
def serialize(x):
return x.tolist()
return {"x": deserialize}, {"output": serialize}
def serve_step(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
return {"output": torch.tensor([0, 1])}
def configure_response(self):
return {"output": [0, 1]}
serve_cb = ServableModuleValidator()
trainer = Trainer(
max_epochs=1,
limit_train_batches=2,
limit_val_batches=0,
callbacks=[serve_cb],
)
trainer.fit(ServableBoringModel())
assert serve_cb.resp.json() == {"output": [0, 1]}
"""
@abstractmethod
def configure_payload(self) -> dict[str, Any]:
"""Returns a request payload as a dictionary."""
@abstractmethod
def configure_serialization(self) -> tuple[dict[str, Callable], dict[str, Callable]]:
"""Returns a tuple of dictionaries.
The first dictionary contains the name of the ``serve_step`` input variables name as its keys
and the associated de-serialization function (e.g function to convert a payload to tensors).
The second dictionary contains the name of the ``serve_step`` output variables name as its keys
and the associated serialization function (e.g function to convert a tensors into payload).
"""
@abstractmethod
def serve_step(self, *args: Tensor, **kwargs: Tensor) -> dict[str, Tensor]:
r"""Returns the predictions of your model as a dictionary.
.. code-block:: python
def serve_step(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
return {"predictions": self(x)}
Args:
args: The output from de-serializer functions provided by the ``configure_serialization`` hook.
kwargs: The keyword output of the de-serializer functions provided by the ``configure_serialization`` hook.
Return:
- ``dict`` - A dictionary with their associated tensors.
"""
@abstractmethod
def configure_response(self) -> dict[str, Any]:
"""Returns a response to validate the server response."""
| ServableModule |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict1.py | {
"start": 249,
"end": 299
} | class ____(TypedDict, total=not_total):
pass
| TD1 |
python | readthedocs__readthedocs.org | readthedocs/sso/models.py | {
"start": 140,
"end": 1548
} | class ____(models.Model):
"""Single Sign-On integration for an Organization."""
PROVIDER_ALLAUTH = "allauth"
PROVIDER_EMAIL = "email"
PROVIDER_SAML = "saml"
PROVIDER_CHOICES = (
(PROVIDER_ALLAUTH, "AllAuth"),
(PROVIDER_EMAIL, "Email"),
(PROVIDER_SAML, "SAML"),
)
name = models.CharField(
max_length=128,
null=True,
blank=True,
)
token = models.UUIDField(
unique=True,
default=uuid.uuid4,
# editable=False,
)
organization = models.OneToOneField(
"organizations.Organization",
on_delete=models.CASCADE,
)
provider = models.CharField(
choices=PROVIDER_CHOICES,
max_length=32,
)
saml_app = models.OneToOneField(
"socialaccount.SocialApp",
related_name="sso_integration",
on_delete=models.CASCADE,
null=True,
blank=True,
)
domains = models.ManyToManyField(
"sso.SSODomain",
related_name="ssointegrations",
blank=True,
)
using_old_dashboard = models.BooleanField(
default=False,
null=True,
blank=True,
help_text=(
"Whether the SSO integration is using the old dashboard for authentication. Mainly used for SAML integrations."
),
)
def __str__(self):
return self.name or self.provider
| SSOIntegration |
python | astropy__astropy | astropy/time/tests/test_methods.py | {
"start": 597,
"end": 2143
} | class ____:
def setup_class(cls):
mjd = np.arange(50000, 50010)
frac = np.arange(0.0, 0.999, 0.2)
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t0 = {
"not_masked": Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc"),
"masked": Time(mjd[:, np.newaxis] + frac_masked, format="mjd", scale="utc"),
}
cls.t1 = {
"not_masked": Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=("45d", "50d"),
),
"masked": Time(
mjd[:, np.newaxis] + frac_masked,
format="mjd",
scale="utc",
location=("45d", "50d"),
),
}
cls.t2 = {
"not_masked": Time(
mjd[:, np.newaxis] + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
),
"masked": Time(
mjd[:, np.newaxis] + frac_masked,
format="mjd",
scale="utc",
location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
),
}
def create_data(self, use_mask):
self.t0 = self.__class__.t0[use_mask]
self.t1 = self.__class__.t1[use_mask]
self.t2 = self.__class__.t2[use_mask]
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
| ShapeSetup |
python | donnemartin__interactive-coding-challenges | stacks_queues/stack/test_stack.py | {
"start": 18,
"end": 1158
} | class ____(unittest.TestCase):
# TODO: It would be better if we had unit tests for each
# method in addition to the following end-to-end test
def test_end_to_end(self):
print('Test: Empty stack')
stack = Stack()
self.assertEqual(stack.peek(), None)
self.assertEqual(stack.pop(), None)
print('Test: One element')
top = Node(5)
stack = Stack(top)
self.assertEqual(stack.pop(), 5)
self.assertEqual(stack.peek(), None)
print('Test: More than one element')
stack = Stack()
stack.push(1)
stack.push(2)
stack.push(3)
self.assertEqual(stack.pop(), 3)
self.assertEqual(stack.peek(), 2)
self.assertEqual(stack.pop(), 2)
self.assertEqual(stack.peek(), 1)
self.assertEqual(stack.is_empty(), False)
self.assertEqual(stack.pop(), 1)
self.assertEqual(stack.peek(), None)
self.assertEqual(stack.is_empty(), True)
print('Success: test_end_to_end')
def main():
test = TestStack()
test.test_end_to_end()
if __name__ == '__main__':
main()
| TestStack |
python | run-llama__llama_index | llama-index-integrations/graph_stores/llama-index-graph-stores-nebula/tests/test_property_graph.py | {
"start": 548,
"end": 3652
} | class ____(TestCase):
@classmethod
def setUp(self) -> None:
try:
g = get_store()
except RuntimeError:
raise SkipTest("NebulaGraph service is not available")
def test_add(self) -> None:
g = get_store()
e1 = EntityNode(name="e1")
e2 = EntityNode(name="e2")
r = Relation(label="r", source_id=e1.id, target_id=e2.id)
g.upsert_nodes([e1, e2])
g.upsert_relations([r])
triplets = g.get_triplets(entity_names=["e1"])
assert len(triplets) == 1
def test_delete(self) -> None:
g = get_store()
e1 = EntityNode(name="e1")
e2 = EntityNode(name="e2")
r = Relation(label="r", source_id=e1.id, target_id=e2.id)
g.upsert_nodes([e1, e2])
g.upsert_relations([r])
g.delete(ids=[e1.id])
assert len(g.get_triplets()) == 0
def test_get(self) -> None:
g = get_store()
e1 = EntityNode(name="e1")
e2 = EntityNode(name="e2", properties={"key": "value"})
r = Relation(label="r", source_id=e1.id, target_id=e2.id)
g.upsert_nodes([e1, e2])
g.upsert_relations([r])
assert g.get_triplets() == []
assert g.get_triplets(entity_names=["e1"]) == [(e1, r, e2)]
assert g.get_triplets(entity_names=["e2"]) == [(e1, r, e2)]
assert g.get_triplets(relation_names=["r"]) == [(e1, r, e2)]
assert g.get_triplets(properties={"key": "value"}) == [(e1, r, e2)]
def test_delete_node_by_node_ids(self) -> None:
g = get_store()
n1 = TextNode(id_="n1", text="n1")
n2 = TextNode(id_="n2", text="n2")
g.upsert_llama_nodes([n1, n2])
g.delete_llama_nodes(node_ids=["n1"])
all_nodes = g.get_all_nodes()
assert len(all_nodes) == 1
def test_delete_node_by_ref_doc_ids(self) -> None:
# TODO: Not yet passed for now
pass
# g = get_store()
# n1 = TextNode(
# id_="n1",
# text="n1",
# relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="n2")},
# )
# n2 = TextNode(id_="n2", text="n2")
# g.upsert_llama_nodes([n1, n2])
# g.delete_llama_nodes(ref_doc_ids=["n2"])
# all_nodes = g.get_all_nodes()
# assert len(all_nodes) == 0
# # g = SimplePropertyGraphStore()
# n1 = TextNode(
# id_="n1",
# text="n1",
# relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="n3")},
# )
# n2 = TextNode(id_="n2", text="n2")
# g.upsert_llama_nodes([n1, n2])
# g.delete_llama_nodes(ref_doc_ids=["n3"])
# all_nodes = g.get_all_nodes()
# assert len(all_nodes) == 1
def test_get_nodes(self) -> None:
g = get_store()
n1 = TextNode(id_="n1", text="n1")
n2 = TextNode(id_="n2", text="n2")
g.upsert_llama_nodes([n1, n2])
retrieved = g.get_llama_nodes(["n1", "n2"])
assert retrieved == [n1, n2] or retrieved == [n2, n1]
| TestPropertyGraphStore |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 58400,
"end": 60347
} | class ____:
def test_matrix_rank(self):
# Full rank matrix
assert_equal(4, matrix_rank(np.eye(4)))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(matrix_rank(I), 3)
# All zeros - zero rank
assert_equal(matrix_rank(np.zeros((4, 4))), 0)
# 1 dimension - rank 1 unless all 0
assert_equal(matrix_rank([1, 0, 0, 0]), 1)
assert_equal(matrix_rank(np.zeros((4,))), 0)
# accepts array-like
assert_equal(matrix_rank([1]), 1)
# greater than 2 dimensions treated as stacked matrices
ms = np.array([I, np.eye(4), np.zeros((4, 4))])
assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
# works on scalar
assert_equal(matrix_rank(1), 1)
with assert_raises_regex(
ValueError, "`tol` and `rtol` can\'t be both set."
):
matrix_rank(I, tol=0.01, rtol=0.01)
def test_symmetric_rank(self):
assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(3, matrix_rank(I, hermitian=True))
# manually supplied tolerance
I[-1, -1] = 1e-8
assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
| TestMatrixRank |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/_line.py | {
"start": 233,
"end": 21814
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"dash",
"reversescale",
"showscale",
"width",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in `line.color` is set
to a numerical array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `line.color`) or the bounds
set in `line.cmin` and `line.cmax` Has an effect only if in
`line.color` is set to a numerical array. Defaults to `false`
when `line.cmin` and `line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmin` must
be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `line.cmin`
and/or `line.cmax` to be equidistant to this point. Has an
effect only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color`. Has no
effect when `line.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmax` must
be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the line color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to `line.cmin`
and `line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scatter3d.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.line.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.scatter3d.line.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `line.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`line.cmin` and `line.cmax`. Alternatively, `colorscale` may be
a palette name string of the following list: Blackbody,Bluered,
Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portla
nd,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def dash(self):
"""
Sets the dash style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['dash', 'dashdot', 'dot', 'longdash', 'longdashdot',
'solid']
Returns
-------
Any
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`line.color` is set to a numerical array. If true, `line.cmin`
will correspond to the last color in the array and `line.cmax`
will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `line.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in
`line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
or the bounds set in `line.cmin` and `line.cmax` Has an
effect only if in `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
this point. Has an effect only if in `line.color` is
set to a numerical array. Value should have the same
units as in `line.color`. Has no effect when
`line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatter3d.line.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `line.cmin` and `line.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
dash
Sets the dash style of the lines.
reversescale
Reverses the color mapping if true. Has an effect only
if in `line.color` is set to a numerical array. If
true, `line.cmin` will correspond to the last color in
the array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `line.color` is
set to a numerical array.
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
dash=None,
reversescale=None,
showscale=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`line.colorscale`. Has an effect only if in
`line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
or the bounds set in `line.cmin` and `line.cmax` Has an
effect only if in `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
this point. Has an effect only if in `line.color` is
set to a numerical array. Value should have the same
units as in `line.color`. Has no effect when
`line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatter3d.line.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `line.cmin` and `line.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
dash
Sets the dash style of the lines.
reversescale
Reverses the color mapping if true. Has an effect only
if in `line.color` is set to a numerical array. If
true, `line.cmin` will correspond to the last color in
the array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `line.color` is
set to a numerical array.
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("dash", arg, dash)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | davidhalter__jedi | jedi/inference/gradual/typing.py | {
"start": 7524,
"end": 8802
} | class ____(ProxyWithGenerics, _TypingClassMixin):
def infer_type_vars(self, value_set):
type_var_dict = {}
annotation_generics = self.get_generics()
if not annotation_generics:
return type_var_dict
annotation_name = self.py__name__()
if annotation_name == 'Type':
return annotation_generics[0].infer_type_vars(
# This is basically a trick to avoid extra code: We execute the
# incoming classes to be able to use the normal code for type
# var inference.
value_set.execute_annotation(),
)
elif annotation_name == 'Callable':
if len(annotation_generics) == 2:
return annotation_generics[1].infer_type_vars(
value_set.execute_annotation(),
)
elif annotation_name == 'Tuple':
tuple_annotation, = self.execute_annotation()
return tuple_annotation.infer_type_vars(value_set)
return type_var_dict
def _create_instance_with_generics(self, generics_manager):
return TypingClassWithGenerics(
self.parent_context,
self._tree_name,
generics_manager
)
| TypingClassWithGenerics |
python | sympy__sympy | sympy/stats/stochastic_process_types.py | {
"start": 10183,
"end": 10854
} | class ____(StochasticProcess):
"""
Base class for all continuous time stochastic process.
"""
def __call__(self, time):
"""
For indexing continuous time stochastic processes.
Returns
=======
RandomIndexedSymbol
"""
time = sympify(time)
if not time.is_symbol and time not in self.index_set:
raise IndexError("%s is not in the index set of %s"%(time, self.symbol))
func_obj = Function(self.symbol)(time)
pspace_obj = StochasticPSpace(self.symbol, self, self.distribution(time))
return RandomIndexedSymbol(func_obj, pspace_obj)
| ContinuousTimeStochasticProcess |
python | gevent__gevent | src/greentest/3.11/test_ftplib.py | {
"start": 3551,
"end": 9037
} | class ____(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn, encoding=DEFAULT_ENCODING):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = bytearray()
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
self.encoding = encoding
# We use this as the string IPv4 address to direct the client
# to in response to a PASV command. To test security behavior.
# https://bugs.python.org/issue43285/.
self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode(self.encoding)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
default_error_handler()
def push(self, data):
asynchat.async_chat.push(self, data.encode(self.encoding) + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
ip = self.fake_pasv_server_ip
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
| DummyFTPHandler |
python | python-poetry__poetry | src/poetry/publishing/publisher.py | {
"start": 345,
"end": 2933
} | class ____:
"""
Registers and publishes packages to remote repositories.
"""
def __init__(self, poetry: Poetry, io: IO, dist_dir: Path | None = None) -> None:
self._poetry = poetry
self._package = poetry.package
self._io = io
self._uploader = Uploader(poetry, io, dist_dir)
self._authenticator = Authenticator(poetry.config, self._io)
@property
def files(self) -> list[Path]:
return self._uploader.files
def publish(
self,
repository_name: str | None,
username: str | None,
password: str | None,
cert: Path | None = None,
client_cert: Path | None = None,
dry_run: bool = False,
skip_existing: bool = False,
) -> None:
if not repository_name:
url = "https://upload.pypi.org/legacy/"
repository_name = "pypi"
else:
# Retrieving config information
url = self._poetry.config.get(f"repositories.{repository_name}.url")
if url is None:
raise RuntimeError(f"Repository {repository_name} is not defined")
if not (username and password):
# Check if we have a token first
token = self._authenticator.get_pypi_token(repository_name)
if token:
logger.debug("Found an API token for %s.", repository_name)
username = "__token__"
password = token
else:
auth = self._authenticator.get_http_auth(repository_name)
if auth:
logger.debug(
"Found authentication information for %s.", repository_name
)
username = auth.username
password = auth.password
certificates = self._authenticator.get_certs_for_repository(repository_name)
resolved_cert = cert or certificates.cert or certificates.verify
resolved_client_cert = client_cert or certificates.client_cert
self._uploader.auth(username, password)
if repository_name == "pypi":
repository_name = "PyPI"
self._io.write_line(
f"Publishing <c1>{self._package.pretty_name}</c1>"
f" (<c2>{self._package.pretty_version}</c2>) to"
f" <info>{repository_name}</info>"
)
self._uploader.upload(
url,
cert=resolved_cert,
client_cert=resolved_client_cert,
dry_run=dry_run,
skip_existing=skip_existing,
)
| Publisher |
python | tiangolo__fastapi | docs_src/response_model/tutorial006.py | {
"start": 104,
"end": 848
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: float = 10.5
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The Bar fighters", "price": 62, "tax": 20.2},
"baz": {
"name": "Baz",
"description": "There goes my baz",
"price": 50.2,
"tax": 10.5,
},
}
@app.get(
"/items/{item_id}/name",
response_model=Item,
response_model_include=["name", "description"],
)
async def read_item_name(item_id: str):
return items[item_id]
@app.get("/items/{item_id}/public", response_model=Item, response_model_exclude=["tax"])
async def read_item_public_data(item_id: str):
return items[item_id]
| Item |
python | apache__airflow | providers/databricks/tests/unit/databricks/sensors/test_databricks_sql.py | {
"start": 1704,
"end": 3853
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
self.sensor = DatabricksSqlSensor(
task_id=TASK_ID,
databricks_conn_id=DEFAULT_CONN_ID,
sql_warehouse_name=DEFAULT_SQL_WAREHOUSE,
dag=self.dag,
sql=DEFAULT_SQL,
schema=DEFAULT_SCHEMA,
catalog=DEFAULT_CATALOG,
timeout=30,
poke_interval=15,
)
def test_init(self):
assert self.sensor.databricks_conn_id == "databricks_default"
assert self.sensor.task_id == "db-sensor"
assert self.sensor._sql_warehouse_name == "sql_warehouse_default"
assert self.sensor.poke_interval == 15
@pytest.mark.parametrize(
argnames=("sensor_poke_result", "expected_poke_result"), argvalues=[(True, True), (False, False)]
)
@patch.object(DatabricksSqlSensor, "poke")
def test_poke(self, mock_poke, sensor_poke_result, expected_poke_result):
mock_poke.return_value = sensor_poke_result
assert self.sensor.poke({}) == expected_poke_result
def test_sql_warehouse_http_path(self):
"""Neither SQL warehouse name not HTTP path has been specified."""
_sensor_without_sql_warehouse_http = DatabricksSqlSensor(
task_id="task2",
databricks_conn_id=DEFAULT_CONN_ID,
dag=self.dag,
sql=DEFAULT_SQL,
schema=DEFAULT_SCHEMA,
catalog=DEFAULT_CATALOG,
timeout=30,
poke_interval=15,
)
with pytest.raises(AirflowException):
_sensor_without_sql_warehouse_http._get_results()
def test_fail__get_results(self):
self.sensor._http_path = None
self.sensor._sql_warehouse_name = None
with pytest.raises(
AirflowException,
match="Databricks SQL warehouse/cluster configuration missing."
" Please specify either http_path or sql_warehouse_name.",
):
self.sensor._get_results()
| TestDatabricksSqlSensor |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 8873,
"end": 10188
} | class ____(nn.Module):
def __init__(self, config: Pix2StructVisionConfig):
super().__init__()
self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
| Pix2StructVisionMlp |
python | has2k1__plotnine | plotnine/composition/_stack.py | {
"start": 217,
"end": 1493
} | class ____(Compose):
"""
Place plots or compositions on top of each other
**Usage**
plot / plot
plot / composition
composition / plot
composition / composition
Typically, you will use this class through the `/` operator.
See Also
--------
plotnine.composition.Beside : To arrange plots side by side
plotnine.composition.Wrap : To arrange plots in a grid
plotnine.composition.plot_spacer : To add a blank space between plots
plotnine.composition.Compose : For more on composing plots
"""
def __truediv__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a row
"""
# This is an adjacent div i.e. (DIV | rhs) so we collapse the
# operands into a single operation
return Stack([*self, rhs]) + self.layout
def __or__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a column
"""
from ._beside import Beside
return Beside([self, rhs])
def __add__(self, rhs):
"""
Add rhs into the stacking composition
"""
from plotnine import ggplot
if not isinstance(rhs, (ggplot, Compose)):
return super().__add__(rhs)
return self / rhs
| Stack |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 2986,
"end": 3228
} | class ____(RootModel[dict[str, Any]]):
model_config = {
"json_schema_extra": {
"$ref": create_definition_ref("io.k8s.api.core.v1.ResourceRequirements"),
"additionalProperties": True,
}
}
| Resources |
python | bokeh__bokeh | tests/unit/bokeh/core/property/_util_property.py | {
"start": 1749,
"end": 1921
} | class ____(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, default=[1, 2, 3])
zz = Dict(String, Int)
s = Nullable(String ,default=None)
| _TestModel |
python | getsentry__sentry-python | sentry_sdk/profiler/transaction_profiler.py | {
"start": 6035,
"end": 17661
} | class ____:
def __init__(
self,
sampled, # type: Optional[bool]
start_ns, # type: int
hub=None, # type: Optional[sentry_sdk.Hub]
scheduler=None, # type: Optional[Scheduler]
):
# type: (...) -> None
self.scheduler = _scheduler if scheduler is None else scheduler
self.event_id = uuid.uuid4().hex # type: str
self.sampled = sampled # type: Optional[bool]
# Various framework integrations are capable of overwriting the active thread id.
# If it is set to `None` at the end of the profile, we fall back to the default.
self._default_active_thread_id = get_current_thread_meta()[0] or 0 # type: int
self.active_thread_id = None # type: Optional[int]
try:
self.start_ns = start_ns # type: int
except AttributeError:
self.start_ns = 0
self.stop_ns = 0 # type: int
self.active = False # type: bool
self.indexed_frames = {} # type: Dict[FrameId, int]
self.indexed_stacks = {} # type: Dict[StackId, int]
self.frames = [] # type: List[ProcessedFrame]
self.stacks = [] # type: List[ProcessedStack]
self.samples = [] # type: List[ProcessedSample]
self.unique_samples = 0
# Backwards compatibility with the old hub property
self._hub = None # type: Optional[sentry_sdk.Hub]
if hub is not None:
self._hub = hub
warnings.warn(
"The `hub` parameter is deprecated. Please do not use it.",
DeprecationWarning,
stacklevel=2,
)
def update_active_thread_id(self):
# type: () -> None
self.active_thread_id = get_current_thread_meta()[0]
logger.debug(
"[Profiling] updating active thread id to {tid}".format(
tid=self.active_thread_id
)
)
def _set_initial_sampling_decision(self, sampling_context):
# type: (SamplingContext) -> None
"""
Sets the profile's sampling decision according to the following
precedence rules:
1. If the transaction to be profiled is not sampled, that decision
will be used, regardless of anything else.
2. Use `profiles_sample_rate` to decide.
"""
# The corresponding transaction was not sampled,
# so don't generate a profile for it.
if not self.sampled:
logger.debug(
"[Profiling] Discarding profile because transaction is discarded."
)
self.sampled = False
return
# The profiler hasn't been properly initialized.
if self.scheduler is None:
logger.debug(
"[Profiling] Discarding profile because profiler was not started."
)
self.sampled = False
return
client = sentry_sdk.get_client()
if not client.is_active():
self.sampled = False
return
options = client.options
if callable(options.get("profiles_sampler")):
sample_rate = options["profiles_sampler"](sampling_context)
elif options["profiles_sample_rate"] is not None:
sample_rate = options["profiles_sample_rate"]
else:
sample_rate = options["_experiments"].get("profiles_sample_rate")
# The profiles_sample_rate option was not set, so profiling
# was never enabled.
if sample_rate is None:
logger.debug(
"[Profiling] Discarding profile because profiling was not enabled."
)
self.sampled = False
return
if not is_valid_sample_rate(sample_rate, source="Profiling"):
logger.warning(
"[Profiling] Discarding profile because of invalid sample rate."
)
self.sampled = False
return
# Now we roll the dice. random.random is inclusive of 0, but not of 1,
# so strict < is safe here. In case sample_rate is a boolean, cast it
# to a float (True becomes 1.0 and False becomes 0.0)
self.sampled = random.random() < float(sample_rate)
if self.sampled:
logger.debug("[Profiling] Initializing profile")
else:
logger.debug(
"[Profiling] Discarding profile because it's not included in the random sample (sample rate = {sample_rate})".format(
sample_rate=float(sample_rate)
)
)
def start(self):
# type: () -> None
if not self.sampled or self.active:
return
assert self.scheduler, "No scheduler specified"
logger.debug("[Profiling] Starting profile")
self.active = True
if not self.start_ns:
self.start_ns = nanosecond_time()
self.scheduler.start_profiling(self)
def stop(self):
# type: () -> None
if not self.sampled or not self.active:
return
assert self.scheduler, "No scheduler specified"
logger.debug("[Profiling] Stopping profile")
self.active = False
self.stop_ns = nanosecond_time()
def __enter__(self):
# type: () -> Profile
scope = sentry_sdk.get_isolation_scope()
old_profile = scope.profile
scope.profile = self
self._context_manager_state = (scope, old_profile)
self.start()
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
with capture_internal_exceptions():
self.stop()
scope, old_profile = self._context_manager_state
del self._context_manager_state
scope.profile = old_profile
def write(self, ts, sample):
# type: (int, ExtractedSample) -> None
if not self.active:
return
if ts < self.start_ns:
return
offset = ts - self.start_ns
if offset > MAX_PROFILE_DURATION_NS:
self.stop()
return
self.unique_samples += 1
elapsed_since_start_ns = str(offset)
for tid, (stack_id, frame_ids, frames) in sample:
try:
# Check if the stack is indexed first, this lets us skip
# indexing frames if it's not necessary
if stack_id not in self.indexed_stacks:
for i, frame_id in enumerate(frame_ids):
if frame_id not in self.indexed_frames:
self.indexed_frames[frame_id] = len(self.indexed_frames)
self.frames.append(frames[i])
self.indexed_stacks[stack_id] = len(self.indexed_stacks)
self.stacks.append(
[self.indexed_frames[frame_id] for frame_id in frame_ids]
)
self.samples.append(
{
"elapsed_since_start_ns": elapsed_since_start_ns,
"thread_id": tid,
"stack_id": self.indexed_stacks[stack_id],
}
)
except AttributeError:
# For some reason, the frame we get doesn't have certain attributes.
# When this happens, we abandon the current sample as it's bad.
capture_internal_exception(sys.exc_info())
def process(self):
# type: () -> ProcessedProfile
# This collects the thread metadata at the end of a profile. Doing it
# this way means that any threads that terminate before the profile ends
# will not have any metadata associated with it.
thread_metadata = {
str(thread.ident): {
"name": str(thread.name),
}
for thread in threading.enumerate()
} # type: Dict[str, ProcessedThreadMetadata]
return {
"frames": self.frames,
"stacks": self.stacks,
"samples": self.samples,
"thread_metadata": thread_metadata,
}
def to_json(self, event_opt, options):
# type: (Event, Dict[str, Any]) -> Dict[str, Any]
profile = self.process()
set_in_app_in_frames(
profile["frames"],
options["in_app_exclude"],
options["in_app_include"],
options["project_root"],
)
return {
"environment": event_opt.get("environment"),
"event_id": self.event_id,
"platform": "python",
"profile": profile,
"release": event_opt.get("release", ""),
"timestamp": event_opt["start_timestamp"],
"version": "1",
"device": {
"architecture": platform.machine(),
},
"os": {
"name": platform.system(),
"version": platform.release(),
},
"runtime": {
"name": platform.python_implementation(),
"version": platform.python_version(),
},
"transactions": [
{
"id": event_opt["event_id"],
"name": event_opt["transaction"],
# we start the transaction before the profile and this is
# the transaction start time relative to the profile, so we
# hardcode it to 0 until we can start the profile before
"relative_start_ns": "0",
# use the duration of the profile instead of the transaction
# because we end the transaction after the profile
"relative_end_ns": str(self.stop_ns - self.start_ns),
"trace_id": event_opt["contexts"]["trace"]["trace_id"],
"active_thread_id": str(
self._default_active_thread_id
if self.active_thread_id is None
else self.active_thread_id
),
}
],
}
def valid(self):
# type: () -> bool
client = sentry_sdk.get_client()
if not client.is_active():
return False
if not has_profiling_enabled(client.options):
return False
if self.sampled is None or not self.sampled:
if client.transport:
client.transport.record_lost_event(
"sample_rate", data_category="profile"
)
return False
if self.unique_samples < PROFILE_MINIMUM_SAMPLES:
if client.transport:
client.transport.record_lost_event(
"insufficient_data", data_category="profile"
)
logger.debug("[Profiling] Discarding profile because insufficient samples.")
return False
return True
@property
def hub(self):
# type: () -> Optional[sentry_sdk.Hub]
warnings.warn(
"The `hub` attribute is deprecated. Please do not access it.",
DeprecationWarning,
stacklevel=2,
)
return self._hub
@hub.setter
def hub(self, value):
# type: (Optional[sentry_sdk.Hub]) -> None
warnings.warn(
"The `hub` attribute is deprecated. Please do not set it.",
DeprecationWarning,
stacklevel=2,
)
self._hub = value
| Profile |
python | huggingface__transformers | src/transformers/models/mpt/modeling_mpt.py | {
"start": 15252,
"end": 19835
} | class ____(MptPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
def __init__(self, config: MptConfig):
super().__init__(config)
self.transformer = MptModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def set_output_embeddings(self, new_embeddings: torch.Tensor):
self.lm_head = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The MPT Model transformer with a sequence classification head on top (linear layer).
[`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-1) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
| MptForCausalLM |
python | modin-project__modin | modin/config/envvars.py | {
"start": 6151,
"end": 7012
} | class ____(
EnvironmentVariable,
type=EnvironmentVariable.type,
abstract=True,
):
"""Subclass to disallow getting this variable from the environment when both execution and backend are set in the environment."""
@classmethod
@doc(EnvironmentVariable._get_value_from_config.__doc__)
def _get_value_from_config(cls) -> str:
if Backend.varname in os.environ and (
Engine.varname in os.environ or StorageFormat.varname in os.environ
):
# Handling this case is tricky, in part because the combination of
# Backend and Engine/StorageFormat may be invalid. For now just
# disallow it.
raise ValueError("Can't specify both execution and backend in environment")
return super()._get_value_from_config()
| EnvironmentVariableDisallowingExecutionAndBackendBothSet |
python | numpy__numpy | numpy/typing/tests/data/pass/scalars.py | {
"start": 393,
"end": 3725
class ____:
    """Helper object convertible to ``float`` via the ``__float__`` protocol.

    Used below to check that numpy scalar constructors accept arbitrary
    objects implementing the numeric-conversion protocols.
    """

    def __float__(self) -> float:
        # Fixed value; only the protocol, not the number, matters here.
        return 4.0
# Complex constructors: from complex literals, protocol objects (A/C define
# __float__/__complex__-style conversions earlier in this file — TODO confirm),
# None, and str/bytes text.
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")

# Integer constructors: numbers, protocol objects, empty (zero), and text.
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

# Floating-point constructors.
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")

# D presumably implements several conversion protocols (defined earlier in
# the file — verify) so one object feeds int, float and complex constructors.
np.uint64(D())
np.float32(D())
np.complex64(D())

# Character scalars, with positional and keyword encodings.
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')

# Array-ish semantics
np.int8().real
np.int16().imag
np.int32().data
np.int64().flags

np.uint8().itemsize * 2
np.uint16().ndim + 1
np.uint32().strides
np.uint64().shape

# Time structures
np.datetime64()
np.datetime64(0, "D")
np.datetime64(0, b"D")
np.datetime64(0, ('ms', 3))
np.datetime64("2019")
np.datetime64(b"2019")
np.datetime64("2019", "D")
np.datetime64("2019", "us")
np.datetime64("2019", "as")
np.datetime64(np.datetime64())
np.datetime64(np.datetime64())
np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(dt.datetime(2000, 5, 3), "D")
np.datetime64(dt.datetime(2000, 5, 3), "us")
np.datetime64(dt.datetime(2000, 5, 3), "as")
np.datetime64(dt.date(2000, 5, 3))
np.datetime64(dt.date(2000, 5, 3), "D")
np.datetime64(dt.date(2000, 5, 3), "us")
np.datetime64(dt.date(2000, 5, 3), "as")
np.datetime64(None)
np.datetime64(None, "D")

np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
np.timedelta64(0, ('ms', 3))
np.timedelta64(0, b"D")
np.timedelta64("3")
np.timedelta64(b"5")
np.timedelta64(np.timedelta64(2))
np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")

# Structured/void scalars, including a two-field dtype spelled positionally
# and by keyword.
np.void(1)
np.void(np.int64(1))
np.void(True)
np.void(np.bool(True))
np.void(b"test")
np.void(np.bytes_("test"))
np.void(object(), [("a", "O"), ("b", "O")])
np.void(object(), dtype=[("a", "O"), ("b", "O")])
# Protocols
#
# Exercise the builtin numeric-conversion protocols (int/float/complex) on one
# representative scalar of each family plus a 0-d array.
i8 = np.int64()
u8 = np.uint64()
f8 = np.float64()
c16 = np.complex128()
b = np.bool()
td = np.timedelta64()
U = np.str_("1")
S = np.bytes_("1")
AR = np.array(1, dtype=np.float64)

int(i8)
int(u8)
int(f8)
int(b)
int(td)
int(U)
int(S)
int(AR)
with pytest.warns(np.exceptions.ComplexWarning):
    # int() on a complex scalar discards the imaginary part and warns.
    int(c16)

float(i8)
float(u8)
float(f8)
float(b)  # fixed: was ``float(b_)`` — ``b_`` is never defined in this file
float(td)
float(U)
float(S)
float(AR)
with pytest.warns(np.exceptions.ComplexWarning):
    float(c16)

complex(i8)
complex(u8)
complex(f8)
complex(c16)
complex(b)  # fixed: was ``complex(b_)`` — ``b_`` is never defined in this file
complex(td)
complex(U)
complex(AR)
# Misc
#
# 0-d array-like attribute/method surface of a scalar.
c16.dtype
c16.real
c16.imag
c16.real.real
c16.real.imag
c16.ndim
c16.size
c16.itemsize
c16.shape
c16.strides
c16.squeeze()
c16.byteswap()
c16.transpose()

# Aliases
np.byte()
np.short()
np.intc()
np.intp()
np.int_()
np.longlong()

np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint()
np.ulonglong()

np.half()
np.single()
np.double()
np.longdouble()

np.csingle()
np.cdouble()
np.clongdouble()

# .item() returns the corresponding builtin Python scalar.
b.item()
i8.item()
u8.item()
f8.item()
c16.item()
U.item()
S.item()

# .tolist() on a 0-d scalar also yields a builtin value.
b.tolist()
i8.tolist()
u8.tolist()
f8.tolist()
c16.tolist()
U.tolist()
S.tolist()

# ravel/flatten/reshape lift the 0-d scalar into a 1-element ndarray.
b.ravel()
i8.ravel()
u8.ravel()
f8.ravel()
c16.ravel()
U.ravel()
S.ravel()

b.flatten()
i8.flatten()
u8.flatten()
f8.flatten()
c16.flatten()
U.flatten()
S.flatten()

b.reshape(1)
i8.reshape(1)
u8.reshape(1)
f8.reshape(1)
c16.reshape(1)
U.reshape(1)
S.reshape(1)
| A |
python | run-llama__llama_index | llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/base.py | {
"start": 4893,
"end": 14346
class ____(BaseAgentCoreMemory):
    """Chat memory backed by the AWS Bedrock AgentCore memory service.

    Messages are persisted as AgentCore events (``create_event``); retrieval
    searches long-term memory records (``retrieve_memories``) and injects them
    into the chat history, either as a system message or merged into the
    latest user message depending on ``insert_method``.
    """

    # Cap on how many history messages feed the search-API context.
    search_msg_limit: int = Field(
        default=5,
        description="Limit of chat history messages to use for context in search API",
    )
    insert_method: InsertMethod = Field(
        default=InsertMethod.SYSTEM,
        description="Whether to inject memory blocks into a system message or into the latest user message.",
    )
    # Holds memory_id/actor_id/session_id/namespace used by every API call.
    _context: AgentCoreMemoryContext = PrivateAttr()

    def __init__(
        self,
        context: AgentCoreMemoryContext,
        # TODO: add support for InsertMethod.USER. for now default to InsertMethod.SYSTEM
        # insert_method: InsertMethod = InsertMethod.SYSTEM,
        profile_name: Optional[str] = None,
        aws_access_key_id: Optional[str] = None,
        aws_secret_access_key: Optional[str] = None,
        aws_session_token: Optional[str] = None,
        region_name: Optional[str] = None,
        api_version: Optional[str] = None,
        use_ssl: bool = True,
        verify: Optional[Union[bool, str]] = None,
        endpoint_url: Optional[str] = None,
        botocore_session: Optional[Any] = None,
        client: Optional[Any] = None,
        timeout: Optional[float] = 60.0,
        max_retries: Optional[int] = 10,
        botocore_config: Optional[Any] = None,
    ) -> None:
        """Build (or adopt) a ``bedrock-agentcore`` boto3 client and store *context*.

        If *client* is given it is used as-is; otherwise a boto3 session is
        created from the supplied AWS credentials/region settings.
        """
        # Identifies LlamaIndex in the boto3 user agent string.
        boto3_user_agent_identifier = "x-client-framework:llama_index"
        session_kwargs = {
            "profile_name": profile_name,
            "region_name": region_name,
            "aws_access_key_id": aws_access_key_id,
            "aws_secret_access_key": aws_secret_access_key,
            "aws_session_token": aws_session_token,
            "botocore_session": botocore_session,
        }
        self._config = (
            Config(
                retries={"max_attempts": max_retries, "mode": "standard"},
                connect_timeout=timeout,
                read_timeout=timeout,
                user_agent_extra=boto3_user_agent_identifier,
            )
            if botocore_config is None
            else botocore_config
        )
        self._boto_client_kwargs = {
            "api_version": api_version,
            "use_ssl": use_ssl,
            "verify": verify,
            "endpoint_url": endpoint_url,
        }
        try:
            # NOTE(review): this rebuilds the exact Config already assigned
            # above — looks redundant; confirm which assignment should win.
            self._config = (
                Config(
                    retries={"max_attempts": max_retries, "mode": "standard"},
                    connect_timeout=timeout,
                    read_timeout=timeout,
                    user_agent_extra=boto3_user_agent_identifier,
                )
                if botocore_config is None
                else botocore_config
            )
            session = boto3.Session(**session_kwargs)
        except ImportError:
            raise ImportError(
                "boto3 package not found, install with pip install boto3"
            )
        # NOTE(review): second, seemingly redundant session construction —
        # the try block above already created an identical session.
        session = boto3.Session(**session_kwargs)
        if client is not None:
            self._client = client
        else:
            self._client = session.client(
                "bedrock-agentcore",
                config=self._config,
                **self._boto_client_kwargs,
            )
        # Patch botocore's timestamp serializer so datetimes are sent as float
        # epochs (presumably to preserve microseconds — see the helper below).
        self._client._serializer._serializer._serialize_type_timestamp = (
            self._serialize_timestamp_with_microseconds
        )
        super().__init__(self._client)
        self._context = context

    @model_serializer
    def serialize_memory(self) -> Dict[str, Any]:
        """Serialize only the plain fields of this memory object."""
        # leaving out the two keys since they are causing serialization/deserialization problems
        return {
            "search_msg_limit": self.search_msg_limit,
        }

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "AgentCoreMemory"

    @classmethod
    def from_defaults(cls, **kwargs: Any) -> "AgentCoreMemory":
        """Unsupported; construct via ``from_client`` or ``from_config`` instead."""
        raise NotImplementedError("Use either from_client or from_config")

    def _serialize_timestamp_with_microseconds(self, serialized, value, shape, name):
        # Replacement for botocore's timestamp serializer: datetimes become a
        # float epoch; anything else is handed back to the original serializer.
        # NOTE(review): by the time this runs, the client attribute looked up
        # here has already been patched to *this* method in __init__, so the
        # else branch appears self-recursive — confirm intended behavior.
        original_serialize_timestamp = (
            self._client._serializer._serializer._serialize_type_timestamp
        )
        if isinstance(value, datetime):
            serialized[name] = value.timestamp()  # Float with microseconds
        else:
            original_serialize_timestamp(serialized, value, shape, name)

    def _add_msgs_to_client_memory(self, messages: List[ChatMessage]) -> None:
        """Add new user and assistant messages to client memory."""
        self.create_event(
            messages=messages,
            memory_id=self._context.memory_id,
            actor_id=self._context.actor_id,
            session_id=self._context.session_id,
        )

    async def aget(self, input: Optional[str] = None) -> List[ChatMessage]:
        """Return chat history with relevant long-term memories injected.

        :param input: optional extra query text appended to the history when
            building the memory search query.
        """
        # Get list of events to represent as the chat history. Use this as the query for the memory records. If an input is provided, then also append it to the list of events
        messages = self.list_events(
            memory_id=self._context.memory_id,
            session_id=self._context.session_id,
            actor_id=self._context.actor_id,
        )
        input = convert_messages_to_string(messages, input)
        # Query text is truncated to 10k chars — presumably a service-side
        # limit on searchQuery length; confirm against the AgentCore API.
        search_criteria = {"searchQuery": input[:10000]}
        if self._context.memory_strategy_id is not None:
            search_criteria["memoryStrategyId"] = self._context.memory_strategy_id
        memory_records = self.retrieve_memories(
            memory_id=self._context.memory_id,
            namespace=self._context.namespace,
            search_criteria=search_criteria,
        )
        if self.insert_method == InsertMethod.SYSTEM:
            system_message = convert_memory_to_system_message(memory_records)
            # If system message is present
            if len(messages) > 0 and messages[0].role == MessageRole.SYSTEM:
                assert messages[0].content is not None
                system_message = convert_memory_to_system_message(
                    response=memory_records, existing_system_message=messages[0]
                )
            # NOTE(review): when a system message already existed, this insert
            # leaves the original one at index 1 as well — confirm that a
            # duplicate system message is intended.
            messages.insert(0, system_message)
        elif self.insert_method == InsertMethod.USER:
            # Find the latest user message
            session_idx = next(
                (
                    i
                    for i, msg in enumerate(reversed(messages))
                    if msg.role == MessageRole.USER
                ),
                None,
            )
            memory_content = convert_memory_to_user_message(memory_records)
            if session_idx is not None:
                # Get actual index (since we enumerated in reverse)
                actual_idx = len(messages) - 1 - session_idx
                # Update existing user message since many LLMs have issues with consecutive user msgs
                final_user_content = (
                    memory_content.content + messages[actual_idx].content
                )
                messages[actual_idx] = ChatMessage(
                    content=final_user_content, role=MessageRole.USER
                )
                messages[actual_idx].blocks = [
                    *memory_content.blocks,
                    *messages[actual_idx].blocks,
                ]
            else:
                messages.append(
                    ChatMessage(content=memory_content, role=MessageRole.USER)
                )
        return messages

    async def aget_all(self) -> List[ChatMessage]:
        """Return the raw event history, without memory injection."""
        return self.list_events(
            memory_id=self._context.memory_id,
            session_id=self._context.session_id,
            actor_id=self._context.actor_id,
        )

    async def aput(self, message: ChatMessage) -> None:
        """Add a message to the chat store and process waterfall logic if needed."""
        # Add the message to the chat store
        self._add_msgs_to_client_memory([message])

    async def aput_messages(self, messages: List[ChatMessage]) -> None:
        """Add a list of messages to the chat store and process waterfall logic if needed."""
        # Add the messages to the chat store
        self._add_msgs_to_client_memory(messages)

    async def aset(self, messages: List[ChatMessage]) -> None:
        """Append any messages beyond the currently stored history length."""
        initial_chat_len = len(self.get_all())
        # Insert only new chat messages
        self._add_msgs_to_client_memory(messages[initial_chat_len:])

    # ---- Sync method wrappers ----

    def get(self, input: Optional[str] = None) -> List[ChatMessage]:
        """Get chat history."""
        return asyncio_run(self.aget(input=input))

    def get_all(self) -> List[ChatMessage]:
        """Returns all chat history."""
        return asyncio_run(self.aget_all())

    def put(self, message: ChatMessage) -> None:
        """Add message to chat history and client memory."""
        return asyncio_run(self.aput(message))

    def put_messages(self, messages: List[ChatMessage]) -> None:
        """Add multiple messages to chat history and client memory."""
        return asyncio_run(self.aput_messages(messages))

    def set(self, messages: List[ChatMessage]) -> None:
        """Set chat history and add new messages to client memory."""
        return asyncio_run(self.aset(messages))

    def reset(self) -> None:
        """Only reset chat history."""
        # Our guidance has been to not delete memory resources in AgentCore on behalf of the customer. If this changes in the future, then we can implement this method.

    def get_context(self) -> AgentCoreMemoryContext:
        """Return the AgentCore context (memory/actor/session identifiers)."""
        return self._context.get_context()
| AgentCoreMemory |
python | sanic-org__sanic | sanic/server/async_server.py | {
"start": 180,
"end": 3475
class ____:
    """Wraps an asyncio server with functionality that might be useful to a user who needs to manage the server lifecycle manually."""  # noqa: E501

    __slots__ = ("app", "connections", "loop", "serve_coro", "server")

    def __init__(
        self,
        app: Sanic,
        loop,
        serve_coro,
        connections,
    ):
        # Note, Sanic already called "before_server_start" events
        # before this helper was even created. So we don't need it here.
        self.app = app
        self.loop = loop
        self.serve_coro = serve_coro
        self.connections = connections
        self.server = None

    def startup(self):
        """Run the application's "startup" phase."""
        return self.app._startup()

    def before_start(self):
        """Dispatch the "before_server_start" events."""
        return self._server_event("init", "before")

    def after_start(self):
        """Dispatch the "after_server_start" events."""
        return self._server_event("init", "after")

    def before_stop(self):
        """Dispatch the "before_server_stop" events."""
        return self._server_event("shutdown", "before")

    def after_stop(self):
        """Dispatch the "after_server_stop" events."""
        return self._server_event("shutdown", "after")

    def is_serving(self) -> bool:
        """Whether the underlying asyncio server is currently accepting connections."""
        return self.server.is_serving() if self.server else False

    def wait_closed(self):
        """Awaitable that completes once the underlying server has closed."""
        if not self.server:
            return None
        return self.server.wait_closed()

    def close(self):
        """Close the server and return a task awaiting full closure."""
        if self.server:
            self.server.close()
        closing = self.wait_closed()
        return asyncio.ensure_future(closing, loop=self.loop)

    def start_serving(self):
        """Begin accepting connections without blocking."""
        return self._serve(self.server.start_serving)

    def serve_forever(self):
        """Accept connections until the server is stopped."""
        return self._serve(self.server.serve_forever)

    def _serve(self, serve_func):
        # Silently a no-op when the asyncio server has not been created yet.
        if not self.server:
            return None
        if not self.app.state.is_started:
            raise SanicException(
                "Cannot run Sanic server without first running "
                "await server.startup()"
            )
        try:
            return serve_func()
        except AttributeError:
            name = serve_func.__name__
            raise NotImplementedError(
                f"server.{name} not available in this version "
                "of asyncio or uvloop."
            )

    def _server_event(self, concern: str, action: str):
        # Server events may only fire after app startup has run.
        if not self.app.state.is_started:
            raise SanicException(
                "Cannot dispatch server event without "
                "first running await server.startup()"
            )
        return self.app._server_event(concern, action, loop=self.loop)

    def __await__(self):
        """
        Starts the asyncio server, returns AsyncServerCoro
        """
        pending = asyncio.ensure_future(self.serve_coro)
        while not pending.done():
            yield
        self.server = pending.result()
        return self
| AsyncioServer |
python | apache__airflow | airflow-core/src/airflow/serialization/serialized_objects.py | {
"start": 48858,
"end": 97756
class ____(DAGNode, BaseSerialization):
    """
    A JSON serializable representation of operator.

    All operators are casted to SerializedBaseOperator after deserialization.
    Class specific attributes used by UI are move to object attributes.

    Creating a SerializedBaseOperator is a three-step process:

    1. Instantiate a :class:`SerializedBaseOperator` object.
    2. Populate attributes with :func:`SerializedBaseOperator.populated_operator`.
    3. When the task's containing DAG is available, fix references to the DAG
       with :func:`SerializedBaseOperator.set_task_dag_references`.
    """

    # Fields whose values are themselves serialized with full type encoding.
    _decorated_fields = {"executor_config"}

    _CONSTRUCTOR_PARAMS = {}

    # Lazily loaded JSON schema used to validate serialized DAG payloads.
    _json_schema: ClassVar[Validator] = lazy_object_proxy.Proxy(load_dag_schema)
    _const_fields: ClassVar[set[str] | None] = None

    _can_skip_downstream: bool
    _is_empty: bool
    _needs_expansion: bool
    _task_display_name: str | None
    # Stored as a string until first access; see the ``weight_rule`` property.
    _weight_rule: str | PriorityWeightStrategy = "downstream"

    # TODO (GH-52141): These should contain serialized containers, but currently
    # this class inherits from an SDK one.
    dag: SerializedDAG | None = None  # type: ignore[assignment]
    task_group: SerializedTaskGroup | None = None  # type: ignore[assignment]

    # Class-level defaults applied when a field was omitted from the payload.
    allow_nested_operators: bool = True
    depends_on_past: bool = False
    do_xcom_push: bool = True
    doc: str | None = None
    doc_md: str | None = None
    doc_json: str | None = None
    doc_yaml: str | None = None
    doc_rst: str | None = None
    downstream_task_ids: set[str] = set()
    email: str | Sequence[str] | None
    # Following 2 should be deprecated
    email_on_retry: bool = True
    email_on_failure: bool = True
    execution_timeout: datetime.timedelta | None
    executor: str | None
    executor_config: dict = {}
    ignore_first_depends_on_past: bool = False
    inlets: Sequence = []
    is_setup: bool = False
    is_teardown: bool = False
    map_index_template: str | None = None
    max_active_tis_per_dag: int | None = None
    max_active_tis_per_dagrun: int | None = None
    max_retry_delay: datetime.timedelta | float | None = None
    multiple_outputs: bool = False
    # Boolean flags for callback existence
    has_on_execute_callback: bool = False
    has_on_failure_callback: bool = False
    has_on_retry_callback: bool = False
    has_on_success_callback: bool = False
    has_on_skipped_callback: bool = False
    operator_extra_links: Collection[BaseOperatorLink] = []
    on_failure_fail_dagrun: bool = False
    outlets: Sequence = []
    owner: str = "airflow"
    params: SerializedParamsDict = SerializedParamsDict()
    pool: str = "default_pool"
    pool_slots: int = 1
    priority_weight: int = 1
    queue: str = "default"
    resources: dict[str, Any] | None = None
    retries: int = 0
    retry_delay: datetime.timedelta = datetime.timedelta(seconds=300)
    retry_exponential_backoff: float = 0
    run_as_user: str | None = None
    start_date: datetime.datetime | None = None
    end_date: datetime.datetime | None = None
    start_from_trigger: bool = False
    start_trigger_args: StartTriggerArgs | None = None
    task_type: str = "BaseOperator"
    template_ext: Sequence[str] = []
    template_fields: Collection[str] = []
    template_fields_renderers: ClassVar[dict[str, str]] = {}
    trigger_rule: str | TriggerRule = "all_success"
    # TODO: Remove the following, they aren't used anymore
    ui_color: str = "#fff"
    ui_fgcolor: str = "#000"
    wait_for_downstream: bool = False
    wait_for_past_depends_before_skipping: bool = False

    is_mapped = False
    def __init__(self, *, task_id: str, _airflow_from_mapped: bool = False) -> None:
        """Create a bare serialized operator; most attributes are filled in later by ``populate_operator``."""
        super().__init__()
        self._BaseOperator__from_mapped = _airflow_from_mapped
        self.task_id = task_id
        # Move class attributes into object attributes.
        self.deps = DEFAULT_OPERATOR_DEPS
        self._operator_name: str | None = None

    def __eq__(self, other: Any) -> bool:
        # Comparable against both serialized operators and real SDK operators.
        if not isinstance(other, (SerializedBaseOperator, BaseOperator)):
            return NotImplemented
        return self.task_type == other.task_type and all(
            getattr(self, c, None) == getattr(other, c, None) for c in BaseOperator._comps
        )

    def __hash__(self):
        # Kept consistent with __eq__: hashes over the same comparison fields.
        return hash((self.task_type, *[getattr(self, c, None) for c in BaseOperator._comps]))

    def __repr__(self) -> str:
        return f"<SerializedTask({self.task_type}): {self.task_id}>"

    @property
    def node_id(self) -> str:
        # DAGNode interface: tasks are identified by task_id.
        return self.task_id
    # TODO (GH-52141): Replace DAGNode with a scheduler type.
    def get_dag(self) -> SerializedDAG | None:  # type: ignore[override]
        """Return the DAG this operator belongs to, if already attached."""
        return self.dag

    @property
    def roots(self) -> Sequence[DAGNode]:
        """Required by DAGNode."""
        return [self]

    @property
    def leaves(self) -> Sequence[DAGNode]:
        """Required by DAGNode."""
        return [self]

    @cached_property
    def operator_extra_link_dict(self) -> dict[str, BaseOperatorLink]:
        """Returns dictionary of all extra links for the operator."""
        return {link.name: link for link in self.operator_extra_links}

    @cached_property
    def global_operator_extra_link_dict(self) -> dict[str, Any]:
        """Returns dictionary of all global extra links."""
        from airflow import plugins_manager

        plugins_manager.initialize_extra_operators_links_plugins()
        if plugins_manager.global_operator_extra_links is None:
            raise AirflowException("Can't load operators")
        return {link.name: link for link in plugins_manager.global_operator_extra_links}

    @cached_property
    def extra_links(self) -> list[str]:
        # Sorted union of operator-defined and plugin-provided link names.
        return sorted(set(self.operator_extra_link_dict).union(self.global_operator_extra_link_dict))

    def get_extra_links(self, ti: TaskInstance, name: str) -> str | None:
        """
        For an operator, gets the URLs that the ``extra_links`` entry points to.

        :meta private:

        :raise ValueError: The error message of a ValueError will be passed on through to
            the fronted to show up as a tooltip on the disabled link.
        :param ti: The TaskInstance for the URL being searched for.
        :param name: The name of the link we're looking for the URL for. Should be
            one of the options specified in ``extra_links``.
        """
        link = self.operator_extra_link_dict.get(name) or self.global_operator_extra_link_dict.get(name)
        if not link:
            return None
        # TODO: GH-52141 - BaseOperatorLink.get_link expects BaseOperator but receives SerializedBaseOperator.
        return link.get_link(self, ti_key=ti.key)  # type: ignore[arg-type]

    @property
    def operator_name(self) -> str:
        # Overwrites operator_name of BaseOperator to use _operator_name instead of
        # __class__.operator_name.
        return self._operator_name or self.task_type

    @operator_name.setter
    def operator_name(self, operator_name: str):
        self._operator_name = operator_name

    @property
    def task_display_name(self) -> str:
        # Falls back to task_id when no explicit display name was serialized.
        return self._task_display_name or self.task_id

    def expand_start_trigger_args(self, *, context: Context) -> StartTriggerArgs | None:
        # Serialized operators carry pre-computed trigger args; nothing to expand.
        return self.start_trigger_args

    @property
    def weight_rule(self) -> PriorityWeightStrategy:
        # _weight_rule may still be a plain string; load/validate lazily.
        if isinstance(self._weight_rule, PriorityWeightStrategy):
            return self._weight_rule
        return validate_and_load_priority_weight_strategy(self._weight_rule)

    def __getattr__(self, name):
        # Handle missing attributes with task_type instead of SerializedBaseOperator
        # Don't intercept special methods that Python internals might check
        if name.startswith("__") and name.endswith("__"):
            # For special methods, raise the original error
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
        # For regular attributes, use task_type in the error message
        raise AttributeError(f"'{self.task_type}' object has no attribute '{name}'")
    @classmethod
    def serialize_mapped_operator(cls, op: MappedOperator) -> dict[str, Any]:
        """Serialize a mapped operator, including its expansion input and partial kwargs."""
        serialized_op = cls._serialize_node(op)
        # Handle expand_input and op_kwargs_expand_input.
        expansion_kwargs = op._get_specified_expand_input()
        if TYPE_CHECKING:  # Let Mypy check the input type for us!
            _ExpandInputRef.validate_expand_input_value(expansion_kwargs.value)
        serialized_op[op._expand_input_attr] = {
            "type": type(expansion_kwargs).EXPAND_INPUT_TYPE,
            "value": cls.serialize(expansion_kwargs.value),
        }

        if op.partial_kwargs:
            serialized_op["partial_kwargs"] = {}
            for k, v in op.partial_kwargs.items():
                if cls._is_excluded(v, k, op):
                    continue
                # Callbacks are not serializable; record only their presence.
                if k in [f"on_{x}_callback" for x in ("execute", "failure", "success", "retry", "skipped")]:
                    if bool(v):
                        serialized_op["partial_kwargs"][f"has_{k}"] = True
                    continue
                serialized_op["partial_kwargs"].update({k: cls.serialize(v)})
            # we want to store python_callable_name, not python_callable
            python_callable = op.partial_kwargs.get("python_callable", None)
            if python_callable:
                callable_name = qualname(python_callable)
                serialized_op["partial_kwargs"]["python_callable_name"] = callable_name
                del serialized_op["partial_kwargs"]["python_callable"]

        serialized_op["_is_mapped"] = True
        return serialized_op

    @classmethod
    def serialize_operator(cls, op: SdkOperator) -> dict[str, Any]:
        """Serialize a (non-mapped) operator into a JSON-compatible dict."""
        return cls._serialize_node(op)

    @classmethod
    def _serialize_node(cls, op: SdkOperator) -> dict[str, Any]:
        """Serialize operator into a JSON object."""
        serialize_op = cls.serialize_to_json(op, cls._decorated_fields)

        if not op.email:
            # If "email" is empty, we do not need to include other email attrs
            for attr in ["email_on_failure", "email_on_retry"]:
                if attr in serialize_op:
                    del serialize_op[attr]

        # Detect if there's a change in python callable name
        python_callable = getattr(op, "python_callable", None)
        if python_callable:
            callable_name = qualname(python_callable)
            serialize_op["python_callable_name"] = callable_name

        serialize_op["task_type"] = getattr(op, "task_type", type(op).__name__)
        serialize_op["_task_module"] = getattr(op, "_task_module", type(op).__module__)
        if op.operator_name != serialize_op["task_type"]:
            serialize_op["_operator_name"] = op.operator_name

        # Used to determine if an Operator is inherited from EmptyOperator
        if op.inherits_from_empty_operator:
            serialize_op["_is_empty"] = True

        # Used to determine if an Operator is inherited from SkipMixin or BranchMixin
        if op.inherits_from_skipmixin:
            serialize_op["_can_skip_downstream"] = True

        if op.start_trigger_args:
            serialize_op["start_trigger_args"] = encode_start_trigger_args(op.start_trigger_args)

        if op.operator_extra_links:
            serialize_op["_operator_extra_links"] = cls._serialize_operator_extra_links(
                op.operator_extra_links.__get__(op)
                if isinstance(op.operator_extra_links, property)
                else op.operator_extra_links
            )

        # Store all template_fields as they are if there are JSON Serializable
        # If not, store them as strings
        # And raise an exception if the field is not templateable
        forbidden_fields = set(signature(BaseOperator.__init__).parameters.keys())
        # Though allow some of the BaseOperator fields to be templated anyway
        forbidden_fields.difference_update({"email"})
        if op.template_fields:
            for template_field in op.template_fields:
                if template_field in forbidden_fields:
                    raise AirflowException(
                        dedent(
                            f"""Cannot template BaseOperator field:
                            {template_field!r} {op.__class__.__name__=} {op.template_fields=}"""
                        )
                    )
                value = getattr(op, template_field, None)
                if not cls._is_excluded(value, template_field, op):
                    serialize_op[template_field] = serialize_template_field(value, template_field)

        if op.params:
            serialize_op["params"] = cls._serialize_params_dict(op.params)

        return serialize_op
    @classmethod
    def populate_operator(
        cls,
        op: SerializedOperator,
        encoded_op: dict[str, Any],
        client_defaults: dict[str, Any] | None = None,
    ) -> None:
        """
        Populate operator attributes with serialized values.

        This covers simple attributes that don't reference other things in the
        DAG. Setting references (such as ``op.dag`` and task dependencies) is
        done in ``set_task_dag_references`` instead, which is called after the
        DAG is hydrated.
        """
        # Apply defaults by merging them into encoded_op BEFORE main deserialization
        encoded_op = cls._apply_defaults_to_encoded_op(encoded_op, client_defaults)

        # Preprocess and upgrade all field names for backward compatibility and consistency
        encoded_op = cls._preprocess_encoded_operator(encoded_op)

        # Extra Operator Links defined in Plugins
        op_extra_links_from_plugin = {}

        # We don't want to load Extra Operator links in Scheduler
        if cls._load_operator_extra_links:
            from airflow import plugins_manager

            plugins_manager.initialize_extra_operators_links_plugins()

            if plugins_manager.operator_extra_links is None:
                raise AirflowException("Can not load plugins")

            for ope in plugins_manager.operator_extra_links:
                for operator in ope.operators:
                    if (
                        operator.__name__ == encoded_op["task_type"]
                        and operator.__module__ == encoded_op["_task_module"]
                    ):
                        op_extra_links_from_plugin.update({ope.name: ope})

        # If OperatorLinks are defined in Plugins but not in the Operator that is being Serialized
        # set the Operator links attribute
        # The case for "If OperatorLinks are defined in the operator that is being Serialized"
        # is handled in the deserialization loop where it matches k == "_operator_extra_links"
        if op_extra_links_from_plugin and "_operator_extra_links" not in encoded_op:
            setattr(
                op,
                "operator_extra_links",
                list(op_extra_links_from_plugin.values()),
            )

        deserialized_partial_kwarg_defaults = {}
        for k_in, v_in in encoded_op.items():
            k = k_in  # surpass PLW2901
            v = v_in  # surpass PLW2901
            # Use centralized field deserialization logic
            if k in encoded_op.get("template_fields", []):
                pass  # Template fields are handled separately
            elif k == "_operator_extra_links":
                if cls._load_operator_extra_links:
                    op_predefined_extra_links = cls._deserialize_operator_extra_links(v)

                    # If OperatorLinks with the same name exists, Links via Plugin have higher precedence
                    op_predefined_extra_links.update(op_extra_links_from_plugin)
                else:
                    op_predefined_extra_links = {}

                v = list(op_predefined_extra_links.values())
                k = "operator_extra_links"
            elif k == "params":
                v = cls._deserialize_params_dict(v)
            elif k == "partial_kwargs":
                # Use unified deserializer that supports both encoded and non-encoded values
                v = cls._deserialize_partial_kwargs(v, client_defaults)
            elif k in {"expand_input", "op_kwargs_expand_input"}:
                v = _ExpandInputRef(v["type"], cls.deserialize(v["value"]))
            elif k == "operator_class":
                v = {k_: cls.deserialize(v_) for k_, v_ in v.items()}
            elif k == "_is_sensor":
                from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep

                if v is False:
                    raise RuntimeError("_is_sensor=False should never have been serialized!")
                object.__setattr__(op, "deps", op.deps | {ReadyToRescheduleDep()})
                continue
            elif (
                k in cls._decorated_fields
                or k not in op.get_serialized_fields()
                or k in ("outlets", "inlets")
            ):
                v = cls.deserialize(v)
            elif k == "_on_failure_fail_dagrun":
                k = "on_failure_fail_dagrun"
            elif k == "weight_rule":
                k = "_weight_rule"
                v = decode_priority_weight_strategy(v)
            elif k == "retry_exponential_backoff":
                # Historical payloads stored a bool; map True to base 2.0.
                if isinstance(v, bool):
                    v = 2.0 if v else 0
                else:
                    v = float(v)
            else:
                # Apply centralized deserialization for all other fields
                v = cls._deserialize_field_value(k, v)

            # Handle field differences between SerializedBaseOperator and MappedOperator
            # Fields that exist in SerializedBaseOperator but not in MappedOperator need to go to partial_kwargs
            if (
                op.is_mapped
                and k in SerializedBaseOperator.get_serialized_fields()
                and k not in op.get_serialized_fields()
            ):
                # This field belongs to SerializedBaseOperator but not MappedOperator
                # Store it in partial_kwargs where it belongs
                deserialized_partial_kwarg_defaults[k] = v
                continue

            # else use v as it is
            setattr(op, k, v)

        # Apply the fields that belong in partial_kwargs for MappedOperator
        if op.is_mapped:
            for k, v in deserialized_partial_kwarg_defaults.items():
                if k not in op.partial_kwargs:
                    op.partial_kwargs[k] = v

        for k in op.get_serialized_fields() - encoded_op.keys():
            # TODO: refactor deserialization of BaseOperator and MappedOperator (split it out), then check
            # could go away.
            if not hasattr(op, k):
                setattr(op, k, None)

        # Set all the template_field to None that were not present in Serialized JSON
        for field in op.template_fields:
            if not hasattr(op, field):
                setattr(op, field, None)

        # Used to determine if an Operator is inherited from EmptyOperator
        setattr(op, "_is_empty", bool(encoded_op.get("_is_empty", False)))

        # Used to determine if an Operator is inherited from SkipMixin
        setattr(op, "_can_skip_downstream", bool(encoded_op.get("_can_skip_downstream", False)))

        start_trigger_args = None
        encoded_start_trigger_args = encoded_op.get("start_trigger_args", None)
        if encoded_start_trigger_args:
            encoded_start_trigger_args = cast("dict", encoded_start_trigger_args)
            start_trigger_args = decode_start_trigger_args(encoded_start_trigger_args)
        setattr(op, "start_trigger_args", start_trigger_args)
        setattr(op, "start_from_trigger", bool(encoded_op.get("start_from_trigger", False)))
    @staticmethod
    def set_task_dag_references(task: SerializedOperator | MappedOperator, dag: SerializedDAG) -> None:
        """
        Handle DAG references on an operator.

        The operator should have been mostly populated earlier by calling
        ``populate_operator``. This function further fixes object references
        that were not possible before the task's containing DAG is hydrated.
        """
        task.dag = dag

        # Inherit unset start/end dates from the DAG.
        for date_attr in ("start_date", "end_date"):
            if getattr(task, date_attr, None) is None:
                setattr(task, date_attr, getattr(dag, date_attr, None))

        # Dereference expand_input and op_kwargs_expand_input.
        for k in ("expand_input", "op_kwargs_expand_input"):
            if isinstance(kwargs_ref := getattr(task, k, None), _ExpandInputRef):
                setattr(task, k, kwargs_ref.deref(dag))

        for task_id in task.downstream_task_ids:
            # Bypass set_upstream etc here - it does more than we want
            dag.task_dict[task_id].upstream_task_ids.add(task.task_id)

    @classmethod
    def get_operator_const_fields(cls) -> set[str]:
        """Get the set of operator fields that are marked as const in the JSON schema."""
        if (schema_loader := cls._json_schema) is None:
            return set()
        schema_data = schema_loader.schema
        operator_def = schema_data.get("definitions", {}).get("operator", {})
        properties = operator_def.get("properties", {})
        return {
            field_name
            for field_name, field_def in properties.items()
            if isinstance(field_def, dict) and field_def.get("const")
        }

    @classmethod
    @lru_cache(maxsize=1)  # Only one type: "operator"
    def get_operator_optional_fields_from_schema(cls) -> set[str]:
        """Return operator fields declared in the JSON schema but not listed as required."""
        schema_loader = cls._json_schema
        if schema_loader is None:
            return set()
        schema_data = schema_loader.schema
        operator_def = schema_data.get("definitions", {}).get("operator", {})
        operator_fields = set(operator_def.get("properties", {}).keys())
        required_fields = set(operator_def.get("required", []))
        optional_fields = operator_fields - required_fields
        return optional_fields

    @classmethod
    def deserialize_operator(
        cls,
        encoded_op: dict[str, Any],
        client_defaults: dict[str, Any] | None = None,
    ) -> SerializedOperator:
        """Deserializes an operator from a JSON object."""
        op: SerializedOperator
        if encoded_op.get("_is_mapped", False):
            from airflow.models.mappedoperator import MappedOperator as SerializedMappedOperator

            try:
                operator_name = encoded_op["_operator_name"]
            except KeyError:
                operator_name = encoded_op["task_type"]

            # Only store minimal class type information instead of full operator data
            # This significantly reduces memory usage for mapped operators
            operator_class_info = {
                "task_type": encoded_op["task_type"],
                "_operator_name": operator_name,
            }

            op = SerializedMappedOperator(
                operator_class=operator_class_info,
                task_id=encoded_op["task_id"],
                operator_extra_links=SerializedBaseOperator.operator_extra_links,
                template_ext=SerializedBaseOperator.template_ext,
                template_fields=SerializedBaseOperator.template_fields,
                template_fields_renderers=SerializedBaseOperator.template_fields_renderers,
                ui_color=SerializedBaseOperator.ui_color,
                ui_fgcolor=SerializedBaseOperator.ui_fgcolor,
                is_sensor=encoded_op.get("_is_sensor", False),
                can_skip_downstream=encoded_op.get("_can_skip_downstream", False),
                task_module=encoded_op["_task_module"],
                task_type=encoded_op["task_type"],
                operator_name=operator_name,
                disallow_kwargs_override=encoded_op["_disallow_kwargs_override"],
                expand_input_attr=encoded_op["_expand_input_attr"],
                start_trigger_args=encoded_op.get("start_trigger_args", None),
                start_from_trigger=encoded_op.get("start_from_trigger", False),
            )
        else:
            op = SerializedBaseOperator(task_id=encoded_op["task_id"])
        cls.populate_operator(op, encoded_op, client_defaults)
        return op
@classmethod
def _preprocess_encoded_operator(cls, encoded_op: dict[str, Any]) -> dict[str, Any]:
"""
Preprocess and upgrade all field names for backward compatibility and consistency.
This consolidates all field name transformations in one place:
- Callback field renaming (on_*_callback -> has_on_*_callback)
- Other field upgrades and renames
- Field exclusions
"""
preprocessed = encoded_op.copy()
# Handle callback field renaming for backward compatibility
for callback_type in ("execute", "failure", "success", "retry", "skipped"):
old_key = f"on_{callback_type}_callback"
new_key = f"has_{old_key}"
if old_key in preprocessed:
preprocessed[new_key] = bool(preprocessed[old_key])
del preprocessed[old_key]
# Handle other field renames and upgrades from old format/name
field_renames = {
"task_display_name": "_task_display_name",
"_downstream_task_ids": "downstream_task_ids",
"_task_type": "task_type",
"_outlets": "outlets",
"_inlets": "inlets",
}
for old_name, new_name in field_renames.items():
if old_name in preprocessed:
preprocessed[new_name] = preprocessed.pop(old_name)
# Remove fields that shouldn't be processed
fields_to_exclude = {
"python_callable_name", # Only serves to detect function name changes
"label", # Shouldn't be set anymore - computed from task_id now
}
for field in fields_to_exclude:
preprocessed.pop(field, None)
return preprocessed
@classmethod
def detect_dependencies(cls, op: SdkOperator) -> set[DagDependency]:
"""Detect between DAG dependencies for the operator."""
dependency_detector = DependencyDetector()
deps = set(dependency_detector.detect_task_dependencies(op))
return deps
@classmethod
def _matches_client_defaults(cls, var: Any, attrname: str) -> bool:
"""
Check if a field value matches client_defaults and should be excluded.
This implements the hierarchical defaults optimization where values that match
client_defaults are omitted from individual task serialization.
:param var: The value to check
:param attrname: The attribute name
:return: True if value matches client_defaults and should be excluded
"""
try:
# Get cached client defaults for tasks
task_defaults = cls.generate_client_defaults()
# Check if this field is in client_defaults and values match
if attrname in task_defaults and var == task_defaults[attrname]:
return True
except Exception:
# If anything goes wrong with client_defaults, fall back to normal logic
pass
return False
@classmethod
def _is_excluded(cls, var: Any, attrname: str, op: DAGNode):
"""
Determine if a variable is excluded from the serialized object.
:param var: The value to check. [var == getattr(op, attrname)]
:param attrname: The name of the attribute to check.
:param op: The operator to check.
:return: True if a variable is excluded, False otherwise.
"""
# Check if value matches client_defaults (hierarchical defaults optimization)
if cls._matches_client_defaults(var, attrname):
return True
# for const fields, we should always be excluded when False, regardless of client_defaults
# Use class-level cache for optimisation
if cls._const_fields is None:
cls._const_fields = cls.get_operator_const_fields()
if attrname in cls._const_fields and var is False:
return True
schema_defaults = cls.get_schema_defaults("operator")
if attrname in schema_defaults:
if schema_defaults[attrname] == var:
# If it also matches client_defaults, exclude (optimization)
client_defaults = cls.generate_client_defaults()
if attrname in client_defaults:
if client_defaults[attrname] == var:
return True
# If client_defaults differs, preserve explicit override from user
# Example: default_args={"retries": 0}, schema default=0, client_defaults={"retries": 3}
if client_defaults[attrname] != var:
if op.has_dag():
dag = op.dag
if dag and attrname in dag.default_args and dag.default_args[attrname] == var:
return False
if (
hasattr(op, "_BaseOperator__init_kwargs")
and attrname in op._BaseOperator__init_kwargs
and op._BaseOperator__init_kwargs[attrname] == var
):
return False
# If client_defaults doesn't have this field (matches schema default),
# exclude for optimization even if in default_args
# Example: default_args={"depends_on_past": False}, schema default=False
return True
optional_fields = cls.get_operator_optional_fields_from_schema()
if var is None:
return True
if attrname in optional_fields:
if var in [[], (), set(), {}]:
return True
if var is not None and op.has_dag() and attrname.endswith("_date"):
# If this date is the same as the matching field in the dag, then
# don't store it again at the task level.
dag_date = getattr(op.dag, attrname, None)
if var is dag_date or var == dag_date:
return True
# If none of the exclusion conditions are met, don't exclude the field
return False
@classmethod
def _deserialize_operator_extra_links(
cls, encoded_op_links: dict[str, str]
) -> dict[str, XComOperatorLink]:
"""
Deserialize Operator Links if the Classes are registered in Airflow Plugins.
Error is raised if the OperatorLink is not found in Plugins too.
:param encoded_op_links: Serialized Operator Link
:return: De-Serialized Operator Link
"""
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.registered_operator_link_classes is None:
raise AirflowException("Can't load plugins")
op_predefined_extra_links = {}
for name, xcom_key in encoded_op_links.items():
# Get the name and xcom_key of the encoded operator and use it to create a XComOperatorLink object
# during deserialization.
#
# Example:
# enc_operator['_operator_extra_links'] =
# {
# 'airflow': 'airflow_link_key',
# 'foo-bar': 'link-key',
# 'no_response': 'key',
# 'raise_error': 'key'
# }
op_predefined_extra_link = XComOperatorLink(name=name, xcom_key=xcom_key)
op_predefined_extra_links.update({op_predefined_extra_link.name: op_predefined_extra_link})
return op_predefined_extra_links
@classmethod
def _serialize_operator_extra_links(
cls, operator_extra_links: Iterable[BaseOperatorLink]
) -> dict[str, str]:
"""
Serialize Operator Links.
Store the "name" of the link mapped with the xcom_key which can be later used to retrieve this
operator extra link from XComs.
For example:
``{'link-name-1': 'xcom-key-1'}``
:param operator_extra_links: Operator Link
:return: Serialized Operator Link
"""
return {link.name: link.xcom_key for link in operator_extra_links}
@classmethod
def serialize(cls, var: Any, *, strict: bool = False) -> Any:
# the wonders of multiple inheritance BaseOperator defines an instance method
return BaseSerialization.serialize(var=var, strict=strict)
@classmethod
def deserialize(cls, encoded_var: Any) -> Any:
return BaseSerialization.deserialize(encoded_var=encoded_var)
def serialize_for_task_group(self) -> tuple[DAT, Any]:
"""Serialize; required by DAGNode."""
return DAT.OP, self.task_id
@property
def inherits_from_empty_operator(self) -> bool:
return self._is_empty
@property
def inherits_from_skipmixin(self) -> bool:
return self._can_skip_downstream
def expand_start_from_trigger(self, *, context: Context) -> bool:
"""
Get the start_from_trigger value of the current abstract operator.
Since a BaseOperator is not mapped to begin with, this simply returns
the original value of start_from_trigger.
:meta private:
"""
return self.start_from_trigger
@classmethod
def get_serialized_fields(cls):
"""Fields to deserialize from the serialized JSON object."""
return frozenset(
{
"_logger_name",
"_needs_expansion",
"_task_display_name",
"allow_nested_operators",
"depends_on_past",
"do_xcom_push",
"doc",
"doc_json",
"doc_md",
"doc_rst",
"doc_yaml",
"downstream_task_ids",
"email",
"email_on_failure",
"email_on_retry",
"end_date",
"execution_timeout",
"executor",
"executor_config",
"ignore_first_depends_on_past",
"inlets",
"is_setup",
"is_teardown",
"map_index_template",
"max_active_tis_per_dag",
"max_active_tis_per_dagrun",
"max_retry_delay",
"multiple_outputs",
"has_on_execute_callback",
"has_on_failure_callback",
"has_on_retry_callback",
"has_on_skipped_callback",
"has_on_success_callback",
"on_failure_fail_dagrun",
"outlets",
"owner",
"params",
"pool",
"pool_slots",
"priority_weight",
"queue",
"resources",
"retries",
"retry_delay",
"retry_exponential_backoff",
"run_as_user",
"start_date",
"start_from_trigger",
"start_trigger_args",
"task_id",
"task_type",
"template_ext",
"template_fields",
"template_fields_renderers",
"trigger_rule",
"ui_color",
"ui_fgcolor",
"wait_for_downstream",
"wait_for_past_depends_before_skipping",
"weight_rule",
}
)
@classmethod
@lru_cache(maxsize=1)
def generate_client_defaults(cls) -> dict[str, Any]:
"""
Generate `client_defaults` section that only includes values differing from schema defaults.
This optimizes serialization size by avoiding redundant storage of schema defaults.
Uses OPERATOR_DEFAULTS as the source of truth for task default values.
:return: client_defaults dictionary with only non-schema values
"""
# Get schema defaults for comparison
schema_defaults = cls.get_schema_defaults("operator")
client_defaults = {}
# Only include OPERATOR_DEFAULTS values that differ from schema defaults
for k, v in OPERATOR_DEFAULTS.items():
if k not in cls.get_serialized_fields():
continue
# Exclude values that are None or empty collections
if v is None or v in [[], (), set(), {}]:
continue
# Check schema defaults first with raw value comparison (fast path)
if k in schema_defaults and schema_defaults[k] == v:
continue
# Use the existing serialize method to ensure consistent format
serialized_value = cls.serialize(v)
# Extract just the value part, consistent with serialize_to_json behavior
if isinstance(serialized_value, dict) and Encoding.TYPE in serialized_value:
serialized_value = serialized_value[Encoding.VAR]
# For cases where raw comparison failed but serialized values might match
# (e.g., timedelta vs float), check again with serialized value
if k in schema_defaults and schema_defaults[k] == serialized_value:
continue
client_defaults[k] = serialized_value
return client_defaults
@classmethod
def _deserialize_field_value(cls, field_name: str, value: Any) -> Any:
"""
Deserialize a single field value using the same logic as populate_operator.
This method centralizes field-specific deserialization logic to avoid duplication.
:param field_name: The name of the field being deserialized
:param value: The value to deserialize
:return: The deserialized value
"""
if field_name == "downstream_task_ids":
return set(value) if value is not None else set()
elif field_name in [
f"has_on_{x}_callback" for x in ("execute", "failure", "success", "retry", "skipped")
]:
return bool(value)
elif field_name in {"retry_delay", "execution_timeout", "max_retry_delay"}:
# Reuse existing timedelta deserialization logic
if value is not None:
return cls._deserialize_timedelta(value)
return None
elif field_name == "resources":
return Resources.from_dict(value) if value is not None else None
elif field_name.endswith("_date"):
return cls._deserialize_datetime(value) if value is not None else None
else:
# For all other fields, return as-is (strings, ints, bools, etc.)
return value
@classmethod
def _deserialize_partial_kwargs(
cls, partial_kwargs_data: dict[str, Any], client_defaults: dict[str, Any] | None = None
) -> dict[str, Any]:
"""
Deserialize partial_kwargs supporting both encoded and non-encoded values.
This method can handle:
1. Encoded values: {"__type": "timedelta", "__var": 300.0}
2. Non-encoded values: 300.0 (for optimization)
It also applies client_defaults for missing fields.
:param partial_kwargs_data: The partial_kwargs data from serialized JSON
:param client_defaults: Client defaults to apply for missing fields
:return: Deserialized partial_kwargs dict
"""
deserialized = {}
for k, v in partial_kwargs_data.items():
# Check if this is an encoded value (has __type and __var structure)
if isinstance(v, dict) and Encoding.TYPE in v and Encoding.VAR in v:
# This is encoded - use full deserialization
deserialized[k] = cls.deserialize(v)
else:
# This is non-encoded (optimized format)
# Reuse the same deserialization logic from populate_operator
deserialized[k] = cls._deserialize_field_value(k, v)
# Apply client_defaults for missing fields if provided
if client_defaults and "tasks" in client_defaults:
task_defaults = client_defaults["tasks"]
for k, default_value in task_defaults.items():
if k not in deserialized:
# Apply the same deserialization logic to client_defaults
deserialized[k] = cls._deserialize_field_value(k, default_value)
return deserialized
@classmethod
def _apply_defaults_to_encoded_op(
cls,
encoded_op: dict[str, Any],
client_defaults: dict[str, Any] | None = None,
) -> dict[str, Any]:
"""
Apply client defaults to encoded operator before deserialization.
Args:
encoded_op: The serialized operator data (already includes applied default_args)
client_defaults: SDK-specific defaults from client_defaults section
Note: DAG default_args are already applied during task creation in the SDK,
so encoded_op contains the final resolved values.
Hierarchy (lowest to highest priority):
1. client_defaults.tasks (SDK-wide defaults for size optimization)
2. Explicit task values (already in encoded_op, includes applied default_args)
Returns a new dict with defaults merged in.
"""
# Build hierarchy from lowest to highest priority
result = {}
# Level 1: Apply client_defaults.tasks (lowest priority)
# Values are already serialized in generate_client_defaults()
if client_defaults:
task_defaults = client_defaults.get("tasks", {})
result.update(task_defaults)
# Level 2: Apply explicit task values (highest priority - overrides everything)
# Note: encoded_op already contains default_args applied during task creation
result.update(encoded_op)
return result
def _iter_all_mapped_downstreams(self) -> Iterator[MappedOperator | SerializedMappedTaskGroup]:
"""
Return mapped nodes that are direct dependencies of the current task.
For now, this walks the entire DAG to find mapped nodes that has this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record an DAG node's all downstream nodes instead.
Note that this does not guarantee the returned tasks actually use the
current task for task mapping, but only checks those task are mapped
operators, and are downstreams of the current task.
To get a list of tasks that uses the current task for task mapping, use
:meth:`iter_mapped_dependants` instead.
"""
def _walk_group(group: SerializedTaskGroup) -> Iterable[tuple[str, DAGNode]]:
"""
Recursively walk children in a task group.
This yields all direct children (including both tasks and task
groups), and all children of any task groups.
"""
for key, child in group.children.items():
yield key, child
if isinstance(child, SerializedTaskGroup):
yield from _walk_group(child)
if not (dag := self.dag):
raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
for key, child in _walk_group(dag.task_group):
if key == self.node_id:
continue
if not isinstance(child, MappedOperator | SerializedMappedTaskGroup):
continue
if self.node_id in child.upstream_task_ids:
yield child
def iter_mapped_dependants(self) -> Iterator[MappedOperator | SerializedMappedTaskGroup]:
"""
Return mapped nodes that depend on the current task the expansion.
For now, this walks the entire DAG to find mapped nodes that has this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record an DAG node's all downstream nodes instead.
"""
return (
downstream
for downstream in self._iter_all_mapped_downstreams()
if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
)
# TODO (GH-52141): Copied from sdk. Find a better place for this to live in.
def iter_mapped_task_groups(self) -> Iterator[SerializedMappedTaskGroup]:
"""
Return mapped task groups this task belongs to.
Groups are returned from the innermost to the outmost.
:meta private:
"""
if (group := self.task_group) is None:
return
yield from group.iter_mapped_task_groups()
# TODO (GH-52141): Copied from sdk. Find a better place for this to live in.
def get_closest_mapped_task_group(self) -> SerializedMappedTaskGroup | None:
"""
Get the mapped task group "closest" to this task in the DAG.
:meta private:
"""
return next(self.iter_mapped_task_groups(), None)
# TODO (GH-52141): Copied from sdk. Find a better place for this to live in.
def get_needs_expansion(self) -> bool:
"""
Return true if the task is MappedOperator or is in a mapped task group.
:meta private:
"""
return self._needs_expansion
# TODO (GH-52141): Copied from sdk. Find a better place for this to live in.
@methodtools.lru_cache(maxsize=1)
def get_parse_time_mapped_ti_count(self) -> int:
"""
Return the number of mapped task instances that can be created on DAG run creation.
This only considers literal mapped arguments, and would return *None*
when any non-literal values are used for mapping.
:raise NotFullyPopulated: If non-literal mapped arguments are encountered.
:raise NotMapped: If the operator is neither mapped, nor has any parent
mapped task groups.
:return: Total number of mapped TIs this task should have.
"""
from airflow.exceptions import NotMapped
group = self.get_closest_mapped_task_group()
if group is None:
raise NotMapped()
return group.get_parse_time_mapped_ti_count()
@provide_session
def _create_orm_dagrun(
*,
dag: SerializedDAG,
run_id: str,
logical_date: datetime.datetime | None,
data_interval: DataInterval | None,
run_after: datetime.datetime,
start_date: datetime.datetime | None,
conf: Any,
state: DagRunState | None,
run_type: DagRunType,
creating_job_id: int | None,
backfill_id: NonNegativeInt | None,
triggered_by: DagRunTriggeredByType,
triggering_user_name: str | None = None,
partition_key: str | None = None,
session: Session = NEW_SESSION,
) -> DagRun:
bundle_version = None
if not dag.disable_bundle_versioning:
bundle_version = session.scalar(
select(DagModel.bundle_version).where(DagModel.dag_id == dag.dag_id),
)
dag_version = DagVersion.get_latest_version(dag.dag_id, session=session)
if not dag_version:
raise AirflowException(f"Cannot create DagRun for DAG {dag.dag_id} because the dag is not serialized")
run = DagRun(
dag_id=dag.dag_id,
run_id=run_id,
logical_date=logical_date,
start_date=start_date,
run_after=run_after,
conf=conf,
state=state,
run_type=run_type,
creating_job_id=creating_job_id,
data_interval=data_interval,
triggered_by=triggered_by,
triggering_user_name=triggering_user_name,
backfill_id=backfill_id,
bundle_version=bundle_version,
partition_key=partition_key,
)
# Load defaults into the following two fields to ensure result can be serialized detached
max_log_template_id = session.scalar(select(func.max(LogTemplate.__table__.c.id)))
run.log_template_id = int(max_log_template_id) if max_log_template_id is not None else 0
run.created_dag_version = dag_version
run.consumed_asset_events = []
session.add(run)
session.flush()
run.dag = dag
# create the associated task instances
# state is None at the moment of creation
run.verify_integrity(session=session, dag_version_id=dag_version.id)
return run
| SerializedBaseOperator |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 13942,
"end": 14064
} | class ____(OpcodeWithArg): # Arg: Number of args with default values
_FLAGS = HAS_ARGUMENT
__slots__ = ()
| MAKE_FUNCTION |
python | davidhalter__parso | parso/cache.py | {
"start": 2707,
"end": 8452
} | class ____:
def __init__(self, node, lines, change_time=None):
self.node = node
self.lines = lines
if change_time is None:
change_time = time.time()
self.change_time = change_time
self.last_used = change_time
def load_module(hashed_grammar, file_io, cache_path=None):
"""
Returns a module or None, if it fails.
"""
p_time = file_io.get_last_modified()
if p_time is None:
return None
try:
module_cache_item = parser_cache[hashed_grammar][file_io.path]
if p_time <= module_cache_item.change_time:
module_cache_item.last_used = time.time()
return module_cache_item.node
except KeyError:
return _load_from_file_system(
hashed_grammar,
file_io.path,
p_time,
cache_path=cache_path
)
def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
try:
if p_time > os.path.getmtime(cache_path):
# Cache is outdated
return None
with open(cache_path, 'rb') as f:
gc.disable()
try:
module_cache_item = pickle.load(f)
finally:
gc.enable()
except FileNotFoundError:
return None
else:
_set_cache_item(hashed_grammar, path, module_cache_item)
LOG.debug('pickle loaded: %s', path)
return module_cache_item.node
def _set_cache_item(hashed_grammar, path, module_cache_item):
if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER:
# Garbage collection of old cache files.
# We are basically throwing everything away that hasn't been accessed
# in 10 minutes.
cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
for key, path_to_item_map in parser_cache.items():
parser_cache[key] = {
path: node_item
for path, node_item in path_to_item_map.items()
if node_item.last_used > cutoff_time
}
parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
path = file_io.path
try:
p_time = None if path is None else file_io.get_last_modified()
except OSError:
p_time = None
pickling = False
item = _NodeCacheItem(module, lines, p_time)
_set_cache_item(hashed_grammar, path, item)
if pickling and path is not None:
try:
_save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
except PermissionError:
# It's not really a big issue if the cache cannot be saved to the
# file system. It's still in RAM in that case. However we should
# still warn the user that this is happening.
warnings.warn(
'Tried to save a file to %s, but got permission denied.' % path,
Warning
)
else:
_remove_cache_and_update_lock(cache_path=cache_path)
def _save_to_file_system(hashed_grammar, path, item, cache_path=None):
with open(_get_hashed_path(hashed_grammar, path, cache_path=cache_path), 'wb') as f:
pickle.dump(item, f, pickle.HIGHEST_PROTOCOL)
def clear_cache(cache_path=None):
if cache_path is None:
cache_path = _default_cache_path
shutil.rmtree(cache_path)
parser_cache.clear()
def clear_inactive_cache(
cache_path=None,
inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL,
):
if cache_path is None:
cache_path = _default_cache_path
if not cache_path.exists():
return False
for dirname in os.listdir(cache_path):
version_path = cache_path.joinpath(dirname)
if not version_path.is_dir():
continue
for file in os.scandir(version_path):
if file.stat().st_atime + _CACHED_FILE_MAXIMUM_SURVIVAL <= time.time():
try:
os.remove(file.path)
except OSError: # silently ignore all failures
continue
else:
return True
def _touch(path):
try:
os.utime(path, None)
except FileNotFoundError:
try:
file = open(path, 'a')
file.close()
except (OSError, IOError): # TODO Maybe log this?
return False
return True
def _remove_cache_and_update_lock(cache_path=None):
lock_path = _get_cache_clear_lock_path(cache_path=cache_path)
try:
clear_lock_time = os.path.getmtime(lock_path)
except FileNotFoundError:
clear_lock_time = None
if (
clear_lock_time is None # first time
or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
):
if not _touch(lock_path):
# First make sure that as few as possible other cleanup jobs also
# get started. There is still a race condition but it's probably
# not a big problem.
return False
clear_inactive_cache(cache_path=cache_path)
def _get_hashed_path(hashed_grammar, path, cache_path=None):
directory = _get_cache_directory_path(cache_path=cache_path)
file_hash = hashlib.sha256(str(path).encode("utf-8")).hexdigest()
return os.path.join(directory, '%s-%s.pkl' % (hashed_grammar, file_hash))
def _get_cache_directory_path(cache_path=None):
if cache_path is None:
cache_path = _default_cache_path
directory = cache_path.joinpath(_VERSION_TAG)
if not directory.exists():
os.makedirs(directory)
return directory
| _NodeCacheItem |
python | google__jax | jax/_src/pallas/core.py | {
"start": 3640,
"end": 3784
} | class ____(semaphore_dtype):
"""Barrier semaphore dtype.
Like its superclass, this class should never be instantiated.
"""
| barrier_semaphore |
python | hyperopt__hyperopt | hyperopt/mongoexp.py | {
"start": 34662,
"end": 39822
} | class ____:
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(
self,
mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename="logfile.txt",
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(fmt="%(levelname)s (%(name)s): %(message)s")
)
self.log_handler.setLevel(logging.INFO)
def run_one(self, host_id=None, reserve_timeout=None, erase_created_workdir=False):
if host_id == None:
host_id = ("%s:%i" % (socket.gethostname(), os.getpid()),)
job = None
start_time = time.time()
mj = self.mj
while job is None:
if reserve_timeout and (time.time() - start_time) > reserve_timeout:
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = 1 + numpy.random.rand() * (float(self.poll_interval) - 1.0)
logger.info("no job found, sleeping for %.1fs" % interval)
time.sleep(interval)
logger.debug("job found: %s" % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job["misc"])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job["exp_key"], refresh=False),
read_only=False,
current_trial=job,
)
if self.workdir is None:
workdir = job["misc"].get("workdir", os.getcwd())
if workdir is None:
workdir = ""
workdir = os.path.join(workdir, str(job["_id"]))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job["misc"]["cmd"]
cmd_protocol = cmd[0]
try:
if cmd_protocol == "cpickled fn":
worker_fn = pickler.loads(cmd[1])
elif cmd_protocol == "call evaluate":
bandit = pickler.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == "token_load":
cmd_toks = cmd[1].split(".")
cmd_module = ".".join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == "bandit_json evaluate":
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == "driver_attachment":
# name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = pickler.loads(blob)
worker_fn = json_call(
bandit_name, args=bandit_args, kwargs=bandit_kwargs
).evaluate
elif cmd_protocol == "domain_attachment":
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = pickler.loads(blob)
except BaseException as e:
logger.info("Error while unpickling.")
raise
worker_fn = domain.evaluate
else:
raise ValueError("Unrecognized cmd protocol", cmd_protocol)
with temp_dir(workdir, erase_created_workdir), working_dir(workdir):
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException as e:
# XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info("job exception: %s" % str(e))
ctrl.checkpoint()
mj.update(
job, {"state": JOB_STATE_ERROR, "error": (str(type(e)), str(e))}
)
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
logger.info("job finished: %s" % str(job["_id"]))
attachments = result.pop("attachments", {})
for aname, aval in list(attachments.items()):
logger.info(
"mongoexp: saving attachment name=%s (%i bytes)" % (aname, len(aval))
)
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {"state": JOB_STATE_DONE})
| MongoWorker |
python | huggingface__transformers | src/transformers/models/pegasus_x/modeling_pegasus_x.py | {
"start": 42236,
"end": 53077
} | class ____(PegasusXPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusXConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusXConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
padding_idx = config.pad_token_id
self.embed_tokens = PegasusXScaledWordEmbedding(
config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale
)
self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model)
self.layers = nn.ModuleList([PegasusXDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        cache_position=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        # Resolve per-call flags against the model config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            # NOTE(review): `input` here is only a stand-in (never embedded in this
            # branch since `inputs_embeds` is already set) — confirm upstream intent.
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
                )
                use_cache = False
        # initialize `past_key_values`
        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            # Positions continue from wherever the cache left off.
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )
        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
        # The causal mask is built against the self-attention cache only.
        self_attn_cache = (
            past_key_values.self_attention_cache
            if isinstance(past_key_values, EncoderDecoderCache)
            else past_key_values
        )
        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=self_attn_cache,
        )
        encoder_attention_mask = create_bidirectional_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        # embed positions
        position_ids = cache_position.unsqueeze(1)
        position_ids = self.embed_positions(inputs_embeds, past_key_values_length, position_ids)
        position_ids = position_ids.to(inputs_embeds.device)
        hidden_states = inputs_embeds + position_ids
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                # Randomly skip whole layers during training (LayerDrop).
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            layer_outputs = decoder_layer(
                hidden_states,
                causal_mask,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        hidden_states = self.layer_norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@auto_docstring
| PegasusXDecoder |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/model_parallel.py | {
"start": 2533,
"end": 16072
} | class ____(ParallelStrategy):
"""Enables user-defined parallelism applied to a model.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Currently supports up to 2D parallelism. Specifically, it supports the combination of
Fully Sharded Data-Parallel 2 (FSDP2) with Tensor Parallelism (DTensor). These PyTorch APIs are currently still
experimental in PyTorch (see https://pytorch.org/docs/stable/distributed.tensor.parallel.html).
Requires PyTorch 2.4 or newer.
Arguments:
data_parallel_size: The number of devices within a data-parallel group. Defaults to ``"auto"``, which
sets this size to the number of nodes in the cluster.
tensor_parallel_size: The number of devices within a tensor-parallel group. Defaults to ``"auto"``, which
sets this size to the number of GPUs in a single node.
save_distributed_checkpoint: If ``True``, each rank saves its shard of weights and optimizer states to a file.
The checkpoint is a folder with as many files as the world size.
If ``False``, the full weights and optimizer states get assembled on rank 0 and saved to a single file.
"""
    def __init__(
        self,
        data_parallel_size: Union[Literal["auto"], int] = "auto",
        tensor_parallel_size: Union[Literal["auto"], int] = "auto",
        save_distributed_checkpoint: bool = True,
        process_group_backend: Optional[str] = None,
        timeout: Optional[timedelta] = default_pg_timeout,
    ) -> None:
        super().__init__()
        if not _TORCH_GREATER_EQUAL_2_4:
            raise ImportError(f"{type(self).__name__} requires PyTorch 2.4 or higher.")
        self._data_parallel_size = data_parallel_size
        self._tensor_parallel_size = tensor_parallel_size
        self._save_distributed_checkpoint = save_distributed_checkpoint
        self._process_group_backend: Optional[str] = process_group_backend
        self._timeout: Optional[timedelta] = timeout
        # Created lazily in `setup_environment()` once distributed is initialized.
        self._device_mesh: Optional[DeviceMesh] = None
        self.num_nodes = 1
    @property
    def device_mesh(self) -> "DeviceMesh":
        """The 2D (data-parallel x tensor-parallel) device mesh; only valid after process setup."""
        if self._device_mesh is None:
            raise RuntimeError("Accessing the device mesh before processes have initialized is not allowed.")
        return self._device_mesh
    @property
    @override
    def root_device(self) -> torch.device:
        # The device assigned to this process, indexed by local rank.
        assert self.parallel_devices is not None
        return self.parallel_devices[self.local_rank]
    @property
    def num_processes(self) -> int:
        # Number of devices managed per node.
        return len(self.parallel_devices) if self.parallel_devices is not None else 0
    @property
    @override
    def distributed_sampler_kwargs(self) -> dict[str, Any]:
        # Data sharding follows the data-parallel submesh, not the global world size.
        assert self.device_mesh is not None
        data_parallel_mesh = self.device_mesh["data_parallel"]
        return {"num_replicas": data_parallel_mesh.size(), "rank": data_parallel_mesh.get_local_rank()}
    @property
    def process_group_backend(self) -> Optional[str]:
        return self._process_group_backend
    @property
    @override
    def restore_checkpoint_after_setup(self) -> bool:
        # Checkpoints must be loaded after the model is sharded in `setup()`.
        return True
    @property
    @override
    def lightning_restore_optimizer(self) -> bool:
        # This strategy restores optimizer state itself inside `load_checkpoint()`.
        return False
    @override
    def _configure_launcher(self) -> None:
        assert self.cluster_environment is not None
        if not self.cluster_environment.creates_processes_externally:
            self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)
    @override
    def setup_environment(self) -> None:
        super().setup_environment()
        self._setup_distributed()
        # Resolve "auto" sizes: one data-parallel group per node, TP across a node's GPUs.
        if self._data_parallel_size == "auto":
            self._data_parallel_size = self.num_nodes
        if self._tensor_parallel_size == "auto":
            self._tensor_parallel_size = self.num_processes
        self._device_mesh = _setup_device_mesh(
            self._data_parallel_size, self._tensor_parallel_size, self.world_size, self.root_device
        )
        # Users can access device mesh in `LightningModule.configure_model()`
        assert self.lightning_module is not None
        self.lightning_module._device_mesh = self._device_mesh
    @override
    def setup(self, trainer: "pl.Trainer") -> None:
        from torch.distributed.fsdp import FullyShardedDataParallel
        assert self.model is not None
        assert self.accelerator is not None
        self.accelerator.setup(trainer)
        # Parallelization must be applied by the user in `configure_model()`.
        if not is_overridden("configure_model", self.lightning_module):
            raise TypeError(
                f"When using the {type(self).__name__}, you are required to override the `configure_model()` hook in"
                f" the LightningModule and apply parallelization there."
            )
        # Reject legacy FSDP1 wrapping: only the FSDP2 APIs are supported here.
        if any(isinstance(mod, FullyShardedDataParallel) for mod in self.model.modules()):
            raise TypeError(
                "Found modules that are wrapped with `torch.distributed.fsdp.FullyShardedDataParallel`."
                f" The `{self.__class__.__name__}` only supports the new FSDP2 APIs in PyTorch >= 2.4."
            )
        _materialize_distributed_module(self.model, self.root_device)
        self.model = self.precision_plugin.convert_module(self.model)
        self.model_to_device()  # move all remaining layers if any left on CPU.
        self.barrier()
        if trainer.state.fn == TrainerFn.FITTING:
            self.setup_optimizers(trainer)
        self.setup_precision_plugin()
        if trainer.state.fn == TrainerFn.FITTING:
            _optimizers_to_device(self.optimizers, self.root_device)
    @override
    def setup_optimizers(self, trainer: "pl.Trainer") -> None:
        # If we're setting up for evaluation after fitting, we need to discard the optimizers
        # since we're rewrapping the model, otherwise optimizer param references are no longer valid
        # and subsequent checkpoint saving can fail
        self._reset_optimizers_and_schedulers()
        return super().setup_optimizers(trainer)
    @override
    def model_to_device(self) -> None:
        assert self.model is not None
        self.model.to(self.root_device)
    @contextmanager
    @override
    def tensor_init_context(self, empty_init: Optional[bool] = None) -> Generator[None, None, None]:
        # Materializaton happens in `setup()`
        empty_init_context = torch.device("meta") if empty_init else nullcontext()
        with empty_init_context, self.precision_plugin.tensor_init_context():
            yield
    @override
    def barrier(self, name: Optional[str] = None) -> None:
        # No-op outside of a distributed context.
        if not _distributed_is_initialized():
            return
        if torch.distributed.get_backend() == "nccl":
            torch.distributed.barrier(device_ids=self._determine_device_ids())
        else:
            torch.distributed.barrier()
    @override
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        if not _distributed_is_initialized():
            return obj
        # Wrap in a list because `broadcast_object_list` mutates its argument in place.
        obj = [obj]
        torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
        return obj[0]
    @override
    def reduce(
        self,
        tensor: Union[Tensor, Any],
        group: Optional[Any] = None,
        reduce_op: Optional[Union[ReduceOp, str]] = "mean",
    ) -> Tensor:
        # Non-tensor values are passed through unchanged.
        if isinstance(tensor, Tensor):
            return _sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
        return tensor
    def _determine_device_ids(self) -> list[int]:
        return [self.root_device.index]
    @override
    def teardown(self) -> None:
        assert self.cluster_environment is not None
        assert self.accelerator is not None
        self.cluster_environment.teardown()
        self.precision_plugin.teardown()
        self.accelerator.teardown()
    @override
    def lightning_module_state_dict(self) -> dict[str, Any]:
        """Collects the state dict of the model.

        Only returns a non-empty state dict on rank 0 if ``save_distributed_checkpoint=False``.

        """
        from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict
        state_dict_options = StateDictOptions(full_state_dict=(not self._save_distributed_checkpoint), cpu_offload=True)
        assert self.model is not None
        return get_model_state_dict(self.model, options=state_dict_options)
    @override
    def load_model_state_dict(self, checkpoint: Mapping[str, Any], strict: bool = True) -> None:
        # Override to do nothing, the strategy already loaded the states in `load_checkpoint()`
        pass
    @override
    def optimizer_state(self, optimizer: Optimizer) -> dict[str, Any]:
        """Collects the state of the given optimizer.

        Only returns a non-empty state dict on rank 0 if ``save_distributed_checkpoint=False``.

        """
        from torch.distributed.checkpoint.state_dict import StateDictOptions, get_optimizer_state_dict
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
        from torch.distributed.fsdp import OptimStateKeyType
        state_dict_options = StateDictOptions(full_state_dict=(not self._save_distributed_checkpoint), cpu_offload=True)
        if isinstance(optimizer, LightningOptimizer):
            optimizer = optimizer._optimizer
        assert self.model is not None
        state_dict = get_optimizer_state_dict(self.model, optimizer, options=state_dict_options)
        if not self._save_distributed_checkpoint and self.global_rank == 0:
            # Store the optimizer state dict in standard format
            state_dict = FSDP.rekey_optim_state_dict(state_dict, OptimStateKeyType.PARAM_ID, self.model)
        return state_dict
    @override
    def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
        # Override to do nothing, the strategy already loaded the states in `load_checkpoint()`
        pass
    @override
    def save_checkpoint(
        self, checkpoint: dict[str, Any], filepath: _PATH, storage_options: Optional[Any] = None
    ) -> None:
        if storage_options is not None:
            raise TypeError(
                f"`{type(self).__name__}.save_checkpoint(..., storage_options=...)` is not supported because"
                f" `{type(self).__name__}` does not use the `CheckpointIO`."
            )
        # broadcast the path from rank 0 to ensure all the checkpoints are saved to a common path
        path = Path(self.broadcast(filepath))
        if path.is_dir() and not self._save_distributed_checkpoint and not _is_sharded_checkpoint(path):
            raise IsADirectoryError(f"The checkpoint path exists and is a directory: {path}")
        if self._save_distributed_checkpoint:
            # Distributed mode: each rank writes its shard into a folder; rank 0 adds metadata.
            if path.is_file():
                path.unlink()
            path.mkdir(parents=True, exist_ok=True)
            converted_state = {"state_dict": checkpoint.pop("state_dict")}
            converted_state.update({
                f"optimizer_{idx}": optim_state
                for idx, optim_state in enumerate(checkpoint.pop("optimizer_states", []))
            })
            _distributed_checkpoint_save(converted_state, path)
            if self.global_rank == 0:
                torch.save(checkpoint, path / _METADATA_FILENAME)
        else:
            # Single-file mode: remove a stale sharded folder before writing the consolidated file.
            if _is_sharded_checkpoint(path):
                shutil.rmtree(path)
            return super().save_checkpoint(checkpoint=checkpoint, filepath=path)
    @override
    def load_checkpoint(self, checkpoint_path: _PATH, weights_only: Optional[bool] = None) -> dict[str, Any]:
        # broadcast the path from rank 0 to ensure all the states are loaded from a common path
        path = Path(self.broadcast(checkpoint_path))
        state = {
            "state_dict": self.model,
            **{f"optimizer_{idx}": optimizer for idx, optimizer in enumerate(self.optimizers)},
        }
        assert self.lightning_module is not None
        return _load_checkpoint(
            path=path,
            state=state,
            strict=self.lightning_module.strict_loading,
            optimizer_states_from_list=True,
            weights_only=weights_only,
        )
    def _setup_distributed(self) -> None:
        # NOTE(review): `super().setup_environment()` is also called by the public
        # `setup_environment()` before this method runs — confirm the duplicate call is intended.
        super().setup_environment()
        reset_seed()
        self.set_world_ranks()
        self._process_group_backend = self._get_process_group_backend()
        assert self.cluster_environment is not None
        kwargs: dict[str, Any] = {"timeout": self._timeout}
        if _TORCH_GREATER_EQUAL_2_3:
            kwargs["device_id"] = self.root_device if self.root_device.type != "cpu" else None
        _init_dist_connection(self.cluster_environment, self._process_group_backend, **kwargs)
    def _get_process_group_backend(self) -> str:
        return self._process_group_backend or _get_default_process_group_backend_for_device(self.root_device)
    def set_world_ranks(self) -> None:
        if self.cluster_environment is not None:
            self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
            self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
        # `LightningEnvironment.set_global_rank` will do this too, but we cannot rely on that implementation detail
        # additionally, for some implementations, the setter is a no-op, so it's safer to access the getter
        rank_zero_only.rank = utils_rank_zero_only.rank = self.global_rank
| ModelParallelStrategy |
python | django__django | django/core/files/uploadedfile.py | {
"start": 3467,
"end": 4207
} | class ____(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a
name.
"""
def __init__(self, name, content, content_type="text/plain"):
content = content or b""
super().__init__(
BytesIO(content), None, name, content_type, len(content), None, None
)
@classmethod
def from_dict(cls, file_dict):
"""
Create a SimpleUploadedFile object from a dictionary with keys:
- filename
- content-type
- content
"""
return cls(
file_dict["filename"],
file_dict["content"],
file_dict.get("content-type", "text/plain"),
)
| SimpleUploadedFile |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 148813,
"end": 149403
} | class ____(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        """Transposed 1D convolution whose output is trimmed in `forward` to drop padding."""
        super().__init__()
        self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=stride)
        # Extra samples introduced by the transposed convolution.
        pad = kernel_size - stride
        # NOTE(review): `math.ceil` of an int is a no-op, so left_pad == pad here —
        # was `math.ceil(pad / 2)` intended? verify against upstream.
        self.left_pad = math.ceil(pad)
        # NOTE(review): chained assignment makes right_pad == left_pad and rebinds `pad`;
        # looks like `pad - self.left_pad` may have been intended — confirm before relying on this.
        self.right_pad = pad = self.left_pad
def forward(self, hidden_state):
hidden_state = self.conv(hidden_state)
hidden_state = hidden_state[..., self.left_pad : hidden_state.shape[-1] - self.right_pad]
return hidden_state.contiguous()
| Qwen3OmniMoeCausalTransConvNet |
python | prabhupant__python-ds | data_structures/binary_trees/boundary_traversal.py | {
"start": 126,
"end": 1561
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def print_leaves(root):
    """Print the leaf values of the tree rooted at *root*, left to right."""
    if root is None:
        return
    print_leaves(root.left)
    if root.left is None and root.right is None:
        print(root.val, end=' ')
    print_leaves(root.right)
def print_left_boundary(root):
    """Print the left boundary top-down, excluding leaf nodes.

    Descends into the left child when present, otherwise the right child.
    """
    if not root:
        return
    child = root.left if root.left else root.right
    if child:
        print(root.val, end=' ')
        print_left_boundary(child)
def print_right_boundary(root):
    """Print the right boundary bottom-up, excluding leaf nodes.

    This runs after the left-side (top-down) pass in a boundary traversal,
    so the values are emitted on the way back up the recursion.
    """
    if not root:
        return
    child = root.right if root.right else root.left
    if child:
        print_right_boundary(child)
        print(root.val, end=' ')
def print_boundary(root):
    """Print the boundary traversal: root, left edge (top-down), all leaves, right edge (bottom-up)."""
    if not root:
        return
    print(root.val)
    print_left_boundary(root.left)
    print_leaves(root.left)
    print_leaves(root.right)
    print_right_boundary(root.right)
# Demo: build the sample tree and print its boundary.
#         20
#        /  \
#       8    22
#      / \     \
#     4  12    25
#        / \
#      10  14
# NOTE(review): `Node` must be the tree-node class defined above this script.
root = Node(20)
root.left = Node(8)
root.left.left = Node(4)
root.left.right = Node(12)
root.left.right.left = Node(10)
root.left.right.right = Node(14)
root.right = Node(22)
root.right.right = Node(25)
print_boundary(root) | Node |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 35904,
"end": 36044
} | class ____(serializers.ModelSerializer):
    class Meta:
        # ModelSerializer config: expose every field of UniqueForYearModel.
        # Presumably the model declares a `unique_for_year` field option that DRF
        # turns into a UniqueForYearValidator — verify against the model definition.
        model = UniqueForYearModel
        fields = '__all__'
| UniqueForYearSerializer |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 5778,
"end": 24612
} | class ____(Trace['JaxprTracer']):
  def __init__(self, parent_trace:Trace, name_stack: source_info_util.NameStack, tag:TraceTag):
    super().__init__()
    self.name_stack = name_stack
    # Tag identifying tracers belonging to this partial-eval trace.
    self.tag = tag
    self.parent_trace = parent_trace
    self.requires_low = False
    # Handles for equations whose effects must be kept even if outputs are unused.
    self.effect_handles : list[EffectHandle] = []
    self.counter = it.count()
  def to_jaxpr_tracer(self, x):
    # Lift `x` into this trace: reuse tracers from the same tag, re-home tracers
    # from a sibling trace via a FreeVar, and treat everything else as a constant.
    if isinstance(x, JaxprTracer) and x._trace.tag is self.tag:
      if x._trace is self:
        return x
      else:
        return JaxprTracer(self, x.pval, FreeVar(x))
    else:
      return self.new_const(x)
  def new_const(self, val) -> JaxprTracer:
    return JaxprTracer(self, PartialVal.known(val), None)
  def new_instantiated_literal(self, val) -> JaxprTracer:
    aval = get_aval(val)
    return JaxprTracer(self, PartialVal.unknown(aval), Literal(val, aval))
  def new_instantiated_const(self, val) -> JaxprTracer:
    aval = get_aval(val)
    return JaxprTracer(self, PartialVal.unknown(aval), ConstVar(val))
  def new_arg(self, pval: PartialVal) -> JaxprTracer:
    const = pval.get_known()
    # XXX: Think twice before changing this constant argument pruning!
    # This has really important consequences for partial_eval_jaxpr.
    # Most importantly, this guarantees that the unknown jaxpr never uses
    # known inputs (if it needs them, then they get passed through residuals).
    if const is None:
      aval = pval.get_aval()
      if type(aval) is DShapedArray:
        # TODO(dougalm): Fix the type error and remove the pytype pragmas.
        # pytype: disable=attribute-error
        shape = [self.new_instantiated_const(d)
                 if isinstance(d, Tracer) and d._trace.level < self.level else d
                 for d in aval.shape]
        # pytype: enable=attribute-error
        aval = aval.update(shape=tuple(shape))
      return JaxprTracer(self, PartialVal.unknown(aval), LambdaBinding())
    else:
      return self.new_const(const)
  def instantiate_const(self, tracer: JaxprTracer) -> JaxprTracer:
    # Force a known tracer into the jaxpr as a Literal/ConstVar; unknowns pass through.
    const = tracer.pval.get_known()
    if const is None:
      return tracer
    else:
      if core.is_literalable(const):
        return self.new_instantiated_literal(const)
      else:
        return self.new_instantiated_const(const)
  def cur_qdd(self, x):
    const = self.to_jaxpr_tracer(x).pval.get_known()
    if const is None:
      assert False # TODO: track tangent QDDs
    else:
      with core.set_current_trace(self.parent_trace):
        return core.cur_qdd(const)
  def process_primitive(self, primitive, tracers, params):
    # Dispatch to a custom partial-eval rule when one is registered.
    with core.set_current_trace(self.parent_trace):
      if primitive in custom_partial_eval_rules:
        tracers = map(self.to_jaxpr_tracer, tracers)
        return custom_partial_eval_rules[primitive](self, *tracers, **params)
      else:
        return self.default_process_primitive(primitive, tracers, params)
  def default_process_primitive(self, primitive, tracers, params):
    # By default, if all the input tracers are known, then bind the primitive
    # and consider all outputs known. Otherwise, stage the application into the
    # jaxpr and consider all outputs unknown.
    tracers = map(self.to_jaxpr_tracer, tracers)
    consts = [t.pval.get_known() for t in tracers]
    if all(c is not None for c in consts):
      return primitive.bind_with_trace(self.parent_trace, consts, params)
    tracers = map(self.instantiate_const, tracers)
    avals = [t.aval for t in tracers]
    out_aval, effs = primitive.abstract_eval(*avals, **params)
    name_stack = self._current_truncated_name_stack()
    source = source_info_util.current().replace(name_stack=name_stack)
    if primitive.multiple_results:
      out_tracers = [JaxprTracer(self, PartialVal.unknown(aval), None)
                     for aval in out_aval]
      eqn = new_eqn_recipe(self, tracers, out_tracers, primitive, params, effs,
                           source)
      # Keep the equation alive if it carries effects that must not be DCE'd.
      if effects.partial_eval_kept_effects.filter_in(effs):
        self.effect_handles.append(EffectHandle(tracers, eqn))
      for t in out_tracers: t.recipe = eqn
      return out_tracers
    else:
      out_tracer = JaxprTracer(self, PartialVal.unknown(out_aval), None)
      eqn = new_eqn_recipe(self, tracers, [out_tracer], primitive,
                           params, effs, source)
      if effects.partial_eval_kept_effects.filter_in(effs):
        self.effect_handles.append(EffectHandle(tracers, eqn))
      out_tracer.recipe = eqn
      return out_tracer
  def process_call(self, primitive, f: lu.WrappedFun, tracers, params):
    tracers = map(self.to_jaxpr_tracer, tracers)
    rule = call_partial_eval_rules.get(primitive)
    if rule:
      return rule(self, primitive, f, tracers, params)
    update_params = call_param_updaters.get(primitive) or (lambda p, _, __: p)
    in_knowns, in_avals, in_consts = partition_pvals([t.pval for t in tracers])
    # TODO(mattjj): check in_avals are consistent with f.in_type
    # We want to partially evaluate this call into two calls: one evaluated now
    # taking known values (in_consts) as inputs and producing known values
    # (out_consts) as outputs, and the other staged out as an eqn into the jaxpr
    # being built. The latter takes as input residuals (res) produced as outputs
    # of the first call, shared closed-over values (env), and explicit arguments
    # which were unknown to the first call (corresponding to in_avals).
    # Wrap f to perform the partial evaluation and plumb out aux data.
    f = f.with_unknown_names()
    f_ = trace_to_subjaxpr_nounits_fwd(f, self.tag, f.debug_info, False)
    f_, aux = partial_eval_wrapper_nounits(f_, tuple(in_knowns), tuple(in_avals))
    # Adjust parameters (e.g. donated_invars) for the call to be evaluated now.
    const_params = update_params(params, in_knowns, 0)
    # Run the call, getting known out vals and aux data used for staged-out call
    fun_and_args = (_update_annotation_known(f_, f.in_type, in_knowns),) + tuple(in_consts)
    out = primitive.bind_with_trace(self.parent_trace, fun_and_args, const_params)
    fwds, out_knowns, out_type, jaxpr, env = aux()
    # Split apart known outputs from the original call and non-fwded residuals.
    out_consts, non_fwd_res = split_list(out, [sum(out_knowns)])
    # Form the complete list of residuals by forwarding some inputs.
    if config.dynamic_shapes.value:
      # With dynamic shapes, we may need to forward implicit arguments.
      assert f.in_type is not None, "f must be annotated with lu.annotate()"
      in_consts_, in_knowns_ = iter(in_consts), iter(in_knowns)
      in_consts_full = [None] * len(f.in_type)
      for idx, (aval, explicit) in enumerate(f.in_type):
        if explicit and next(in_knowns_):
          c = in_consts_full[idx] = next(in_consts_)
          if aval.shape:
            for d1, d2 in zip(aval.shape, c.shape):
              if type(d1) is DBIdx:
                in_consts_full[d1.val] = d2
    else:
      in_consts_full = in_consts
    res = subs_list(fwds, in_consts_full, non_fwd_res)
    # Create the input tracers for the staged-out (unknown-value) call.
    res_tracers = map(self.instantiate_const, map(self.new_const, res))
    env_tracers = map(self.to_jaxpr_tracer, env)
    unknown_arg_tracers = [t for t in tracers if not t.is_known()]
    # Adjust parameters (e.g. donated_invars) for the staged-out call's args.
    num_new_args = len(res_tracers) + len(env_tracers)
    new_jaxpr = convert_constvars_jaxpr(jaxpr)
    if isinstance(primitive, core.ClosedCallPrimitive):
      new_jaxpr = close_jaxpr(new_jaxpr)  # type: ignore
    staged_params = dict(params, call_jaxpr=new_jaxpr)
    staged_params = update_params(staged_params, map(op.not_, in_knowns),
                                  num_new_args)
    # The outputs of the staged-out call are Tracers with the new eqn as recipe.
    if config.dynamic_shapes.value:
      # With dynamic shapes, we may need to substitute Tracers into avals.
      out_tracers = []
      for aval, _ in out_type:
        if type(aval) is DShapedArray:
          shape = [[*res_tracers, *env_tracers, *unknown_arg_tracers][d.val]
                   if type(d) is InDBIdx else d for d in aval.shape]
          aval = aval.update(shape=tuple(shape))
        out_tracers.append(JaxprTracer(self, PartialVal.unknown(aval), None))
    else:
      out_tracers = [JaxprTracer(self, PartialVal.unknown(a), None)
                     for a in out_type]
    name_stack = self._current_truncated_name_stack()
    source = source_info_util.current().replace(name_stack=name_stack)
    eqn = new_eqn_recipe(self, (*res_tracers, *env_tracers, *unknown_arg_tracers),
                         out_tracers, primitive, staged_params, jaxpr.effects,
                         source)
    for t in out_tracers: t.recipe = eqn
    return merge_lists(out_knowns, out_tracers, out_consts)
def process_map(self, primitive, f: lu.WrappedFun, tracers, params):
tracers = map(self.to_jaxpr_tracer, tracers)
update_params = call_param_updaters.get(primitive) or (lambda p, _, __: p)
in_knowns, in_avals, in_consts = partition_pvals([t.pval for t in tracers])
# This method is like process_call above, except:
# 1. we delete an axis from mapped-over input avals' shapes, and
# analogously add an axis to mapped-over output avals' shapes;
# 2. we update the in_axes and out_axes/out_axes_thunk parameters to
# reflect the inputs and outputs pruned from the unknown/known sides.
# Map (delete an axis from) unknown inputs' avals as dictated by in_axes.
unk_in_axes, const_in_axes = partition_list(in_knowns, params['in_axes'])
in_avals_mapped = [mapped_aval(params['axis_size'], ax, aval)
for ax, aval in zip(unk_in_axes, in_avals)]
# Wrap f to perform partial evaluation and plumb out aux data.
f = trace_to_subjaxpr_nounits2(f, self.tag, f.debug_info, False)
f, aux = partial_eval_wrapper_nounits(f, tuple(in_knowns),
tuple(in_avals_mapped))
# Adjust params for knowns (e.g. donated_invars, in_axes, out_axes_thunk)
const_params = update_params(params, in_knowns, 0) # handles donated_invars
out_axes_thunk = params['out_axes_thunk']
@as_hashable_function(closure=out_axes_thunk)
def const_out_axes_thunk():
out_knowns, _, jaxpr, _ = aux()
_, out_axes = partition_list(out_knowns, out_axes_thunk())
return tuple(out_axes) + (0,) * len(jaxpr.constvars) # res mapped axis 0
const_params = dict(const_params, in_axes=tuple(const_in_axes),
out_axes_thunk=const_out_axes_thunk)
# Run the map, getting known out vals and aux data used for staged-out map.
out = primitive.bind_with_trace(self.parent_trace, (f, *in_consts), const_params)
out_knowns, out_avals_mapped, jaxpr, env = aux()
# Split apart known outputs from the original call and residuals.
out_consts, res = split_list(out, [len(out) - len(jaxpr.constvars)])
# We can only check_jaxpr with the dynamic axis environment extended:
with core.extend_axis_env_nd([(params['axis_name'], params['axis_size'])]):
call_jaxpr = convert_constvars_jaxpr(jaxpr)
# Compute staged and const out_axes, taking into account residuals.
out_axes = params['out_axes_thunk']()
staged_out_axes, _ = partition_list(out_knowns, out_axes)
staged_in_axes = (0,) * len(res) + (None,) * len(env) + (*unk_in_axes,)
# Create the input tracers for the staged-out (unknown-value) call.
const_tracers = map(self.new_instantiated_const, res)
env_tracers = map(self.to_jaxpr_tracer, env)
unknown_arg_tracers = [t for t in tracers if not t.is_known()]
# Adjust params for staged-out call on unknown values.
num_new_args = len(const_tracers) + len(env_tracers)
staged_params = update_params(params, map(op.not_, in_knowns), num_new_args)
staged_params = dict(staged_params, in_axes=staged_in_axes,
out_axes=tuple(staged_out_axes), call_jaxpr=call_jaxpr)
del staged_params['out_axes_thunk']
# The outputs of the staged-out call are Tracers with the new eqn as recipe.
out_avals = [unmapped_aval(params['axis_size'], ax, a)
for ax, a in zip(staged_out_axes, out_avals_mapped)]
out_tracers = [JaxprTracer(self, PartialVal.unknown(a), None)
for a in out_avals]
effs = core.filter_named_axis_effects(jaxpr.effects, {params['axis_name']})
src_info = source_info_util.current()
eqn = new_eqn_recipe(self, (*const_tracers, *env_tracers, *unknown_arg_tracers),
out_tracers, primitive, staged_params, effs, src_info)
for t in out_tracers: t.recipe = eqn
return merge_lists(out_knowns, out_tracers, out_consts)
def _current_truncated_name_stack(self):
return source_info_util.current_name_stack()[len(self.name_stack):]
def process_custom_jvp_call(self, prim, fun, jvp, tracers, symbolic_zeros):
tracers = map(self.to_jaxpr_tracer, tracers)
if all(t.is_known() for t in tracers):
with core.set_current_trace(self.parent_trace):
vals = [t.pval[1] for t in tracers]
return prim.bind(fun, jvp, *vals, symbolic_zeros=symbolic_zeros)
# We assume non-trivial partial evaluation is only performed to build linear
# functions, and hence we don't need to keep the custom JVP rule around.
del jvp, symbolic_zeros
with core.set_current_trace(self):
return fun.call_wrapped(*tracers)
def process_custom_transpose(self, prim, call, tracers, **params):
tracers = map(self.to_jaxpr_tracer, tracers)
res_ts, lin_ts = split_list(tracers, [params['res_tree'].num_leaves])
assert all(t.is_known() for t in res_ts)
lin_all_known = all(t.is_known() for t in lin_ts)
if lin_all_known:
res_cvals = [t.pval[1] for t in res_ts]
lin_cvals = [t.pval[1] for t in lin_ts]
return prim.bind(call, *res_cvals, *lin_cvals, **params)
else:
out_tracers = [JaxprTracer(self, PartialVal.unknown(aval), None)
for aval in params['out_types']]
in_tracers = map(self.instantiate_const, tracers)
new_params = dict(params, call=call)
eqn = new_eqn_recipe(self, in_tracers, out_tracers, prim, new_params,
core.no_effects, source_info_util.current())
for t in out_tracers: t.recipe = eqn
return out_tracers
def process_custom_vjp_call(self, prim, f, fwd, bwd, tracers, out_trees, symbolic_zeros):
tracers = map(self.to_jaxpr_tracer, tracers)
if all(t.is_known() for t in tracers):
vals = [t.pval[1] for t in tracers]
with core.set_current_trace(self.parent_trace):
return prim.bind(f, fwd, bwd, *vals, out_trees=out_trees,
symbolic_zeros=symbolic_zeros)
tracers = map(self.instantiate_const, tracers)
in_knowns = (False,) * len(tracers)
in_avals = tuple(t.aval for t in tracers)
f_ = trace_to_subjaxpr_nounits2(f, self.tag, f.debug_info, True)
f_, aux = partial_eval_wrapper_nounits(f_, in_knowns, in_avals)
params = dict(out_trees=out_trees, symbolic_zeros=symbolic_zeros)
res = prim.bind_with_trace(self.parent_trace, (f_, fwd, bwd), params)
out_knowns, out_avals, jaxpr, env = aux()
assert not any(out_knowns)
res_tracers = map(self.instantiate_const, map(self.new_const, res))
env_tracers = map(self.to_jaxpr_tracer, env)
out_tracers = [JaxprTracer(self, PartialVal.unknown(a), None)
for a in out_avals]
closed_jaxpr = close_jaxpr(convert_constvars_jaxpr(jaxpr))
@partial(lu.wrap_init, debug_info=fwd.debug_info)
@_memoize
def fwd_jaxpr_thunk(*zeros):
fwd_ = _interleave_fun(fwd.with_unknown_names(), zeros)
fwd_jaxpr, _, consts = trace_to_jaxpr_dynamic(fwd_, in_avals)
return fwd_jaxpr, consts
name_stack = self._current_truncated_name_stack()
source = source_info_util.current().replace(name_stack=name_stack)
params = dict(
call_jaxpr=closed_jaxpr,
fwd_jaxpr_thunk=fwd_jaxpr_thunk,
num_consts=len(res) + len(env),
bwd=bwd,
out_trees=out_trees,
symbolic_zeros=symbolic_zeros
)
eqn = new_eqn_recipe(self, (*res_tracers, *env_tracers, *tracers),
out_tracers, prim, params, jaxpr.effects, source)
for t in out_tracers: t.recipe = eqn
return out_tracers
def partition_pvals(
pvals: list[PartialVal]
) -> tuple[list[bool], list[AbstractValue], list[Any]]:
knowns = [pval.is_known() for pval in pvals ]
avals = [pval.get_aval() for pval in pvals if not pval.is_known()]
consts = [pval.get_known() for pval in pvals if pval.is_known()]
return knowns, avals, consts
@lu.transformation_with_aux2
def partial_eval_wrapper_nounits(
f: Callable,
store: lu.Store,
in_knowns: Sequence[bool],
in_avals: Sequence[AbstractValue],
*in_consts: Any):
in_avals_, in_consts_ = iter(in_avals), iter(in_consts)
in_pvals = [PartialVal.known(next(in_consts_)) if known else
PartialVal.unknown(next(in_avals_)) for known in in_knowns]
sentinel = object()
assert next(in_avals_, sentinel) is next(in_consts_, sentinel) is sentinel
jaxpr, (*maybe_fwds, out_pvals, res, env) = f(in_pvals)
out_knowns, out_avals, out_consts = partition_pvals(out_pvals)
store.store((*maybe_fwds, out_knowns, out_avals, jaxpr, env))
return (*out_consts, *res)
@lu.transformation_with_aux2
def partial_eval_wrapper_nounits2(
f: Callable,
store: lu.Store,
in_knowns: Sequence[bool],
in_avals: Sequence[AbstractValue],
*in_consts: Any):
in_avals_, in_consts_ = iter(in_avals), iter(in_consts)
in_pvals = [PartialVal.known(next(in_consts_)) if known else
PartialVal.unknown(next(in_avals_)) for known in in_knowns]
sentinel = object()
assert next(in_avals_, sentinel) is next(in_consts_, sentinel) is sentinel
jaxpr, (*maybe_fwds, out_pvals, res, env) = f(in_pvals)
out_knowns, _, out_consts = partition_pvals(out_pvals)
res_avals = [typeof(r) for r in res]
store.store((*maybe_fwds, out_knowns, res_avals, jaxpr, env))
return (*out_consts, *res)
custom_partial_eval_rules: dict[Primitive, Callable] = {}
call_partial_eval_rules: dict[Primitive, Callable] = {}
call_param_updaters: dict[Primitive, Callable] = {}
def abstract_eval_fun(fun: Callable, *avals,
debug_info: core.DebugInfo, **params):
_, avals_out, _ = trace_to_jaxpr_dynamic(
lu.wrap_init(fun, params, debug_info=debug_info), avals)
assert all(isinstance(aval, AbstractValue) for aval in avals_out)
return avals_out
JaxprTracerRecipe = Union[
'JaxprEqnRecipe', 'LambdaBinding', 'FreeVar', 'ConstVar', Literal,
]
| JaxprTrace |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 1913,
"end": 2041
} | class ____( # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
):
...
@dataclass
| AltCLIPOutput |
python | keon__algorithms | algorithms/queues/priority_queue.py | {
"start": 125,
"end": 332
} | class ____:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return "{}: {}".format(self.data, self.priority)
| PriorityQueueNode |
python | streamlit__streamlit | lib/tests/streamlit/runtime/scriptrunner/script_runner_test.py | {
"start": 3219,
"end": 43506
} | class ____(AsyncTestCase):
def setUp(self) -> None:
super().setUp()
mock_runtime = MagicMock(spec=Runtime)
mock_runtime.media_file_mgr = MediaFileManager(
MemoryMediaFileStorage("/mock/media")
)
mock_runtime.media_file_mgr.clear_session_refs = MagicMock()
Runtime._instance = mock_runtime
def tearDown(self) -> None:
super().tearDown()
Runtime._instance = None
def test_startup_shutdown(self):
"""Test that we can create and shut down a ScriptRunner."""
scriptrunner = TestScriptRunner("good_script.py")
# Request that the ScriptRunner stop before it even starts, so that
# it doesn't start the script at all.
scriptrunner.request_stop()
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_control_events(scriptrunner, [ScriptRunnerEvent.SHUTDOWN])
self._assert_text_deltas(scriptrunner, [])
def test_yield_on_enqueue(self):
"""Make sure we try to handle execution control requests whenever
our _enqueue_forward_msg function is called.
"""
# Create a TestScriptRunner. We won't actually be starting its
# script thread - instead, we'll manually call _enqueue_forward_msg on it, and
# pretend we're in the script thread.
runner = TestScriptRunner("not_a_script.py")
runner._is_in_script_thread = MagicMock(return_value=True)
# Mock the call to _maybe_handle_execution_control_request.
# This is what we're testing gets called or not.
maybe_handle_execution_control_request_mock = MagicMock()
runner._maybe_handle_execution_control_request = (
maybe_handle_execution_control_request_mock
)
# Enqueue a ForwardMsg on the runner
mock_msg = MagicMock()
runner._enqueue_forward_msg(mock_msg)
# Ensure the ForwardMsg was delivered to event listeners.
self._assert_forward_msgs(runner, [mock_msg])
# maybe_handle_execution_control_request should be called by the
# enqueue function.
assert maybe_handle_execution_control_request_mock.call_count == 1
def test_dont_enqueue_with_pending_script_request(self):
"""No ForwardMsgs are enqueued when the ScriptRunner has
a STOP or RERUN request.
"""
# Create a ScriptRunner and pretend that we've already started
# executing.
runner = TestScriptRunner("not_a_script.py")
runner._is_in_script_thread = MagicMock(return_value=True)
runner._execing = True
runner._requests._state = ScriptRequestType.CONTINUE
# Enqueue a ForwardMsg on the runner, and ensure it's delivered
# to event listeners. (We're not stopped yet.)
mock_msg = MagicMock()
runner._enqueue_forward_msg(mock_msg)
self._assert_forward_msgs(runner, [mock_msg])
runner.clear_forward_msgs()
# Now, "stop" our ScriptRunner. Enqueuing should result in
# a StopException being raised, and no message enqueued.
runner._requests.request_stop()
with pytest.raises(StopException):
runner._enqueue_forward_msg(MagicMock())
self._assert_forward_msgs(runner, [])
# And finally, request a rerun. Enqueuing should result in
# a RerunException being raised and no message enqueued.
runner._requests = ScriptRequests()
runner.request_rerun(RerunData())
with pytest.raises(RerunException):
runner._enqueue_forward_msg(MagicMock())
self._assert_forward_msgs(runner, [])
def test_maybe_handle_execution_control_request(self):
"""maybe_handle_execution_control_request should no-op if called
from another thread.
"""
runner = TestScriptRunner("not_a_script.py")
runner._execing = True
# Mock ScriptRequests.on_scriptrunner_yield(). It will return a fake
# rerun request.
requests_mock = MagicMock()
requests_mock.on_scriptrunner_yield = MagicMock(
return_value=ScriptRequest(ScriptRequestType.RERUN, RerunData())
)
runner._requests = requests_mock
# If _is_in_script_thread is False, our request shouldn't get popped
runner._is_in_script_thread = MagicMock(return_value=False)
runner._maybe_handle_execution_control_request()
requests_mock.on_scriptrunner_yield.assert_not_called()
# If _is_in_script_thread is True, our rerun request should get
# popped (and this will result in a RerunException being raised).
runner._is_in_script_thread = MagicMock(return_value=True)
with pytest.raises(RerunException):
runner._maybe_handle_execution_control_request()
requests_mock.on_scriptrunner_yield.assert_called_once()
def test_run_script_in_loop(self):
"""_run_script_thread should continue re-running its script
while it has pending rerun requests."""
scriptrunner = TestScriptRunner("not_a_script.py")
# ScriptRequests.on_scriptrunner_ready will return 3 rerun requests,
# and then stop.
on_scriptrunner_ready_mock = MagicMock()
on_scriptrunner_ready_mock.side_effect = [
ScriptRequest(ScriptRequestType.RERUN, RerunData()),
ScriptRequest(ScriptRequestType.RERUN, RerunData()),
ScriptRequest(ScriptRequestType.RERUN, RerunData()),
ScriptRequest(ScriptRequestType.STOP),
]
scriptrunner._requests.on_scriptrunner_ready = on_scriptrunner_ready_mock
run_script_mock = MagicMock()
scriptrunner._run_script = run_script_mock
scriptrunner.start()
scriptrunner.join()
# _run_script should have been called 3 times, once for each
# RERUN request.
self._assert_no_exceptions(scriptrunner)
assert run_script_mock.call_count == 3
@parameterized.expand(
[
("good_script.py", text_utf),
# These files are .txt to avoid being broken by "make update-headers".
("good_script_no_encoding.py.txt", text_no_encoding),
("good_script_latin_encoding.py.txt", text_latin),
]
)
def test_run_script(self, filename, text):
"""Tests that we can run a script to completion."""
scriptrunner = TestScriptRunner(filename)
scriptrunner._fragment_storage = MagicMock()
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, [text])
scriptrunner._fragment_storage.clear.assert_called_once()
# The following check is a requirement for the CodeHasher to
# work correctly. The CodeHasher is scoped to
# files contained in the directory of __main__.__file__, which we
# assume is the main script directory.
assert os.path.realpath(scriptrunner._main_script_path) == os.path.realpath(
sys.modules["__main__"].__file__
), " ScriptRunner should set the __main__.__file__ attribute correctly"
Runtime._instance.media_file_mgr.clear_session_refs.assert_called_once()
def test_run_one_fragment(self):
"""Tests that we can run one fragment."""
fragment = MagicMock()
scriptrunner = TestScriptRunner("good_script.py")
scriptrunner._fragment_storage.set("my_fragment", fragment)
scriptrunner.request_rerun(RerunData(fragment_id_queue=["my_fragment"]))
scriptrunner.start()
scriptrunner.join()
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
script_started_event_data = scriptrunner.event_data[0]
script_started_event_data["fragment_ids_this_run"] = ["my_fragment"]
fragment.assert_called_once()
def test_run_multiple_fragments(self):
"""Tests that we can run fragments."""
fragment = MagicMock()
scriptrunner = TestScriptRunner("good_script.py")
scriptrunner._fragment_storage.set("my_fragment1", fragment)
scriptrunner._fragment_storage.set("my_fragment2", fragment)
scriptrunner._fragment_storage.set("my_fragment3", fragment)
scriptrunner.request_rerun(
RerunData(
fragment_id_queue=[
"my_fragment1",
"my_fragment2",
"my_fragment3",
]
)
)
scriptrunner.start()
scriptrunner.join()
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
script_started_event_data = scriptrunner.event_data[0]
script_started_event_data["fragment_ids_this_run"] = [
"my_fragment1",
"my_fragment2",
"my_fragment3",
]
fragment.assert_has_calls([call(), call(), call()])
Runtime._instance.media_file_mgr.clear_session_refs.assert_not_called()
def test_run_multiple_fragments_even_if_one_raised_an_exception(self):
"""Tests that fragments continue to run when previous fragment raised an error."""
fragment = MagicMock()
scriptrunner = TestScriptRunner("good_script.py")
raised_exception = {"called": False}
def raise_exception():
raised_exception["called"] = True
raise RuntimeError("this fragment errored out")
scriptrunner._fragment_storage.set("my_fragment1", raise_exception)
scriptrunner._fragment_storage.set("my_fragment2", fragment)
scriptrunner._fragment_storage.set("my_fragment3", fragment)
scriptrunner.request_rerun(
RerunData(
fragment_id_queue=[
"my_fragment1",
"my_fragment2",
"my_fragment3",
]
)
)
scriptrunner.start()
scriptrunner.join()
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
assert raised_exception["called"]
fragment.assert_has_calls([call(), call()])
Runtime._instance.media_file_mgr.clear_session_refs.assert_not_called()
@patch("streamlit.runtime.scriptrunner.script_runner.get_script_run_ctx")
@patch("streamlit.runtime.fragment.handle_uncaught_app_exception")
def test_regular_KeyError_is_rethrown(
self, patched_handle_exception, patched_get_script_run_ctx
):
"""Test that regular key-errors within a fragment are surfaced
as such and not caught by the FragmentStorageKeyError.
"""
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
ctx.current_fragment_id = "my_fragment_id"
def non_optional_func():
raise KeyError("kaboom")
def fragment():
_fragment(non_optional_func)()
scriptrunner = TestScriptRunner("good_script.py")
scriptrunner._fragment_storage.set("my_fragment", fragment)
scriptrunner.request_rerun(RerunData(fragment_id_queue=["my_fragment"]))
scriptrunner.start()
scriptrunner.join()
ex = patched_handle_exception.call_args[0][0]
assert isinstance(ex, KeyError)
@patch("streamlit.runtime.scriptrunner.script_runner._LOGGER.exception")
def test_compile_error(self, patched_logger_exception):
"""Tests that we get an exception event when a script can't compile."""
scriptrunner = TestScriptRunner("compile_error.py.txt")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, [])
# Verify that the exception was logged
patched_logger_exception.assert_called_once()
# Verify the logger was called with the correct message
assert patched_logger_exception.call_args[0][0] == "Script compilation error"
# Ensure that exc_info parameter was passed (contains the actual exception)
assert "exc_info" in patched_logger_exception.call_args[1]
@patch("streamlit.runtime.state.session_state.SessionState._call_callbacks")
def test_calls_widget_callbacks(self, patched_call_callbacks):
"""Before a script is rerun, we call callbacks for any widgets
whose value has changed.
"""
scriptrunner = TestScriptRunner("widgets_script.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
# Default widget values
require_widgets_deltas([scriptrunner])
self._assert_text_deltas(
scriptrunner, ["False", "ahoy!", "0", "False", "loop_forever"]
)
patched_call_callbacks.assert_not_called()
# Update widgets
states = WidgetStates()
w1_id = scriptrunner.get_widget_id("checkbox", "checkbox")
_create_widget(w1_id, states).bool_value = True
w2_id = scriptrunner.get_widget_id("text_area", "text_area")
_create_widget(w2_id, states).string_value = "matey!"
w3_id = scriptrunner.get_widget_id("radio", "radio")
_create_widget(w3_id, states).int_value = 2
w4_id = scriptrunner.get_widget_id("button", "button")
_create_widget(w4_id, states).trigger_value = True
# Explicitly clear deltas before re-running, to prevent a race
# condition. (The ScriptRunner will clear the deltas when it
# starts the re-run, but if that doesn't happen before
# require_widgets_deltas() starts polling the ScriptRunner's deltas,
# it will see stale deltas from the last run.)
scriptrunner.clear_forward_msgs()
scriptrunner.request_rerun(RerunData(widget_states=states))
require_widgets_deltas([scriptrunner])
patched_call_callbacks.assert_called_once()
self._assert_text_deltas(
scriptrunner, ["True", "matey!", "2", "True", "loop_forever"]
)
scriptrunner.request_stop()
scriptrunner.join()
@patch("streamlit.runtime.state.session_state.SessionState._call_callbacks")
def test_calls_widget_callbacks_on_new_scriptrunner_instance(
self, patched_call_callbacks
):
"""A new ScriptRunner instance will call widget callbacks
if widget values have changed. (This differs slightly from
`test_calls_widget_callbacks`, which tests that an *already-running*
ScriptRunner calls its callbacks on rerun).
"""
# Create a ScriptRunner and run it once so we can grab its widgets.
scriptrunner = TestScriptRunner("widgets_script.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
require_widgets_deltas([scriptrunner])
scriptrunner.request_stop()
scriptrunner.join()
patched_call_callbacks.assert_not_called()
# Set our checkbox's value to True
states = WidgetStates()
checkbox_id = scriptrunner.get_widget_id("checkbox", "checkbox")
_create_widget(checkbox_id, states).bool_value = True
# Create a *new* ScriptRunner with our new RerunData. Our callbacks
# should be called this time.
scriptrunner = TestScriptRunner("widgets_script.py")
scriptrunner.request_rerun(RerunData(widget_states=states))
scriptrunner.start()
require_widgets_deltas([scriptrunner])
scriptrunner.request_stop()
scriptrunner.join()
patched_call_callbacks.assert_called_once()
@patch("streamlit.elements.exception._exception")
@patch("streamlit.runtime.state.session_state.SessionState._call_callbacks")
def test_calls_widget_callbacks_error(
self, patched_call_callbacks, patched_st_exception
):
"""If an exception is raised from a callback function,
it should result in a call to `streamlit.exception`.
"""
patched_call_callbacks.side_effect = RuntimeError("Random Error")
scriptrunner = TestScriptRunner("widgets_script.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
# Default widget values
require_widgets_deltas([scriptrunner])
self._assert_text_deltas(
scriptrunner, ["False", "ahoy!", "0", "False", "loop_forever"]
)
patched_call_callbacks.assert_not_called()
# Update widgets
states = WidgetStates()
w1_id = scriptrunner.get_widget_id("checkbox", "checkbox")
_create_widget(w1_id, states).bool_value = True
w2_id = scriptrunner.get_widget_id("text_area", "text_area")
_create_widget(w2_id, states).string_value = "matey!"
w3_id = scriptrunner.get_widget_id("radio", "radio")
_create_widget(w3_id, states).int_value = 2
w4_id = scriptrunner.get_widget_id("button", "button")
_create_widget(w4_id, states).trigger_value = True
# Explicitly clear deltas before re-running, to prevent a race
# condition. (The ScriptRunner will clear the deltas when it
# starts the re-run, but if that doesn't happen before
# require_widgets_deltas() starts polling the ScriptRunner's deltas,
# it will see stale deltas from the last run.)
scriptrunner.clear_forward_msgs()
scriptrunner.request_rerun(RerunData(widget_states=states))
scriptrunner.join()
patched_call_callbacks.assert_called_once()
self._assert_control_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_FOR_RERUN,
ScriptRunnerEvent.SCRIPT_STARTED,
# We use the SCRIPT_STOPPED_WITH_SUCCESS event even if the
# script runs into an error during execution. The user is
# informed of the error by an `st.exception` box that we check
# for below.
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
patched_st_exception.assert_called_once()
def test_missing_script(self):
"""Tests that we get an exception event when a script doesn't exist."""
scriptrunner = TestScriptRunner("i_do_not_exist.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, [])
@patch("streamlit.runtime.scriptrunner.script_runner.create_page_profile_message")
def test_uncaught_exception_gets_tracked(self, patched_create_page_profile_message):
"""Tests that we track uncaught exceptions."""
with testutil.patch_config_options({"browser.gatherUsageStats": True}):
scriptrunner = TestScriptRunner("runtime_error.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
patched_create_page_profile_message.assert_called_once()
call_kwargs = patched_create_page_profile_message.call_args_list[0].kwargs
# Check the
assert len(call_kwargs["commands"]) == 2 # text & exception command
assert call_kwargs["exec_time"] > 0
assert call_kwargs["prep_time"] > 0
assert call_kwargs["uncaught_exception"] == "AttributeError"
@parameterized.expand([(True,), (False,)])
@patch("streamlit.runtime.runtime.Runtime.exists", MagicMock(return_value=True))
def test_runtime_error(self, show_error_details: bool):
"""Tests that we correctly handle scripts with runtime errors."""
with testutil.patch_config_options(
{"client.showErrorDetails": show_error_details}
):
scriptrunner = TestScriptRunner("runtime_error.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG, # text delta
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG, # exception delta
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
# We'll get two deltas: one for st.text(), and one for the
# exception that gets thrown afterwards.
elts = scriptrunner.elements()
assert elts[0].WhichOneof("type") == "text"
if show_error_details:
self._assert_num_deltas(scriptrunner, 2)
assert elts[1].WhichOneof("type") == "exception"
else:
self._assert_num_deltas(scriptrunner, 2)
assert elts[1].WhichOneof("type") == "exception"
exc_msg = elts[1].exception.message
assert exc_msg == _GENERIC_UNCAUGHT_EXCEPTION_TEXT
@pytest.mark.slow
def test_stop_script(self):
"""Tests that we can stop a script while it's running."""
scriptrunner = TestScriptRunner("infinite_loop.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
time.sleep(0.1)
scriptrunner.request_rerun(RerunData())
# This test will fail if the script runner does not execute the infinite
# script's write call at least once during the final script run.
# The script runs forever, and when we enqueue a rerun it forcibly
# stops execution and runs some cleanup. If we do not wait for the
# forced GC to finish, the script won't start running before we stop
# the script runner, so the expected delta is never created.
time.sleep(1)
scriptrunner.request_stop()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
# We use _assert_control_events, and not _assert_events,
# because the infinite loop will fire an indeterminate number of
# ForwardMsg enqueue requests. Those ForwardMsgs will all be ultimately
# coalesced down to a single message by the ForwardMsgQueue, which is
# why the "_assert_text_deltas" call, below, just asserts the existence
# of a single ForwardMsg.
self._assert_control_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_FOR_RERUN,
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, ["loop_forever"])
def test_shutdown(self):
"""Test that we can shutdown while a script is running."""
scriptrunner = TestScriptRunner("infinite_loop.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
time.sleep(0.1)
scriptrunner.request_stop()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_control_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, ["loop_forever"])
def test_widgets(self):
"""Tests that widget values behave as expected."""
scriptrunner = TestScriptRunner("widgets_script.py")
try:
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
# Default widget values
require_widgets_deltas([scriptrunner])
self._assert_text_deltas(
scriptrunner, ["False", "ahoy!", "0", "False", "loop_forever"]
)
# Update widgets
states = WidgetStates()
w1_id = scriptrunner.get_widget_id("checkbox", "checkbox")
_create_widget(w1_id, states).bool_value = True
w2_id = scriptrunner.get_widget_id("text_area", "text_area")
_create_widget(w2_id, states).string_value = "matey!"
w3_id = scriptrunner.get_widget_id("radio", "radio")
_create_widget(w3_id, states).int_value = 2
w4_id = scriptrunner.get_widget_id("button", "button")
_create_widget(w4_id, states).trigger_value = True
# Explicitly clear deltas before re-running, to prevent a race
# condition. (The ScriptRunner will clear the deltas when it
# starts the re-run, but if that doesn't happen before
# require_widgets_deltas() starts polling the ScriptRunner's deltas,
# it will see stale deltas from the last run.)
scriptrunner.clear_forward_msgs()
scriptrunner.request_rerun(RerunData(widget_states=states))
require_widgets_deltas([scriptrunner])
self._assert_text_deltas(
scriptrunner, ["True", "matey!", "2", "True", "loop_forever"]
)
# Rerun with previous values. The button should be reset;
# everything else should be the same.
scriptrunner.clear_forward_msgs()
scriptrunner.request_rerun(RerunData())
require_widgets_deltas([scriptrunner])
self._assert_text_deltas(
scriptrunner, ["True", "matey!", "2", "False", "loop_forever"]
)
finally:
scriptrunner.request_stop()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
def test_query_string_and_page_script_hash_saved(self):
scriptrunner = TestScriptRunner("good_script.py")
scriptrunner.request_rerun(
RerunData(query_string="foo=bar", page_script_hash="hash1")
)
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
shutdown_data = scriptrunner.event_data[-1]
assert shutdown_data["client_state"].query_string == "foo=bar"
assert shutdown_data["client_state"].page_script_hash == "hash1"
def test_context_info_saved_in_shutdown(self):
"""Test that context_info is preserved in the SHUTDOWN event."""
from streamlit.proto.ClientState_pb2 import ContextInfo
scriptrunner = TestScriptRunner("good_script.py")
# Create context info
context_info = ContextInfo()
context_info.timezone = "Europe/Berlin"
context_info.locale = "de-DE"
context_info.url = "http://localhost:8501"
context_info.is_embedded = False
scriptrunner.request_rerun(
RerunData(
query_string="foo=bar",
page_script_hash="hash1",
context_info=context_info,
)
)
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
shutdown_data = scriptrunner.event_data[-1]
client_state = shutdown_data["client_state"]
assert client_state.query_string == "foo=bar"
assert client_state.page_script_hash == "hash1"
# Verify context_info is preserved
assert client_state.HasField("context_info")
assert client_state.context_info.timezone == "Europe/Berlin"
assert client_state.context_info.locale == "de-DE"
assert client_state.context_info.url == "http://localhost:8501"
assert client_state.context_info.is_embedded is False
def test_coalesce_rerun(self):
"""Tests that multiple pending rerun requests get coalesced."""
scriptrunner = TestScriptRunner("good_script.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.request_rerun(RerunData())
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, [text_utf])
def test_remove_nonexistent_elements(self):
"""Tests that nonexistent elements are removed from widget cache after
script run.
"""
widget_id = "nonexistent_widget_id"
# Run script, sending in a WidgetStates containing our fake widget ID.
scriptrunner = TestScriptRunner("good_script.py")
states = WidgetStates()
_create_widget(widget_id, states).string_value = "streamlit"
scriptrunner.request_rerun(RerunData(widget_states=states))
scriptrunner.start()
# At this point, scriptrunner should have finished running, detected
# that our widget_id wasn't in the list of widgets found this run, and
# culled it. Ensure widget cache no longer holds our widget ID.
with pytest.raises(KeyError):
scriptrunner._session_state[widget_id]
def test_dg_stack_preserved_for_fragment_rerun(self):
"""Tests that the dg_stack and cursor are preserved for a fragment rerun.
Having a fragment rerun that is interrupted by a RerunException triggered by
another fragment run simulates what we have seen in the issue where the main app
was rendered inside of a dialog when two fragment-related reruns were handled
in the same ScriptRunner thread.
"""
scriptrunner = TestScriptRunner("good_script.py")
# set the dg_stack from the fragment to simulate a populated dg_stack of
# a real app
dg_stack_set_by_fragment = (
DeltaGenerator(),
DeltaGenerator(),
DeltaGenerator(),
DeltaGenerator(),
)
scriptrunner._fragment_storage.set(
"my_fragment1",
lambda: context_dg_stack.set(dg_stack_set_by_fragment),
)
# trigger a run with fragment_id to avoid clearing the fragment_storage in the
# script runner
scriptrunner.request_rerun(RerunData(fragment_id_queue=["my_fragment1"]))
# yielding a rerun request will raise a RerunException in the script runner
# with the provided RerunData
on_scriptrunner_yield_mock = MagicMock()
on_scriptrunner_yield_mock.side_effect = [
# the original_dg_stack will be set to the dg_stack populated by the first
# requested_rerun of the fragment
ScriptRequest(
ScriptRequestType.RERUN, RerunData(fragment_id_queue=["my_fragment1"])
),
ScriptRequest(ScriptRequestType.STOP),
]
scriptrunner._requests.on_scriptrunner_yield = on_scriptrunner_yield_mock
scriptrunner.start()
scriptrunner.join()
assert len(scriptrunner.get_runner_thread_dg_stack()) == len(
dg_stack_set_by_fragment
)
assert scriptrunner.get_runner_thread_dg_stack() == dg_stack_set_by_fragment
def test_dg_stack_reset_for_full_app_rerun(self):
"""Tests that the dg_stack and cursor are reset for a full app rerun."""
scriptrunner = TestScriptRunner("good_script.py")
# simulate a dg_stack populated by the fragment
dg_stack_set_by_fragment = (
DeltaGenerator(),
DeltaGenerator(),
DeltaGenerator(),
DeltaGenerator(),
)
scriptrunner._fragment_storage.set(
"my_fragment1",
lambda: context_dg_stack.set(dg_stack_set_by_fragment),
)
# trigger a run with fragment_id to avoid clearing the fragment_storage
# in the script runner
scriptrunner.request_rerun(RerunData(fragment_id_queue=["my_fragment1"]))
# yielding a rerun request will raise a RerunException in the script runner
# with the provided RerunData
on_scriptrunner_yield_mock = MagicMock()
on_scriptrunner_yield_mock.side_effect = [
# raise RerunException for full app run
ScriptRequest(ScriptRequestType.RERUN, RerunData()),
ScriptRequest(ScriptRequestType.STOP),
]
scriptrunner._requests.on_scriptrunner_yield = on_scriptrunner_yield_mock
scriptrunner.start()
scriptrunner.join()
# for full app run, the dg_stack should have been reset
assert len(scriptrunner.get_runner_thread_dg_stack()) == 1
# TODO: re-enable after flakiness is fixed
def off_test_multiple_scriptrunners(self):
"""Tests that multiple scriptrunners can run simultaneously."""
# This scriptrunner will run before the other 3. It's used to retrieve
# the widget id before initializing deltas on other runners.
scriptrunner = TestScriptRunner("widgets_script.py")
scriptrunner.request_rerun(RerunData())
scriptrunner.start()
# Get the widget ID of a radio button and shut down the first runner.
require_widgets_deltas([scriptrunner])
radio_widget_id = scriptrunner.get_widget_id("radio", "radio")
scriptrunner.request_stop()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
# Build several runners. Each will set a different int value for
# its radio button.
runners = []
for ii in range(3):
runner = TestScriptRunner("widgets_script.py")
runners.append(runner)
states = WidgetStates()
_create_widget(radio_widget_id, states).int_value = ii
runner.request_rerun(RerunData(widget_states=states))
# Start the runners and wait a beat.
for runner in runners:
runner.start()
require_widgets_deltas(runners)
# Ensure that each runner's radio value is as expected.
for ii, runner in enumerate(runners):
self._assert_text_deltas(
runner, ["False", "ahoy!", str(ii), "False", "loop_forever"]
)
runner.request_stop()
time.sleep(0.1)
# Shut 'em all down!
for runner in runners:
runner.join()
for runner in runners:
self._assert_no_exceptions(runner)
self._assert_control_events(
runner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
def test_page_script_hash_to_script_path(self):
scriptrunner = TestScriptRunner("good_navigation_script.py")
scriptrunner.request_rerun(RerunData(page_name="good_script2"))
scriptrunner.start()
scriptrunner.join()
self._assert_no_exceptions(scriptrunner)
self._assert_events(
scriptrunner,
[
ScriptRunnerEvent.SCRIPT_STARTED,
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG, # Navigation call
ScriptRunnerEvent.ENQUEUE_FORWARD_MSG, # text delta
ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS,
ScriptRunnerEvent.SHUTDOWN,
],
)
self._assert_text_deltas(scriptrunner, [text_utf2])
assert (
os.path.join(
os.path.dirname(__file__), "test_data", "good_navigation_script.py"
)
== sys.modules["__main__"].__file__
), " ScriptRunner should set the __main__.__file__ attribute correctly"
shutdown_data = scriptrunner.event_data[-1]
assert (
shutdown_data["client_state"].page_script_hash
== "f0b2ab81496648a6f2af976dfd35f4a8"
)
def _assert_no_exceptions(self, scriptrunner: TestScriptRunner) -> None:
"""Assert that no uncaught exceptions were thrown in the
scriptrunner's run thread.
"""
assert scriptrunner.script_thread_exceptions == []
def _assert_events(
self, scriptrunner: TestScriptRunner, expected_events: list[ScriptRunnerEvent]
) -> None:
"""Assert that the ScriptRunnerEvents emitted by a TestScriptRunner
are what we expect."""
assert expected_events == scriptrunner.events
def _assert_control_events(
self, scriptrunner: TestScriptRunner, expected_events: list[ScriptRunnerEvent]
) -> None:
"""Assert the non-data ScriptRunnerEvents emitted by a TestScriptRunner
are what we expect. ("Non-data" refers to all events except
ENQUEUE_FORWARD_MSG.)
"""
control_events = [
event for event in scriptrunner.events if _is_control_event(event)
]
assert expected_events == control_events
def _assert_forward_msgs(
self, scriptrunner: TestScriptRunner, messages: list[ForwardMsg]
) -> None:
"""Assert that the ScriptRunner's ForwardMsgQueue contains the
given list of ForwardMsgs.
"""
assert messages == scriptrunner.forward_msgs()
def _assert_num_deltas(
self, scriptrunner: TestScriptRunner, num_deltas: int
) -> None:
"""Assert that the given number of delta ForwardMsgs were enqueued
during script execution.
Parameters
----------
scriptrunner : TestScriptRunner
num_deltas : int
"""
assert num_deltas == len(scriptrunner.deltas())
def _assert_text_deltas(
self, scriptrunner: TestScriptRunner, text_deltas: list[str]
) -> None:
"""Assert that the scriptrunner's ForwardMsgQueue contains text deltas
with the given contents.
"""
assert text_deltas == scriptrunner.text_deltas()
| ScriptRunnerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/util.py | {
"start": 74616,
"end": 82311
} | class ____(Exception):
pass
def _cleanup_mapped_str_annotation(
annotation: str, originating_module: str
) -> str:
# fix up an annotation that comes in as the form:
# 'Mapped[List[Address]]' so that it instead looks like:
# 'Mapped[List["Address"]]' , which will allow us to get
# "Address" as a string
# additionally, resolve symbols for these names since this is where
# we'd have to do it
inner: Optional[Match[str]]
mm = re.match(r"^([^ \|]+?)\[(.+)\]$", annotation)
if not mm:
return annotation
# ticket #8759. Resolve the Mapped name to a real symbol.
# originally this just checked the name.
try:
obj = eval_name_only(mm.group(1), originating_module)
except NameError as ne:
raise _CleanupError(
f'For annotation "{annotation}", could not resolve '
f'container type "{mm.group(1)}". '
"Please ensure this type is imported at the module level "
"outside of TYPE_CHECKING blocks"
) from ne
if obj is typing.ClassVar:
real_symbol = "ClassVar"
else:
try:
if issubclass(obj, _MappedAnnotationBase):
real_symbol = obj.__name__
else:
return annotation
except TypeError:
# avoid isinstance(obj, type) check, just catch TypeError
return annotation
# note: if one of the codepaths above didn't define real_symbol and
# then didn't return, real_symbol raises UnboundLocalError
# which is actually a NameError, and the calling routines don't
# notice this since they are catching NameError anyway. Just in case
# this is being modified in the future, something to be aware of.
stack = []
inner = mm
while True:
stack.append(real_symbol if mm is inner else inner.group(1))
g2 = inner.group(2)
inner = re.match(r"^([^ \|]+?)\[(.+)\]$", g2)
if inner is None:
stack.append(g2)
break
# stacks we want to rewrite, that is, quote the last entry which
# we think is a relationship class name:
#
# ['Mapped', 'List', 'Address']
# ['Mapped', 'A']
#
# stacks we dont want to rewrite, which are generally MappedColumn
# use cases:
#
# ['Mapped', "'Optional[Dict[str, str]]'"]
# ['Mapped', 'dict[str, str] | None']
if (
# avoid already quoted symbols such as
# ['Mapped', "'Optional[Dict[str, str]]'"]
not re.match(r"""^["'].*["']$""", stack[-1])
# avoid further generics like Dict[] such as
# ['Mapped', 'dict[str, str] | None'],
# ['Mapped', 'list[int] | list[str]'],
# ['Mapped', 'Union[list[int], list[str]]'],
and not re.search(r"[\[\]]", stack[-1])
):
stripchars = "\"' "
stack[-1] = ", ".join(
f'"{elem.strip(stripchars)}"' for elem in stack[-1].split(",")
)
annotation = "[".join(stack) + ("]" * (len(stack) - 1))
return annotation
def _extract_mapped_subtype(
raw_annotation: Optional[_AnnotationScanType],
cls: type,
originating_module: str,
key: str,
attr_cls: Type[Any],
required: bool,
is_dataclass_field: bool,
expect_mapped: bool = True,
raiseerr: bool = True,
) -> Optional[Tuple[Union[_AnnotationScanType, str], Optional[type]]]:
"""given an annotation, figure out if it's ``Mapped[something]`` and if
so, return the ``something`` part.
Includes error raise scenarios and other options.
"""
if raw_annotation is None:
if required:
raise orm_exc.MappedAnnotationError(
f"Python typing annotation is required for attribute "
f'"{cls.__name__}.{key}" when primary argument(s) for '
f'"{attr_cls.__name__}" construct are None or not present'
)
return None
try:
# destringify the "outside" of the annotation. note we are not
# adding include_generic so it will *not* dig into generic contents,
# which will remain as ForwardRef or plain str under future annotations
# mode. The full destringify happens later when mapped_column goes
# to do a full lookup in the registry type_annotations_map.
annotated = de_stringify_annotation(
cls,
raw_annotation,
originating_module,
str_cleanup_fn=_cleanup_mapped_str_annotation,
)
except _CleanupError as ce:
raise orm_exc.MappedAnnotationError(
f"Could not interpret annotation {raw_annotation}. "
"Check that it uses names that are correctly imported at the "
"module level. See chained stack trace for more hints."
) from ce
except NameError as ne:
if raiseerr and "Mapped[" in raw_annotation: # type: ignore
raise orm_exc.MappedAnnotationError(
f"Could not interpret annotation {raw_annotation}. "
"Check that it uses names that are correctly imported at the "
"module level. See chained stack trace for more hints."
) from ne
annotated = raw_annotation # type: ignore
if is_dataclass_field:
return annotated, None
else:
if not hasattr(annotated, "__origin__") or not is_origin_of_cls(
annotated, _MappedAnnotationBase
):
if expect_mapped:
if not raiseerr:
return None
origin = getattr(annotated, "__origin__", None)
if origin is typing.ClassVar:
return None
# check for other kind of ORM descriptor like AssociationProxy,
# don't raise for that (issue #9957)
elif isinstance(origin, type) and issubclass(
origin, ORMDescriptor
):
return None
raise orm_exc.MappedAnnotationError(
f'Type annotation for "{cls.__name__}.{key}" '
"can't be correctly interpreted for "
"Annotated Declarative Table form. ORM annotations "
"should normally make use of the ``Mapped[]`` generic "
"type, or other ORM-compatible generic type, as a "
"container for the actual type, which indicates the "
"intent that the attribute is mapped. "
"Class variables that are not intended to be mapped "
"by the ORM should use ClassVar[]. "
"To allow Annotated Declarative to disregard legacy "
"annotations which don't use Mapped[] to pass, set "
'"__allow_unmapped__ = True" on the class or a '
"superclass this class.",
code="zlpr",
)
else:
return annotated, None
generic_annotated = cast(GenericProtocol[Any], annotated)
if len(generic_annotated.__args__) != 1:
raise orm_exc.MappedAnnotationError(
"Expected sub-type for Mapped[] annotation"
)
return (
# fix dict/list/set args to be ForwardRef, see #11814
fixup_container_fwd_refs(generic_annotated.__args__[0]),
generic_annotated.__origin__,
)
def _mapper_property_as_plain_name(prop: Type[Any]) -> str:
if hasattr(prop, "_mapper_property_name"):
name = prop._mapper_property_name()
else:
name = None
return util.clsname_as_plain_name(prop, name)
| _CleanupError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/vml/test_write_stroke.py | {
"start": 289,
"end": 742
} | class ____(unittest.TestCase):
"""
Test the Vml _write_stroke() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_stroke(self):
"""Test the _write_stroke() method"""
self.vml._write_stroke()
exp = """<v:stroke joinstyle="miter"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteVstroke |
python | ray-project__ray | python/ray/util/client/worker.py | {
"start": 2826,
"end": 37514
} | class ____:
def __init__(
self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
_credentials: Optional[grpc.ChannelCredentials] = None,
):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
_credentials: gprc channel credentials. Default ones will be used
if None.
"""
self._client_id = make_client_id()
self.metadata = [("client_id", self._client_id)] + (
metadata if metadata else []
)
self.channel = None
self.server = None
self._conn_state = grpc.ChannelConnectivity.IDLE
self._converted: Dict[str, ClientStub] = {}
self._secure = secure or os.environ.get("RAY_USE_TLS", "0").lower() in (
"1",
"true",
)
self._conn_str = conn_str
self._connection_retries = connection_retries
if _credentials is not None:
self._credentials = _credentials
self._secure = True
else:
self._credentials = None
self._reconnect_grace_period = DEFAULT_CLIENT_RECONNECT_GRACE_PERIOD
if "RAY_CLIENT_RECONNECT_GRACE_PERIOD" in os.environ:
# Use value in environment variable if available
self._reconnect_grace_period = int(
os.environ["RAY_CLIENT_RECONNECT_GRACE_PERIOD"]
)
# Disable retries if grace period is set to 0
self._reconnect_enabled = self._reconnect_grace_period != 0
# Set to True when the connection cannot be recovered and reconnect
# attempts should be stopped
self._in_shutdown = False
# Set to True after initial connection succeeds
self._has_connected = False
self._connect_channel()
self._has_connected = True
# Has Ray been initialized on the server?
self._serverside_ray_initialized = False
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self, self._client_id, self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
# Track this value to raise a warning if a lot of data are transferred.
self.total_outbound_message_size_bytes = 0
# Used to create unique IDs for RPCs to the RayletServicer
self._req_id_lock = threading.Lock()
self._req_id = 0
def _connect_channel(self, reconnecting=False) -> None:
"""
Attempts to connect to the server specified by conn_str. If
reconnecting after an RPC error, cleans up the old channel and
continues to attempt to connect until the grace period is over.
"""
if self.channel is not None:
self.channel.unsubscribe(self._on_channel_state_change)
self.channel.close()
from ray._private.grpc_utils import init_grpc_channel
# Prepare credentials if secure connection is requested
credentials = None
if self._secure:
if self._credentials is not None:
credentials = self._credentials
elif os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true"):
# init_grpc_channel will handle this via load_certs_from_env()
credentials = None
else:
# Default SSL credentials (no specific certs)
credentials = grpc.ssl_channel_credentials()
# Create channel with auth interceptors via helper
# This automatically adds auth interceptors when token auth is enabled
self.channel = init_grpc_channel(
self._conn_str,
options=GRPC_OPTIONS,
asynchronous=False,
credentials=credentials,
)
self.channel.subscribe(self._on_channel_state_change)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
start_time = time.time()
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
service_ready = False
while conn_attempts < max(self._connection_retries, 1) or reconnecting:
conn_attempts += 1
if self._in_shutdown:
# User manually closed the worker before connection finished
break
elapsed_time = time.time() - start_time
if reconnecting and elapsed_time > self._reconnect_grace_period:
self._in_shutdown = True
raise ConnectionError(
"Failed to reconnect within the reconnection grace period "
f"({self._reconnect_grace_period}s)"
)
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(self.channel)
service_ready = bool(self.ping_server())
if service_ready:
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.debug(f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
logger.debug(
f"Ray client server unavailable, retrying in {timeout}s..."
)
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout.
time.sleep(timeout)
# Fallthrough, backoff, and retry at the top of the loop
logger.debug(
f"Waiting for Ray to become ready on the server, retry in {timeout}s..."
)
if not reconnecting:
# Don't increase backoff when trying to reconnect --
# we already know the server exists, attempt to reconnect
# as soon as we can
timeout = backoff(timeout)
# If we made it through the loop without service_ready
# it means we've used up our retries and
# should error back to the user.
if not service_ready:
self._in_shutdown = True
if log_once("ray_client_security_groups"):
warnings.warn(
"Ray Client connection timed out. Ensure that "
"the Ray Client port on the head node is reachable "
"from your local machine. See https://docs.ray.io/en"
"/latest/cluster/ray-client.html#step-2-check-ports for "
"more information."
)
raise ConnectionError("ray client connection timeout")
def _can_reconnect(self, e: grpc.RpcError) -> bool:
"""
Returns True if the RPC error can be recovered from and a retry is
appropriate, false otherwise.
"""
if not self._reconnect_enabled:
return False
if self._in_shutdown:
# Channel is being shutdown, don't try to reconnect
return False
if e.code() in GRPC_UNRECOVERABLE_ERRORS:
# Unrecoverable error -- These errors are specifically raised
# by the server's application logic
return False
if e.code() == grpc.StatusCode.INTERNAL:
details = e.details()
if details == "Exception serializing request!":
# The client failed tried to send a bad request (for example,
# passing "None" instead of a valid grpc message). Don't
# try to reconnect/retry.
return False
# All other errors can be treated as recoverable
return True
def _call_stub(self, stub_name: str, *args, **kwargs) -> Any:
"""
Calls the stub specified by stub_name (Schedule, WaitObject, etc...).
If a recoverable error occurrs while calling the stub, attempts to
retry the RPC.
"""
while not self._in_shutdown:
try:
return getattr(self.server, stub_name)(*args, **kwargs)
except grpc.RpcError as e:
if self._can_reconnect(e):
time.sleep(0.5)
continue
raise
except ValueError:
# Trying to use the stub on a cancelled channel will raise
# ValueError. This should only happen when the data client
# is attempting to reset the connection -- sleep and try
# again.
time.sleep(0.5)
continue
raise ConnectionError("Client is shutting down.")
def _get_object_iterator(
self, req: ray_client_pb2.GetRequest, *args, **kwargs
) -> Any:
"""
Calls the stub for GetObject on the underlying server stub. If a
recoverable error occurs while streaming the response, attempts
to retry the get starting from the first chunk that hasn't been
received.
"""
last_seen_chunk = -1
while not self._in_shutdown:
# If we disconnect partway through, restart the get request
# at the first chunk we haven't seen
req.start_chunk_id = last_seen_chunk + 1
try:
for chunk in self.server.GetObject(req, *args, **kwargs):
if chunk.chunk_id <= last_seen_chunk:
# Ignore repeat chunks
logger.debug(
f"Received a repeated chunk {chunk.chunk_id} "
f"from request {req.req_id}."
)
continue
if last_seen_chunk + 1 != chunk.chunk_id:
raise RuntimeError(
f"Received chunk {chunk.chunk_id} when we expected "
f"{self.last_seen_chunk + 1}"
)
last_seen_chunk = chunk.chunk_id
yield chunk
if last_seen_chunk == chunk.total_chunks - 1:
# We've yielded the last chunk, exit early
return
return
except grpc.RpcError as e:
if self._can_reconnect(e):
time.sleep(0.5)
continue
raise
except ValueError:
# Trying to use the stub on a cancelled channel will raise
# ValueError. This should only happen when the data client
# is attempting to reset the connection -- sleep and try
# again.
time.sleep(0.5)
continue
raise ConnectionError("Client is shutting down.")
def _add_ids_to_metadata(self, metadata: Any):
"""
Adds a unique req_id and the current thread's identifier to the
metadata. These values are useful for preventing mutating operations
from being replayed on the server side in the event that the client
must retry a requsest.
Args:
metadata: the gRPC metadata to append the IDs to
"""
if not self._reconnect_enabled:
# IDs not needed if the reconnects are disabled
return metadata
thread_id = str(threading.get_ident())
with self._req_id_lock:
self._req_id += 1
if self._req_id > INT32_MAX:
self._req_id = 1
req_id = str(self._req_id)
return metadata + [("thread_id", thread_id), ("req_id", req_id)]
def _on_channel_state_change(self, conn_state: grpc.ChannelConnectivity):
logger.debug(f"client gRPC channel state change: {conn_state}")
self._conn_state = conn_state
def connection_info(self):
try:
data = self.data_client.ConnectionInfo()
except grpc.RpcError as e:
raise decode_exception(e)
return {
"num_clients": data.num_clients,
"python_version": data.python_version,
"ray_version": data.ray_version,
"ray_commit": data.ray_commit,
}
def register_callback(
self,
ref: ClientObjectRef,
callback: Callable[[ray_client_pb2.DataResponse], None],
) -> None:
req = ray_client_pb2.GetRequest(ids=[ref.id], asynchronous=True)
self.data_client.RegisterGetCallback(req, callback)
def get(self, vals, *, timeout: Optional[float] = None) -> Any:
if isinstance(vals, list):
if not vals:
return []
to_get = vals
elif isinstance(vals, ClientObjectRef):
to_get = [vals]
else:
raise Exception(
"Can't get something that's not a "
"list of IDs or just an ID: %s" % type(vals)
)
if timeout is None:
deadline = None
else:
deadline = time.monotonic() + timeout
while True:
if deadline:
op_timeout = min(
MAX_BLOCKING_OPERATION_TIME_S,
max(deadline - time.monotonic(), 0.001),
)
else:
op_timeout = MAX_BLOCKING_OPERATION_TIME_S
try:
res = self._get(to_get, op_timeout)
break
except GetTimeoutError:
if deadline and time.monotonic() > deadline:
raise
logger.debug("Internal retry for get {}".format(to_get))
if len(to_get) != len(res):
raise Exception(
"Mismatched number of items in request ({}) and response ({})".format(
len(to_get), len(res)
)
)
if isinstance(vals, ClientObjectRef):
res = res[0]
return res
def _get(self, ref: List[ClientObjectRef], timeout: float):
req = ray_client_pb2.GetRequest(ids=[r.id for r in ref], timeout=timeout)
data = bytearray()
try:
resp = self._get_object_iterator(req, metadata=self.metadata)
for chunk in resp:
if not chunk.valid:
try:
err = cloudpickle.loads(chunk.error)
except (pickle.UnpicklingError, TypeError):
logger.exception("Failed to deserialize {}".format(chunk.error))
raise
raise err
if chunk.total_size > OBJECT_TRANSFER_WARNING_SIZE and log_once(
"client_object_transfer_size_warning"
):
size_gb = chunk.total_size / 2**30
warnings.warn(
"Ray Client is attempting to retrieve a "
f"{size_gb:.2f} GiB object over the network, which may "
"be slow. Consider serializing the object to a file "
"and using S3 or rsync instead.",
UserWarning,
stacklevel=5,
)
data.extend(chunk.data)
except grpc.RpcError as e:
raise decode_exception(e)
return loads_from_server(data)
def put(
self,
val,
*,
client_ref_id: bytes = None,
_owner: Optional[ClientActorHandle] = None,
):
if isinstance(val, ClientObjectRef):
raise TypeError(
"Calling 'put' on an ObjectRef is not allowed "
"(similarly, returning an ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ObjectRef in a list and "
"call 'put' on it (or return it)."
)
data = dumps_from_client(val, self._client_id)
return self._put_pickled(data, client_ref_id, _owner)
def _put_pickled(
self, data, client_ref_id: bytes, owner: Optional[ClientActorHandle] = None
):
req = ray_client_pb2.PutRequest(data=data)
if client_ref_id is not None:
req.client_ref_id = client_ref_id
if owner is not None:
req.owner_id = owner.actor_ref.id
resp = self.data_client.PutObject(req)
if not resp.valid:
try:
raise cloudpickle.loads(resp.error)
except (pickle.UnpicklingError, TypeError):
logger.exception("Failed to deserialize {}".format(resp.error))
raise
return ClientObjectRef(resp.id)
# TODO(ekl) respect MAX_BLOCKING_OPERATION_TIME_S for wait too
def wait(
self,
object_refs: List[ClientObjectRef],
*,
num_returns: int = 1,
timeout: float = None,
fetch_local: bool = True,
) -> Tuple[List[ClientObjectRef], List[ClientObjectRef]]:
if not isinstance(object_refs, list):
raise TypeError(
f"wait() expected a list of ClientObjectRef, got {type(object_refs)}"
)
for ref in object_refs:
if not isinstance(ref, ClientObjectRef):
raise TypeError(
"wait() expected a list of ClientObjectRef, "
f"got list containing {type(ref)}"
)
data = {
"object_ids": [object_ref.id for object_ref in object_refs],
"num_returns": num_returns,
"timeout": timeout if (timeout is not None) else -1,
"client_id": self._client_id,
}
req = ray_client_pb2.WaitRequest(**data)
resp = self._call_stub("WaitObject", req, metadata=self.metadata)
if not resp.valid:
# TODO(ameer): improve error/exceptions messages.
raise Exception("Client Wait request failed. Reference invalid?")
client_ready_object_ids = [
ClientObjectRef(ref) for ref in resp.ready_object_ids
]
client_remaining_object_ids = [
ClientObjectRef(ref) for ref in resp.remaining_object_ids
]
return (client_ready_object_ids, client_remaining_object_ids)
def call_remote(self, instance, *args, **kwargs) -> List[Future]:
task = instance._prepare_client_task()
# data is serialized tuple of (args, kwargs)
task.data = dumps_from_client((args, kwargs), self._client_id)
num_returns = instance._num_returns()
if num_returns == "dynamic":
num_returns = -1
if num_returns == "streaming":
raise RuntimeError(
'Streaming actor methods (num_returns="streaming") '
"are not currently supported when using Ray Client."
)
return self._call_schedule_for_task(task, num_returns)
def _call_schedule_for_task(
self, task: ray_client_pb2.ClientTask, num_returns: Optional[int]
) -> List[Future]:
logger.debug(f"Scheduling task {task.name} {task.type} {task.payload_id}")
task.client_id = self._client_id
if num_returns is None:
num_returns = 1
num_return_refs = num_returns
if num_return_refs == -1:
num_return_refs = 1
id_futures = [Future() for _ in range(num_return_refs)]
def populate_ids(resp: Union[ray_client_pb2.DataResponse, Exception]) -> None:
if isinstance(resp, Exception):
if isinstance(resp, grpc.RpcError):
resp = decode_exception(resp)
for future in id_futures:
future.set_exception(resp)
return
ticket = resp.task_ticket
if not ticket.valid:
try:
ex = cloudpickle.loads(ticket.error)
except (pickle.UnpicklingError, TypeError) as e_new:
ex = e_new
for future in id_futures:
future.set_exception(ex)
return
if len(ticket.return_ids) != num_return_refs:
exc = ValueError(
f"Expected {num_return_refs} returns but received "
f"{len(ticket.return_ids)}"
)
for future, raw_id in zip(id_futures, ticket.return_ids):
future.set_exception(exc)
return
for future, raw_id in zip(id_futures, ticket.return_ids):
future.set_result(raw_id)
self.data_client.Schedule(task, populate_ids)
self.total_outbound_message_size_bytes += task.ByteSize()
if (
self.total_outbound_message_size_bytes > MESSAGE_SIZE_THRESHOLD
and log_once("client_communication_overhead_warning")
):
warnings.warn(
"More than 10MB of messages have been created to schedule "
"tasks on the server. This can be slow on Ray Client due to "
"communication overhead over the network. If you're running "
"many fine-grained tasks, consider running them inside a "
'single remote function. See the section on "Too '
'fine-grained tasks" in the Ray Design Patterns document for '
f"more details: {DESIGN_PATTERN_FINE_GRAIN_TASKS_LINK}. If "
"your functions frequently use large objects, consider "
"storing the objects remotely with ray.put. An example of "
'this is shown in the "Closure capture of large / '
'unserializable object" section of the Ray Design Patterns '
"document, available here: "
f"{DESIGN_PATTERN_LARGE_OBJECTS_LINK}",
UserWarning,
)
return id_futures
def call_release(self, id: bytes) -> None:
if self.closed:
return
self.reference_count[id] -= 1
if self.reference_count[id] == 0:
self._release_server(id)
del self.reference_count[id]
def _release_server(self, id: bytes) -> None:
if self.data_client is not None:
logger.debug(f"Releasing {id.hex()}")
self.data_client.ReleaseObject(ray_client_pb2.ReleaseRequest(ids=[id]))
def call_retain(self, id: bytes) -> None:
logger.debug(f"Retaining {id.hex()}")
self.reference_count[id] += 1
def close(self):
self._in_shutdown = True
self.closed = True
self.data_client.close()
self.log_client.close()
self.server = None
if self.channel:
self.channel.close()
self.channel = None
def get_actor(
self, name: str, namespace: Optional[str] = None
) -> ClientActorHandle:
task = ray_client_pb2.ClientTask()
task.type = ray_client_pb2.ClientTask.NAMED_ACTOR
task.name = name
task.namespace = namespace or ""
# Populate task.data with empty args and kwargs
task.data = dumps_from_client(([], {}), self._client_id)
futures = self._call_schedule_for_task(task, 1)
assert len(futures) == 1
handle = ClientActorHandle(ClientActorRef(futures[0], weak_ref=True))
# `actor_ref.is_nil()` waits until the underlying ID is resolved.
# This is needed because `get_actor` is often used to check the
# existence of an actor.
if handle.actor_ref.is_nil():
raise ValueError(f"ActorID for {name} is empty")
return handle
def terminate_actor(self, actor: ClientActorHandle, no_restart: bool) -> None:
if not isinstance(actor, ClientActorHandle):
raise ValueError(
"ray.kill() only supported for actors. Got: {}.".format(type(actor))
)
term_actor = ray_client_pb2.TerminateRequest.ActorTerminate()
term_actor.id = actor.actor_ref.id
term_actor.no_restart = no_restart
term = ray_client_pb2.TerminateRequest(actor=term_actor)
term.client_id = self._client_id
try:
self.data_client.Terminate(term)
except grpc.RpcError as e:
raise decode_exception(e)
def terminate_task(
self, obj: ClientObjectRef, force: bool, recursive: bool
) -> None:
if not isinstance(obj, ClientObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(obj)}."
)
term_object = ray_client_pb2.TerminateRequest.TaskObjectTerminate()
term_object.id = obj.id
term_object.force = force
term_object.recursive = recursive
term = ray_client_pb2.TerminateRequest(task_object=term_object)
term.client_id = self._client_id
try:
self.data_client.Terminate(term)
except grpc.RpcError as e:
raise decode_exception(e)
def get_cluster_info(
self,
req_type: ray_client_pb2.ClusterInfoType.TypeEnum,
timeout: Optional[float] = None,
):
req = ray_client_pb2.ClusterInfoRequest()
req.type = req_type
resp = self.server.ClusterInfo(req, timeout=timeout, metadata=self.metadata)
if resp.WhichOneof("response_type") == "resource_table":
# translate from a proto map to a python dict
output_dict = dict(resp.resource_table.table)
return output_dict
elif resp.WhichOneof("response_type") == "runtime_context":
return resp.runtime_context
return json.loads(resp.json)
def internal_kv_get(self, key: bytes, namespace: Optional[bytes]) -> bytes:
req = ray_client_pb2.KVGetRequest(key=key, namespace=namespace)
try:
resp = self._call_stub("KVGet", req, metadata=self.metadata)
except grpc.RpcError as e:
raise decode_exception(e)
if resp.HasField("value"):
return resp.value
# Value is None when the key does not exist in the KV.
return None
def internal_kv_exists(self, key: bytes, namespace: Optional[bytes]) -> bool:
req = ray_client_pb2.KVExistsRequest(key=key, namespace=namespace)
try:
resp = self._call_stub("KVExists", req, metadata=self.metadata)
except grpc.RpcError as e:
raise decode_exception(e)
return resp.exists
def internal_kv_put(
self, key: bytes, value: bytes, overwrite: bool, namespace: Optional[bytes]
) -> bool:
req = ray_client_pb2.KVPutRequest(
key=key, value=value, overwrite=overwrite, namespace=namespace
)
metadata = self._add_ids_to_metadata(self.metadata)
try:
resp = self._call_stub("KVPut", req, metadata=metadata)
except grpc.RpcError as e:
raise decode_exception(e)
return resp.already_exists
def internal_kv_del(
self, key: bytes, del_by_prefix: bool, namespace: Optional[bytes]
) -> int:
req = ray_client_pb2.KVDelRequest(
key=key, del_by_prefix=del_by_prefix, namespace=namespace
)
metadata = self._add_ids_to_metadata(self.metadata)
try:
resp = self._call_stub("KVDel", req, metadata=metadata)
except grpc.RpcError as e:
raise decode_exception(e)
return resp.deleted_num
def internal_kv_list(
self, prefix: bytes, namespace: Optional[bytes]
) -> List[bytes]:
try:
req = ray_client_pb2.KVListRequest(prefix=prefix, namespace=namespace)
return self._call_stub("KVList", req, metadata=self.metadata).keys
except grpc.RpcError as e:
raise decode_exception(e)
def pin_runtime_env_uri(self, uri: str, expiration_s: int) -> None:
req = ray_client_pb2.ClientPinRuntimeEnvURIRequest(
uri=uri, expiration_s=expiration_s
)
self._call_stub("PinRuntimeEnvURI", req, metadata=self.metadata)
def list_named_actors(self, all_namespaces: bool) -> List[Dict[str, str]]:
req = ray_client_pb2.ClientListNamedActorsRequest(all_namespaces=all_namespaces)
return json.loads(self.data_client.ListNamedActors(req).actors_json)
def is_initialized(self) -> bool:
if not self.is_connected() or self.server is None:
return False
if not self._serverside_ray_initialized:
# We only check that Ray is initialized on the server once to
# avoid making an RPC every time this function is called. This is
# safe to do because Ray only 'un-initializes' on the server when
# the Client connection is torn down.
self._serverside_ray_initialized = self.get_cluster_info(
ray_client_pb2.ClusterInfoType.IS_INITIALIZED
)
return self._serverside_ray_initialized
def ping_server(self, timeout=None) -> bool:
"""Simple health check.
Piggybacks the IS_INITIALIZED call to check if the server provides
an actual response.
"""
if self.server is not None:
logger.debug("Pinging server.")
result = self.get_cluster_info(
ray_client_pb2.ClusterInfoType.PING, timeout=timeout
)
return result is not None
return False
def is_connected(self) -> bool:
return not self._in_shutdown and self._has_connected
def _server_init(
self, job_config: JobConfig, ray_init_kwargs: Optional[Dict[str, Any]] = None
):
"""Initialize the server"""
if ray_init_kwargs is None:
ray_init_kwargs = {}
try:
if job_config is None:
serialized_job_config = None
else:
with tempfile.TemporaryDirectory() as tmp_dir:
from ray._private.ray_constants import (
RAY_RUNTIME_ENV_IGNORE_GITIGNORE,
)
runtime_env = job_config.runtime_env or {}
# Determine whether to respect .gitignore files based on environment variable
# Default is True (respect .gitignore). Set to False if env var is "1".
include_gitignore = (
os.environ.get(RAY_RUNTIME_ENV_IGNORE_GITIGNORE, "0") != "1"
)
runtime_env = upload_py_modules_if_needed(
runtime_env,
scratch_dir=tmp_dir,
include_gitignore=include_gitignore,
logger=logger,
)
runtime_env = upload_working_dir_if_needed(
runtime_env,
scratch_dir=tmp_dir,
include_gitignore=include_gitignore,
logger=logger,
)
# Remove excludes, it isn't relevant after the upload step.
runtime_env.pop("excludes", None)
job_config.set_runtime_env(runtime_env, validate=True)
serialized_job_config = pickle.dumps(job_config)
response = self.data_client.Init(
ray_client_pb2.InitRequest(
job_config=serialized_job_config,
ray_init_kwargs=json.dumps(ray_init_kwargs),
reconnect_grace_period=self._reconnect_grace_period,
)
)
if not response.ok:
raise ConnectionAbortedError(
f"Initialization failure from server:\n{response.msg}"
)
except grpc.RpcError as e:
raise decode_exception(e)
def _convert_actor(self, actor: "ActorClass") -> str:
"""Register a ClientActorClass for the ActorClass and return a UUID"""
key = uuid.uuid4().hex
cls = actor.__ray_metadata__.modified_class
self._converted[key] = ClientActorClass(cls, options=actor._default_options)
return key
def _convert_function(self, func: "RemoteFunction") -> str:
"""Register a ClientRemoteFunc for the ActorClass and return a UUID"""
key = uuid.uuid4().hex
self._converted[key] = ClientRemoteFunc(
func._function, options=func._default_options
)
return key
def _get_converted(self, key: str) -> "ClientStub":
"""Given a UUID, return the converted object"""
return self._converted[key]
def _converted_key_exists(self, key: str) -> bool:
"""Check if a key UUID is present in the store of converted objects."""
return key in self._converted
def _dumps_from_client(self, val) -> bytes:
return dumps_from_client(val, self._client_id)
def make_client_id() -> str:
id = uuid.uuid4()
return id.hex
def decode_exception(e: grpc.RpcError) -> Exception:
if e.code() != grpc.StatusCode.ABORTED:
# The ABORTED status code is used by the server when an application
# error is serialized into the exception details. If the code
# isn't ABORTED, then return the original error since there's no
# serialized error to decode.
# See server.py::return_exception_in_context for details
return ConnectionError(f"GRPC connection failed: {e}")
data = base64.standard_b64decode(e.details())
return loads_from_server(data)
| Worker |
python | PyCQA__pylint | tests/functional/s/singledispatch/singledispatch_method.py | {
"start": 1155,
"end": 2020
} | class ____:
@singledispatch # [singledispatch-method]
@staticmethod
def convert_position(position):
pass
@convert_position.register # [singledispatch-method]
@staticmethod
def _(position: str) -> tuple:
position_a, position_b = position.split(",")
return (int(position_a), int(position_b))
@convert_position.register # [singledispatch-method]
@staticmethod
def _(position: tuple) -> str:
return f"{position[0]},{position[1]}"
# Do not emit `singledispatch-method`:
@singledispatch
def convert_position(position):
print(position)
@convert_position.register
def _(position: str) -> tuple:
position_a, position_b = position.split(",")
return (int(position_a), int(position_b))
@convert_position.register
def _(position: tuple) -> str:
return f"{position[0]},{position[1]}"
| Board3 |
python | PrefectHQ__prefect | tests/server/orchestration/test_policies.py | {
"start": 226,
"end": 2909
} | class ____:
def test_policies_return_rules_in_priority_order(self):
class FirstRuleOfFightClub(BaseOrchestrationRule):
TO_STATES = ALL_ORCHESTRATION_STATES
FROM_STATES = ALL_ORCHESTRATION_STATES
def before_transition(self, initial_state, proposed_state, context):
"we don't talk about fight club"
class SecondRuleOfFightClub(BaseOrchestrationRule):
TO_STATES = ALL_ORCHESTRATION_STATES
FROM_STATES = ALL_ORCHESTRATION_STATES
def before_transition(self, initial_state, proposed_state, context):
"we don't talk about fight club"
class FightClub(BaseOrchestrationPolicy):
@staticmethod
def priority():
return [
FirstRuleOfFightClub,
SecondRuleOfFightClub,
]
class CopyCatClub(BaseOrchestrationPolicy):
@staticmethod
def priority():
return [
FirstRuleOfFightClub,
SecondRuleOfFightClub,
]
class DefinitelyADifferentClub(BaseOrchestrationPolicy):
@staticmethod
def priority():
return [
SecondRuleOfFightClub,
FirstRuleOfFightClub,
]
transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
fight_club_rules = FightClub.compile_transition_rules(*transition)
copycat_rules = CopyCatClub.compile_transition_rules(*transition)
definitely_different_rules = DefinitelyADifferentClub.compile_transition_rules(
*transition
)
assert fight_club_rules == copycat_rules
assert fight_club_rules != definitely_different_rules
def test_policies_only_return_relevant_rules(self):
class UnenforcableRule(BaseOrchestrationRule):
TO_STATES = []
FROM_STATES = []
class UselessRule(BaseOrchestrationRule):
TO_STATES = []
FROM_STATES = []
class ValidRule(BaseOrchestrationRule):
TO_STATES = ALL_ORCHESTRATION_STATES
FROM_STATES = ALL_ORCHESTRATION_STATES
class Bureaucracy(BaseOrchestrationPolicy):
@staticmethod
def priority():
return [
UselessRule,
UnenforcableRule,
ValidRule,
]
transition = (states.StateType.PENDING, states.StateType.RUNNING)
assert Bureaucracy.compile_transition_rules(*transition) == [ValidRule]
| TestPoliciesRespectOrdering |
python | sympy__sympy | sympy/core/exprtools.py | {
"start": 9678,
"end": 26677
} | class ____:
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ('factors', 'gens')
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset({2, x})
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors in (None, S.One):
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left .
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
# Handle all rational Coefficients
for f in list(factors.keys()):
if isinstance(f, Rational) and not isinstance(f, Integer):
p, q = Integer(f.p), Integer(f.q)
factors[p] = (factors[p] if p in factors else S.Zero) + factors[f]
factors[q] = (factors[q] if q in factors else S.Zero) - factors[f]
factors.pop(f)
if i:
factors[I] = factors.get(I, S.Zero) + i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = [k for k in factors if k is I or k in (-1, 1)]
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
factors[a.base] = factors.get(a.base, S.Zero) + a.exp
elif equal_valued(a, 1):
factors[a] = S.One
elif equal_valued(a, -1):
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
keys = getattr(factors, 'keys', None)
if keys is None:
raise TypeError('expecting Expr or dictionary')
self.gens = frozenset(keys())
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
if isinstance(exp, Integer):
b, e = factor.as_base_exp()
e = _keep_coeff(exp, e)
args.append(b**e)
else:
args.append(factor**exp)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
The only differences between this and method ``div`` is that this
is 1) optimized for the case when there are few factors in common and
2) this does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-bases powers:
>>> from sympy import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt == True:
factors[factor] = exp
elif lt == False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __truediv__(self, other): # Factors
return self.quo(other)
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self == other
| Factors |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/shape_output_test.py | {
"start": 5390,
"end": 6450
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Two inputs, one of the is pruned."""
def setUp(self):
super().setUp()
self.DisableNonTrtOptimizers()
def GraphFn(self, x, y):
q = array_ops.shape(x)
z = y * y + y
z = gen_array_ops.reshape(z, q)
out_0 = array_ops.identity(q, name="output_0")
out_1 = array_ops.identity(z, name="output_1")
return (out_0, out_1)
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[1, 2, 5, 3], [2, 15]], [[4], [1, 2, 5, 3]],
extra_inputs=[],
extra_outputs=[],
input_mask=[[False, True, True, True], [False, True]],
output_mask=[[True], [False, True, True, True]])
def ExpectedEnginesToBuild(self, run_params):
"""Returns the expected engines to build."""
return ["TRTEngineOp_000"]
def ShouldRunTest(self, run_params):
# Shape op is only converted in dynamic shape mode.
return (run_params.dynamic_shape and
run_params.is_v2, "test v2 dynamic shape")
| PrunedInputTest2 |
python | kamyu104__LeetCode-Solutions | Python/find-k-length-substrings-with-no-repeated-characters.py | {
"start": 29,
"end": 441
} | class ____(object):
def numKLenSubstrNoRepeats(self, S, K):
"""
:type S: str
:type K: int
:rtype: int
"""
result, i = 0, 0
lookup = set()
for j in xrange(len(S)):
while S[j] in lookup:
lookup.remove(S[i])
i += 1
lookup.add(S[j])
result += j-i+1 >= K
return result
| Solution |
python | spyder-ide__spyder | spyder/plugins/debugger/widgets/main_widget.py | {
"start": 1889,
"end": 2001
} | class ____:
Display = 'excludes_section'
Highlight = 'highlight_section'
| DebuggerWidgetOptionsMenuSections |
python | ray-project__ray | rllib/examples/rl_modules/classes/vpg_torch_rlm.py | {
"start": 116,
"end": 2746
} | class ____(TorchRLModule):
"""A simple VPG (vanilla policy gradient)-style RLModule for testing purposes.
Use this as a minimum, bare-bones example implementation of a custom TorchRLModule.
"""
def setup(self):
"""Use this method to create all the model components that you require.
Feel free to access the following useful properties in this class:
- `self.model_config`: The config dict for this RLModule class,
which should contain flexible settings, for example: {"hiddens": [256, 256]}.
- `self.observation|action_space`: The observation and action space that
this RLModule is subject to. Note that the observation space might not be the
exact space from your env, but that it might have already gone through
preprocessing through a connector pipeline (for example, flattening,
frame-stacking, mean/std-filtering, etc..).
- `self.inference_only`: If True, this model should be built only for inference
purposes, in which case you may want to exclude any components that are not used
for computing actions, for example a value function branch.
"""
input_dim = self.observation_space.shape[0]
hidden_dim = self.model_config["hidden_dim"]
output_dim = self.action_space.n
self._policy_net = torch.nn.Sequential(
torch.nn.Linear(input_dim, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, output_dim),
)
def _forward(self, batch, **kwargs):
# Push the observations from the batch through our `self._policy_net`.
action_logits = self._policy_net(batch[Columns.OBS])
# Return parameters for the (default) action distribution, which is
# `TorchCategorical` (due to our action space being `gym.spaces.Discrete`).
return {Columns.ACTION_DIST_INPUTS: action_logits}
# If you need more granularity between the different forward behaviors during
# the different phases of the module's lifecycle, implement three different
# forward methods. Thereby, it is recommended to put the inference and
# exploration versions inside a `with torch.no_grad()` context for better
# performance.
# def _forward_train(self, batch):
# ...
#
# def _forward_inference(self, batch):
# with torch.no_grad():
# return self._forward_train(batch)
#
# def _forward_exploration(self, batch):
# with torch.no_grad():
# return self._forward_train(batch)
| VPGTorchRLModule |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/optimization/automatic.py | {
"start": 1442,
"end": 3612
} | class ____(OutputResult):
"""A container to hold the result of a :class:`Closure` call.
It is created from the output of :meth:`~lightning.pytorch.core.LightningModule.training_step`.
Attributes:
closure_loss: The loss with a graph attached.
loss: A detached copy of the closure loss.
extra: Any keys other than the loss returned.
"""
closure_loss: Optional[Tensor]
loss: Optional[Tensor] = field(init=False, default=None)
extra: dict[str, Any] = field(default_factory=dict)
def __post_init__(self) -> None:
self._clone_loss()
def _clone_loss(self) -> None:
if self.closure_loss is not None:
# the loss will get scaled for amp. avoid any modifications to it
self.loss = self.closure_loss.detach().clone()
@classmethod
def from_training_step_output(cls, training_step_output: STEP_OUTPUT, normalize: int = 1) -> "ClosureResult":
closure_loss, extra = None, {}
if isinstance(training_step_output, Mapping):
closure_loss = training_step_output.get("loss")
if closure_loss is None:
raise MisconfigurationException(
"In automatic_optimization, when `training_step` returns a dict, the 'loss' key needs to be present"
)
extra = {k: v for k, v in training_step_output.items() if k != "loss"}
elif isinstance(training_step_output, Tensor):
closure_loss = training_step_output
elif training_step_output is not None:
raise MisconfigurationException(
"In automatic optimization, `training_step` must return a Tensor, a dict, or None (where the step will"
" be skipped)."
)
if closure_loss is not None:
# accumulate the loss. If ``accumulate_grad_batches == 1``, no effect
# note: avoid in-place operation `x /= y` here on purpose
closure_loss = closure_loss / normalize
return cls(closure_loss, extra=extra)
@override
def asdict(self) -> dict[str, Any]:
return {"loss": self.loss, **self.extra}
| ClosureResult |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1178808,
"end": 1179006
} | class ____(VegaLiteSchema):
"""SelectionType schema wrapper."""
_schema = {"$ref": "#/definitions/SelectionType"}
def __init__(self, *args):
super().__init__(*args)
| SelectionType |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/llm_config.py | {
"start": 1961,
"end": 2055
} | class ____(str, Enum):
"""Enum that represents an LLMEngine."""
vLLM = "vLLM"
| LLMEngine |
python | joblib__joblib | joblib/externals/loky/backend/queues.py | {
"start": 705,
"end": 6145
} | class ____(mp_Queue):
def __init__(self, maxsize=0, reducers=None, ctx=None):
super().__init__(maxsize=maxsize, ctx=ctx)
self._reducers = reducers
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (
self._ignore_epipe,
self._maxsize,
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
self._sem,
self._opid,
)
def __setstate__(self, state):
(
self._ignore_epipe,
self._maxsize,
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
self._sem,
self._opid,
) = state
if sys.version_info >= (3, 9):
self._reset()
else:
self._after_fork()
# Overload _start_thread to correctly call our custom _feed
def _start_thread(self):
util.debug("Queue._start_thread()")
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(
self._buffer,
self._notempty,
self._send_bytes,
self._wlock,
self._writer.close,
self._reducers,
self._ignore_epipe,
self._on_queue_feeder_error,
self._sem,
),
name="QueueFeederThread",
)
self._thread.daemon = True
util.debug("doing self._thread.start()")
self._thread.start()
util.debug("... done self._thread.start()")
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = self._opid == os.getpid()
if not self._joincancelled and not created_by_this_process:
self._jointhread = util.Finalize(
self._thread,
Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5,
)
# Send sentinel to the thread queue object when garbage collected
self._close = util.Finalize(
self,
Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10,
)
# Overload the _feed methods to use our custom pickling strategy.
@staticmethod
def _feed(
buffer,
notempty,
send_bytes,
writelock,
close,
reducers,
ignore_epipe,
onerror,
queue_sem,
):
util.debug("starting thread to feed data to pipe")
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != "win32":
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
while True:
try:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while True:
obj = bpopleft()
if obj is sentinel:
util.debug("feeder thread got sentinel -- exiting")
close()
return
# serialize the data before acquiring the lock
obj_ = dumps(obj, reducers=reducers)
if wacquire is None:
send_bytes(obj_)
else:
wacquire()
try:
send_bytes(obj_)
finally:
wrelease()
# Remove references early to avoid leaking memory
del obj, obj_
except IndexError:
pass
except BaseException as e:
if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
# may be become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
if util.is_exiting():
util.info(f"error in queue thread: {e}")
return
else:
queue_sem.release()
onerror(e, obj)
def _on_queue_feeder_error(self, e, obj):
"""
Private API hook called when feeding data in the background thread
raises an exception. For overriding by concurrent.futures.
"""
import traceback
traceback.print_exc()
| Queue |
python | PrefectHQ__prefect | tests/cli/test_start_server.py | {
"start": 8354,
"end": 12184
} | class ____:
def test_start_and_stop_background_server(self, unused_tcp_port: int):
invoke_and_assert(
command=[
"server",
"start",
"--port",
str(unused_tcp_port),
"--background",
],
expected_output_contains="The Prefect server is running in the background.",
expected_code=0,
)
pid_file = PREFECT_HOME.value() / "server.pid"
assert pid_file.exists(), "Server PID file does not exist"
invoke_and_assert(
command=[
"server",
"stop",
],
expected_output_contains="Server stopped!",
expected_code=0,
)
assert not (PREFECT_HOME.value() / "server.pid").exists(), (
"Server PID file exists"
)
def test_start_duplicate_background_server(
self, unused_tcp_port_factory: Callable[[], int]
):
port_1 = unused_tcp_port_factory()
invoke_and_assert(
command=[
"server",
"start",
"--port",
str(port_1),
"--background",
],
expected_output_contains="The Prefect server is running in the background.",
expected_code=0,
)
port_2 = unused_tcp_port_factory()
invoke_and_assert(
command=[
"server",
"start",
"--port",
str(port_2),
"--background",
],
expected_output_contains="A server is already running in the background.",
expected_code=1,
)
invoke_and_assert(
command=[
"server",
"stop",
],
expected_output_contains="Server stopped!",
expected_code=0,
)
def test_start_port_in_use(self, unused_tcp_port: int):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", unused_tcp_port))
invoke_and_assert(
command=[
"server",
"start",
"--port",
str(unused_tcp_port),
"--background",
],
expected_output_contains=f"Port {unused_tcp_port} is already in use.",
expected_code=1,
)
def test_start_port_in_use_by_background_server(self, unused_tcp_port: int):
pid_file = PREFECT_HOME.value() / SERVER_PID_FILE_NAME
pid_file.write_text("99999")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", unused_tcp_port))
invoke_and_assert(
command=[
"server",
"start",
"--port",
str(unused_tcp_port),
"--background",
],
expected_output_contains=f"A background server process is already running on port {unused_tcp_port}.",
expected_code=1,
)
def test_stop_stale_pid_file(self, unused_tcp_port: int):
pid_file = PREFECT_HOME.value() / SERVER_PID_FILE_NAME
pid_file.write_text("99999")
invoke_and_assert(
command=[
"server",
"stop",
],
expected_output_contains="Cleaning up stale PID file.",
expected_output_does_not_contain="Server stopped!",
expected_code=0,
)
assert not (PREFECT_HOME.value() / "server.pid").exists(), (
"Server PID file exists"
)
@pytest.mark.service("process")
| TestBackgroundServer |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 4417,
"end": 4585
} | class ____(CoreSchemaGenerationWarning):
"""A warning raised when a `Field()` attribute isn't supported in the context it is used."""
| UnsupportedFieldAttributeWarning |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 69926,
"end": 72191
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
for v in self.get_arguments("vary"):
self.add_header("Vary", v)
# Must write at least MIN_LENGTH bytes to activate compression.
self.write("hello world" + ("!" * GZipContentEncoding.MIN_LENGTH))
def get_app_kwargs(self):
return dict(
gzip=True, static_path=os.path.join(os.path.dirname(__file__), "static")
)
def assert_compressed(self, response):
# simple_httpclient renames the content-encoding header;
# curl_httpclient doesn't.
self.assertEqual(
response.headers.get(
"Content-Encoding", response.headers.get("X-Consumed-Content-Encoding")
),
"gzip",
)
def test_gzip(self):
response = self.fetch("/")
self.assert_compressed(response)
self.assertEqual(response.headers["Vary"], "Accept-Encoding")
def test_gzip_static(self):
# The streaming responses in StaticFileHandler have subtle
# interactions with the gzip output so test this case separately.
response = self.fetch("/robots.txt")
self.assert_compressed(response)
self.assertEqual(response.headers["Vary"], "Accept-Encoding")
def test_gzip_not_requested(self):
response = self.fetch("/", use_gzip=False)
self.assertNotIn("Content-Encoding", response.headers)
self.assertEqual(response.headers["Vary"], "Accept-Encoding")
def test_vary_already_present(self):
response = self.fetch("/?vary=Accept-Language")
self.assert_compressed(response)
self.assertEqual(
[s.strip() for s in response.headers["Vary"].split(",")],
["Accept-Language", "Accept-Encoding"],
)
def test_vary_already_present_multiple(self):
# Regression test for https://github.com/tornadoweb/tornado/issues/1670
response = self.fetch("/?vary=Accept-Language&vary=Cookie")
self.assert_compressed(response)
self.assertEqual(
[s.strip() for s in response.headers["Vary"].split(",")],
["Accept-Language", "Cookie", "Accept-Encoding"],
)
| GzipTestCase |
python | ray-project__ray | python/ray/serve/tests/test_metrics_2.py | {
"start": 1545,
"end": 23228
} | class ____:
def _generate_metrics_summary(self, metrics: List[Dict]):
"""Generate "route" and "application" information from metrics.
Args:
metrics: List of metric dictionaries, each generated by the
get_metric_dictionaries function.
Returns:
Tuple[dict, dict]:
- The first dictionary maps deployment names to a set of routes.
- The second dictionary maps deployment names to application names.
"""
metrics_summary_route = DefaultDict(set)
metrics_summary_app = DefaultDict(str)
for request_metrics in metrics:
metrics_summary_route[request_metrics["deployment"]].add(
request_metrics["route"]
)
metrics_summary_app[request_metrics["deployment"]] = request_metrics[
"application"
]
return metrics_summary_route, metrics_summary_app
def verify_metrics(self, metric, expected_output):
for key in expected_output:
assert metric[key] == expected_output[key]
def test_request_context_pass_for_http_proxy(self, metrics_start_shutdown):
"""Test HTTP proxy passing request context"""
@serve.deployment(graceful_shutdown_timeout_s=0.001)
def f():
return "hello"
@serve.deployment(graceful_shutdown_timeout_s=0.001)
def g():
return "world"
@serve.deployment(graceful_shutdown_timeout_s=0.001)
def h():
return 1 / 0
serve.run(f.bind(), name="app1", route_prefix="/app1")
serve.run(g.bind(), name="app2", route_prefix="/app2")
serve.run(h.bind(), name="app3", route_prefix="/app3")
resp = httpx.get("http://127.0.0.1:8000/app1")
assert resp.status_code == 200
assert resp.text == "hello"
resp = httpx.get("http://127.0.0.1:8000/app2")
assert resp.status_code == 200
assert resp.text == "world"
resp = httpx.get("http://127.0.0.1:8000/app3")
assert resp.status_code == 500
timeseries = PrometheusTimeseries()
wait_for_condition(
lambda: len(
get_metric_dictionaries(
"ray_serve_deployment_processing_latency_ms_sum",
timeseries=timeseries,
)
)
== 3,
timeout=40,
)
def wait_for_route_and_name(
metric_name: str,
deployment_name: str,
app_name: str,
route: str,
timeout: float = 5,
):
"""Waits for app name and route to appear in deployment's metric."""
def check():
# Check replica qps & latency
(
qps_metrics_route,
qps_metrics_app_name,
) = self._generate_metrics_summary(
get_metric_dictionaries(metric_name, timeseries=timeseries),
)
assert qps_metrics_app_name[deployment_name] == app_name
assert qps_metrics_route[deployment_name] == {route}
return True
wait_for_condition(check, timeout=timeout)
# Check replica qps & latency
wait_for_route_and_name(
"ray_serve_deployment_request_counter_total", "f", "app1", "/app1"
)
wait_for_route_and_name(
"ray_serve_deployment_request_counter_total", "g", "app2", "/app2"
)
wait_for_route_and_name(
"ray_serve_deployment_error_counter_total", "h", "app3", "/app3"
)
# Check http proxy qps & latency
for metric_name in [
"ray_serve_num_http_requests_total",
"ray_serve_http_request_latency_ms_sum",
]:
metrics = [
sample.labels
for sample in fetch_prometheus_metric_timeseries(
["localhost:9999"], timeseries
)[metric_name]
]
assert {metric["route"] for metric in metrics} == {
"/app1",
"/app2",
"/app3",
}
for metric_name in [
"ray_serve_handle_request_counter_total",
"ray_serve_num_router_requests_total",
"ray_serve_deployment_processing_latency_ms_sum",
]:
metrics_route, metrics_app_name = self._generate_metrics_summary(
[
sample.labels
for sample in fetch_prometheus_metric_timeseries(
["localhost:9999"], timeseries
)[metric_name]
]
)
msg = f"Incorrect metrics for {metric_name}"
assert metrics_route["f"] == {"/app1"}, msg
assert metrics_route["g"] == {"/app2"}, msg
assert metrics_route["h"] == {"/app3"}, msg
assert metrics_app_name["f"] == "app1", msg
assert metrics_app_name["g"] == "app2", msg
assert metrics_app_name["h"] == "app3", msg
def test_request_context_pass_for_grpc_proxy(self, metrics_start_shutdown):
"""Test gRPC proxy passing request context"""
@serve.deployment(graceful_shutdown_timeout_s=0.001)
class H:
def __call__(self, *args, **kwargs):
return 1 / 0
h = H.bind()
app_name1 = "app1"
depl_name1 = "grpc-deployment"
app_name2 = "app2"
depl_name2 = "grpc-deployment-model-composition"
app_name3 = "app3"
depl_name3 = "H"
serve.run(g, name=app_name1, route_prefix="/app1")
serve.run(g2, name=app_name2, route_prefix="/app2")
serve.run(h, name=app_name3, route_prefix="/app3")
channel = grpc.insecure_channel("localhost:9000")
ping_grpc_call_method(channel, app_name1)
ping_fruit_stand(channel, app_name2)
with pytest.raises(grpc.RpcError):
ping_grpc_call_method(channel, app_name3)
timeseries = PrometheusTimeseries()
# app1 has 1 deployment, app2 has 3 deployments, and app3 has 1 deployment.
wait_for_condition(
lambda: len(
get_metric_dictionaries(
"ray_serve_deployment_processing_latency_ms_sum",
timeseries=timeseries,
)
)
== 5,
timeout=40,
)
def wait_for_route_and_name(
_metric_name: str,
deployment_name: str,
app_name: str,
route: str,
timeout: float = 5,
):
"""Waits for app name and route to appear in deployment's metric."""
def check():
# Check replica qps & latency
(
qps_metrics_route,
qps_metrics_app_name,
) = self._generate_metrics_summary(
get_metric_dictionaries(_metric_name, timeseries=timeseries),
)
assert qps_metrics_app_name[deployment_name] == app_name
assert qps_metrics_route[deployment_name] == {route}
return True
wait_for_condition(check, timeout=timeout)
# Check replica qps & latency
wait_for_route_and_name(
"ray_serve_deployment_request_counter_total",
depl_name1,
app_name1,
app_name1,
)
wait_for_route_and_name(
"ray_serve_deployment_request_counter_total",
depl_name2,
app_name2,
app_name2,
)
wait_for_route_and_name(
"ray_serve_deployment_error_counter_total", depl_name3, app_name3, app_name3
)
# Check grpc proxy qps & latency
for metric_name in [
"ray_serve_num_grpc_requests_total",
"ray_serve_grpc_request_latency_ms_sum",
]:
metrics = [
sample.labels
for sample in fetch_prometheus_metric_timeseries(
["localhost:9999"], timeseries
)[metric_name]
]
assert {metric["route"] for metric in metrics} == {
"app1",
"app2",
"app3",
}
for metric_name in [
"ray_serve_handle_request_counter_total",
"ray_serve_num_router_requests_total",
"ray_serve_deployment_processing_latency_ms_sum",
]:
metrics_route, metrics_app_name = self._generate_metrics_summary(
get_metric_dictionaries(metric_name, timeseries=timeseries),
)
msg = f"Incorrect metrics for {metric_name}"
assert metrics_route[depl_name1] == {"app1"}, msg
assert metrics_route[depl_name2] == {"app2"}, msg
assert metrics_route[depl_name3] == {"app3"}, msg
assert metrics_app_name[depl_name1] == "app1", msg
assert metrics_app_name[depl_name2] == "app2", msg
assert metrics_app_name[depl_name3] == "app3", msg
def test_request_context_pass_for_handle_passing(self, metrics_start_shutdown):
"""Test handle passing contexts between replicas"""
@serve.deployment
def g1():
return "ok1"
@serve.deployment
def g2():
return "ok2"
app = FastAPI()
@serve.deployment
@serve.ingress(app)
class G:
def __init__(self, handle1: DeploymentHandle, handle2: DeploymentHandle):
self.handle1 = handle1
self.handle2 = handle2
@app.get("/api")
async def app1(self):
return await self.handle1.remote()
@app.get("/api2")
async def app2(self):
return await self.handle2.remote()
serve.run(G.bind(g1.bind(), g2.bind()), name="app")
app_url = get_application_url("HTTP", "app")
resp = httpx.get(f"{app_url}/api")
assert resp.text == '"ok1"'
resp = httpx.get(f"{app_url}/api2")
assert resp.text == '"ok2"'
# G deployment metrics:
# {xxx, route:/api}, {xxx, route:/api2}
# g1 deployment metrics:
# {xxx, route:/api}
# g2 deployment metrics:
# {xxx, route:/api2}
timeseries = PrometheusTimeseries()
wait_for_condition(
lambda: len(
get_metric_dictionaries(
"ray_serve_deployment_request_counter_total", timeseries=timeseries
),
)
== 4,
timeout=40,
)
(
requests_metrics_route,
requests_metrics_app_name,
) = self._generate_metrics_summary(
get_metric_dictionaries(
"ray_serve_deployment_request_counter_total", timeseries=timeseries
),
)
assert requests_metrics_route["G"] == {"/api", "/api2"}
assert requests_metrics_route["g1"] == {"/api"}
assert requests_metrics_route["g2"] == {"/api2"}
assert requests_metrics_app_name["G"] == "app"
assert requests_metrics_app_name["g1"] == "app"
assert requests_metrics_app_name["g2"] == "app"
@pytest.mark.parametrize("route_prefix", ["", "/prefix"])
def test_fastapi_route_metrics(self, metrics_start_shutdown, route_prefix: str):
app = FastAPI()
@serve.deployment
@serve.ingress(app)
class A:
@app.get("/api")
def route1(self):
return "ok1"
@app.get("/api2/{user_id}")
def route2(self):
return "ok2"
if route_prefix:
serve.run(A.bind(), route_prefix=route_prefix)
else:
serve.run(A.bind())
base_url = get_application_url("HTTP")
resp = httpx.get(f"{base_url}/api")
assert resp.text == '"ok1"'
resp = httpx.get(f"{base_url}/api2/abc123")
assert resp.text == '"ok2"'
timeseries = PrometheusTimeseries()
wait_for_condition(
lambda: len(
get_metric_dictionaries(
"ray_serve_deployment_request_counter_total", timeseries=timeseries
)
)
== 2,
timeout=40,
)
(requests_metrics_route, _,) = self._generate_metrics_summary(
get_metric_dictionaries(
"ray_serve_deployment_request_counter_total", timeseries=timeseries
)
)
assert requests_metrics_route["A"] == {
route_prefix + "/api",
route_prefix + "/api2/{user_id}",
}
def test_customer_metrics_with_context(self, metrics_start_shutdown):
@serve.deployment
class Model:
def __init__(self):
self.counter = Counter(
"my_counter",
description="my counter metrics",
tag_keys=(
"my_static_tag",
"my_runtime_tag",
"route",
),
)
self.counter.set_default_tags({"my_static_tag": "static_value"})
self.histogram = Histogram(
"my_histogram",
description=("my histogram "),
boundaries=DEFAULT_LATENCY_BUCKET_MS,
tag_keys=(
"my_static_tag",
"my_runtime_tag",
"route",
),
)
self.histogram.set_default_tags({"my_static_tag": "static_value"})
self.gauge = Gauge(
"my_gauge",
description=("my_gauge"),
tag_keys=(
"my_static_tag",
"my_runtime_tag",
"route",
),
)
self.gauge.set_default_tags({"my_static_tag": "static_value"})
def __call__(self):
self.counter.inc(tags={"my_runtime_tag": "100"})
self.histogram.observe(200, tags={"my_runtime_tag": "200"})
self.gauge.set(300, tags={"my_runtime_tag": "300"})
return [
# NOTE(zcin): this is to match the current implementation in
# Serve's _add_serve_metric_default_tags().
ray.serve.context._INTERNAL_REPLICA_CONTEXT.deployment,
ray.serve.context._INTERNAL_REPLICA_CONTEXT.replica_id.unique_id,
]
timeseries = PrometheusTimeseries()
serve.run(Model.bind(), name="app", route_prefix="/app")
http_url = get_application_url("HTTP", "app")
resp = httpx.get(http_url)
deployment_name, replica_id = resp.json()
wait_for_condition(
lambda: len(
get_metric_dictionaries("ray_my_gauge", timeseries=timeseries),
)
== 1,
timeout=40,
)
counter_metrics = get_metric_dictionaries(
"ray_my_counter_total", timeseries=timeseries
)
assert len(counter_metrics) == 1
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "100",
"replica": replica_id,
"deployment": deployment_name,
"application": "app",
"route": "/app",
}
self.verify_metrics(counter_metrics[0], expected_metrics)
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "300",
"replica": replica_id,
"deployment": deployment_name,
"application": "app",
"route": "/app",
}
gauge_metrics = get_metric_dictionaries("ray_my_gauge", timeseries=timeseries)
assert len(gauge_metrics) == 1
self.verify_metrics(gauge_metrics[0], expected_metrics)
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "200",
"replica": replica_id,
"deployment": deployment_name,
"application": "app",
"route": "/app",
}
histogram_metrics = get_metric_dictionaries(
"ray_my_histogram_sum", timeseries=timeseries
)
assert len(histogram_metrics) == 1
self.verify_metrics(histogram_metrics[0], expected_metrics)
@pytest.mark.parametrize("use_actor", [False, True])
def test_serve_metrics_outside_serve(self, use_actor, metrics_start_shutdown):
"""Make sure ray.serve.metrics work in ray actor"""
if use_actor:
@ray.remote
class MyActor:
def __init__(self):
self.counter = Counter(
"my_counter",
description="my counter metrics",
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
self.counter.set_default_tags({"my_static_tag": "static_value"})
self.histogram = Histogram(
"my_histogram",
description=("my histogram "),
boundaries=DEFAULT_LATENCY_BUCKET_MS,
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
self.histogram.set_default_tags({"my_static_tag": "static_value"})
self.gauge = Gauge(
"my_gauge",
description=("my_gauge"),
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
self.gauge.set_default_tags({"my_static_tag": "static_value"})
def test(self):
self.counter.inc(tags={"my_runtime_tag": "100"})
self.histogram.observe(200, tags={"my_runtime_tag": "200"})
self.gauge.set(300, tags={"my_runtime_tag": "300"})
return "hello"
else:
counter = Counter(
"my_counter",
description="my counter metrics",
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
histogram = Histogram(
"my_histogram",
description=("my histogram "),
boundaries=DEFAULT_LATENCY_BUCKET_MS,
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
gauge = Gauge(
"my_gauge",
description=("my_gauge"),
tag_keys=(
"my_static_tag",
"my_runtime_tag",
),
)
@ray.remote
def fn():
counter.set_default_tags({"my_static_tag": "static_value"})
histogram.set_default_tags({"my_static_tag": "static_value"})
gauge.set_default_tags({"my_static_tag": "static_value"})
counter.inc(tags={"my_runtime_tag": "100"})
histogram.observe(200, tags={"my_runtime_tag": "200"})
gauge.set(300, tags={"my_runtime_tag": "300"})
return "hello"
@serve.deployment
class Model:
def __init__(self):
if use_actor:
self.my_actor = MyActor.remote()
async def __call__(self):
if use_actor:
return await self.my_actor.test.remote()
else:
return await fn.remote()
serve.run(Model.bind(), name="app", route_prefix="/app")
http_url = get_application_url("HTTP", "app")
resp = httpx.get(http_url)
assert resp.text == "hello"
timeseries = PrometheusTimeseries()
wait_for_condition(
lambda: len(
get_metric_dictionaries("ray_my_gauge", timeseries=timeseries),
)
== 1,
timeout=40,
)
counter_metrics = get_metric_dictionaries(
"ray_my_counter_total", timeseries=timeseries
)
assert len(counter_metrics) == 1
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "100",
}
self.verify_metrics(counter_metrics[0], expected_metrics)
gauge_metrics = get_metric_dictionaries("ray_my_gauge", timeseries=timeseries)
assert len(gauge_metrics) == 1
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "300",
}
self.verify_metrics(gauge_metrics[0], expected_metrics)
histogram_metrics = get_metric_dictionaries(
"ray_my_histogram_sum", timeseries=timeseries
)
assert len(histogram_metrics) == 1
expected_metrics = {
"my_static_tag": "static_value",
"my_runtime_tag": "200",
}
self.verify_metrics(histogram_metrics[0], expected_metrics)
| TestRequestContextMetrics |
python | django__django | tests/defer/tests.py | {
"start": 14620,
"end": 16347
} | class ____(SimpleTestCase):
def test_invalid_defer(self):
msg = "Primary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.defer("missing"))
with self.assertRaisesMessage(FieldError, "missing"):
list(Primary.objects.defer("value__missing"))
msg = "Secondary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.defer("related__missing"))
def test_invalid_only(self):
msg = "Primary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.only("missing"))
with self.assertRaisesMessage(FieldError, "missing"):
list(Primary.objects.only("value__missing"))
msg = "Secondary has no field named 'missing'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
list(Primary.objects.only("related__missing"))
def test_defer_select_related_raises_invalid_query(self):
msg = (
"Field Primary.related cannot be both deferred and traversed using "
"select_related at the same time."
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
msg = (
"Field Primary.related cannot be both deferred and traversed using "
"select_related at the same time."
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.only("name").select_related("related")[0]
| InvalidDeferTests |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 49012,
"end": 50980
} | class ____(TypedDict):
version: int
extractCustomTags: list[str]
customMeasurements: CustomMeasurementSettings
acceptTransactionNames: TransactionNameStrategy
def _should_extract_transaction_metrics(project: Project) -> bool:
return features.has(
"organizations:transaction-metrics-extraction", project.organization
) and not killswitches.killswitch_matches_context(
"relay.drop-transaction-metrics", {"project_id": project.id}
)
def get_transaction_metrics_settings(
timeout: TimeChecker, project: Project, breakdowns_config: Mapping[str, Any] | None
) -> TransactionMetricsSettings:
"""This function assumes that the corresponding feature flag has been checked.
See _should_extract_transaction_metrics.
"""
custom_tags: list[str] = []
if breakdowns_config is not None:
# we already have a breakdown configuration that tells relay which
# breakdowns to compute for an event. metrics extraction should
# probably be in sync with that, or at least not extract more metrics
# than there are breakdowns configured.
try:
for _, breakdown_config in breakdowns_config.items():
assert breakdown_config["type"] == "spanOperations"
except Exception:
capture_exception()
# Tells relay which user-defined tags to add to each extracted
# transaction metric. This cannot include things such as `os.name`
# which are computed on the server, they have to come from the SDK as
# event tags.
try:
custom_tags.extend(project.get_option("sentry:transaction_metrics_custom_tags") or ())
except Exception:
capture_exception()
return {
"version": TRANSACTION_METRICS_EXTRACTION_VERSION,
"extractCustomTags": custom_tags,
"customMeasurements": {"limit": CUSTOM_MEASUREMENT_LIMIT},
"acceptTransactionNames": "clientBased",
}
| TransactionMetricsSettings |
python | encode__django-rest-framework | rest_framework/versioning.py | {
"start": 5953,
"end": 6801
} | class ____(BaseVersioning):
"""
GET /something/?version=0.1 HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in query parameter.')
def determine_version(self, request, *args, **kwargs):
version = request.query_params.get(self.version_param, self.default_version)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
url = super().reverse(
viewname, args, kwargs, request, format, **extra
)
if request.version is not None:
return replace_query_param(url, self.version_param, request.version)
return url
| QueryParameterVersioning |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/tags.py | {
"start": 4400,
"end": 4989
} | class ____(Enum):
# Custom tag provided by a user
USER_PROVIDED = "USER_PROVIDED"
# Tags used by Dagster to manage execution that should be surfaced to users.
SYSTEM = "SYSTEM"
# Metadata used by Dagster for execution but isn't useful for users to see.
# For example, metadata about the gRPC server that executed a run.
HIDDEN = "HIDDEN"
def get_tag_type(tag):
if tag.startswith(SYSTEM_TAG_PREFIX):
return TagType.SYSTEM
elif tag.startswith(HIDDEN_TAG_PREFIX):
return TagType.HIDDEN
else:
return TagType.USER_PROVIDED
| TagType |
python | pypa__pipenv | pipenv/vendor/importlib_metadata/__init__.py | {
"start": 9667,
"end": 22069
} | class ____(metaclass=abc.ABCMeta):
"""
An abstract Python distribution package.
Custom providers may derive from this class and define
the abstract methods to provide a concrete implementation
for their environment. Some providers may opt to override
the default implementation of some properties to bypass
the file-reading mechanism.
"""
@abc.abstractmethod
def read_text(self, filename) -> Optional[str]:
"""Attempt to load metadata file given by the name.
Python distribution metadata is organized by blobs of text
typically represented as "files" in the metadata directory
(e.g. package-1.0.dist-info). These files include things
like:
- METADATA: The distribution metadata including fields
like Name and Version and Description.
- entry_points.txt: A series of entry points as defined in
`the entry points spec <https://packaging.python.org/en/latest/specifications/entry-points/#file-format>`_.
- RECORD: A record of files according to
`this recording spec <https://packaging.python.org/en/latest/specifications/recording-installed-packages/#the-record-file>`_.
A package may provide any set of files, including those
not listed here or none at all.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path: str | os.PathLike[str]) -> SimplePath:
"""
Given a path to a file in this distribution, return a SimplePath
to it.
This method is used by callers of ``Distribution.files()`` to
locate files within the distribution. If it's possible for a
Distribution to represent files in the distribution as
``SimplePath`` objects, it should implement this method
to resolve such objects.
Some Distribution providers may elect not to resolve SimplePath
objects within the distribution by raising a
NotImplementedError, but consumers of such a Distribution would
be unable to invoke ``Distribution.files()``.
"""
@classmethod
def from_name(cls, name: str) -> Distribution:
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
:raises ValueError: When an invalid value is supplied for name.
"""
if not name:
raise ValueError("A distribution name is required.")
try:
return next(iter(cls._prefer_valid(cls.discover(name=name))))
except StopIteration:
raise PackageNotFoundError(name)
@classmethod
def discover(
cls, *, context: Optional[DistributionFinder.Context] = None, **kwargs
) -> Iterable[Distribution]:
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for packages matching
the context.
"""
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def _prefer_valid(dists: Iterable[Distribution]) -> Iterable[Distribution]:
"""
Prefer (move to the front) distributions that have metadata.
Ref python/importlib_resources#489.
"""
buckets = bucket(dists, lambda dist: bool(dist.metadata))
return itertools.chain(buckets[True], buckets[False])
@staticmethod
def at(path: str | os.PathLike[str]) -> Distribution:
"""Return a Distribution for the indicated metadata path.
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers (MetadataPathFinders)."""
declared = (
getattr(finder, 'find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self) -> _meta.PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata per the
`Core metadata specifications <https://packaging.python.org/en/latest/specifications/core-metadata/#core-metadata>`_.
Custom providers may provide the METADATA file or override this
property.
"""
# deferred for performance (python/cpython#109829)
from . import _adapters
opt_text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
text = cast(str, opt_text)
return _adapters.Message(email.message_from_string(text))
@property
def name(self) -> str:
"""Return the 'Name' metadata for the distribution package."""
return self.metadata['Name']
@property
def _normalized_name(self):
"""Return a normalized version of the name."""
return Prepared.normalize(self.name)
@property
def version(self) -> str:
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self) -> EntryPoints:
"""
Return EntryPoints for this distribution.
Custom providers may provide the ``entry_points.txt`` file
or override this property.
"""
return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
@property
def files(self) -> Optional[List[PackagePath]]:
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info, or installed-files.txt or
SOURCES.txt for egg-info) is missing.
Result may be empty if the metadata exists but is empty.
Custom providers are recommended to provide a "RECORD" file (in
``read_text``) or override this property to allow for callers to be
able to resolve filenames provided by the package.
"""
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
@pass_none
def make_files(lines):
# Delay csv import, since Distribution.files is not as widely used
# as other parts of importlib.metadata
import csv
return starmap(make_file, csv.reader(lines))
@pass_none
def skip_missing_files(package_paths):
return list(filter(lambda path: path.locate().exists(), package_paths))
return skip_missing_files(
make_files(
self._read_files_distinfo()
or self._read_files_egginfo_installed()
or self._read_files_egginfo_sources()
)
)
def _read_files_distinfo(self):
"""
Read the lines of RECORD.
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo_installed(self):
"""
Read installed-files.txt and return lines in a similar
CSV-parsable format as RECORD: each file must be placed
relative to the site-packages directory and must also be
quoted (since file names can contain literal commas).
This file is written when the package is installed by pip,
but it might not be written for other installation methods.
Assume the file is accurate if it exists.
"""
text = self.read_text('installed-files.txt')
# Prepend the .egg-info/ subdir to the lines in this file.
# But this subdir is only available from PathDistribution's
# self._path.
subdir = getattr(self, '_path', None)
if not text or not subdir:
return
paths = (
py311.relative_fix((subdir / name).resolve())
.relative_to(self.locate_file('').resolve(), walk_up=True)
.as_posix()
for name in text.splitlines()
)
return map('"{}"'.format, paths)
def _read_files_egginfo_sources(self):
"""
Read SOURCES.txt and return lines in a similar CSV-parsable
format as RECORD: each file name must be quoted (since it
might contain literal commas).
Note that SOURCES.txt is not a reliable source for what
files are installed by a package. This file is generated
for a source archive, and the files that are present
there (e.g. setup.py) may not correctly reflect the files
that are present after the package has been installed.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self) -> Optional[List[str]]:
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return pass_none(self._deps_from_requires_text)(source)
@classmethod
def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and f'extra == "{name}"'
def quoted_marker(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
def url_req_space(req):
"""
PEP 508 requires a space between the url_spec and the quoted_marker.
Ref python/importlib_metadata#357.
"""
# '@' is uniquely indicative of a url_req.
return ' ' * ('@' in req)
for section in sections:
space = url_req_space(section.value)
yield section.value + space + quoted_marker(section.name)
@property
def origin(self):
return self._load_json('direct_url.json')
def _load_json(self, filename):
# Deferred for performance (python/importlib_metadata#503)
import json
return pass_none(json.loads)(
self.read_text(filename),
object_hook=lambda data: types.SimpleNamespace(**data),
)
| Distribution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_sagemaker_unified_studio.py | {
"start": 1195,
"end": 2035
} | class ____(BaseAwsLinksTestCase):
link_class = SageMakerUnifiedStudioLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "us-east-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"job_name": "test_job_name",
},
)
self.assert_extra_link_url(
expected_url=("https://console.aws.amazon.com/datazone/home?region=us-east-1"),
region_name="us-east-1",
aws_partition="aws",
job_name="test_job_name",
)
| TestSageMakerUnifiedStudioLink |
python | pandas-dev__pandas | asv_bench/benchmarks/io/json.py | {
"start": 2105,
"end": 2811
} | class ____(BaseIO):
fname = "__test__.json"
params = [
["split", "columns", "index", "values", "records"],
["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"],
]
param_names = ["orient", "frame"]
def setup(self, orient, frame):
data = {
"hello": ["thisisatest", 999898, "mixed types"],
"nest1": {"nest2": {"nest3": "nest3_value", "nest3_int": 3445}},
"nest1_list": {"nest2": ["blah", 32423, 546456.876, 92030234]},
"hello2": "string",
}
self.data = [data for i in range(10000)]
def time_normalize_json(self, orient, frame):
json_normalize(self.data)
| NormalizeJSON |
python | walkccc__LeetCode | solutions/1534. Count Good Triplets/1534.py | {
"start": 0,
"end": 299
} | class ____:
def countGoodTriplets(self, arr: list[int], a: int, b: int, c: int) -> int:
return sum(abs(arr[i] - arr[j]) <= a and
abs(arr[j] - arr[k]) <= b and
abs(arr[i] - arr[k]) <= c
for i, j, k in itertools.combinations(range(len(arr)), 3))
| Solution |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 6184,
"end": 7366
} | class ____(PrefectOperatorFilterBaseModel):
"""Filter for flows. Only flows matching all criteria will be returned."""
id: Optional[FlowFilterId] = Field(
default=None, description="Filter criteria for `Flow.id`"
)
deployment: Optional[FlowFilterDeployment] = Field(
default=None, description="Filter criteria for Flow deployments"
)
name: Optional[FlowFilterName] = Field(
default=None, description="Filter criteria for `Flow.name`"
)
tags: Optional[FlowFilterTags] = Field(
default=None, description="Filter criteria for `Flow.tags`"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.id is not None:
filters.append(self.id.as_sql_filter())
if self.deployment is not None:
filters.append(self.deployment.as_sql_filter())
if self.name is not None:
filters.append(self.name.as_sql_filter())
if self.tags is not None:
filters.append(self.tags.as_sql_filter())
return filters
| FlowFilter |
python | kamyu104__LeetCode-Solutions | Python/sum-of-largest-prime-substrings.py | {
"start": 89,
"end": 2157
} | class ____(object):
def sumOfLargestPrimes(self, s):
"""
:type s: str
:rtype: int
"""
COUNT = 3
def nth_element(nums, n, left=0, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
right = len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def is_prime(n):
if n == 1:
return False
if n in (2, 3):
return True
if n%2 == 0 or n%3 == 0:
return False
for i in xrange(5, n, 6):
if i*i > n:
break
if n%i == 0 or n%(i+2) == 0:
return False
return True
primes_set = set()
for i in xrange(len(s)):
curr = 0
for j in xrange(i, len(s)):
curr = curr*10+int(s[j])
if is_prime(curr):
primes_set.add(curr)
primes = list(primes_set)
d = min(len(primes), COUNT)
nth_element(primes, d, compare=lambda a, b: a > b)
return sum(primes[i] for i in xrange(d))
| Solution |
python | apache__airflow | providers/teradata/tests/unit/teradata/transfers/test_s3_to_teradata.py | {
"start": 1232,
"end": 2918
} | class ____:
def test_init(self):
operator = S3ToTeradataOperator(
s3_source_key=S3_SOURCE_KEY,
teradata_table=TERADATA_TABLE,
aws_conn_id=AWS_CONN_ID,
teradata_conn_id=TERADATA_CONN_ID,
task_id=TASK_ID,
dag=None,
)
assert operator.aws_conn_id == AWS_CONN_ID
assert operator.s3_source_key == S3_SOURCE_KEY
assert operator.teradata_conn_id == TERADATA_CONN_ID
assert operator.teradata_table == TERADATA_TABLE
assert operator.task_id == TASK_ID
@mock.patch("airflow.providers.amazon.aws.hooks.s3.S3Hook.get_connection")
@mock.patch("airflow.models.connection.Connection")
@mock.patch("boto3.session.Session")
@mock.patch("airflow.providers.teradata.hooks.teradata.TeradataHook.run")
def test_execute(self, mock_run, mock_session, mock_connection, mock_hook):
access_key = "aws_access_key_id"
access_secret = "aws_secret_access_key"
mock_session.return_value = Session(access_key, access_secret)
mock_session.return_value.access_key = access_key
mock_session.return_value.secret_key = access_secret
mock_session.return_value.token = None
mock_connection.return_value = Connection()
mock_hook.return_value = Connection()
op = S3ToTeradataOperator(
s3_source_key=S3_SOURCE_KEY,
teradata_table=TERADATA_TABLE,
aws_conn_id=AWS_CONN_ID,
teradata_conn_id=TERADATA_CONN_ID,
task_id=TASK_ID,
dag=None,
)
op.execute(None)
assert mock_run.call_count == 1
| TestS3ToTeradataTransfer |
python | doocs__leetcode | solution/2300-2399/2358.Maximum Number of Groups Entering a Competition/Solution.py | {
"start": 0,
"end": 173
} | class ____:
def maximumGroups(self, grades: List[int]) -> int:
n = len(grades)
return bisect_right(range(n + 1), n * 2, key=lambda x: x * x + x) - 1
| Solution |
python | has2k1__plotnine | plotnine/stats/stat_quantile.py | {
"start": 275,
"end": 2635
} | class ____(stat):
"""
Compute quantile regression lines
{usage}
Parameters
----------
{common_parameters}
quantiles : tuple, default=(0.25, 0.5, 0.75)
Quantiles of y to compute
formula : str, default="y ~ x"
Formula relating y variables to x variables
method_args : dict, default=None
Extra arguments passed on to the model fitting method,
[](`~statsmodels.regression.quantile_regression.QuantReg.fit`).
See Also
--------
plotnine.geom_quantile : The default `geom` for this `stat`.
statsmodels.regression.quantile_regression.QuantReg
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"quantile" # quantile
"group" # group identifier
```
Calculated aesthetics are accessed using the `after_stat` function.
e.g. `after_stat('quantile')`{.py}.
"""
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"geom": "quantile",
"position": "identity",
"na_rm": False,
"quantiles": (0.25, 0.5, 0.75),
"formula": "y ~ x",
"method_args": {},
}
CREATES = {"quantile", "group"}
def setup_params(self, data):
params = self.params
if params["formula"] is None:
params["formula"] = "y ~ x"
warn("Formula not specified, using '{}'", PlotnineWarning)
else:
params["eval_env"] = self.environment.to_patsy_env()
try:
iter(params["quantiles"])
except TypeError:
params["quantiles"] = (params["quantiles"],)
def compute_group(self, data, scales):
res = [
quant_pred(q, data, self.params) for q in self.params["quantiles"]
]
return pd.concat(res, axis=0, ignore_index=True)
def quant_pred(q, data, params):
"""
Quantile precitions
"""
import statsmodels.formula.api as smf
mod = smf.quantreg(
params["formula"],
data,
eval_env=params.get("eval_env"),
)
reg_res = mod.fit(q=q, **params["method_args"])
out = pd.DataFrame(
{
"x": [data["x"].min(), data["x"].max()],
"quantile": q,
"group": f"{data['group'].iloc[0]}-{q}",
}
)
out["y"] = reg_res.predict(out)
return out
| stat_quantile |
python | python__mypy | mypyc/ir/ops.py | {
"start": 50605,
"end": 51463
} | class ____(RegisterOp):
"""Set the value of a struct element.
This evaluates to a new struct with the changed value.
Use together with Undef to initialize a fresh struct value
(see Undef for more details).
"""
error_kind = ERR_NEVER
def __init__(self, src: Value, field: str, item: Value, line: int = -1) -> None:
super().__init__(line)
assert isinstance(src.type, RStruct), src.type
self.type = src.type
self.src = src
self.item = item
self.field = field
def sources(self) -> list[Value]:
return [self.src]
def set_sources(self, new: list[Value]) -> None:
(self.src,) = new
def stolen(self) -> list[Value]:
return [self.src]
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_set_element(self)
@final
| SetElement |
python | huggingface__transformers | tests/quantization/autoawq/test_awq.py | {
"start": 23086,
"end": 24178
} | class ____(unittest.TestCase):
def test_quantized_model_ipex(self):
"""
Simple test that checks if the quantized model is working properly with ipex backend
"""
quantization_config = AwqConfig(version="ipex")
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ",
quantization_config=quantization_config,
device_map="cpu",
)
tokenizer = AutoTokenizer.from_pretrained("TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ")
input_ids = tokenizer.encode("How to make a cake", return_tensors="pt")
pad_token_id = tokenizer.eos_token_id
output = model.generate(input_ids, do_sample=False, max_length=20, pad_token_id=pad_token_id)
print(tokenizer.decode(output[0], skip_special_tokens=True))
expected_output = (
"How to make a cake with a round tin?\nHow to make a cake with a round tin?\n1. Preheat the oven to 180°"
)
self.assertIn(tokenizer.decode(output[0], skip_special_tokens=True), expected_output)
| AwqIPEXTest |
python | coleifer__peewee | tests/schema.py | {
"start": 1224,
"end": 1353
} | class ____(TestModel):
key = TextField(unique=True)
value = TextField()
class Meta:
schema = 'cache'
| CacheData |
python | getsentry__sentry | src/sentry/db/models/fields/bounded.py | {
"start": 734,
"end": 1011
} | class ____(models.PositiveIntegerField):
MAX_VALUE = I32_MAX
def get_prep_value(self, value: int) -> int:
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super().get_prep_value(value)
| BoundedPositiveIntegerField |
python | django__django | tests/model_enums/tests.py | {
"start": 8485,
"end": 8640
} | class ____(datetime.datetime, models.Choices):
A = 2010, 10, 10, 10, 10, 10
B = 2011, 11, 11, 11, 11, 11
C = 2012, 12, 12, 12, 12, 12
| DateAndTime |
python | google__python-fire | fire/test_components.py | {
"start": 3020,
"end": 3146
} | class ____:
def sum(self, Delta=1.0, Gamma=2.0): # pylint: disable=invalid-name
return Delta + Gamma
| CapitalizedArgNames |
python | kamyu104__LeetCode-Solutions | Python/binary-subarrays-with-sum.py | {
"start": 53,
"end": 739
} | class ____(object):
def numSubarraysWithSum(self, A, S):
"""
:type A: List[int]
:type S: int
:rtype: int
"""
result = 0
left, right, sum_left, sum_right = 0, 0, 0, 0
for i, a in enumerate(A):
sum_left += a
while left < i and sum_left > S:
sum_left -= A[left]
left += 1
sum_right += a
while right < i and \
(sum_right > S or (sum_right == S and not A[right])):
sum_right -= A[right]
right += 1
if sum_left == S:
result += right-left+1
return result
| Solution |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 11474,
"end": 11932
} | class ____(nn.Module):
def __init__(self, embed_dim, mlp_ratio, drop_rate):
super().__init__()
self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
self.dropout = nn.Dropout(drop_rate)
def forward(self, hidden_state, input_tensor):
hidden_state = self.dense(hidden_state)
hidden_state = self.dropout(hidden_state)
hidden_state = hidden_state + input_tensor
return hidden_state
| CvtOutput |
python | Pylons__pyramid | docs/tutorials/wiki/src/basiclayout/tutorial/models/__init__.py | {
"start": 51,
"end": 284
} | class ____(PersistentMapping):
__parent__ = __name__ = None
def appmaker(zodb_root):
if 'app_root' not in zodb_root:
app_root = MyModel()
zodb_root['app_root'] = app_root
return zodb_root['app_root']
| MyModel |
python | doocs__leetcode | lcp/LCP 07. 传递信息/Solution.py | {
"start": 0,
"end": 285
} | class ____:
def numWays(self, n: int, relation: List[List[int]], k: int) -> int:
f = [[0] * n for _ in range(k + 1)]
f[0][0] = 1
for i in range(1, k + 1):
for a, b in relation:
f[i][b] += f[i - 1][a]
return f[-1][-1]
| Solution |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 27240,
"end": 27340
} | class ____(FailingQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
| PyFailingQueueTest |
python | doocs__leetcode | solution/1200-1299/1297.Maximum Number of Occurrences of a Substring/Solution2.py | {
"start": 532,
"end": 1187
} | class ____:
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
freq = Counter()
hashing = Hashing(s)
cnt = Counter()
ans = k = 0
for i, c in enumerate(s, 1):
freq[c] += 1
if freq[c] == 1:
k += 1
if i >= minSize:
if k <= maxLetters:
x = hashing.query(i - minSize + 1, i)
cnt[x] += 1
ans = max(ans, cnt[x])
j = i - minSize
freq[s[j]] -= 1
if freq[s[j]] == 0:
k -= 1
return ans
| Solution |
python | apache__airflow | airflow-core/src/airflow/task/priority_strategy.py | {
"start": 1079,
"end": 2850
} | class ____(ABC):
"""
Priority weight strategy interface.
This feature is experimental and subject to change at any time.
Currently, we don't serialize the priority weight strategy parameters. This means that
the priority weight strategy must be stateless, but you can add class attributes, and
create multiple subclasses with different attributes values if you need to create
different versions of the same strategy.
"""
@abstractmethod
def get_weight(self, ti: TaskInstance):
"""Get the priority weight of a task."""
...
@classmethod
def deserialize(cls, data: dict[str, Any]) -> PriorityWeightStrategy:
"""
Deserialize a priority weight strategy from data.
This is called when a serialized DAG is deserialized. ``data`` will be whatever
was returned by ``serialize`` during DAG serialization. The default
implementation constructs the priority weight strategy without any arguments.
"""
return cls(**data)
def serialize(self) -> dict[str, Any]:
"""
Serialize the priority weight strategy for JSON encoding.
This is called during DAG serialization to store priority weight strategy information
in the database. This should return a JSON-serializable dict that will be fed into
``deserialize`` when the DAG is deserialized. The default implementation returns
an empty dict.
"""
return {}
def __eq__(self, other: object) -> bool:
"""Equality comparison."""
if not isinstance(other, type(self)):
return False
return self.serialize() == other.serialize()
def __hash__(self):
return hash(self.serialize())
| PriorityWeightStrategy |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 1852,
"end": 2006
} | class ____(JSONRPCException):
"""
Invalid method parameter(s).
"""
def error_code(self) -> int:
return -32602
| InvalidParameterError |
python | tqdm__tqdm | tqdm/std.py | {
"start": 944,
"end": 987
} | class ____(TypeError):
pass
| TqdmTypeError |
python | hyperopt__hyperopt | hyperopt/mongoexp.py | {
"start": 8855,
"end": 21476
} | class ____:
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'owner', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
"""
Parameters
----------
db - Mongo Database (e.g. `Connection()[dbname]`)
database in which all job-related info is stored
jobs - Mongo Collection handle
collection within `db` to use for job arguments, return vals,
and various bookkeeping stuff and meta-data. Typically this is
`db['jobs']`
gfs - Mongo GridFS handle
GridFS is used to store attachments - binary blobs that don't fit
or are awkward to store in the `jobs` collection directly.
conn - Mongo Connection
Why we need to keep this, I'm not sure.
tunnel - something for ssh tunneling if you're doing that
See `connection_with_tunnel` for more info.
config_name - string
XXX: No idea what this is for, seems unimportant.
"""
if not _has_mongo:
raise Exception(
"MongoJobs cannot import pymongo classes. Make sure that pymongo "
"is available in your environment. E.g., try running 'import pymongo'"
)
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn = conn
self.tunnel = tunnel
self.config_name = config_name
collection = property(lambda s: s.jobs)
@classmethod
def alloc(
cls,
dbname,
host="localhost",
auth_dbname="admin",
port=27017,
jobs_coll="jobs",
gfs_coll="fs",
ssh=False,
user=None,
pw=None,
):
connection, tunnel = connection_with_tunnel(
dbname, host, auth_dbname, port, ssh, user, pw
)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel)
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll="fs", config_name="spec"):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ["exp_key", "result.loss", "book_time"]:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index("exp_key", unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if not r.get("MIA", False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if r.get("MIA", False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# -- this call adds an _id field to cpy
_id = self.jobs.insert(cpy, check_keys=True)
# -- so now we return the dict with the _id field
assert _id == cpy["_id"]
return cpy
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# This was meant to make it easier to catch insertion errors
# in a generic way even if different databases were used.
# ... but there's just MongoDB so far, so kinda goofy.
raise OperationFailure(e)
def delete(self, job):
"""Delete job[s]"""
try:
self.jobs.remove(job)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all(self, cond=None):
"""Delete all jobs and attachments"""
if cond is None:
cond = {}
try:
for d in self.jobs.find(filter=cond, projection=["_id", "_attachments"]):
logger.info("deleting job %s" % d["_id"])
for name, file_id in d.get("_attachments", []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error(f"failed to remove attachment {name}:{file_id}")
self.jobs.remove(d)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all_error_jobs(self):
return self.delete_all(cond={"state": JOB_STATE_ERROR})
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(
cond
) # copy is important, will be modified, but only the top-level
if exp_key is not None:
cond["exp_key"] = exp_key
# having an owner of None implies state==JOB_STATE_NEW, so this effectively
# acts as a filter to make sure that only new jobs get reserved.
if cond.get("owner") is not None:
raise ValueError("refusing to reserve owned job")
else:
cond["owner"] = None
cond["state"] = (
JOB_STATE_NEW # theoretically this is redundant, theoretically
)
try:
rval = self.jobs.find_and_modify(
cond,
{
"$set": {
"owner": host_id,
"book_time": now,
"state": JOB_STATE_RUNNING,
"refresh_time": now,
}
},
new=True,
upsert=False,
)
except pymongo.errors.OperationFailure as e:
logger.error("Error during reserve_job: %s" % str(e))
rval = None
return rval
def refresh(self, doc):
self.update(doc, dict(refresh_time=coarse_utcnow()))
def update(self, doc, dct, collection=None, do_sanity_checks=True):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
"""
if collection is None:
collection = self.collection
dct = copy.deepcopy(dct)
if "_id" not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if "_id" in dct:
if dct["_id"] != doc["_id"]:
raise ValueError("cannot update the _id field")
del dct["_id"]
if "version" in dct:
if dct["version"] != doc["version"]:
warnings.warn('Ignoring "version" field in update dictionary')
if "version" in doc:
doc_query = dict(_id=doc["_id"], version=doc["version"])
dct["version"] = doc["version"] + 1
else:
doc_query = dict(_id=doc["_id"])
dct["version"] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(doc_query, {"$set": dct}, upsert=False, multi=False)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if do_sanity_checks:
server_doc = collection.find_one(
dict(_id=doc["_id"], version=doc["version"])
)
if server_doc is None:
raise OperationFailure("updated doc not found : %s" % str(doc))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], str), name_id
return str(name_id[0])
return list(map(as_str, doc.get("_attachments", [])))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get("_attachments", [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename="{}_{}".format(doc["_id"], name))
logger.info(
"stored blob of %i bytes with id=%s and filename %s_%s"
% (len(blob), str(new_file_id), doc["_id"], name)
)
new_attachments = [a for a in attachments if a[0] != name] + [
(name, new_file_id)
]
try:
ii = 0
doc = self.update(
doc, {"_attachments": new_attachments}, collection=collection
)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning(
"Leak during set_attachment: old_file_id=%s" % (name_matches[ii][1])
)
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
# return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get("_attachments", [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure("Attachment not found: %s" % name)
if len(file_ids) > 1:
raise OperationFailure("multiple name matches", (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get("_attachments", [])
file_id = None
for i, a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure("Attachment not found: %s" % name)
del attachments[i]
self.update(doc, {"_attachments": attachments}, collection=collection)
self.gfs.delete(file_id)
| MongoJobs |
python | google__python-fire | fire/core.py | {
"start": 6840,
"end": 7053
} | class ____(Exception):
"""Exception used by Fire when a Fire command cannot be executed.
These exceptions are not raised by the Fire function, but rather are caught
and added to the FireTrace.
"""
| FireError |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 69508,
"end": 69874
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(self.fc(x))
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 5),)
| LinearReluModel |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_sensor.py | {
"start": 26530,
"end": 27034
} | class ____:
@pytest.mark.parametrize(
("soft_fail", "expected_exception"),
[
(True, AirflowSkipException),
(False, AirflowException),
],
)
def test_fail_after_resuming_deferred_sensor(self, soft_fail, expected_exception):
async_sensor = DummyAsyncSensor(task_id="dummy_async_sensor", soft_fail=soft_fail)
with pytest.raises(expected_exception):
async_sensor.resume_execution("execute_complete", None, {})
| TestAsyncSensor |
python | ray-project__ray | rllib/algorithms/marwil/marwil_torch_policy.py | {
"start": 796,
"end": 5350
} | class ____(ValueNetworkMixin, PostprocessAdvantages, TorchPolicyV2):
"""PyTorch policy class used with Marwil."""
def __init__(self, observation_space, action_space, config):
TorchPolicyV2.__init__(
self,
observation_space,
action_space,
config,
max_seq_len=config["model"]["max_seq_len"],
)
ValueNetworkMixin.__init__(self, config)
PostprocessAdvantages.__init__(self)
# Not needed for pure BC.
if config["beta"] != 0.0:
# Set up a torch-var for the squared moving avg. advantage norm.
self._moving_average_sqd_adv_norm = torch.tensor(
[config["moving_average_sqd_adv_norm_start"]],
dtype=torch.float32,
requires_grad=False,
).to(self.device)
# TODO: Don't require users to call this manually.
self._initialize_loss_from_dummy_batch()
@override(TorchPolicyV2)
def loss(
self,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
model_out, _ = model(train_batch)
action_dist = dist_class(model_out, model)
actions = train_batch[SampleBatch.ACTIONS]
# log\pi_\theta(a|s)
logprobs = action_dist.logp(actions)
# Advantage estimation.
if self.config["beta"] != 0.0:
cumulative_rewards = train_batch[Postprocessing.ADVANTAGES]
state_values = model.value_function()
adv = cumulative_rewards - state_values
adv_squared_mean = torch.mean(torch.pow(adv, 2.0))
explained_var = explained_variance(cumulative_rewards, state_values)
ev = torch.mean(explained_var)
model.tower_stats["explained_variance"] = ev
# Policy loss.
# Update averaged advantage norm.
rate = self.config["moving_average_sqd_adv_norm_update_rate"]
self._moving_average_sqd_adv_norm = (
rate * (adv_squared_mean.detach() - self._moving_average_sqd_adv_norm)
+ self._moving_average_sqd_adv_norm
)
model.tower_stats[
"_moving_average_sqd_adv_norm"
] = self._moving_average_sqd_adv_norm
# Exponentially weighted advantages.
exp_advs = torch.exp(
self.config["beta"]
* (adv / (1e-8 + torch.pow(self._moving_average_sqd_adv_norm, 0.5)))
).detach()
# Value loss.
v_loss = 0.5 * adv_squared_mean
else:
# Policy loss (simple BC loss term).
exp_advs = 1.0
# Value loss.
v_loss = 0.0
model.tower_stats["v_loss"] = v_loss
# logprob loss alone tends to push action distributions to
# have very low entropy, resulting in worse performance for
# unfamiliar situations.
# A scaled logstd loss term encourages stochasticity, thus
# alleviate the problem to some extent.
logstd_coeff = self.config["bc_logstd_coeff"]
if logstd_coeff > 0.0:
logstds = torch.mean(action_dist.log_std, dim=1)
else:
logstds = 0.0
p_loss = -torch.mean(exp_advs * (logprobs + logstd_coeff * logstds))
model.tower_stats["p_loss"] = p_loss
# Combine both losses.
self.v_loss = v_loss
self.p_loss = p_loss
total_loss = p_loss + self.config["vf_coeff"] * v_loss
model.tower_stats["total_loss"] = total_loss
return total_loss
@override(TorchPolicyV2)
def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
stats = {
"policy_loss": self.get_tower_stats("p_loss")[0].item(),
"total_loss": self.get_tower_stats("total_loss")[0].item(),
}
if self.config["beta"] != 0.0:
stats["moving_average_sqd_adv_norm"] = self.get_tower_stats(
"_moving_average_sqd_adv_norm"
)[0].item()
stats["vf_explained_var"] = self.get_tower_stats("explained_variance")[
0
].item()
stats["vf_loss"] = self.get_tower_stats("v_loss")[0].item()
return convert_to_numpy(stats)
def extra_grad_process(
self, optimizer: "torch.optim.Optimizer", loss: TensorType
) -> Dict[str, TensorType]:
return apply_grad_clipping(self, optimizer, loss)
| MARWILTorchPolicy |
python | zarr-developers__zarr-python | tests/test_store/test_wrapper.py | {
"start": 537,
"end": 759
} | class ____(TypedDict):
store_cls: type[LocalStore]
root: str
# TODO: fix this warning
@pytest.mark.filterwarnings(
"ignore:coroutine 'ClientCreatorContext.__aexit__' was never awaited:RuntimeWarning"
)
| OpenKwargs |
python | Netflix__metaflow | test/core/tests/project_production.py | {
"start": 72,
"end": 838
} | class ____(MetaflowTest):
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
HEADER = """
import os
os.environ['METAFLOW_PRODUCTION'] = 'True'
@project(name='project_prod')
"""
@steps(0, ["singleton"], required=True)
def step_single(self):
pass
@steps(1, ["all"])
def step_all(self):
from metaflow import current
assert_equals(current.branch_name, "prod")
assert_equals(
current.project_flow_name, "project_prod.prod.ProjectProductionTestFlow"
)
| ProjectProductionTest |
python | huggingface__transformers | src/transformers/models/rembert/modeling_rembert.py | {
"start": 20570,
"end": 20909
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RemBertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@auto_docstring
| RemBertOnlyMLMHead |
python | doocs__leetcode | lcof2/剑指 Offer II 017. 含有所有字符的最短字符串/Solution2.py | {
"start": 0,
"end": 1043
} | class ____:
def minWindow(self, s: str, t: str) -> str:
m, n = len(s), len(t)
if n > m:
return ""
need, window = defaultdict(int), defaultdict(int)
needCount, windowCount = 0, 0
for c in t:
if need[c] == 0:
needCount += 1
need[c] += 1
start, minLen = 0, inf
left, right = 0, 0
while right < m:
ch = s[right]
right += 1
if ch in need:
window[ch] += 1
if window[ch] == need[ch]:
windowCount += 1
while windowCount == needCount:
if right - left < minLen:
minLen = right - left
start = left
ch = s[left]
left += 1
if ch in need:
if window[ch] == need[ch]:
windowCount -= 1
window[ch] -= 1
return "" if minLen == inf else s[start : start + minLen]
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.