language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py | {
"start": 667,
"end": 2170
} | class ____:
"""
Represents a single group of a query.
Attributes:
series: The timeseries data associated with the group. Each entry in the timeseries is characterized by the time
and the aggregate value.
totals: The totals data associated with the group. Totals represent just a single scalar value.
"""
series: Series
totals: Totals
@classmethod
def empty(cls) -> "GroupValue":
return GroupValue(series=[], totals=None)
def add_series_entry(self, time: str, aggregate_value: ResultValue):
self.series.append((time, self._transform_aggregate_value(aggregate_value)))
def add_totals(self, aggregate_value: ResultValue):
self.totals = self._transform_aggregate_value(aggregate_value)
def _transform_aggregate_value(self, aggregate_value: ResultValue) -> ResultValue:
"""
Transforms a list aggregate value into a scalar aggregate value.
The reason for this transformation is that we don't support the array return type, since the set of
operations that the API can support won't lead to multiple values in a single aggregate value.
Returns:
The transformed aggregate value.
"""
if isinstance(aggregate_value, list):
if aggregate_value:
return aggregate_value[0]
raise MetricsQueryExecutionError("Received an empty array as aggregate value")
return aggregate_value
@dataclass
| GroupValue |
python | OmkarPathak__pygorithm | tests/test_math.py | {
"start": 1111,
"end": 3154
} | class ____(unittest.TestCase):
def test_matrix_addition(self):
X = [[12,7,3],
[4 ,5,6],
[7 ,8,9]]
Y = [[5,8,1],
[6,7,3],
[4,5,9]]
matrix = matrix_operations.Matrix(X, Y)
self.assertEqual(matrix.add(), [[17, 15, 4], [10, 12, 9], [11, 13, 18]])
def test_matrix_subtraction(self):
X = [[12,7,3],
[4,5,6],
[7,8,9]]
Y = [[5,8,1],
[6,7,3],
[4,5,9]]
matrix = matrix_operations.Matrix(X, Y)
self.assertEqual(matrix.subtract(), [[7, -1, 2], [-2, -2, 3], [3, 3, 0]])
def test_matrix_multiplication(self):
X = [[12,7,3],
[4,5,6],
[7,8,9]]
Y = [[5,8,1,2],
[6,7,3,0],
[4,5,9,1]]
matrix = matrix_operations.Matrix(X, Y)
self.assertEqual(matrix.multiply(), [[114, 160, 60, 27], [74, 97, 73, 14], [119, 157, 112, 23]])
def test_matrix_transpose(self):
X = [[12,7],
[4 ,5],
[3 ,8]]
matrix = matrix_operations.Matrix(X)
self.assertEqual(matrix.transpose(), [[12, 4, 3],[7, 5, 8]])
def test_matrix_rotate(self):
X =[[1, 2, 3, 4 ],
[5, 6, 7, 8 ],
[9, 10, 11, 12 ],
[13, 14, 15, 16 ]]
matrix = matrix_operations.Matrix(X)
self.assertEqual(matrix.rotate(), [[5, 1, 2, 3], [9, 10, 6, 4], [13, 11, 7, 8], [14, 15, 16, 12]])
def test_matrix_unique_paths(self):
matrix = matrix_operations.Matrix()
self.assertEqual(matrix.count_unique_paths(3, 3), 6)
def test_matrix_exceptions(self):
X = [[12,7,3],
[4,5,6],
[7,8,9]]
Y = [[5,8],
[6,7],
[4,5]]
matrix = matrix_operations.Matrix(X, Y)
# test exception
self.assertRaises(Exception, matrix.add)
self.assertRaises(Exception, matrix.subtract)
if __name__ == '__main__':
unittest.main()
| TestMatrixOperations |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 69625,
"end": 79681
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
test_needs_fk=True,
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
test_needs_fk=True,
)
Table(
"atob",
metadata,
Column("aid", Integer, ForeignKey("a.id")),
Column("bid", Integer, ForeignKey("b.id")),
test_needs_fk=True,
)
Table(
"c",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("bid", Integer, ForeignKey("b.id")),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(cls.Comparable):
pass
def test_delete_orphan(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
# if no backref here, delete-orphan failed until [ticket:427]
# was fixed
self.mapper_registry.map_imperatively(
A,
a,
properties={
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
)
},
)
self.mapper_registry.map_imperatively(B, b)
sess = fixture_session()
b1 = B(data="b1")
a1 = A(data="a1", bs=[b1])
sess.add(a1)
sess.flush()
a1.bs.remove(b1)
sess.flush()
eq_(
sess.execute(select(func.count("*")).select_from(atob)).scalar(), 0
)
eq_(sess.execute(select(func.count("*")).select_from(b)).scalar(), 0)
eq_(sess.execute(select(func.count("*")).select_from(a)).scalar(), 1)
def test_delete_orphan_dynamic(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
# if no backref here, delete-orphan
properties={
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
lazy="dynamic",
)
},
)
# failed until [ticket:427] was fixed
self.mapper_registry.map_imperatively(B, b)
sess = fixture_session()
b1 = B(data="b1")
a1 = A(data="a1", bs=[b1])
sess.add(a1)
sess.flush()
a1.bs.remove(b1)
sess.flush()
eq_(
sess.execute(select(func.count("*")).select_from(atob)).scalar(), 0
)
eq_(sess.execute(select(func.count("*")).select_from(b)).scalar(), 0)
eq_(sess.execute(select(func.count("*")).select_from(a)).scalar(), 1)
def test_delete_orphan_cascades(self):
a, A, c, b, C, B, atob = (
self.tables.a,
self.classes.A,
self.tables.c,
self.tables.b,
self.classes.C,
self.classes.B,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={
# if no backref here, delete-orphan failed until #
# [ticket:427] was fixed
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
)
},
)
self.mapper_registry.map_imperatively(
B,
b,
properties={"cs": relationship(C, cascade="all, delete-orphan")},
)
self.mapper_registry.map_imperatively(C, c)
sess = fixture_session()
b1 = B(data="b1", cs=[C(data="c1")])
a1 = A(data="a1", bs=[b1])
sess.add(a1)
sess.flush()
a1.bs.remove(b1)
sess.flush()
eq_(
sess.execute(select(func.count("*")).select_from(atob)).scalar(), 0
)
eq_(sess.execute(select(func.count("*")).select_from(b)).scalar(), 0)
eq_(sess.execute(select(func.count("*")).select_from(a)).scalar(), 1)
eq_(sess.execute(select(func.count("*")).select_from(c)).scalar(), 0)
def test_cascade_delete(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
)
},
)
self.mapper_registry.map_imperatively(B, b)
sess = fixture_session()
a1 = A(data="a1", bs=[B(data="b1")])
sess.add(a1)
sess.flush()
sess.delete(a1)
sess.flush()
eq_(
sess.execute(select(func.count("*")).select_from(atob)).scalar(), 0
)
eq_(sess.execute(select(func.count("*")).select_from(b)).scalar(), 0)
eq_(sess.execute(select(func.count("*")).select_from(a)).scalar(), 0)
def test_single_parent_error(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={
"bs": relationship(
B, secondary=atob, cascade="all, delete-orphan"
)
},
)
self.mapper_registry.map_imperatively(B, b)
assert_raises_message(
sa_exc.ArgumentError,
"For many-to-many relationship A.bs, delete-orphan cascade",
configure_mappers,
)
def test_single_parent_raise(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
)
},
)
self.mapper_registry.map_imperatively(B, b)
b1 = B(data="b1")
A(data="a1", bs=[b1])
assert_raises(sa_exc.InvalidRequestError, A, data="a2", bs=[b1])
def test_single_parent_backref(self):
"""test that setting m2m via a uselist=False backref bypasses the
single_parent raise"""
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={
"bs": relationship(
B,
secondary=atob,
cascade="all, delete-orphan",
single_parent=True,
backref=backref("a", uselist=False),
)
},
)
self.mapper_registry.map_imperatively(B, b)
b1 = B(data="b1")
a1 = A(data="a1", bs=[b1])
assert_raises(sa_exc.InvalidRequestError, A, data="a2", bs=[b1])
a2 = A(data="a2")
b1.a = a2
assert b1 not in a1.bs
assert b1 in a2.bs
def test_none_m2m_collection_assignment(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={"bs": relationship(B, secondary=atob, backref="as")},
)
self.mapper_registry.map_imperatively(B, b)
s = fixture_session()
a1 = A(bs=[None])
s.add(a1)
eq_(a1.bs, [None])
assert_raises_message(
orm_exc.FlushError,
"Can't flush None value found in collection A.bs",
s.commit,
)
eq_(a1.bs, [None])
def test_none_m2m_collection_append(self):
a, A, B, b, atob = (
self.tables.a,
self.classes.A,
self.classes.B,
self.tables.b,
self.tables.atob,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={"bs": relationship(B, secondary=atob, backref="as")},
)
self.mapper_registry.map_imperatively(B, b)
s = fixture_session()
a1 = A()
a1.bs.append(None)
s.add(a1)
eq_(a1.bs, [None])
assert_raises_message(
orm_exc.FlushError,
"Can't flush None value found in collection A.bs",
s.commit,
)
eq_(a1.bs, [None])
| M2MCascadeTest |
python | Textualize__textual | tests/test_screen_modes.py | {
"start": 838,
"end": 1157
} | class ____(ScreenBindingsMixin):
def __init__(self, label):
super().__init__()
self.label = label
def compose(self) -> ComposeResult:
yield Header()
yield Label(self.label)
yield Footer()
def action_remove(self) -> None:
self.app.remove_mode("one")
| BaseScreen |
python | django__django | django/forms/renderers.py | {
"start": 1891,
"end": 2141
} | class ____(BaseRenderer):
"""
Load templates using template.loader.get_template() which is configured
based on settings.TEMPLATES.
"""
def get_template(self, template_name):
return get_template(template_name)
| TemplatesSetting |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 672,
"end": 795
} | class ____(HaystackError):
"""Raised when incorrect arguments have been provided for faceting."""
pass
| FacetingError |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_aliases.py | {
"start": 6394,
"end": 8024
} | class ____(BaseTest):
create_file = True
def get_signal_handler(self):
return NotImplementedError("Subclasses should return the handler")
def setUp(self):
super().setUp()
signals.saved_file.connect(
self.get_signal_handler(), sender=models.Profile)
# Fix the standard storage to use the test's temporary location.
self._MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = self.storage.temporary_location
# Make the temporary storage location the default storage for now.
self._old_default_storage = django_storage.default_storage._wrapped
django_storage.default_storage._wrapped = self.storage
self._old_thumbnail_default_storage = storage.thumbnail_default_storage
storage.thumbnail_default_storage = self.storage
def tearDown(self):
# Put the default storage back how we found it.
storage.thumbnail_default_storage = self._old_thumbnail_default_storage
django_storage.default_storage._wrapped = self._old_default_storage
settings.MEDIA_ROOT = self._MEDIA_ROOT
signals.saved_file.disconnect(
self.get_signal_handler(), sender=models.Profile)
super().tearDown()
def fake_save(self, instance):
cls = instance.__class__
pre_save.send(sender=cls, instance=instance)
for field in cls._meta.fields:
if isinstance(field, FileField):
getattr(instance, field.name)._committed = True
post_save.send(sender=cls, instance=instance)
return self.storage.listdir('avatars')[1]
| GenerationBase |
python | pytorch__pytorch | test/functorch/test_eager_transforms.py | {
"start": 98571,
"end": 103958
} | class ____(TestCase):
# Case 1 in [Forward Grad View/inplace]
def test_all_dual_no_view(self, device):
B = 2
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
return inner
def f(x, y):
x.copy_(y)
return x
x = torch.randn(3, B, device=device)
xt = torch.randn(3, B, device=device)
y = torch.randn(3, B, device=device)
yt = torch.randn(3, B, device=device)
out, out_tangent = vmap(push_jvp(f), in_dims=1)(x, xt, y, yt)
self.assertEqual(out, x.movedim(1, 0))
self.assertEqual(out_tangent, yt.movedim(1, 0))
x = torch.randn(3, B, device=device)
xt = torch.randn(3, B, device=device)
y = torch.randn(3, 3, device=device)[:, 1]
yt = torch.randn(6, device=device)[::2]
out, out_tangent = vmap(push_jvp(f), in_dims=(1, 1, None, None))(x, xt, y, yt)
self.assertEqual(out, x.movedim(1, 0))
self.assertEqual(out_tangent, yt.expand(B, 3))
# Case 2 in [Forward Grad View/inplace]
def test_all_dual_base_view_inplace(self, device):
B = 2
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
return inner
# with view, propagate from view to base
def f(x, y):
view = x[:, ::2]
view.copy_(y)
return view, x
orig_x = torch.randn(2, 6, B, device=device)
orig_xt = torch.randn(2, 6, B, device=device)
x = orig_x.clone()
xt = orig_xt.clone()
y = torch.randn(2, B, 3, device=device)
yt = torch.randn(2, B, 3, device=device)
out, out_tangent = vmap(push_jvp(f), in_dims=(2, 2, 1, 1))(x, xt, y, yt)
expected_out = vmap(f, in_dims=(2, 1))(orig_x.clone(), y)
self.assertEqual(out[0], expected_out[0])
self.assertEqual(out[1], expected_out[1])
self.assertEqual(out_tangent[0], yt.movedim(1, 0))
expected_x_tangent = orig_xt.movedim(-1, 0).clone()
expected_x_tangent[:, :, ::2].copy_(yt.movedim(1, 0))
self.assertEqual(out_tangent[1], expected_x_tangent)
expected = orig_x.movedim(2, 0).clone()
expected[:, :, ::2] = y.movedim(1, 0)
self.assertEqual(x.movedim(2, 0), expected)
# Case 3 in [Forward Grad View/inplace]
def test_all_dual_base_inplace(self, device):
B = 2
def push_jvp(f):
def inner(x, xt, y, yt):
return jvp(f, (x, y), (xt, yt))
return inner
# Case 3: with view, propagate from base to view
def f(x, y):
view = x[0, ::2]
x.copy_(y)
return x, view
x = torch.randn(2, B, 6, device=device)
xt = torch.randn(2, 6, B, device=device)
y = torch.randn(2, B, 6, device=device)
yt = torch.randn(2, B, 6, device=device)
out, out_tangent = vmap(push_jvp(f), in_dims=(1, 2, 1, 1))(x.clone(), xt, y, yt)
expected_out = vmap(f, in_dims=(1, 1))(x.clone(), y)
self.assertEqual(out[0], expected_out[0])
self.assertEqual(out[1], expected_out[1])
self.assertEqual(out_tangent[0], yt.movedim(1, 0))
self.assertEqual(out_tangent[1], yt.movedim(1, 0)[:, 0, ::2])
# Case 4 in [Forward Grad View/inplace]
def test_right_dual_view_prop(self, device):
B = 2
# Changes on the view must propagate to its base. Also:
# - x is a regular Tensor
# - y is a dual tensor
def f(x, y):
x = x.clone()
view = x[0]
view.copy_(y)
return view, x
def push_jvp(x, y, yt):
return jvp(partial(f, x), (y,), (yt,))
x = torch.randn(2, B, 6, device=device)
y = torch.randn(6, B, device=device)
yt = torch.randn(6, B, device=device)
outs, tangents = vmap(push_jvp, in_dims=(1, 1, 1))(x, y, yt)
expected_out = vmap(f, in_dims=(1, 1))(x.clone(), y)
self.assertEqual(outs[0], expected_out[0])
self.assertEqual(outs[1], expected_out[1])
self.assertEqual(tangents[0], yt.movedim(1, 0))
expected_tangent_1 = torch.zeros_like(x).movedim(1, 0)
expected_tangent_1[:, 0].copy_(yt.movedim(1, 0))
self.assertEqual(tangents[1], expected_tangent_1)
# Case 5 in [Forward Grad View/inplace]
def test_right_dual_base_prop(self, device):
B = 2
# Changes on the base must propagate on all its views. Also:
# - x is a regular Tensor
# - y is a dual tensor
def f(x, y):
x = x.clone()
view = x[0]
x.copy_(y)
return view, x
def push_jvp(x, y, yt):
return jvp(partial(f, x), (y,), (yt,))
x = torch.randn(2, B, 6)
y = torch.randn(2, 6, B)
yt = torch.randn(2, 6, B)
outs, tangents = vmap(push_jvp, in_dims=(1, 2, 2))(x, y, yt)
expected_out = vmap(f, in_dims=(1, 2))(x, y)
self.assertEqual(outs[0], expected_out[0])
self.assertEqual(outs[1], expected_out[1])
self.assertEqual(tangents[0], yt.movedim(2, 0)[:, 0])
self.assertEqual(tangents[1], yt.movedim(2, 0))
# Use for testing miscellaneous helper functions
@markDynamoStrictTest
| TestVmapJvpInplaceView |
python | scrapy__scrapy | scrapy/core/downloader/contextfactory.py | {
"start": 1071,
"end": 3988
} | class ____(BrowserLikePolicyForHTTPS):
"""
Non-peer-certificate verifying HTTPS context factory
Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
which allows TLS protocol negotiation
'A TLS/SSL connection established with [this method] may
understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'
"""
def __init__(
self,
method: int = SSL.SSLv23_METHOD,
tls_verbose_logging: bool = False,
tls_ciphers: str | None = None,
*args: Any,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._ssl_method: int = method
self.tls_verbose_logging: bool = tls_verbose_logging
self.tls_ciphers: AcceptableCiphers
if tls_ciphers:
self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)
else:
self.tls_ciphers = DEFAULT_CIPHERS
if method_is_overridden(type(self), ScrapyClientContextFactory, "getContext"):
warnings.warn(
"Overriding ScrapyClientContextFactory.getContext() is deprecated and that method"
" will be removed in a future Scrapy version. Override creatorForNetloc() instead.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
@classmethod
def from_crawler(
cls,
crawler: Crawler,
method: int = SSL.SSLv23_METHOD,
*args: Any,
**kwargs: Any,
) -> Self:
tls_verbose_logging: bool = crawler.settings.getbool(
"DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING"
)
tls_ciphers: str | None = crawler.settings["DOWNLOADER_CLIENT_TLS_CIPHERS"]
return cls( # type: ignore[misc]
method=method,
tls_verbose_logging=tls_verbose_logging,
tls_ciphers=tls_ciphers,
*args,
**kwargs,
)
def getCertificateOptions(self) -> CertificateOptions:
# setting verify=True will require you to provide CAs
# to verify against; in other words: it's not that simple
return CertificateOptions(
verify=False,
method=self._ssl_method,
fixBrokenPeers=True,
acceptableCiphers=self.tls_ciphers,
)
# kept for old-style HTTP/1.0 downloader context twisted calls,
# e.g. connectSSL()
def getContext(self, hostname: Any = None, port: Any = None) -> SSL.Context:
ctx: SSL.Context = self.getCertificateOptions().getContext()
ctx.set_options(0x4) # OP_LEGACY_SERVER_CONNECT
return ctx
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
return ScrapyClientTLSOptions(
hostname.decode("ascii"),
self.getContext(),
verbose_logging=self.tls_verbose_logging,
)
@implementer(IPolicyForHTTPS)
| ScrapyClientContextFactory |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 22448,
"end": 23274
} | class ____(fixtures.DeclarativeMappedTest):
__requires__ = ("foreign_keys", "recursive_fk_cascade")
@classmethod
def setup_classes(cls):
class A(cls.DeclarativeBasic):
__tablename__ = "A"
__table_args__ = dict(test_needs_fk=True)
__mapper_args__ = {"confirm_deleted_rows": False}
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("A.id", ondelete="CASCADE"))
def test_delete_both(self):
A = self.classes.A
session = Session(testing.db)
a1, a2 = A(id=1), A(id=2, parent_id=1)
session.add_all([a1, a2])
session.flush()
session.delete(a1)
session.delete(a2)
# no issue with multi-row count here
session.flush()
| BatchDeleteIgnoresRowcountTest |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/usage.py | {
"start": 551,
"end": 5073
} | class ____(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
```
```txt
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
```
!!! version-added "Added in `langchain-core` 0.3.49"
"""
def __init__(self) -> None:
"""Initialize the UsageMetadataCallbackHandler."""
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
@override
def __repr__(self) -> str:
return str(self.usage_metadata)
@override
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get usage metadata callback.
Get context manager for tracking usage metadata across chat model calls using
`AIMessage.usage_metadata`.
Args:
name: The name of the context variable.
Yields:
The usage metadata callback.
Example:
```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
```
```txt
{
"gpt-4o-mini-2024-07-18": {
"input_tokens": 8,
"output_tokens": 10,
"total_tokens": 18,
"input_token_details": {"audio": 0, "cache_read": 0},
"output_token_details": {"audio": 0, "reasoning": 0},
},
"claude-3-5-haiku-20241022": {
"input_tokens": 8,
"output_tokens": 21,
"total_tokens": 29,
"input_token_details": {"cache_read": 0, "cache_creation": 0},
},
}
```
!!! version-added "Added in `langchain-core` 0.3.49"
"""
usage_metadata_callback_var: ContextVar[UsageMetadataCallbackHandler | None] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, inheritable=True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
| UsageMetadataCallbackHandler |
python | FactoryBoy__factory_boy | tests/cyclic/foo.py | {
"start": 136,
"end": 223
} | class ____:
def __init__(self, bar, x):
self.bar = bar
self.x = x
| Foo |
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 41101,
"end": 62855
} | class ____(object):
"""
This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.
:param `fileobj`:
A file or stream in iCalendar format, which should be UTF-8 encoded
with CRLF endings.
.. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
"""
def __init__(self, fileobj):
global rrule
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
# ical should be encoded in UTF-8 with CRLF
fileobj = open(fileobj, 'r')
else:
self._s = getattr(fileobj, 'name', repr(fileobj))
fileobj = _nullcontext(fileobj)
self._vtz = {}
with fileobj as fobj:
self._parse_rfc(fobj.read())
def keys(self):
"""
Retrieves the available time zones as a list.
"""
return list(self._vtz.keys())
def get(self, tzid=None):
"""
Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
:param tzid:
If there is exactly one time zone available, omitting ``tzid``
or passing :py:const:`None` value returns it. Otherwise a valid
key (which can be retrieved from :func:`keys`) is required.
:raises ValueError:
Raised if ``tzid`` is not specified but there are either more
or fewer than 1 zone defined.
:returns:
Returns either a :py:class:`datetime.tzinfo` object representing
the relevant time zone or :py:const:`None` if the ``tzid`` was
not found.
"""
if tzid is None:
if len(self._vtz) == 0:
raise ValueError("no timezones defined")
elif len(self._vtz) > 1:
raise ValueError("more than one timezone available")
tzid = next(iter(self._vtz))
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0] == '+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
elif len(s) == 6:
return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
else:
raise ValueError("invalid offset: " + s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError(
"at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
# DTSTART in VTIMEZONE takes a subset of valid RRULE
# values under RFC 5545.
for parm in parms:
if parm != 'VALUE=DATE-TIME':
msg = ('Unsupported DTSTART param in ' +
'VTIMEZONE: ' + parm)
raise ValueError(msg)
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError(
"unsupported %s parm: %s " % (name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError(
"unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError(
"unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError(
"unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo",
"/usr/lib/zoneinfo",
"/usr/share/lib/zoneinfo",
"/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def __get_gettz():
tzlocal_classes = (tzlocal,)
if tzwinlocal is not None:
tzlocal_classes += (tzwinlocal,)
class GettzFunc(object):
"""
Retrieve a time zone object from a string representation
This function is intended to retrieve the :py:class:`tzinfo` subclass
that best represents the time zone that would be used if a POSIX
`TZ variable`_ were set to the same value.
If no argument or an empty string is passed to ``gettz``, local time
is returned:
.. code-block:: python3
>>> gettz()
tzfile('/etc/localtime')
This function is also the preferred way to map IANA tz database keys
to :class:`tzfile` objects:
.. code-block:: python3
>>> gettz('Pacific/Kiritimati')
tzfile('/usr/share/zoneinfo/Pacific/Kiritimati')
On Windows, the standard is extended to include the Windows-specific
zone names provided by the operating system:
.. code-block:: python3
>>> gettz('Egypt Standard Time')
tzwin('Egypt Standard Time')
Passing a GNU ``TZ`` style string time zone specification returns a
:class:`tzstr` object:
.. code-block:: python3
>>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
:param name:
A time zone name (IANA, or, on Windows, Windows keys), location of
a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone
specifier. An empty string, no argument or ``None`` is interpreted
as local time.
:return:
Returns an instance of one of ``dateutil``'s :py:class:`tzinfo`
subclasses.
.. versionchanged:: 2.7.0
After version 2.7.0, any two calls to ``gettz`` using the same
input strings will return the same object:
.. code-block:: python3
>>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago')
True
In addition to improving performance, this ensures that
`"same zone" semantics`_ are used for datetimes in the same zone.
.. _`TZ variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
.. _`"same zone" semantics`:
https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
"""
def __init__(self):
self.__instances = weakref.WeakValueDictionary()
self.__strong_cache_size = 8
self.__strong_cache = OrderedDict()
self._cache_lock = _thread.allocate_lock()
def __call__(self, name=None):
with self._cache_lock:
rv = self.__instances.get(name, None)
if rv is None:
rv = self.nocache(name=name)
if not (name is None
or isinstance(rv, tzlocal_classes)
or rv is None):
# tzlocal is slightly more complicated than the other
# time zone providers because it depends on environment
# at construction time, so don't cache that.
#
# We also cannot store weak references to None, so we
# will also not store that.
self.__instances[name] = rv
else:
# No need for strong caching, return immediately
return rv
self.__strong_cache[name] = self.__strong_cache.pop(name, rv)
if len(self.__strong_cache) > self.__strong_cache_size:
self.__strong_cache.popitem(last=False)
return rv
def set_cache_size(self, size):
with self._cache_lock:
self.__strong_cache_size = size
while len(self.__strong_cache) > size:
self.__strong_cache.popitem(last=False)
def cache_clear(self):
with self._cache_lock:
self.__instances = weakref.WeakValueDictionary()
self.__strong_cache.clear()
@staticmethod
def nocache(name=None):
"""A non-cached version of gettz"""
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name in ("", ":"):
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
try:
if name.startswith(":"):
name = name[1:]
except TypeError as e:
if isinstance(name, bytes):
new_msg = "gettz argument should be str, not bytes"
six.raise_from(TypeError(new_msg), e)
else:
raise
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except (WindowsError, UnicodeEncodeError):
# UnicodeEncodeError is for Python 2.7 compat
tz = None
if not tz:
from dateutil.zoneinfo import get_zonefile_instance
tz = get_zonefile_instance().get(name)
if not tz:
for c in name:
# name is not a tzstr unless it has at least
# one offset. For short values of "name", an
# explicit for loop seems to be the fastest way
# To determine if a string contains a digit
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = UTC
elif name in time.tzname:
tz = tzlocal()
return tz
return GettzFunc()
gettz = __get_gettz()
del __get_gettz
def datetime_exists(dt, tz=None):
"""
Given a datetime and a time zone, determine whether or not a given datetime
would fall in a gap.
:param dt:
A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
is provided.)
:param tz:
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
``None`` or not provided, the datetime's own time zone will be used.
:return:
Returns a boolean value whether or not the "wall time" exists in
``tz``.
.. versionadded:: 2.7.0
"""
if tz is None:
if dt.tzinfo is None:
raise ValueError('Datetime is naive and no time zone provided.')
tz = dt.tzinfo
dt = dt.replace(tzinfo=None)
# This is essentially a test of whether or not the datetime can survive
# a round trip to UTC.
dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz)
dt_rt = dt_rt.replace(tzinfo=None)
return dt == dt_rt
def datetime_ambiguous(dt, tz=None):
"""
Given a datetime and a time zone, determine whether or not a given datetime
is ambiguous (i.e if there are two times differentiated only by their DST
status).
:param dt:
A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
is provided.)
:param tz:
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
``None`` or not provided, the datetime's own time zone will be used.
:return:
Returns a boolean value whether or not the "wall time" is ambiguous in
``tz``.
.. versionadded:: 2.6.0
"""
if tz is None:
if dt.tzinfo is None:
raise ValueError('Datetime is naive and no time zone provided.')
tz = dt.tzinfo
# If a time zone defines its own "is_ambiguous" function, we'll use that.
is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
if is_ambiguous_fn is not None:
try:
return tz.is_ambiguous(dt)
except Exception:
pass
# If it doesn't come out and tell us it's ambiguous, we'll just check if
# the fold attribute has any effect on this particular date and time.
dt = dt.replace(tzinfo=tz)
wall_0 = enfold(dt, fold=0)
wall_1 = enfold(dt, fold=1)
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
same_dst = wall_0.dst() == wall_1.dst()
return not (same_offset and same_dst)
def resolve_imaginary(dt):
"""
Given a datetime that may be imaginary, return an existing datetime.
This function assumes that an imaginary datetime represents what the
wall time would be in a zone had the offset transition not occurred, so
it will always fall forward by the transition's change in offset.
.. doctest::
>>> from dateutil import tz
>>> from datetime import datetime
>>> NYC = tz.gettz('America/New_York')
>>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
2017-03-12 03:30:00-04:00
>>> KIR = tz.gettz('Pacific/Kiritimati')
>>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
1995-01-02 12:30:00+14:00
As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
existing datetime, so a round-trip to and from UTC is sufficient to get
an extant datetime, however, this generally "falls back" to an earlier time
rather than falling forward to the STD side (though no guarantees are made
about this behavior).
:param dt:
A :class:`datetime.datetime` which may or may not exist.
:return:
Returns an existing :class:`datetime.datetime`. If ``dt`` was not
imaginary, the datetime returned is guaranteed to be the same object
passed to the function.
.. versionadded:: 2.7.0
"""
if dt.tzinfo is not None and not datetime_exists(dt):
curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
dt += curr_offset - old_offset
return dt
def _datetime_to_timestamp(dt):
"""
Convert a :class:`datetime.datetime` object to an epoch timestamp in
seconds since January 1, 1970, ignoring the time zone.
"""
return (dt.replace(tzinfo=None) - EPOCH).total_seconds()
if sys.version_info >= (3, 6):
def _get_supported_offset(second_offset):
return second_offset
else:
def _get_supported_offset(second_offset):
# For python pre-3.6, round to full-minutes if that's not the case.
# Python's datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 or https://bugs.python.org/issue5288
# for some information.
old_offset = second_offset
calculated_offset = 60 * ((second_offset + 30) // 60)
return calculated_offset
try:
# Python 3.7 feature
from contextlib import nullcontext as _nullcontext
except ImportError:
class _nullcontext(object):
"""
Class for wrapping contexts so that they are passed through in a
with statement.
"""
def __init__(self, context):
self.context = context
def __enter__(self):
return self.context
def __exit__(*args, **kwargs):
pass
# vim:ts=4:sw=4:et
| tzical |
python | tornadoweb__tornado | tornado/test/routing_test.py | {
"start": 2087,
"end": 2385
} | class ____(Router):
def __init__(self, app):
self.app = app
def find_handler(self, request, **kwargs):
handler = GetResource if request.method == "GET" else PostResource
return self.app.get_handler_delegate(request, handler, path_args=[request.path])
| HTTPMethodRouter |
python | getsentry__sentry | src/sentry/api/endpoints/organization_plugin_deprecation_info.py | {
"start": 566,
"end": 3323
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ECOSYSTEM
def get(self, request, organization, plugin_slug):
"""
Returns a list of objects that are affected by a plugin deprecation. Objects could be issues or alert rules or both
pparam: organization, plugin_slug
"""
# Plugins in the db are stored in lowercase but there is not guarantee that's how the customer will call the API
plugin = plugin_slug.lower()
plugin_projects = Project.objects.filter(
status=ObjectStatus.ACTIVE,
organization=organization,
projectoption__key=f"{plugin}:enabled",
projectoption__value=True,
).distinct()
url_prefix = generate_organization_url(organization.slug)
affected_rules_urls = self.get_plugin_rules_urls(
plugin_projects, f"{url_prefix}/organizations/{organization.slug}", plugin
)
affected_issue_urls = self.get_plugin_groups_urls(plugin_projects, plugin, url_prefix)
return Response(
{"affected_rules": affected_rules_urls, "affected_groups": affected_issue_urls}
)
def get_plugin_rules_urls(
self, plugin_projects: BaseQuerySet[Project, Project], url_prefix: str, plugin: str
) -> list[str]:
candidate_rules = Rule.objects.filter(
status=ObjectStatus.ACTIVE,
project__in=plugin_projects,
).distinct()
matching_rule_urls = []
for rule in candidate_rules:
actions = rule.data.get("actions", [])
for action in actions:
if (
action.get("id")
== "sentry.rules.actions.notify_event_service.NotifyEventServiceAction"
and action.get("service") == plugin
):
matching_rule_urls.append(
f"{url_prefix}/alerts/rules/{rule.project.slug}/{rule.id}/details/"
)
break
return matching_rule_urls
def get_plugin_groups_urls(
self, plugin_projects: BaseQuerySet[Project, Project], plugin: str, url_prefix: str
) -> list[str]:
groups_with_plugin_meta = (
Group.objects.filter(
project__in=plugin_projects,
groupmeta__key__contains=f"{plugin}:tid",
)
.distinct()
.select_related("project")
)
affected_groups_urls = []
for group in groups_with_plugin_meta:
affected_groups_urls.append(f"{url_prefix}/issues/{group.id}/")
return affected_groups_urls
| OrganizationPluginDeprecationInfoEndpoint |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/self_or_cls_assignment.py | {
"start": 0,
"end": 1472
} | class ____:
@classmethod
def list_fruits(cls) -> None:
cls = "apple" # PLW0642
cls: Fruit = "apple" # PLW0642
cls += "orange" # OK, augmented assignments are ignored
*cls = "banana" # PLW0642
cls, blah = "apple", "orange" # PLW0642
blah, (cls, blah2) = "apple", ("orange", "banana") # PLW0642
blah, [cls, blah2] = "apple", ("orange", "banana") # PLW0642
@classmethod
def add_fruits(cls, fruits, /) -> None:
cls = fruits # PLW0642
def print_color(self) -> None:
self = "red" # PLW0642
self: Self = "red" # PLW0642
self += "blue" # OK, augmented assignments are ignored
*self = "blue" # PLW0642
self, blah = "red", "blue" # PLW0642
blah, (self, blah2) = "apple", ("orange", "banana") # PLW0642
blah, [self, blah2] = "apple", ("orange", "banana") # PLW0642
def print_color(self, color, /) -> None:
self = color
def ok(self) -> None:
cls = None # OK because the rule looks for the name in the signature
@classmethod
def ok(cls) -> None:
self = None
@staticmethod
def list_fruits_static(self, cls) -> None:
self = "apple" # Ok
cls = "banana" # Ok
def list_fruits(self, cls) -> None:
self = "apple" # Ok
cls = "banana" # Ok
# `__new__` is implicitly a static method
# but for the purposes of this check we treat
# it as a class method.
| Fruit |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_storage_transfer_service.py | {
"start": 2198,
"end": 2348
} | class ____:
"""Google Cloud Transfer job status."""
ENABLED = "ENABLED"
DISABLED = "DISABLED"
DELETED = "DELETED"
| GcpTransferJobsStatus |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 8665,
"end": 8763
} | class ____(graphene.InputObjectType):
experiment_id = graphene.String()
| MlflowGetExperimentInput |
python | django-extensions__django-extensions | tests/management/commands/test_print_user_for_session.py | {
"start": 645,
"end": 3785
} | class ____(TestCase):
"""Test for print_user_for_session command."""
def setUp(self):
self.engine = import_module(settings.SESSION_ENGINE)
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_Session_Key_does_not_exist_or_expired(self, m_stdout):
call_command("print_user_for_session", "l6hxnwblpvrfu8bohelmqjj4soyo2r12")
self.assertIn("Session Key does not exist. Expired?", m_stdout.getvalue())
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_that_there_is_no_user_associated_with_given_session(
self, m_stdout
):
session = self.engine.SessionStore()
session.update(
{
"_auth_user_backend": "django.contrib.auth.backends.ModelBackend",
"_auth_user_hash": "b67352fde8582b12f068c10fd9d29f9fa1af0459",
}
)
session.create()
call_command("print_user_for_session", session.session_key)
self.assertIn("No user associated with session", m_stdout.getvalue())
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_that_there_is_no_backend_associated_with_given_session(
self, m_stdout
):
session = self.engine.SessionStore()
session.update(
{
"_auth_user_id": 1234,
"_auth_user_hash": "b67352fde8582b12f068c10fd9d29f9fa1af0459",
}
)
session.create()
call_command("print_user_for_session", session.session_key)
self.assertIn(
"No authentication backend associated with session", m_stdout.getvalue()
)
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_that_there_is_no_user_associated_with_id(self, m_stdout):
session = self.engine.SessionStore()
session.update(
{
"_auth_user_id": 1234,
"_auth_user_backend": "django.contrib.auth.backends.ModelBackend",
"_auth_user_hash": "b67352fde8582b12f068c10fd9d29f9fa1af0459",
}
)
session.create()
call_command("print_user_for_session", session.session_key)
self.assertIn("No user associated with that id.", m_stdout.getvalue())
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_user_info_for_session(self, m_stdout):
user = get_user_model().objects.create(
first_name="John", last_name="Doe", username="foobar", email="foo@bar.com"
)
session = self.engine.SessionStore()
session.update(
{
"_auth_user_id": user.pk,
"_auth_user_backend": "django.contrib.auth.backends.ModelBackend",
"_auth_user_hash": "b67352fde8582b12f068c10fd9d29f9fa1af0459",
}
)
session.create()
expected_out = """User id: {}
full name: John Doe
short name: John
username: foobar
email: foo@bar.com
""".format(user.pk)
call_command("print_user_for_session", session.session_key)
self.assertIn(expected_out, m_stdout.getvalue())
| PrintUserForSessionTests |
python | davidhalter__parso | parso/python/tree.py | {
"start": 26670,
"end": 28652
} | class ____(Import):
"""For ``import_name`` nodes. Covers normal imports without ``from``."""
type = 'import_name'
__slots__ = ()
def get_defined_names(self, include_setitem=False):
"""
Returns the a list of `Name` that the import defines. The defined names
is always the first name after `import` or in case an alias - `as` - is
present that name is returned.
"""
return [alias or path[0] for path, alias in self._dotted_as_names()]
@property
def level(self):
"""The level parameter of ``__import__``."""
return 0 # Obviously 0 for imports without from.
def get_paths(self):
return [path for path, alias in self._dotted_as_names()]
def _dotted_as_names(self):
"""Generator of (list(path), alias) where alias may be None."""
dotted_as_names = self.children[1]
if dotted_as_names.type == 'dotted_as_names':
as_names = dotted_as_names.children[::2]
else:
as_names = [dotted_as_names]
for as_name in as_names:
if as_name.type == 'dotted_as_name':
alias = as_name.children[2]
as_name = as_name.children[0]
else:
alias = None
if as_name.type == 'name':
yield [as_name], alias
else:
# dotted_names
yield as_name.children[::2], alias
def is_nested(self):
"""
This checks for the special case of nested imports, without aliases and
from statement::
import foo.bar
"""
return bool([1 for path, alias in self._dotted_as_names()
if alias is None and len(path) > 1])
def _aliases(self):
"""
:return list of Name: Returns all the alias
"""
return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
if alias is not None)
| ImportName |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 18689,
"end": 18900
} | class ____(BooleanOption, Flag, metaclass=OptionType):
"""``include`` flag to polynomial manipulation functions. """
option = 'include'
@classmethod
def default(cls):
return False
| Include |
python | numpy__numpy | numpy/random/tests/test_smoke.py | {
"start": 26770,
"end": 27456
} | class ____(RNG):
@classmethod
def _create_rng(cls):
bit_generator = MT19937
advance = None
seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 32
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
def test_numpy_state(self):
rg = self._create_rng().rg
nprg = np.random.RandomState()
nprg.standard_normal(99)
state = nprg.get_state()
rg.bit_generator.state = state
state2 = rg.bit_generator.state
assert_((state[1] == state2['state']['key']).all())
assert_(state[2] == state2['state']['pos'])
| TestMT19937 |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/error.py | {
"start": 7135,
"end": 8046
} | class ____(YAMLWarning):
def __init__(self, node, flt_str):
# type: (Any, Any) -> None
self.node = node
self.flt = flt_str
def __str__(self):
# type: () -> Any
line = self.node.start_mark.line
col = self.node.start_mark.column
return """
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
Correct your float: "{}" on line: {}, column: {}
or alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', spack.vendor.ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
""".format(
self.flt, line, col
)
warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
| MantissaNoDotYAML1_1Warning |
python | huggingface__transformers | src/transformers/models/qwen2/tokenization_qwen2.py | {
"start": 1338,
"end": 3718
} | class ____(TokenizersBackend):
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
vocab_file=None,
merges_file=None,
unk_token="<|endoftext|>",
bos_token=None,
eos_token="<|endoftext|>",
pad_token="<|endoftext|>",
add_prefix_space=None,
vocab=None,
merges=None,
**kwargs,
):
self.add_prefix_space = add_prefix_space if add_prefix_space is not None else False
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
"<|endoftext|>": 0,
}
self._merges = merges if merges is not None else generate_merges(self._vocab)
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
unk_token=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
byte_fallback=False,
)
)
self._tokenizer.decoder = decoders.ByteLevel()
self._tokenizer.normalizer = normalizers.NFC()
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.Split(
Regex(PRETOKENIZE_REGEX),
behavior="isolated",
invert=False,
),
pre_tokenizers.ByteLevel(
add_prefix_space=self.add_prefix_space,
use_regex=False,
),
]
)
tokenizer_object = self._tokenizer
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
tokenizer_object=tokenizer_object,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
self.add_tokens([AddedToken(token, special=True) for token in self.all_special_tokens])
__all__ = ["Qwen2Tokenizer"]
| Qwen2Tokenizer |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/plugins.py | {
"start": 4237,
"end": 4371
} | class ____(BaseModel):
"""Plugin Import Error serializer for responses."""
source: str
error: str
| PluginImportErrorResponse |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 2341,
"end": 3475
} | class ____:
FEM_PIXELS_PER_CHIP_X = 256
FEM_PIXELS_PER_CHIP_Y = 256
FEM_CHIPS_PER_STRIPE_X = 8
FEM_CHIPS_PER_STRIPE_Y = 1
FEM_STRIPES_PER_MODULE = 2
@property
def sensor_module_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y * self.FEM_STRIPES_PER_MODULE
return y_pixels, x_pixels,
@property
def fem_stripe_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y
return y_pixels, x_pixels,
def generate_sensor_module_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.sensor_module_dimensions, dtype=dtype)
dset.fill(value)
return dset
def generate_fem_stripe_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.fem_stripe_dimensions, dtype=dtype)
dset.fill(value)
return dset
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
| ExcaliburData |
python | walkccc__LeetCode | solutions/3000. Maximum Area of Longest Diagonal Rectangle/3000.py | {
"start": 0,
"end": 175
} | class ____:
def areaOfMaxDiagonal(self, dimensions: list[list[int]]) -> int:
a, b = max(dimensions, key=lambda x: (x[0]**2 + x[1]**2, x[0] * x[1]))
return a * b
| Solution |
python | getsentry__sentry | tests/sentry/api/serializers/test_organization_member.py | {
"start": 2171,
"end": 3069
} | class ____(OrganizationMemberSerializerTest):
def test_simple(self) -> None:
projects = [self.project, self.project_2]
org_members = self._get_org_members()
result = serialize(
org_members,
self.user_2,
OrganizationMemberWithProjectsSerializer(projects=projects),
)
expected_projects = [[self.project.slug, self.project_2.slug], [self.project.slug]]
expected_projects[0].sort()
assert [r["projects"] for r in result] == expected_projects
projects = [self.project_2]
result = serialize(
org_members,
self.user_2,
OrganizationMemberWithProjectsSerializer(projects=projects),
)
expected_projects = [[self.project_2.slug], []]
assert [r["projects"] for r in result] == expected_projects
| OrganizationMemberWithProjectsSerializerTest |
python | huggingface__transformers | src/transformers/models/encodec/modeling_encodec.py | {
"start": 17719,
"end": 18822
} | class ____(PreTrainedAudioTokenizerBase):
config: EncodecConfig
base_model_prefix = "encodec"
main_input_name = "input_values"
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.GroupNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv1d):
init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
init.uniform_(module.bias, a=-k, b=k)
elif isinstance(module, nn.ConvTranspose1d):
module.reset_parameters()
elif isinstance(module, nn.LSTM):
for name, param in module.named_parameters():
if "weight" in name:
init.xavier_uniform_(param)
elif "bias" in name:
init.constant_(param, 0.0)
@auto_docstring(
custom_intro="""
The EnCodec neural audio codec model.
"""
)
| EncodecPreTrainedModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 70852,
"end": 100555
} | class ____(_AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.Relationship`
using joined eager loading.
"""
__slots__ = "join_depth"
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs,
):
"""Add a left outer join to the statement that's being constructed."""
if not compile_state.compile_options._enable_eagerloads:
return
elif (
loadopt
and compile_state.statement is not None
and compile_state.statement.is_dml
):
util.warn_deprecated(
"The joinedload loader option is not compatible with DML "
"statements such as INSERT, UPDATE. Only SELECT may be used."
"This warning will become an exception in a future release.",
"2.0",
)
elif self.uselist:
compile_state.multi_row_eager_loaders = True
path = path[self.parent_property]
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, compile_state.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
# setup an adapter but dont create any JOIN, assume it's already
# in the query
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
compile_state,
query_entity,
path,
adapter,
user_defined_adapter,
)
# don't do "wrap" for multi-row, we want to wrap
# limited/distinct SELECT,
# because we want to put the JOIN on the outside.
else:
# if not via query option, check for
# a cycle
if not path.contains(compile_state.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
# add the JOIN and create an adapter
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
# for multi-row, we want to wrap limited/distinct SELECT,
# because we want to put the JOIN on the outside.
compile_state.eager_adding_joins = True
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
compile_state,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
has_nones = util.NONE_SET.intersection(compile_state.secondary_columns)
if has_nones:
if with_poly_entity is not None:
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
else:
compile_state.secondary_columns = [
c for c in compile_state.secondary_columns if c is not None
]
def _init_user_defined_eager_proc(
self, loadopt, compile_state, target_attributes
):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
compile_state.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = orm_util.ORMAdapter(
orm_util._TraceAdaptRole.JOINEDLOAD_USER_DEFINED_ALIAS,
prop.mapper,
selectable=alias,
equivalents=prop.mapper._equivalent_columns,
limit_on_entity=False,
)
else:
if path.contains(
compile_state.attributes, "path_with_polymorphic"
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
orm_util._TraceAdaptRole.JOINEDLOAD_PATH_WITH_POLYMORPHIC,
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = compile_state._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
target_attributes,
"user_defined_eager_row_processor",
adapter,
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _generate_row_adapter(
self,
compile_state,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
insp = inspect(self.entity)
if insp.is_aliased_class:
alt_selectable = insp.selectable
else:
alt_selectable = None
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=(
alt_selectable._anonymous_fromclause(flat=True)
if alt_selectable is not None
else None
),
flat=True,
use_mapper_path=True,
)
to_adapt_insp = inspect(to_adapt)
clauses = to_adapt_insp._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
orm_util._TraceAdaptRole.JOINEDLOAD_MEMOIZED_ADAPTER,
to_adapt_insp,
equivalents=self.mapper._equivalent_columns,
adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.is_aliased_class
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
compile_state.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
loadopt._extra_criteria if loadopt else (),
)
)
add_to_collection = compile_state.secondary_columns
path.set(compile_state.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
compile_state,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
extra_criteria,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = compile_state._should_nest_selectable
query_entity_key = None
if (
query_entity not in compile_state.eager_joins
and not should_nest_selectable
and compile_state.from_clauses
):
indexes = sql_util.find_left_clause_that_matches_given(
compile_state.from_clauses, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = compile_state.from_clauses[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = compile_state.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "is_aliased_class", False):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = adapter.aliased_insp._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.is_aliased_class
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
extra_join_criteria = extra_criteria
additional_entity_criteria = compile_state.global_attributes.get(
("additional_entity_criteria", self.mapper), ()
)
if additional_entity_criteria:
extra_join_criteria += tuple(
ae._resolve_where_criteria(self.mapper)
for ae in additional_entity_criteria
if ae.propagate_to_loaders
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_insp,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=path[self.mapper],
_extra_criteria=extra_join_criteria,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, path[-2], towrap, clauses, onclause, extra_join_criteria
)
compile_state.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
compile_state._append_dedupe_col_collection(
col, compile_state.primary_columns
)
if self.parent_property.order_by:
compile_state.eager_order_by += tuple(
(eagerjoin._target_adapter.copy_and_process)(
util.to_list(self.parent_property.order_by)
)
)
def _splice_nested_inner_join(
self,
path,
entity_we_want_to_splice_onto,
join_obj,
clauses,
onclause,
extra_criteria,
entity_inside_join_structure: Union[
Mapper, None, Literal[False]
] = False,
detected_existing_path: Optional[path_registry.PathRegistry] = None,
):
# recursive fn to splice a nested join into an existing one.
# entity_inside_join_structure=False means this is the outermost call,
# and it should return a value. entity_inside_join_structure=<mapper>
# indicates we've descended into a join and are looking at a FROM
# clause representing this mapper; if this is not
# entity_we_want_to_splice_onto then return None to end the recursive
# branch
assert entity_we_want_to_splice_onto is path[-2]
if entity_inside_join_structure is False:
assert isinstance(join_obj, orm_util._ORMJoin)
if isinstance(join_obj, sql.selectable.FromGrouping):
# FromGrouping - continue descending into the structure
return self._splice_nested_inner_join(
path,
entity_we_want_to_splice_onto,
join_obj.element,
clauses,
onclause,
extra_criteria,
entity_inside_join_structure,
)
elif isinstance(join_obj, orm_util._ORMJoin):
# _ORMJoin - continue descending into the structure
join_right_path = join_obj._right_memo
# see if right side of join is viable
target_join = self._splice_nested_inner_join(
path,
entity_we_want_to_splice_onto,
join_obj.right,
clauses,
onclause,
extra_criteria,
entity_inside_join_structure=(
join_right_path[-1].mapper
if join_right_path is not None
else None
),
)
if target_join is not None:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
else:
# see if left side of join is viable
target_join = self._splice_nested_inner_join(
path,
entity_we_want_to_splice_onto,
join_obj.left,
clauses,
onclause,
extra_criteria,
entity_inside_join_structure=join_obj._left_memo,
detected_existing_path=join_right_path,
)
if target_join is not None:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
# neither side viable, return None, or fail if this was the top
# most call
if entity_inside_join_structure is False:
assert (
False
), "assertion failed attempting to produce joined eager loads"
return None
# reached an endpoint (e.g. a table that's mapped, or an alias of that
# table). determine if we can use this endpoint to splice onto
# is this the entity we want to splice onto in the first place?
if not entity_we_want_to_splice_onto.isa(entity_inside_join_structure):
return None
# path check. if we know the path how this join endpoint got here,
# lets look at our path we are satisfying and see if we're in the
# wrong place. This is specifically for when our entity may
# appear more than once in the path, issue #11449
# updated in issue #11965.
if detected_existing_path and len(detected_existing_path) > 2:
# this assertion is currently based on how this call is made,
# where given a join_obj, the call will have these parameters as
# entity_inside_join_structure=join_obj._left_memo
# and entity_inside_join_structure=join_obj._right_memo.mapper
assert detected_existing_path[-3] is entity_inside_join_structure
# from that, see if the path we are targeting matches the
# "existing" path of this join all the way up to the midpoint
# of this join object (e.g. the relationship).
# if not, then this is not our target
#
# a test condition where this test is false looks like:
#
# desired splice: Node->kind->Kind
# path of desired splice: NodeGroup->nodes->Node->kind
# path we've located: NodeGroup->nodes->Node->common_node->Node
#
# above, because we want to splice kind->Kind onto
# NodeGroup->nodes->Node, this is not our path because it actually
# goes more steps than we want into self-referential
# ->common_node->Node
#
# a test condition where this test is true looks like:
#
# desired splice: B->c2s->C2
# path of desired splice: A->bs->B->c2s
# path we've located: A->bs->B->c1s->C1
#
# above, we want to splice c2s->C2 onto B, and the located path
# shows that the join ends with B->c1s->C1. so we will
# add another join onto that, which would create a "branch" that
# we might represent in a pseudopath as:
#
# B->c1s->C1
# ->c2s->C2
#
# i.e. A JOIN B ON <bs> JOIN C1 ON <c1s>
# JOIN C2 ON <c2s>
#
if detected_existing_path[0:-2] != path.path[0:-1]:
return None
return orm_util._ORMJoin(
join_obj,
clauses.aliased_insp,
onclause,
isouter=False,
_left_memo=entity_inside_join_structure,
_right_memo=path[path[-1].mapper],
_extra_criteria=extra_criteria,
)
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
compile_state = context.compile_state
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, context.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if compile_state.compound_eager_adapter and decorator:
decorator = decorator.wrap(
compile_state.compound_eager_adapter
)
elif compile_state.compound_eager_adapter:
decorator = compile_state.compound_eager_adapter
else:
decorator = path.get(
compile_state.attributes, "eager_row_processor"
)
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if not context.compile_state.compile_options._enable_eagerloads:
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
if self.uselist:
context.loaders_require_uniquing = True
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
query_entity,
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
# note this must unconditionally clear out any existing collection.
# an existing collection would be present only in the case of
# populate_existing().
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="selectin")
| _JoinedLoader |
python | encode__django-rest-framework | rest_framework/relations.py | {
"start": 2742,
"end": 7880
} | class ____(Field):
queryset = None
html_cutoff = None
html_cutoff_text = None
def __init__(self, **kwargs):
self.queryset = kwargs.pop('queryset', self.queryset)
cutoff_from_settings = api_settings.HTML_SELECT_CUTOFF
if cutoff_from_settings is not None:
cutoff_from_settings = int(cutoff_from_settings)
self.html_cutoff = kwargs.pop('html_cutoff', cutoff_from_settings)
self.html_cutoff_text = kwargs.pop(
'html_cutoff_text',
self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)
)
if not method_overridden('get_queryset', RelatedField, self):
assert self.queryset is not None or kwargs.get('read_only'), (
'Relational field must provide a `queryset` argument, '
'override `get_queryset`, or set read_only=`True`.'
)
assert not (self.queryset is not None and kwargs.get('read_only')), (
'Relational fields should not provide a `queryset` argument, '
'when setting read_only=`True`.'
)
kwargs.pop('many', None)
kwargs.pop('allow_empty', None)
super().__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ManyRelatedField` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super().__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method handles creating a parent `ManyRelatedField` instance
when the `many=True` keyword argument is passed.
Typically you won't need to override this method.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomManyRelatedField(*args, **kwargs)
"""
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs:
if key in MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return ManyRelatedField(**list_kwargs)
def run_validation(self, data=empty):
# We force empty strings to None values for relational fields.
if data == '':
data = None
return super().run_validation(data)
def get_queryset(self):
queryset = self.queryset
if isinstance(queryset, (QuerySet, Manager)):
# Ensure queryset is re-evaluated whenever used.
# Note that actually a `Manager` class may also be used as the
# queryset argument. This occurs on ModelSerializer fields,
# as it allows us to generate a more expressive 'repr' output
# for the field.
# Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
queryset = queryset.all()
return queryset
def use_pk_only_optimization(self):
return False
def get_attribute(self, instance):
if self.use_pk_only_optimization() and self.source_attrs:
# Optimized case, return a mock object only containing the pk attribute.
with contextlib.suppress(AttributeError):
attribute_instance = get_attribute(instance, self.source_attrs[:-1])
value = attribute_instance.serializable_value(self.source_attrs[-1])
if is_simple_callable(value):
# Handle edge case where the relationship `source` argument
# points to a `get_relationship()` method on the model.
value = value()
# Handle edge case where relationship `source` argument points
# to an instance instead of a pk (e.g., a `@property`).
value = getattr(value, 'pk', value)
return PKOnlyObject(pk=value)
# Standard case, return the object instance.
return super().get_attribute(instance)
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return {
self.to_representation(item): self.display_value(item) for item in queryset
}
@property
def choices(self):
return self.get_choices()
@property
def grouped_choices(self):
return self.choices
def iter_options(self):
return iter_options(
self.get_choices(cutoff=self.html_cutoff),
cutoff=self.html_cutoff,
cutoff_text=self.html_cutoff_text
)
def display_value(self, instance):
return str(instance)
| RelatedField |
python | django__django | tests/generic_views/views.py | {
"start": 1861,
"end": 1963
} | class ____(AuthorList):
def get_queryset(self):
return None
| AuthorListGetQuerysetReturnsNone |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_doc_builder.py | {
"start": 677,
"end": 4229
} | class ____(TestCase):
fixtures = ["test_data", "eric"]
def setUp(self):
self.project = Project.objects.get(slug="pip")
self.version = self.project.versions.first()
self.build_env = mock.MagicMock()
self.build_env.project = self.project
self.build_env.version = self.version
self.build_env.build = {
"id": 123,
}
self.build_env.api_client = mock.MagicMock()
BaseSphinx.type = "base"
BaseSphinx.sphinx_build_dir = tempfile.mkdtemp()
BaseSphinx.relative_output_dir = "_readthedocs/"
@patch("readthedocs.doc_builder.backends.sphinx.BaseSphinx.docs_dir")
@patch("readthedocs.doc_builder.backends.sphinx.BaseSphinx.run")
@patch("readthedocs.builds.models.Version.get_conf_py_path")
@patch("readthedocs.projects.models.Project.checkout_path")
@patch("readthedocs.doc_builder.python_environments.load_yaml_config")
def test_project_without_conf_py(
self,
load_yaml_config,
checkout_path,
get_conf_py_path,
_,
docs_dir,
):
"""
Test for a project without ``conf.py`` file.
When this happen, the ``get_conf_py_path`` raises a
``ProjectConfigurationError`` which is captured by our own code.
"""
tmp_dir = tempfile.mkdtemp()
checkout_path.return_value = tmp_dir
docs_dir.return_value = tmp_dir
get_conf_py_path.side_effect = ProjectConfigurationError
python_env = Virtualenv(
version=self.version,
build_env=self.build_env,
config=get_build_config({}, validate=True),
)
base_sphinx = BaseSphinx(
build_env=self.build_env,
python_env=python_env,
)
with self.assertRaises(ProjectConfigurationError) as e:
base_sphinx.show_conf()
self.assertEqual(
e.exception.message_id,
ProjectConfigurationError.NOT_FOUND,
)
@patch("readthedocs.doc_builder.backends.sphinx.BaseSphinx.docs_dir")
@patch("readthedocs.doc_builder.backends.sphinx.BaseSphinx.run")
@patch("readthedocs.builds.models.Version.get_conf_py_path")
@patch("readthedocs.projects.models.Project.checkout_path")
@patch("readthedocs.doc_builder.python_environments.load_yaml_config")
def test_multiple_conf_py(
self,
load_yaml_config,
checkout_path,
get_conf_py_path,
_,
docs_dir,
):
"""
Test for a project with multiple ``conf.py`` files.
An error should be raised to the user if we can't
guess the correct conf.py file.
"""
tmp_docs_dir = py.path.local(tempfile.mkdtemp())
tmp_docs_dir.join("conf.py").write("")
tmp_docs_dir.join("test").mkdir().join("conf.py").write("")
docs_dir.return_value = str(tmp_docs_dir)
checkout_path.return_value = str(tmp_docs_dir)
get_conf_py_path.side_effect = ProjectConfigurationError
python_env = Virtualenv(
version=self.version,
build_env=self.build_env,
config=get_build_config({}, validate=True),
)
base_sphinx = BaseSphinx(
build_env=self.build_env,
python_env=python_env,
)
with pytest.raises(ProjectConfigurationError):
with override_settings(DOCROOT=tmp_docs_dir):
base_sphinx.show_conf()
@override_settings(PRODUCTION_DOMAIN="readthedocs.org")
| SphinxBuilderTest |
python | scipy__scipy | scipy/optimize/_dual_annealing.py | {
"start": 16089,
"end": 31121
} | class ____:
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, search_bounds, func_wrapper, *args, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.jac = self.kwargs.get('jac', None)
self.hess = self.kwargs.get('hess', None)
self.hessp = self.kwargs.get('hessp', None)
self.kwargs.pop("args", None)
self.minimizer = minimize
bounds_list = list(zip(*search_bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
else:
if callable(self.jac):
def wrapped_jac(x):
return self.jac(x, *args)
self.kwargs['jac'] = wrapped_jac
if callable(self.hess):
def wrapped_hess(x):
return self.hess(x, *args)
self.kwargs['hess'] = wrapped_hess
if callable(self.hessp):
def wrapped_hessp(x, p):
return self.hessp(x, p, *args)
self.kwargs['hessp'] = wrapped_hessp
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres:
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres:
self.func_wrapper.nhev += mres.nhev
# Check if is valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
# Use the new point only if it is valid and return a better results
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
@_transition_to_rng("seed", position_num=10)
def dual_annealing(func, bounds, args=(), maxiter=1000,
minimizer_kwargs=None, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, rng=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence or `Bounds`
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
minimizer_kwargs : dict, optional
Keyword arguments to be passed to the local minimizer
(`minimize`). An important option could be ``method`` for the minimizer
method to use.
If no keyword arguments are provided, the local minimizer defaults to
'L-BFGS-B' and uses the already supplied bounds. If `minimizer_kwargs`
is specified, then the dict must contain all parameters required to
control the local minimization. `args` is ignored in this dict, as it is
passed automatically. `bounds` is not automatically passed on to the
local minimizer as the method may not support them.
initial_temp : float, optional
The initial temperature, use higher values to facilitates a wider
search of the energy landscape, allowing dual_annealing to escape
local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
During the annealing process, temperature is decreasing, when it
reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
values give the visiting distribution a heavier tail, this makes
the algorithm jump to a more distant region. The value range is (1, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
exceeded, the algorithm will stop just after the local search is
done. Default value is 1e7.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a `Generator`.
Specify `rng` for repeatable minimizations. The random numbers
generated only affect the visiting distribution function
and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has one of the following
values:
- ``0``: minimum detected in the annealing process.
- ``1``: detection occurred in the local search process.
- ``2``: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
statistics. Journal of Statistical Physics, 52, 479-487 (1988).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014).
:doi:`10.18637/jss.v060.i06`
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> import numpy as np
>>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
>>> ret.x
array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
-6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
-6.05775280e-09, -5.00668935e-09]) # random
>>> ret.fun
0.000000
"""
if isinstance(bounds, Bounds):
bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
# minimizer_kwargs has to be a dict, not None
minimizer_kwargs = minimizer_kwargs or {}
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, *args, **minimizer_kwargs)
# Initialization of random Generator for reproducible runs if rng provided
rng_gen = check_random_state(rng)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rng_gen, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rng_gen)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rng_gen, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
while not need_to_stop:
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rng_gen)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
| LocalSearchWrapper |
python | coleifer__peewee | tests/manytomany.py | {
"start": 925,
"end": 1158
} | class ____(TestModel):
name = TextField()
students = ManyToManyField(Student, backref='+')
students2 = ManyToManyField(Student, through_model=CourseStudentDeferred)
CourseStudent = Course.students.get_through_model()
| Course |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/webhook.py | {
"start": 3486,
"end": 3596
} | class ____(GitHubEnterpriseWebhook, InstallationEventWebhook):
pass
| GitHubEnterpriseInstallationEventWebhook |
python | pypa__packaging | tests/test_markers.py | {
"start": 1241,
"end": 1923
} | class ____:
@pytest.mark.parametrize("value", ["one", "two", None, 3, 5, []])
def test_accepts_value(self, value: str | None | int | list[str]) -> None:
assert Node(value).value == value # type: ignore[arg-type]
@pytest.mark.parametrize("value", ["one", "two"])
def test_str(self, value: str) -> None:
assert str(Node(value)) == str(value)
@pytest.mark.parametrize("value", ["one", "two"])
def test_repr(self, value: str) -> None:
assert repr(Node(value)) == f"<Node({str(value)!r})>"
def test_base_class(self) -> None:
with pytest.raises(NotImplementedError):
Node("cover all the code").serialize()
| TestNode |
python | huggingface__transformers | examples/modular-transformers/modular_multimodal2.py | {
"start": 1567,
"end": 2248
} | class ____(CLIPPreTrainedModel):
def _init_weights(self, module):
if isinstance(module, Multimodal2VisionMLP):
pass
MULTIMODAL2_VISION_START_DOCSTRING = "doc"
# Here the only arg `self.vision_model = CLIPVisionTransformer(config)` in CLIPVisionModel already has the "Vision" part, so
# no need to overwrite it, it will look for `Multimodal2VisionTransformer` which has already being redefined above
# Note: we may want to redefine decorator as well for full consistency, as CLIP does not use "CLIP_VISION_START_DOCSTRING" but only
# "CLIP_START_DOCSTRING"
@add_start_docstrings("New doc", MULTIMODAL2_VISION_START_DOCSTRING)
| Multimodal2VisionPreTrainedModel |
python | sympy__sympy | sympy/concrete/expr_with_intlimits.py | {
"start": 132,
"end": 373
} | class ____(NotImplementedError):
"""
Exception raised when trying to reorder dependent limits.
"""
def __init__(self, expr, msg):
super().__init__(
"%s could not be reordered: %s." % (expr, msg))
| ReorderError |
python | openai__openai-python | src/openai/types/realtime/realtime_mcp_protocol_error_param.py | {
"start": 229,
"end": 389
} | class ____(TypedDict, total=False):
code: Required[int]
message: Required[str]
type: Required[Literal["protocol_error"]]
| RealtimeMcpProtocolErrorParam |
python | plotly__plotly.py | plotly/graph_objs/layout/xaxis/_rangeslider.py | {
"start": 235,
"end": 10045
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.xaxis"
_path_str = "layout.xaxis.rangeslider"
_valid_props = {
"autorange",
"bgcolor",
"bordercolor",
"borderwidth",
"range",
"thickness",
"visible",
"yaxis",
}
@property
def autorange(self):
"""
Determines whether or not the range slider range is computed in
relation to the input data. If `range` is provided, then
`autorange` is set to False.
The 'autorange' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
@property
def bgcolor(self):
"""
Sets the background color of the range slider.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the border color of the range slider.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the border width of the range slider.
The 'borderwidth' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def range(self):
"""
Sets the range of the range slider. If not set, defaults to the
full xaxis range. If the axis `type` is "log", then you must
take the log of your desired range. If the axis `type` is
"date", it should be date strings, like date data, though Date
objects and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is assigned a
serial number from zero in the order it appears.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def thickness(self):
"""
The height of the range slider as a fraction of the total plot
area height.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def visible(self):
"""
Determines whether or not the range slider will be visible. If
visible, perpendicular axes will be set to `fixedrange`
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def yaxis(self):
"""
The 'yaxis' property is an instance of YAxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.xaxis.rangeslider.YAxis`
- A dict of string/value properties that will be passed
to the YAxis constructor
Returns
-------
plotly.graph_objs.layout.xaxis.rangeslider.YAxis
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
@property
def _prop_descriptions(self):
return """\
autorange
Determines whether or not the range slider range is
computed in relation to the input data. If `range` is
provided, then `autorange` is set to False.
bgcolor
Sets the background color of the range slider.
bordercolor
Sets the border color of the range slider.
borderwidth
Sets the border width of the range slider.
range
Sets the range of the range slider. If not set,
defaults to the full xaxis range. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
thickness
The height of the range slider as a fraction of the
total plot area height.
visible
Determines whether or not the range slider will be
visible. If visible, perpendicular axes will be set to
`fixedrange`
yaxis
:class:`plotly.graph_objects.layout.xaxis.rangeslider.Y
Axis` instance or dict with compatible properties
"""
def __init__(
self,
arg=None,
autorange=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
range=None,
thickness=None,
visible=None,
yaxis=None,
**kwargs,
):
"""
Construct a new Rangeslider object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.Rangeslider`
autorange
Determines whether or not the range slider range is
computed in relation to the input data. If `range` is
provided, then `autorange` is set to False.
bgcolor
Sets the background color of the range slider.
bordercolor
Sets the border color of the range slider.
borderwidth
Sets the border width of the range slider.
range
Sets the range of the range slider. If not set,
defaults to the full xaxis range. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
thickness
The height of the range slider as a fraction of the
total plot area height.
visible
Determines whether or not the range slider will be
visible. If visible, perpendicular axes will be set to
`fixedrange`
yaxis
:class:`plotly.graph_objects.layout.xaxis.rangeslider.Y
Axis` instance or dict with compatible properties
Returns
-------
Rangeslider
"""
super().__init__("rangeslider")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.xaxis.Rangeslider
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Rangeslider`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autorange", arg, autorange)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("range", arg, range)
self._set_property("thickness", arg, thickness)
self._set_property("visible", arg, visible)
self._set_property("yaxis", arg, yaxis)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Rangeslider |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 67343,
"end": 70124
} | class ____(VisitorTransform, SkipDeclarations):
def visit_WithStatNode(self, node):
self.visitchildren(node, ['body'])
pos = node.pos
is_async = node.is_async
body, target, manager = node.body, node.target, node.manager
manager = node.manager = ExprNodes.ProxyNode(manager)
node.enter_call = ExprNodes.SimpleCallNode(
pos, function=ExprNodes.AttributeNode(
pos, obj=ExprNodes.CloneNode(manager),
attribute=EncodedString('__aenter__' if is_async else '__enter__'),
is_special_lookup=True),
args=[],
is_temp=True)
if is_async:
node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
if target is not None:
body = Nodes.StatListNode(
pos, stats=[
Nodes.WithTargetAssignmentStatNode(
pos, lhs=target, with_node=node),
body])
excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
ExprNodes.ExcValueNode(pos) for _ in range(3)])
except_clause = Nodes.ExceptClauseNode(
pos, body=Nodes.IfStatNode(
pos, if_clauses=[
Nodes.IfClauseNode(
pos, condition=ExprNodes.NotNode(
pos, operand=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=False,
args=excinfo_target,
await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
body=Nodes.ReraiseStatNode(pos),
),
],
else_clause=None),
pattern=None,
target=None,
excinfo_target=excinfo_target,
)
node.body = Nodes.TryFinallyStatNode(
pos, body=Nodes.TryExceptStatNode(
pos, body=body,
except_clauses=[except_clause],
else_clause=None,
),
finally_clause=Nodes.ExprStatNode(
pos, expr=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=True,
args=ExprNodes.TupleNode(
pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
handle_error_case=False,
)
return node
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
visit_Node = VisitorTransform.recurse_to_children
| WithTransform |
python | doocs__leetcode | solution/1600-1699/1643.Kth Smallest Instructions/Solution.py | {
"start": 0,
"end": 506
} | class ____:
def kthSmallestPath(self, destination: List[int], k: int) -> str:
v, h = destination
ans = []
for _ in range(h + v):
if h == 0:
ans.append("V")
else:
x = comb(h + v - 1, h - 1)
if k > x:
ans.append("V")
v -= 1
k -= x
else:
ans.append("H")
h -= 1
return "".join(ans)
| Solution |
python | pola-rs__polars | py-polars/tests/unit/utils/test_deprecation.py | {
"start": 1367,
"end": 3509
} | class ____: # noqa: D101
@deprecate_nonkeyword_arguments(allowed_args=["self", "baz"], version="1.0.0")
def bar(
self, baz: str, ham: str | None = None, foobar: str | None = None
) -> None: ...
def test_deprecate_nonkeyword_arguments_method_signature() -> None:
# Note the added star indicating keyword-only arguments after 'baz'
expected = "(self, baz: 'str', *, ham: 'str | None' = None, foobar: 'str | None' = None) -> 'None'"
assert str(inspect.signature(Foo.bar)) == expected
def test_deprecate_nonkeyword_arguments_method_warning() -> None:
msg = (
r"all arguments of Foo\.bar except for \'baz\' will be keyword-only in the next breaking release."
r" Use keyword arguments to silence this warning."
)
with pytest.deprecated_call(match=msg):
Foo().bar("qux", "quox")
def test_deprecate_parameter_as_multi_positional(recwarn: Any) -> None:
@deprecate_parameter_as_multi_positional("foo")
def hello(*foo: str) -> tuple[str, ...]:
return foo
with pytest.deprecated_call():
result = hello(foo="x")
assert result == hello("x")
with pytest.deprecated_call():
result = hello(foo=["x", "y"]) # type: ignore[arg-type]
assert result == hello("x", "y")
def test_deprecate_parameter_as_multi_positional_existing_arg(recwarn: Any) -> None:
@deprecate_parameter_as_multi_positional("foo")
def hello(bar: int, *foo: str) -> tuple[int, tuple[str, ...]]:
return bar, foo
with pytest.deprecated_call():
result = hello(5, foo="x")
assert result == hello(5, "x")
with pytest.deprecated_call():
result = hello(5, foo=["x", "y"]) # type: ignore[arg-type]
assert result == hello(5, "x", "y")
def test_identify_deprecations() -> None:
dep = identify_deprecations()
assert isinstance(dep, dict)
valid_args = get_args(DeprecationType)
assert all(key in valid_args for key in dep)
with pytest.raises(
ValueError,
match="unrecognised deprecation type 'bitterballen'",
):
identify_deprecations("bitterballen") # type: ignore[arg-type]
| Foo |
python | numba__numba | numba/tests/test_tuples.py | {
"start": 4256,
"end": 5441
} | class ____(TestCase):
def test_unituple(self):
tuple_type = types.UniTuple(types.int32, 2)
cf_first = njit((tuple_type,))(tuple_first)
cf_second = njit((tuple_type,))(tuple_second)
self.assertPreciseEqual(cf_first((4, 5)), 4)
self.assertPreciseEqual(cf_second((4, 5)), 5)
def test_hetero_tuple(self):
tuple_type = types.Tuple((types.int64, types.float32))
cf_first = njit((tuple_type,))(tuple_first)
cf_second = njit((tuple_type,))(tuple_second)
self.assertPreciseEqual(cf_first((2**61, 1.5)), 2**61)
self.assertPreciseEqual(cf_second((2**61, 1.5)), 1.5)
def test_size_mismatch(self):
# Issue #1638: tuple size should be checked when unboxing
tuple_type = types.UniTuple(types.int32, 2)
cfunc = njit((tuple_type,))(tuple_first)
entry_point = cfunc.overloads[cfunc.signatures[0]].entry_point
with self.assertRaises(ValueError) as raises:
entry_point((4, 5, 6))
self.assertEqual(str(raises.exception),
("size mismatch for tuple, "
"expected 2 element(s) but got 3"))
| TestTuplePassing |
python | getsentry__sentry | tests/sentry/hybridcloud/test_region.py | {
"start": 778,
"end": 2902
} | class ____(TestCase):
def setUp(self) -> None:
self.target_region = _TEST_REGIONS[0]
self.organization = self.create_organization(region=self.target_region)
def test_by_organization_id(self) -> None:
region_resolution = ByOrganizationId()
arguments = {"organization_id": self.organization.id}
actual_region = region_resolution.resolve(arguments)
assert actual_region == self.target_region
def test_by_organization_slug(self) -> None:
region_resolution = ByOrganizationSlug()
arguments = {"slug": self.organization.slug}
actual_region = region_resolution.resolve(arguments)
assert actual_region == self.target_region
def test_by_organization_id_attribute(self) -> None:
region_resolution = ByOrganizationIdAttribute("organization_member")
with assume_test_silo_mode(SiloMode.REGION):
org_member = OrganizationMember.objects.create(
organization_id=self.organization.id,
user_id=self.user.id,
)
arguments = {"organization_member": org_member}
actual_region = region_resolution.resolve(arguments)
assert actual_region == self.target_region
def test_require_single_organization(self) -> None:
region_resolution = RequireSingleOrganization()
with (
override_regions([self.target_region]),
override_settings(SENTRY_SINGLE_ORGANIZATION=True),
):
actual_region = region_resolution.resolve({})
assert actual_region == self.target_region
with (
override_regions([self.target_region]),
override_settings(SENTRY_SINGLE_ORGANIZATION=False),
):
with pytest.raises(RegionResolutionError):
region_resolution.resolve({})
with override_regions(_TEST_REGIONS), override_settings(SENTRY_SINGLE_ORGANIZATION=True):
self.create_organization(region=_TEST_REGIONS[1])
with pytest.raises(RegionResolutionError):
region_resolution.resolve({})
| RegionResolutionTest |
python | PrefectHQ__prefect | tests/test_transactions.py | {
"start": 18294,
"end": 22999
} | class ____:
@pytest.fixture(autouse=True)
def default_storage_setting(self, tmp_path: Path):
name = str(uuid.uuid4())
LocalFileSystem(basepath=str(tmp_path)).save(name)
with temporary_settings(
{
PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: f"local-file-system/{name}",
PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK: f"local-file-system/{name}",
}
):
yield
class TestTransaction:
def test_transaction_outside_of_run(self):
with transaction(
key="test_transaction_outside_of_run", write_on_commit=True
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = txn.read()
assert isinstance(record, ResultRecord)
assert record.result == {"foo": "bar"}
def test_transaction_inside_flow_default_storage(self):
@flow(persist_result=True)
def test_flow():
with transaction(
key="test_transaction_inside_flow_default_storage"
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = txn.read()
assert isinstance(record, ResultRecord)
# make sure we aren't using an anonymous block
assert (
record.metadata.storage_block_id
== get_default_result_storage()._block_document_id
)
return record.result
assert test_flow() == {"foo": "bar"}
def test_transaction_inside_task_default_storage(self):
@task(persist_result=True)
def test_task():
with transaction(
key="test_transaction_inside_task_default_storage",
commit_mode=CommitMode.EAGER,
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = txn.read()
assert isinstance(record, ResultRecord)
# make sure we aren't using an anonymous block
assert (
record.metadata.storage_block_id
== get_default_result_storage()._block_document_id
)
return record.result
assert test_task() == {"foo": "bar"}
class TestAsyncTransaction:
async def test_transaction_outside_of_run(self):
async with atransaction(
key="test_transaction_outside_of_run", write_on_commit=True
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = await txn.read()
assert isinstance(record, ResultRecord)
assert record.result == {"foo": "bar"}
async def test_transaction_inside_flow_default_storage(self):
@flow(persist_result=True)
async def test_flow():
async with atransaction(
key="test_transaction_inside_flow_default_storage"
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = await txn.read()
assert isinstance(record, ResultRecord)
# make sure we aren't using an anonymous block
assert (
record.metadata.storage_block_id
== (await aget_default_result_storage())._block_document_id
)
return record.result
assert await test_flow() == {"foo": "bar"}
async def test_transaction_inside_task_default_storage(self):
@task(persist_result=True)
async def test_task():
async with atransaction(
key="test_transaction_inside_task_default_storage",
commit_mode=CommitMode.EAGER,
) as txn:
assert isinstance(txn.store, ResultStore)
txn.stage({"foo": "bar"})
record = await txn.read()
assert isinstance(record, ResultRecord)
# make sure we aren't using an anonymous block
assert (
record.metadata.storage_block_id
== (await aget_default_result_storage())._block_document_id
)
return record.result
assert await test_task() == {"foo": "bar"}
| TestDefaultTransactionStorage |
python | dask__distributed | distributed/shuffle/tests/test_buffer.py | {
"start": 3961,
"end": 5321
} | class ____(ShardsBuffer):
def __init__(self, memory_limiter: ResourceLimiter, concurrency_limit: int) -> None:
self.storage: dict[str, bytes] = defaultdict(bytes)
super().__init__(
memory_limiter=memory_limiter, concurrency_limit=concurrency_limit
)
async def _process(self, id: str, shards: list[bytes]) -> None:
if id == "error":
raise RuntimeError("Error during processing")
self.storage[id] += b"".join(shards)
def read(self, id: str) -> bytes:
return self.storage[id]
@gen_test()
async def test_memory_limit_blocked_exception():
limit = parse_bytes("10.0 MiB")
big_payload = {
"shard-1": gen_bytes(2, limit),
}
broken_payload = {
"error": "not-bytes",
}
limiter = ResourceLimiter(limit)
async with BufferShardsBroken(
memory_limiter=limiter,
concurrency_limit=2,
) as mf:
big_write = asyncio.create_task(mf.write(big_payload))
small_write = asyncio.create_task(mf.write(broken_payload))
# The broken write hits the limit and blocks
await big_write
await small_write
await mf.flush()
# Make sure exception is not dropped
with pytest.raises(RuntimeError, match="Error during processing"):
mf.raise_on_exception()
| BufferShardsBroken |
python | davidhalter__jedi | test/completion/import_tree/recurse_class2.py | {
"start": 30,
"end": 66
} | class ____(recurse_class1.C):
pass
| C |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 7765,
"end": 7898
} | class ____(BiffRecord):
_REC_ID = 0x00E1
def __init__(self):
self._rec_data = pack('BB', 0xB0, 0x04)
| InteraceHdrRecord |
python | getsentry__sentry | src/sentry/integrations/jira/endpoints/search.py | {
"start": 869,
"end": 4623
} | class ____(IntegrationEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
"""
Called by our front end when it needs to make requests to Jira's API for data.
"""
provider = IntegrationProviderSlug.JIRA.value
def _get_integration(self, organization: RpcOrganization, integration_id: int) -> Integration:
return Integration.objects.get(
organizationintegration__organization_id=organization.id,
id=integration_id,
provider=self.provider,
)
def get(
self, request: Request, organization: RpcOrganization, integration_id: int, **kwds: Any
) -> Response:
try:
integration = self._get_integration(organization, integration_id)
except Integration.DoesNotExist:
return Response(status=404)
installation = integration.get_installation(organization.id)
if not isinstance(installation, JiraIntegration):
raise NotFound("Integration by that id is not a JiraIntegration.")
jira_client = installation.get_client()
field = request.GET.get("field")
query = request.GET.get("query")
if field is None:
return Response({"detail": "field is a required parameter"}, status=400)
if not query:
return Response({"detail": "query is a required parameter"}, status=400)
if field in ("externalIssue", "parent"):
if not query:
return Response([])
try:
resp = installation.search_issues(query)
except IntegrationError as e:
return Response({"detail": str(e)}, status=400)
return Response(
[
{"label": "({}) {}".format(i["key"], i["fields"]["summary"]), "value": i["key"]}
for i in resp.get("issues", [])
]
)
if field in ("assignee", "reporter"):
try:
response = jira_client.search_users_for_project(
request.GET.get("project", ""), query
)
except (ApiUnauthorized, ApiError):
return Response({"detail": "Unable to fetch users from Jira"}, status=400)
user_tuples = filter(
None, [build_user_choice(user, jira_client.user_id_field()) for user in response]
)
users = [{"value": user_id, "label": display} for user_id, display in user_tuples]
return Response(users)
if field == "project":
try:
response = jira_client.get_projects_paginated(params={"query": query})
except (ApiUnauthorized, ApiError):
return Response({"detail": "Unable to fetch projects from Jira"}, status=400)
projects = [
JiraProjectMapping(label=f"{p["key"]} - {p["name"]}", value=p["id"])
for p in response.get("values", [])
]
return Response(projects)
try:
response = jira_client.get_field_autocomplete(name=field, value=query)
except (ApiUnauthorized, ApiError):
return Response(
{"detail": f"Unable to fetch autocomplete for {field} from Jira"},
status=400,
)
choices = [
{
"value": result["value"],
# Jira's response will highlight the matching substring in the name using HTML formatting.
"label": BeautifulSoup(result["displayName"], "html.parser").get_text(),
}
for result in response["results"]
]
return Response(choices)
| JiraSearchEndpoint |
python | ansible__ansible | test/lib/ansible_test/_internal/config.py | {
"start": 1123,
"end": 6975
} | class ____(CommonConfig):
"""Configuration common to all commands which execute in an environment."""
def __init__(self, args: t.Any, command: str) -> None:
super().__init__(args, command)
self.host_settings: HostSettings = args.host_settings
self.host_path: t.Optional[str] = args.host_path
self.containers: t.Optional[str] = args.containers
self.pypi_proxy: bool = args.pypi_proxy
self.pypi_endpoint: t.Optional[str] = args.pypi_endpoint
# Populated by content_config.get_content_config on the origin.
# Serialized and passed to delegated instances to avoid parsing a second time.
self.content_config: t.Optional[ContentConfig] = None
# Set by check_controller_python once HostState has been created by prepare_profiles.
# This is here for convenience, to avoid needing to pass HostState to some functions which already have access to EnvironmentConfig.
self.controller_python: t.Optional[PythonConfig] = None
"""
The Python interpreter used by the controller.
Only available after delegation has been performed or skipped (if delegation is not required).
"""
if self.host_path:
self.delegate = False
else:
self.delegate = (
not isinstance(self.controller, OriginConfig)
or isinstance(self.controller.python, VirtualPythonConfig)
or self.controller.python.version != version_to_str(sys.version_info[:2])
or bool(verify_sys_executable(self.controller.python.path))
)
self.docker_network: t.Optional[str] = args.docker_network
self.docker_terminate: t.Optional[TerminateMode] = args.docker_terminate
self.remote_endpoint: t.Optional[str] = args.remote_endpoint
self.remote_stage: t.Optional[str] = args.remote_stage
self.remote_terminate: t.Optional[TerminateMode] = args.remote_terminate
self.prime_containers: bool = args.prime_containers
self.requirements: bool = args.requirements
self.delegate_args: list[str] = []
self.dev_systemd_debug: bool = args.dev_systemd_debug
self.dev_probe_cgroups: t.Optional[str] = args.dev_probe_cgroups
debugger_flags = DebuggerFlags(
on_demand=args.dev_debug_on_demand,
cli=args.dev_debug_cli,
ansiballz=args.dev_debug_ansiballz,
self=args.dev_debug_self,
)
self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata(debugger_flags=debugger_flags)
self.metadata_path: t.Optional[str] = None
def metadata_callback(payload_config: PayloadConfig) -> None:
"""Add the metadata file to the payload file list."""
config = self
files = payload_config.files
if config.metadata_path:
files.append((os.path.abspath(config.metadata_path), config.metadata_path))
data_context().register_payload_callback(metadata_callback)
def host_callback(payload_config: PayloadConfig) -> None:
"""Add the host files to the payload file list."""
config = self
if config.host_path:
settings_path = os.path.join(config.host_path, 'settings.dat')
state_path = os.path.join(config.host_path, 'state.dat')
config_path = os.path.join(config.host_path, 'config.dat')
files = payload_config.files
files.append((os.path.abspath(settings_path), settings_path))
files.append((os.path.abspath(state_path), state_path))
files.append((os.path.abspath(config_path), config_path))
data_context().register_payload_callback(host_callback)
@property
def controller(self) -> ControllerHostConfig:
"""Host configuration for the controller."""
return self.host_settings.controller
@property
def targets(self) -> list[HostConfig]:
"""Host configuration for the targets."""
return self.host_settings.targets
def only_target[THostConfig: HostConfig](self, target_type: t.Type[THostConfig]) -> THostConfig:
"""
Return the host configuration for the target.
Requires that there is exactly one target of the specified type.
"""
targets = list(self.targets)
if len(targets) != 1:
raise Exception('There must be exactly one target.')
target = targets.pop()
if not isinstance(target, target_type):
raise Exception(f'Target is {type(target_type)} instead of {target_type}.')
return target
def only_targets[THostConfig: HostConfig](self, target_type: t.Type[THostConfig]) -> list[THostConfig]:
"""
Return a list of target host configurations.
Requires that there are one or more targets, all the specified type.
"""
if not self.targets:
raise Exception('There must be one or more targets.')
assert type_guard(self.targets, target_type)
return t.cast(list[THostConfig], self.targets)
@property
def target_type(self) -> t.Type[HostConfig]:
"""
The true type of the target(s).
If the target is the controller, the controller type is returned.
Requires at least one target, and all targets must be of the same type.
"""
target_types = set(type(target) for target in self.targets)
if len(target_types) != 1:
raise Exception('There must be one or more targets, all of the same type.')
target_type = target_types.pop()
if issubclass(target_type, ControllerConfig):
target_type = type(self.controller)
return target_type
| EnvironmentConfig |
python | mahmoud__glom | glom/core.py | {
"start": 36797,
"end": 39035
} | class ____:
""":class:`Call` specifies when a target should be passed to a function,
*func*.
:class:`Call` is similar to :func:`~functools.partial` in that
it is no more powerful than ``lambda`` or other functions, but
it is designed to be more readable, with a better ``repr``.
Args:
func (callable): a function or other callable to be called with
the target
:class:`Call` combines well with :attr:`~glom.T` to construct objects. For
instance, to generate a dict and then pass it to a constructor:
>>> class ExampleClass(object):
... def __init__(self, attr):
... self.attr = attr
...
>>> target = {'attr': 3.14}
>>> glom(target, Call(ExampleClass, kwargs=T)).attr
3.14
This does the same as ``glom(target, lambda target:
ExampleClass(**target))``, but it's easy to see which one reads
better.
.. note::
``Call`` is mostly for functions. Use a :attr:`~glom.T` object
if you need to call a method.
.. warning::
:class:`Call` has a successor with a fuller-featured API, new
in 19.10.0: the :class:`Invoke` specifier type.
"""
def __init__(self, func=None, args=None, kwargs=None):
if func is None:
func = T
if not (callable(func) or isinstance(func, (Spec, TType))):
raise TypeError('expected func to be a callable or T'
' expression, not: %r' % (func,))
if args is None:
args = ()
if kwargs is None:
kwargs = {}
self.func, self.args, self.kwargs = func, args, kwargs
def glomit(self, target, scope):
'run against the current target'
r = lambda spec: arg_val(target, spec, scope)
return r(self.func)(*r(self.args), **r(self.kwargs))
def __repr__(self):
cn = self.__class__.__name__
return f'{cn}({bbrepr(self.func)}, args={self.args!r}, kwargs={self.kwargs!r})'
def _is_spec(obj, strict=False):
# a little util for codifying the spec type checking in glom
if isinstance(obj, TType):
return True
if strict:
return type(obj) is Spec
return _has_callable_glomit(obj) # pragma: no cover
| Call |
python | kamyu104__LeetCode-Solutions | Python/subdomain-visit-count.py | {
"start": 125,
"end": 743
} | class ____(object):
def subdomainVisits(self, cpdomains):
"""
:type cpdomains: List[str]
:rtype: List[str]
"""
result = collections.defaultdict(int)
for domain in cpdomains:
count, domain = domain.split()
count = int(count)
frags = domain.split('.')
curr = []
for i in reversed(xrange(len(frags))):
curr.append(frags[i])
result[".".join(reversed(curr))] += count
return ["{} {}".format(count, domain) \
for domain, count in result.iteritems()]
| Solution |
python | pytorch__pytorch | torch/distributed/pipelining/microbatch.py | {
"start": 3190,
"end": 18011
} | class ____:
pass
def _split_block_mask(
block_mask: BlockMask,
num_chunks: int,
) -> list[BlockMask]:
"""Given a block mask, split the block mask along the batch dimension (dim0).
Args:
block_mask: Block mask to split
num_chunks: Number of chunks to split the block mask into
Returns:
chunk_block_masks: List of chunked block masks
"""
# BlockMask will broadcast if B is 1.
if block_mask.kv_num_blocks.size(0) == 1:
return [block_mask] * num_chunks
assert block_mask.kv_num_blocks.size(0) >= num_chunks, (
"Block mask has fewer batch size than the number of chunks. "
)
batch_dim = 0
kv_num_blocks_chunks = torch.tensor_split(
block_mask.kv_num_blocks, num_chunks, batch_dim
)
kv_indices_chunks = torch.tensor_split(block_mask.kv_indices, num_chunks, batch_dim)
full_kv_num_blocks_chunks = (
torch.tensor_split(block_mask.full_kv_num_blocks, num_chunks, batch_dim)
if block_mask.full_kv_num_blocks is not None
else [None] * num_chunks
)
full_kv_indices_chunks = (
torch.tensor_split(block_mask.full_kv_indices, num_chunks, batch_dim)
if block_mask.full_kv_indices is not None
else [None] * num_chunks
)
chunk_block_masks = []
batch_offset = 0
for chunk_idx in range(num_chunks):
def create_mask_mod(idx):
def batch_offset_mask_mod(b, h, q_idx, kv_idx):
b_offset = torch.full_like(b, idx)
return block_mask.mask_mod(b + b_offset, h, q_idx, kv_idx)
return batch_offset_mask_mod
chunk_block_masks.append(
BlockMask.from_kv_blocks(
kv_num_blocks=kv_num_blocks_chunks[chunk_idx],
kv_indices=kv_indices_chunks[chunk_idx],
full_kv_num_blocks=full_kv_num_blocks_chunks[chunk_idx],
full_kv_indices=full_kv_indices_chunks[chunk_idx],
BLOCK_SIZE=block_mask.BLOCK_SIZE,
mask_mod=create_mask_mod(batch_offset),
seq_lengths=block_mask.seq_lengths,
)
)
batch_offset += kv_num_blocks_chunks[chunk_idx].size(0)
return chunk_block_masks
def _split_tensor(
tensor: torch.Tensor,
spec: TensorChunkSpec,
num_chunks: int,
) -> Sequence[torch.Tensor]:
"""Given a tensor, and a chunking spec, split the tensor.
Args:
tensor: Tensor to split
spec: Chunking spec
num_chunks: Number of chunks to split the tensor into
Returns:
chunk_tensors: List of chunked tensors
"""
assert tensor.size(spec.split_dim) >= num_chunks, (
f"Tensor size {tensor.size(spec.split_dim)} is smaller than num_chunks"
)
chunk_tensors = torch.tensor_split(tensor, num_chunks, spec.split_dim)
if not _debug_mask_minibatches:
return chunk_tensors
expanded_chunks = []
split_dim_idx = 0
for chunk_tensor in chunk_tensors:
new_val = torch.zeros_like(tensor)
upper_idx = split_dim_idx + chunk_tensor.size(spec.split_dim)
slice_indices = [slice(None, None, None)] * new_val.ndim
slice_indices[spec.split_dim] = slice(split_dim_idx, upper_idx)
new_val[slice_indices] = chunk_tensor
expanded_chunks.append(new_val)
split_dim_idx += chunk_tensor.size(spec.split_dim)
return expanded_chunks
def _shard_dict_of_args(
args_dict,
args_chunk_spec,
num_chunks,
):
"""
Given a dictionary of args, and a dictionary of chunking specs, shard the
args according to the chunking specs.
Args:
args_dict: Dictionary of args
args_chunk_spec: Dictionary of chunking specs
num_chunks: Number of chunks to shard the args into
Returns:
args_split: List of sharded args
"""
if not args_dict:
return [{} for _ in range(num_chunks)]
assert len(args_dict) == len(args_chunk_spec), (
f"args_dict.keys() = {list(args_dict.keys())} "
f"args_chunk_spec.keys() = {list(args_chunk_spec.keys())}"
)
assert args_chunk_spec is not None # Should have been set by caller
values, tree_spec = tree_flatten(
args_dict, is_leaf=lambda x: isinstance(x, BlockMask)
)
chunk_specs, _ = tree_flatten(
args_chunk_spec, is_leaf=lambda x: isinstance(x, BlockMask)
)
# First check and find the actual number of chunks
split_sizes = []
for v, spec in zip(values, chunk_specs, strict=True):
# The original logic is "spec is _Replicate". This doesn't seem to be
# correct. But we keep it for backward compatibility.
if spec is _Replicate or isinstance(spec, _Replicate):
split_sizes.append(num_chunks)
elif isinstance(v, torch.Tensor):
assert isinstance(spec, TensorChunkSpec)
split_sizes.append(v.size(spec.split_dim))
elif isinstance(v, BlockMask):
assert isinstance(spec, TensorChunkSpec)
assert spec.split_dim == 0, "BlockMask only supports split_dim=0"
# BlockMask will broadcast if B is 1.
if v.kv_num_blocks.size(0) == 1:
split_sizes.append(num_chunks)
else:
split_sizes.append(v.kv_num_blocks.size(0))
else:
raise ValueError(
f"Unsupported chunk spec: {spec} and value: {v} combination."
)
result_num_chunks = min(*split_sizes, num_chunks)
flat_split_results: list[Any] = [[] for _ in range(result_num_chunks)]
for v, spec in zip(values, chunk_specs, strict=True):
v_splits: Sequence[Any] = []
if spec is _Replicate or isinstance(spec, _Replicate):
v_splits = [v] * result_num_chunks
elif isinstance(v, torch.Tensor):
v_splits = _split_tensor(v, spec, result_num_chunks)
elif isinstance(v, BlockMask):
v_splits = _split_block_mask(v, result_num_chunks)
else:
raise ValueError(
f"Unsupported chunk spec: {spec} and value: {v} combination."
)
for _flat_split_result, _v_split in zip(
flat_split_results, v_splits, strict=True
):
_flat_split_result.append(_v_split)
return [
tree_unflatten(_flat_split_result, tree_spec)
for _flat_split_result in flat_split_results
]
def split_args_kwargs_into_chunks(
args: tuple[Any, ...],
kwargs: dict[str, Any] | None,
chunks: int,
args_chunk_spec: tuple[TensorChunkSpec, ...] | None = None,
kwargs_chunk_spec: dict[str, TensorChunkSpec] | None = None,
) -> tuple[list[tuple], list[dict]]:
"""
Given a sequence of args and kwargs, split them into a number of chunks
according to their respective chunking specs.
Args:
args: Tuple of args
kwargs: Dict of kwargs
chunks: Number of chunks to split the args and kwargs into
args_chunk_spec: chunking specs for args, in same shape as args
kwargs_chunk_spec: chunking specs for kwargs, in same shape as kwargs
Returns:
args_split: List of sharded args
kwargs_split: List of sharded kwargs
"""
# Given `args` and `kwargs`, we want to yield a set of `chunks` args and kwargs such that
# the constituent Tensor values have been sharded/replicated according to the `args_chunk_spec`
# and `kwargs_chunk_spec` specifications. The steps are as follows:
#
# 1. Use pytree.tree_flatten to flatten each arg and its spec into nto a 1d array of values.
# To use a running example: suppose our inputs look like
#
# args = ([A, [B, C]], D) args_spec = ([None, [None, TensorChunkSpec]], None)
# (kwargs not shown but it's a similar process)
#
# Then for this step we would end up with
#
# args = ([A, B, C], D) args_spec = ([None, None, TensorChunkSpec], None)
#
# 2. Shard or replicate the arguments subject to the policy in the spec. Suppose chunks = 2
#
# args = ([[A, A], [B, B], [C_1, C_2]], [D, D])
#
# 3. Rotate the nesting order such that chunks are the outer dimension
#
# args_chunks = [
# ([A, B, C_1], D),
# ([A, B, C_2], D),
# ]
#
# 4. Unflatten each chunk according to the spec
#
# args_chunks = [
# ([A, [B, C_1]], D),
# ([A, [B, C_2]], D),
# ]
# TODO: _debug_mask_minibatches
# Handle the case where kwargs is None
if kwargs is None:
kwargs = {}
# If user did not provide args_chunk_spec or kwargs_chunk_spec, we extend
# their format and use default chunking along dim 0
def default_spec(v):
if isinstance(v, torch.Tensor | BlockMask):
return TensorChunkSpec(DEFAULT_CHUNK_DIM)
else:
return _Replicate()
if args_chunk_spec is None:
args_chunk_spec = tree_map(
default_spec, args, is_leaf=lambda v: isinstance(v, BlockMask)
)
if kwargs_chunk_spec is None:
kwargs_chunk_spec = tree_map(
default_spec, kwargs, is_leaf=lambda v: isinstance(v, BlockMask)
)
args_split_dict = _shard_dict_of_args(
dict(enumerate(args)),
dict(enumerate(args_chunk_spec)),
chunks,
)
real_num_chunks = len(args_split_dict)
kwargs_split = _shard_dict_of_args(
kwargs,
kwargs_chunk_spec,
real_num_chunks,
)
if len(kwargs_split) < real_num_chunks:
# In case kwargs are sharded into less chunks
# e.g. when `args` has no tensor, just values
real_num_chunks = len(kwargs_split)
# Re-shard args
args_split_dict = _shard_dict_of_args(
dict(enumerate(args)),
dict(enumerate(args_chunk_spec)),
real_num_chunks,
)
if len(args_split_dict) != len(kwargs_split):
raise RuntimeError(
"args and kwargs are split into different number of chunks: "
f"{len(args_split_dict)}, {len(kwargs_split)}"
)
args_split = [
tuple(chunk_args[i] for i in range(len(chunk_args)))
for chunk_args in args_split_dict
]
return args_split, kwargs_split
def merge_chunks(
chunks: list[Any],
chunk_spec,
):
"""
Given a list of chunks, merge them into a single value according to
the chunk spec.
Args:
chunks: list of chunks
chunk_spec: Chunking spec for the chunks
Returns:
value: Merged value
"""
# This is essentially the inverse of `split_args_kwargs_into_chunks`, so the
# steps are similar to the steps in that function but in reverse. Given the
# input values:
#
# chunks = [
# ([A, [B, C_1]], D),
# ([A, [B, C_2]], D),
# ]
# args_spec = ([None, [None, TensorChunkSpec]], None)
#
# 1. Flatten the chunks according to the chunk_spec
#
# chunks_flat = [
# ([A, B, C_1], D),
# ([A, B, C_2], D),
# ]
#
# 2. Rotate the nesting order such that chunks are the inner dimension
#
# value_inner = ([A, B, [C_1, C_2]], D)
#
# 3. Concatenate sharded arguments
#
# value_combined = ([A, B, C], D)
#
# 4. Unflatten the combined args given the spec
#
# value = ([A, [B, C]], D)
# Preliminary: flatten the chunk spec
if chunk_spec is not None:
spec_flattened, flatten_spec = tree_flatten(chunk_spec)
else:
# If chunk_spec is not provided, we will merge chunks along the default dimension (0), for all output fields
# We obtain the output structure by flattening chunk 0 and generate the chunk_spec
chunk0_flat, flatten_spec = tree_flatten(chunks[0])
spec_flattened = [TensorChunkSpec(DEFAULT_CHUNK_DIM)] * len(chunk0_flat)
# Stage 1: flatten chunks
# chunks_flattened : [num chunks, num args]
chunks_flattened = []
for chunk in chunks:
chunk_flattened, _ = tree_flatten(chunk)
if len(chunk_flattened) != len(spec_flattened):
raise ValueError(f"Chunk {chunk} did not match chunk spec {chunk_spec}")
chunks_flattened.append(chunk_flattened)
# Stage 2 and 3: Rotate nesting order s.t. chunks are inner dimension and
# concatenate sharded operands
# args_flattened : [num args]
args_flattened = []
for arg_idx, arg in enumerate(spec_flattened):
if isinstance(arg, TensorChunkSpec):
partial_values = [
chunks_flattened[chunk_idx][arg_idx]
for chunk_idx in range(len(chunks_flattened))
]
if _debug_mask_minibatches:
# Infer size of individual chunks by running `tensor_split` again
overall_shape = partial_values[0].shape
for val in partial_values[1:]:
assert val.shape == overall_shape
meta_chunks = torch.tensor_split(
torch.empty(*overall_shape, device="meta"),
sections=len(partial_values),
dim=arg.split_dim,
)
values_to_cat = []
chunk_start_idx = 0
assert len(partial_values) == len(meta_chunks)
for partial_value, meta_chunk in zip(
partial_values, meta_chunks, strict=True
):
chunk_end_idx = chunk_start_idx + meta_chunk.size(arg.split_dim)
slice_indices = [slice(None, None, None)] * partial_value.ndim
slice_indices[arg.split_dim] = slice(chunk_start_idx, chunk_end_idx)
sliced = partial_value[slice_indices]
values_to_cat.append(sliced)
chunk_start_idx = chunk_end_idx
else:
values_to_cat = partial_values
args_flattened.append(torch.cat(values_to_cat, dim=arg.split_dim))
elif isinstance(arg, _CustomReducer):
reduced_val = arg.init_value
for chunk_idx in range(len(chunks_flattened)):
reduced_val = arg.reduce_fn(
reduced_val, chunks_flattened[chunk_idx][arg_idx]
)
args_flattened.append(reduced_val)
else:
value = chunks_flattened[0][arg_idx]
for chunk_idx in range(1, len(chunks_flattened)):
assert chunks_flattened[chunk_idx][arg_idx] == value
args_flattened.append(value)
# Stage 4: Unflatten combined args
return tree_unflatten(args_flattened, flatten_spec)
| _Replicate |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/base.py | {
"start": 1099,
"end": 2253
} | class ____(BaseIndex[BST], Generic[BST]):
"""Base Struct Store Index."""
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
index_struct: Optional[BST] = None,
schema_extract_prompt: Optional[BasePromptTemplate] = None,
output_parser: Optional[OUTPUT_PARSER_TYPE] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self.schema_extract_prompt = (
schema_extract_prompt or DEFAULT_SCHEMA_EXTRACT_PROMPT
)
self.output_parser = output_parser or default_output_parser
super().__init__(
nodes=nodes,
index_struct=index_struct,
**kwargs,
)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for Struct Store Index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
raise NotImplementedError("Struct Store Index does not support ref_doc_info.")
| BaseStructStoreIndex |
python | RaRe-Technologies__gensim | gensim/test/test_miislita.py | {
"start": 658,
"end": 1474
} | class ____(corpora.TextCorpus):
stoplist = set('for a of the and to in on'.split())
def get_texts(self):
"""
Parse documents from the .cor file provided in the constructor. Lowercase
each document and ignore some stopwords.
.cor format: one document per line, words separated by whitespace.
"""
for doc in self.getstream():
yield [word for word in utils.to_unicode(doc).lower().split()
if word not in CorpusMiislita.stoplist]
def __len__(self):
"""Define this so we can use `len(corpus)`"""
if 'length' not in self.__dict__:
logger.info("caching corpus size (calculating number of documents)")
self.length = sum(1 for _ in self.get_texts())
return self.length
| CorpusMiislita |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 369602,
"end": 371755
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateRepository"""
__schema__ = github_schema
__field_names__ = (
"repository_id",
"name",
"description",
"template",
"homepage_url",
"has_wiki_enabled",
"has_issues_enabled",
"has_projects_enabled",
"has_discussions_enabled",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The ID of the repository to update."""
name = sgqlc.types.Field(String, graphql_name="name")
"""The new name of the repository."""
description = sgqlc.types.Field(String, graphql_name="description")
"""A new description for the repository. Pass an empty string to
erase the existing description.
"""
template = sgqlc.types.Field(Boolean, graphql_name="template")
"""Whether this repository should be marked as a template such that
anyone who can access it can create new repositories with the same
files and directory structure.
"""
homepage_url = sgqlc.types.Field(URI, graphql_name="homepageUrl")
"""The URL for a web page about this repository. Pass an empty string
to erase the existing URL.
"""
has_wiki_enabled = sgqlc.types.Field(Boolean, graphql_name="hasWikiEnabled")
"""Indicates if the repository should have the wiki feature enabled."""
has_issues_enabled = sgqlc.types.Field(Boolean, graphql_name="hasIssuesEnabled")
"""Indicates if the repository should have the issues feature
enabled.
"""
has_projects_enabled = sgqlc.types.Field(Boolean, graphql_name="hasProjectsEnabled")
"""Indicates if the repository should have the project boards feature
enabled.
"""
has_discussions_enabled = sgqlc.types.Field(Boolean, graphql_name="hasDiscussionsEnabled")
"""Indicates if the repository should have the discussions feature
enabled.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateRepositoryInput |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass1.py | {
"start": 144,
"end": 857
} | class ____(ABC):
@abstractmethod
def foo1(self):
pass
@abstractmethod
def foo2(self):
pass
def foo3(self):
return 3
@classmethod
def foo4(cls):
# This should not generate an error even though
# it would appear to be attempting to instantiate
# an abstract class. That's because we need to
# assume that the caller is making this call on
# a non-abstract subclass.
return cls()
v1 = [subclass() for subclass in AbstractClassA.__subclasses__()]
reveal_type(v1, expected_text="list[AbstractClassA]")
# This should generate an error because AbstractFoo
# is an abstract class.
a = AbstractClassA()
| AbstractClassA |
python | pyparsing__pyparsing | pyparsing/exceptions.py | {
"start": 9382,
"end": 9850
} | class ____(ParseBaseException):
"""
Exception thrown when a parse expression doesn't match the input string
Example:
.. testcode::
integer = Word(nums).set_name("integer")
try:
integer.parse_string("ABC")
except ParseException as pe:
print(pe, f"column: {pe.column}")
prints:
.. testoutput::
Expected integer, found 'ABC' (at char 0), (line:1, col:1) column: 1
"""
| ParseException |
python | python-openxml__python-docx | tests/oxml/text/test_run.py | {
"start": 174,
"end": 1156
} | class ____:
"""Unit-test suite for the CT_R (run, <w:r>) element."""
@pytest.mark.parametrize(
("initial_cxml", "text", "expected_cxml"),
[
("w:r", "foobar", 'w:r/w:t"foobar"'),
("w:r", "foobar ", 'w:r/w:t{xml:space=preserve}"foobar "'),
(
"w:r/(w:rPr/w:rStyle{w:val=emphasis}, w:cr)",
"foobar",
'w:r/(w:rPr/w:rStyle{w:val=emphasis}, w:cr, w:t"foobar")',
),
],
)
def it_can_add_a_t_preserving_edge_whitespace(
self, initial_cxml: str, text: str, expected_cxml: str
):
r = cast(CT_R, element(initial_cxml))
expected_xml = xml(expected_cxml)
r.add_t(text)
assert r.xml == expected_xml
def it_can_assemble_the_text_in_the_run(self):
cxml = 'w:r/(w:br,w:cr,w:noBreakHyphen,w:ptab,w:t"foobar",w:tab)'
r = cast(CT_R, element(cxml))
assert r.text == "\n\n-\tfoobar\t"
| DescribeCT_R |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 70663,
"end": 71430
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, domain_id: str, secret_key: str, start_date: str):
"""Airbyte Source for Shortio.
Documentation can be found at https://developers.short.io/reference
Args:
name (str): The name of the destination.
secret_key (str): Short.io Secret Key
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
"""
self.domain_id = check.str_param(domain_id, "domain_id")
self.secret_key = check.str_param(secret_key, "secret_key")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Shortio", name)
| ShortioSource |
python | django-guardian__django-guardian | guardian/testapp/migrations/0002_logentrywithgroup.py | {
"start": 124,
"end": 1134
} | class ____(migrations.Migration):
dependencies = [
("auth", "0001_initial"),
("admin", "0001_initial"),
("testapp", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="LogEntryWithGroup",
fields=[
(
"logentry_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="admin.LogEntry",
),
),
(
"group",
models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="auth.Group"
),
),
],
bases=("admin.logentry",),
),
]
| Migration |
python | pytorch__pytorch | test/distributed/checkpoint/fsdp/test_fsdp_dsd.py | {
"start": 1151,
"end": 22506
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.accelerator.device_count())
def _get_base_model(self, mlp_dim: int = 2):
base_model = nn.Sequential(
MLP(mlp_dim),
nn.Sequential(MLP(mlp_dim), nn.Linear(mlp_dim, mlp_dim)),
MLP(mlp_dim),
)
return base_model
@skip_if_lt_x_gpu(2)
def test_1d_fsdp_get_model_state_dict(self):
self.run_subtests(
{"mlp_dim": [2, 3, 4, 5]},
self._test_1d_fsdp_get_model_state_dict,
)
def _test_1d_fsdp_get_model_state_dict(self, mlp_dim: int):
"""
Test model.state_dict() and distributed_state_dict parity.
"""
base_model = self._get_base_model(mlp_dim)
# Default is `reshard_after_forward=True`
model1 = copy.deepcopy(base_model)
for module in model1:
fully_shard(module)
fully_shard(model1)
# osd: original state dict, dsd: distributed state dict
osd = model1.state_dict()
dsd = get_model_state_dict(model1)
self.assertEqual(osd, dsd)
# Check `reshard_after_forward=False` after a forward
model2 = copy.deepcopy(base_model)
for module in model2:
fully_shard(module, reshard_after_forward=False)
fully_shard(model2, reshard_after_forward=False)
inp = torch.randn((2, mlp_dim), device=device_type)
model2(inp) # parameters are not resharded after this forward
# Check that state dict hooks reshard
osd_2 = model2.state_dict()
dsd_2 = get_model_state_dict(model2)
self.assertEqual(osd_2, dsd_2)
@skip_if_lt_x_gpu(2)
def test_1d_fsdp_cpu_offload_full_model_state_dict(self):
"""
Test full_state_dict and cpu_offload works for FSDP2 state_dict.
"""
orig_model = self._get_base_model()
fsdp_model = copy.deepcopy(orig_model)
for module in fsdp_model:
fully_shard(module)
fully_shard(fsdp_model)
osd = orig_model.state_dict()
dsd = get_model_state_dict(
fsdp_model, options=StateDictOptions(full_state_dict=True, cpu_offload=True)
)
cpu_device = torch.device("cpu")
def is_cpu(v):
if isinstance(v, DTensor):
return v.device == torch.device("cpu")
else:
return v.device == cpu_device
if self.rank == 0:
self.assertEqual(osd, dsd)
self.assertTrue(tree_all_only((torch.Tensor, DTensor), is_cpu, osd))
else:
self.assertEqual(dsd, {})
@skip_if_lt_x_gpu(2)
def test_save_with_fsdp1_and_load_with_fsdp2(self):
self.run_subtests(
{
"state_dict_type": [
StateDictType.FULL_STATE_DICT,
StateDictType.SHARDED_STATE_DICT,
]
},
self._test_save_with_fsdp1_and_load_with_fsdp2,
)
@skip_if_lt_x_gpu(2)
@with_temp_dir
def _test_save_with_fsdp1_and_load_with_fsdp2(self, state_dict_type: StateDictType):
"""
Test that we can save a model with FSDP1 and load it with FSDP2.
"""
# Save state dict with model wrapped with FSDP1
fsdp1_model = FSDP(
self._get_base_model().to(device_type),
use_orig_params=True,
auto_wrap_policy=always_wrap_policy,
)
fsdp1_optim = torch.optim.AdamW(fsdp1_model.parameters(), lr=0.1)
fsdp1_model(torch.randn((2,), device=self.rank)).sum().backward()
fsdp1_optim.step()
with FSDP.state_dict_type(fsdp1_model, state_dict_type):
fsdp1_state_dict = {
"model": fsdp1_model.state_dict(),
"optim": FSDP.sharded_optim_state_dict(fsdp1_model, fsdp1_optim),
}
dcp.save(
fsdp1_state_dict,
checkpoint_id=self.temp_dir,
)
fsdp1_full_msd = get_model_state_dict(
fsdp1_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
fsdp1_full_osd = get_optimizer_state_dict(
fsdp1_model,
fsdp1_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Load state dict into model with FSDP2 applied
fsdp2_model = self._get_base_model()
for module in fsdp2_model:
fully_shard(module)
fully_shard(fsdp2_model)
fsdp2_optim = torch.optim.AdamW(fsdp2_model.parameters(), lr=0.1)
fsdp2_state_dict = {
"model": get_model_state_dict(fsdp2_model),
"optim": get_optimizer_state_dict(fsdp2_model, fsdp2_optim),
}
dcp.load(
fsdp2_state_dict,
checkpoint_id=self.temp_dir,
)
fsdp2_model.load_state_dict(fsdp2_state_dict["model"])
fsdp2_optim.load_state_dict(fsdp2_state_dict["optim"])
fsdp2_full_msd = get_model_state_dict(
fsdp2_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
fsdp2_full_osd = get_optimizer_state_dict(
fsdp2_model,
fsdp2_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Compare full state dict to make sure they are the same.
self.assertEqual(fsdp2_full_msd, fsdp1_full_msd)
self.assertEqual(fsdp1_full_osd, fsdp2_full_osd)
@skip_if_lt_x_gpu(4)
@with_temp_dir
def test_save_with_fsdp1_and_load_with_fsdp2_tp(self):
"""
Test that we can save a model with FSDP1 and load it with FSDP2 + TP on 2d mesh.
"""
def _get_base_model(mlp_dim: int = 2):
base_model = nn.Sequential(MLP(mlp_dim), MLP(mlp_dim), MLP(mlp_dim))
return base_model
# init device mesh
dp_size = 2
global_mesh = init_device_mesh(
device_type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
# Save state dict with original model
base_model = _get_base_model().to(device_type)
base_optim = torch.optim.AdamW(base_model.parameters(), lr=0.1)
# Save state dict with model wrapped with FSDP1
fsdp1_model = FSDP(
copy.deepcopy(base_model),
device_mesh=global_mesh,
use_orig_params=True,
auto_wrap_policy=always_wrap_policy,
)
fsdp1_optim = torch.optim.AdamW(fsdp1_model.parameters(), lr=0.1)
# one-step training to modify state dict
inp = torch.randn((2,), device=self.rank)
base_model(inp).sum().backward()
base_optim.step()
fsdp1_model(inp).sum().backward()
fsdp1_optim.step()
# obtain the full state dict
base_msd = get_model_state_dict(
base_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
base_osd = get_optimizer_state_dict(
base_model,
base_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# obtain the sharded state dict
fsdp1_msd = get_model_state_dict(
fsdp1_model,
options=StateDictOptions(full_state_dict=False),
)
fsdp1_osd = get_optimizer_state_dict(
fsdp1_model,
fsdp1_optim,
options=StateDictOptions(full_state_dict=False),
)
# save state dict to temp dir
source_state_dict = {
"model_full": base_msd,
"optim_full": base_osd,
"model_sharded": fsdp1_msd,
"optim_sharded": fsdp1_osd,
}
dcp.save(
source_state_dict,
checkpoint_id=self.temp_dir,
)
# FSDP + TP
fsdp2_tp_model = _get_base_model()
fsdp2_tp_model = parallelize_module(
fsdp2_tp_model,
device_mesh=tp_mesh,
parallelize_plan={
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
},
)
for module in fsdp2_tp_model:
fully_shard(module, mesh=dp_mesh)
fully_shard(fsdp2_tp_model, mesh=dp_mesh)
fsdp2_tp_optim = torch.optim.AdamW(fsdp2_tp_model.parameters(), lr=0.1)
# Load state dict into model with FSDP2 + TP applied
for src_state_dict_type in ["full", "sharded"]:
msd_name = f"model_{src_state_dict_type}"
osd_name = f"optim_{src_state_dict_type}"
fsdp2_tp_state_dict = {
msd_name: get_model_state_dict(fsdp2_tp_model),
osd_name: get_optimizer_state_dict(fsdp2_tp_model, fsdp2_tp_optim),
}
# load state dict from temp dir
dcp.load(
fsdp2_tp_state_dict,
checkpoint_id=self.temp_dir,
)
fsdp2_tp_model.load_state_dict(fsdp2_tp_state_dict[msd_name])
fsdp2_tp_optim.load_state_dict(fsdp2_tp_state_dict[osd_name])
fsdp2_tp_full_msd = get_model_state_dict(
fsdp2_tp_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
fsdp2_tp_full_osd = get_optimizer_state_dict(
fsdp2_tp_model,
fsdp2_tp_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Compare full state dict to make sure they are the same.
self.assertEqual(base_msd, fsdp2_tp_full_msd)
self.assertEqual(base_osd, fsdp2_tp_full_osd)
@skip_if_lt_x_gpu(4)
@with_temp_dir
def test_save_with_tp_and_load_with_fsdp2_tp(self):
"""
Test that we can save a model with TP and load it with FSDP2 + TP on 2d mesh.
"""
def _get_base_model(mlp_dim: int = 2):
base_model = nn.Sequential(MLP(mlp_dim), MLP(mlp_dim), MLP(mlp_dim))
return base_model
tp_parallelize_plan = {
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
}
# init device mesh
dp_size = 2
global_mesh_1d = init_device_mesh(
device_type, (self.world_size,), mesh_dim_names=("tp",)
)
global_mesh_2d = init_device_mesh(
device_type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh_2d["dp"], global_mesh_2d["tp"]
# Save state dict with original model
base_model = _get_base_model().to(device_type)
base_optim = torch.optim.AdamW(base_model.parameters(), lr=0.1)
# Save state dict with TP model
tp_model = copy.deepcopy(base_model)
tp_model = parallelize_module(
tp_model,
device_mesh=global_mesh_1d,
parallelize_plan=tp_parallelize_plan,
)
tp_model_optim = torch.optim.AdamW(tp_model.parameters(), lr=0.1)
# one-step training to modify state dict
inp = torch.randn((2,), device=self.rank)
base_model(inp).sum().backward()
base_optim.step()
tp_model(inp).sum().backward()
tp_model_optim.step()
# obtain the full state dict
base_msd = get_model_state_dict(
base_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
base_osd = get_optimizer_state_dict(
base_model,
base_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# obtain sharded state dict
tp_msd = get_model_state_dict(
tp_model,
options=StateDictOptions(full_state_dict=False),
)
tp_osd = get_optimizer_state_dict(
tp_model,
tp_model_optim,
options=StateDictOptions(full_state_dict=False),
)
# save state dict to temp dir
source_state_dict = {
"model_full": base_msd,
"optim_full": base_osd,
"model_sharded": tp_msd,
"optim_sharded": tp_osd,
}
dcp.save(
source_state_dict,
checkpoint_id=self.temp_dir,
)
# FSDP + TP
fsdp2_tp_model = _get_base_model()
fsdp2_tp_model = parallelize_module(
fsdp2_tp_model,
device_mesh=tp_mesh,
parallelize_plan=tp_parallelize_plan,
)
for module in fsdp2_tp_model:
fully_shard(module, mesh=dp_mesh)
fully_shard(fsdp2_tp_model, mesh=dp_mesh)
fsdp2_tp_optim = torch.optim.AdamW(fsdp2_tp_model.parameters(), lr=0.1)
# Load state dict into model with FSDP2 + TP applied
for src_state_dict_type in ["full", "sharded"]:
msd_name = f"model_{src_state_dict_type}"
osd_name = f"optim_{src_state_dict_type}"
fsdp2_tp_state_dict = {
msd_name: get_model_state_dict(fsdp2_tp_model),
osd_name: get_optimizer_state_dict(fsdp2_tp_model, fsdp2_tp_optim),
}
# load state dict from temp dir
dcp.load(
fsdp2_tp_state_dict,
checkpoint_id=self.temp_dir,
)
fsdp2_tp_model.load_state_dict(fsdp2_tp_state_dict[msd_name])
fsdp2_tp_optim.load_state_dict(fsdp2_tp_state_dict[osd_name])
fsdp2_tp_full_msd = get_model_state_dict(
fsdp2_tp_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
fsdp2_tp_full_osd = get_optimizer_state_dict(
fsdp2_tp_model,
fsdp2_tp_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Compare full state dict to make sure they are the same.
self.assertEqual(base_msd, fsdp2_tp_full_msd)
self.assertEqual(base_osd, fsdp2_tp_full_osd)
@skip_if_lt_x_gpu(4)
def test_save_with_fsdp2_tp_and_load_with_tp(self):
self.run_subtests(
{"allow_implicit_replication": [True, False]},
self._test_save_with_fsdp2_tp_and_load_with_tp,
)
@skip_if_lt_x_gpu(4)
@with_temp_dir
def _test_save_with_fsdp2_tp_and_load_with_tp(
self, allow_implicit_replication: bool
):
"""
Test that we can save a model with FSDP2 + TP on 2d mesh and load it with TP.
"""
mlp_dim = 5
def _get_base_model(mlp_dim):
# dim_multiplier=1 helps make it easier to hit corner cases in uneven sharding
# (e.g. in/out dim both=5 means unevenness is easier to hit depending on row/col sharding)
base_model = nn.Sequential(
MLP(mlp_dim, dim_multiplier=1),
MLP(mlp_dim, dim_multiplier=1),
MLP(mlp_dim, dim_multiplier=1),
)
return base_model
cm = (
implicit_replication()
if allow_implicit_replication
else contextlib.nullcontext()
)
# Must set 'use_local_output=False' in order to test uneven-sharding case
# see https://github.com/pytorch/pytorch/issues/150336
tp_parallelize_plan = {
"0.in_proj": ColwiseParallel(use_local_output=False),
"0.out_proj": RowwiseParallel(use_local_output=False),
"1.in_proj": ColwiseParallel(use_local_output=False),
"1.out_proj": RowwiseParallel(use_local_output=False),
"2.in_proj": ColwiseParallel(use_local_output=False),
"2.out_proj": RowwiseParallel(use_local_output=False),
}
if allow_implicit_replication:
# intentionally pop the plans for some tp layers so that the model is not fully tensor parallelized
tp_parallelize_plan.pop("0.in_proj")
tp_parallelize_plan.pop("0.out_proj")
with cm:
# init device mesh
dp_size = 2
global_mesh_1d = init_device_mesh(
device_type, (self.world_size,), mesh_dim_names=("tp",)
)
global_mesh_2d = init_device_mesh(
device_type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh_2d["dp"], global_mesh_2d["tp"]
for save_full_state_dict in [True, False]:
# Save state dict with original model
base_model = _get_base_model(mlp_dim).to(device_type)
base_optim = torch.optim.AdamW(base_model.parameters(), lr=0.1)
# Save state dict with FSDP2 + TP model
fsdp2_tp_model = copy.deepcopy(base_model)
fsdp2_tp_model = parallelize_module(
fsdp2_tp_model,
device_mesh=tp_mesh,
parallelize_plan=tp_parallelize_plan,
)
for module in fsdp2_tp_model:
fully_shard(module, mesh=dp_mesh)
fully_shard(fsdp2_tp_model, mesh=dp_mesh)
fsdp2_tp_optim = torch.optim.AdamW(fsdp2_tp_model.parameters(), lr=0.1)
# one-step training to modify state dict
inp = torch.randn((mlp_dim,), device=self.rank)
base_model(inp).sum().backward()
base_optim.step()
fsdp2_tp_model(inp).sum().backward()
fsdp2_tp_optim.step()
# obtain the unsharded state dict
base_msd = get_model_state_dict(
base_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
base_osd = get_optimizer_state_dict(
base_model,
base_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# obtain FSDP2 + TP state dict
fsdp2_tp_msd = get_model_state_dict(
fsdp2_tp_model,
options=StateDictOptions(full_state_dict=save_full_state_dict),
)
fsdp2_tp_osd = get_optimizer_state_dict(
fsdp2_tp_model,
fsdp2_tp_optim,
options=StateDictOptions(full_state_dict=save_full_state_dict),
)
fsdp2_tp_state_dict = {"model": fsdp2_tp_msd, "optim": fsdp2_tp_osd}
dcp.save(fsdp2_tp_state_dict, checkpoint_id=self.temp_dir)
fsdp2_tp_full_msd = get_model_state_dict(
fsdp2_tp_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
fsdp2_tp_full_osd = get_optimizer_state_dict(
fsdp2_tp_model,
fsdp2_tp_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Load state dict into model with TP applied
tp_model = _get_base_model(mlp_dim)
tp_model = parallelize_module(
tp_model,
device_mesh=global_mesh_1d,
parallelize_plan=tp_parallelize_plan,
)
tp_optim = torch.optim.AdamW(tp_model.parameters(), lr=0.1)
tp_state_dict = {
"model": get_model_state_dict(tp_model),
"optim": get_optimizer_state_dict(tp_model, tp_optim),
}
dcp.load(tp_state_dict, checkpoint_id=self.temp_dir)
tp_model.load_state_dict(tp_state_dict["model"])
tp_optim.load_state_dict(tp_state_dict["optim"])
tp_full_msd = get_model_state_dict(
tp_model,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
tp_full_osd = get_optimizer_state_dict(
tp_model,
tp_optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
# Compare full state dict to make sure they are the same.
self.assertEqual(base_msd, tp_full_msd)
self.assertEqual(base_osd, tp_full_osd)
self.assertEqual(fsdp2_tp_full_msd, tp_full_msd)
self.assertEqual(fsdp2_tp_full_osd, tp_full_osd)
if __name__ == "__main__":
run_tests()
| TestFullyShardWithDistributedStateDict |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 198537,
"end": 199070
} | class ____:
async def test_async_flow_inside_mapped_async_tasks(self):
"""this is a regression test for https://github.com/PrefectHQ/prefect/issues/17710"""
@flow
async def identity(item: int):
return item
@task
async def async_task(item: int):
return await identity(item)
@flow
async def async_flow():
return async_task.map([1, 2, 3]).result()
result = await async_flow()
assert result == [1, 2, 3]
| TestDeeplyNestedFlows |
python | sympy__sympy | sympy/physics/mechanics/actuator.py | {
"start": 1872,
"end": 9832
} | class ____(ActuatorBase):
"""Force-producing actuator.
Explanation
===========
A ``ForceActuator`` is an actuator that produces a (expansile) force along
its length.
A force actuator uses a pathway instance to determine the direction and
number of forces that it applies to a system. Consider the simplest case
where a ``LinearPathway`` instance is used. This pathway is made up of two
points that can move relative to each other, and results in a pair of equal
and opposite forces acting on the endpoints. If the positive time-varying
Euclidean distance between the two points is defined, then the "extension
velocity" is the time derivative of this distance. The extension velocity
is positive when the two points are moving away from each other and
negative when moving closer to each other. The direction for the force
acting on either point is determined by constructing a unit vector directed
from the other point to this point. This establishes a sign convention such
that a positive force magnitude tends to push the points apart, this is the
meaning of "expansile" in this context. The following diagram shows the
positive force sense and the distance between the points::
P Q
o<--- F --->o
| |
|<--l(t)--->|
Examples
========
To construct an actuator, an expression (or symbol) must be supplied to
represent the force it can produce, alongside a pathway specifying its line
of action. Let's also create a global reference frame and spatially fix one
of the points in it while setting the other to be positioned such that it
can freely move in the frame's x direction specified by the coordinate
``q``.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (ForceActuator, LinearPathway,
... Point, ReferenceFrame)
>>> from sympy.physics.vector import dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> force = symbols('F')
>>> pA, pB = Point('pA'), Point('pB')
>>> pA.set_vel(N, 0)
>>> pB.set_pos(pA, q*N.x)
>>> pB.pos_from(pA)
q(t)*N.x
>>> linear_pathway = LinearPathway(pA, pB)
>>> actuator = ForceActuator(force, linear_pathway)
>>> actuator
ForceActuator(F, LinearPathway(pA, pB))
Parameters
==========
force : Expr
The scalar expression defining the (expansile) force that the actuator
produces.
pathway : PathwayBase
The pathway that the actuator follows. This must be an instance of a
concrete subclass of ``PathwayBase``, e.g. ``LinearPathway``.
"""
def __init__(self, force, pathway):
"""Initializer for ``ForceActuator``.
Parameters
==========
force : Expr
The scalar expression defining the (expansile) force that the
actuator produces.
pathway : PathwayBase
The pathway that the actuator follows. This must be an instance of
a concrete subclass of ``PathwayBase``, e.g. ``LinearPathway``.
"""
self.force = force
self.pathway = pathway
@property
def force(self):
"""The magnitude of the force produced by the actuator."""
return self._force
@force.setter
def force(self, force):
if hasattr(self, '_force'):
msg = (
f'Can\'t set attribute `force` to {repr(force)} as it is '
f'immutable.'
)
raise AttributeError(msg)
self._force = sympify(force, strict=True)
@property
def pathway(self):
"""The ``Pathway`` defining the actuator's line of action."""
return self._pathway
@pathway.setter
def pathway(self, pathway):
if hasattr(self, '_pathway'):
msg = (
f'Can\'t set attribute `pathway` to {repr(pathway)} as it is '
f'immutable.'
)
raise AttributeError(msg)
if not isinstance(pathway, PathwayBase):
msg = (
f'Value {repr(pathway)} passed to `pathway` was of type '
f'{type(pathway)}, must be {PathwayBase}.'
)
raise TypeError(msg)
self._pathway = pathway
def to_loads(self):
"""Loads required by the equations of motion method classes.
Explanation
===========
``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
passed to the ``loads`` parameters of its ``kanes_equations`` method
when constructing the equations of motion. This method acts as a
utility to produce the correctly-structred pairs of points and vectors
required so that these can be easily concatenated with other items in
the list of loads and passed to ``KanesMethod.kanes_equations``. These
loads are also in the correct form to also be passed to the other
equations of motion method classes, e.g. ``LagrangesMethod``.
Examples
========
The below example shows how to generate the loads produced by a force
actuator that follows a linear pathway. In this example we'll assume
that the force actuator is being used to model a simple linear spring.
First, create a linear pathway between two points separated by the
coordinate ``q`` in the ``x`` direction of the global frame ``N``.
>>> from sympy.physics.mechanics import (LinearPathway, Point,
... ReferenceFrame)
>>> from sympy.physics.vector import dynamicsymbols
>>> q = dynamicsymbols('q')
>>> N = ReferenceFrame('N')
>>> pA, pB = Point('pA'), Point('pB')
>>> pB.set_pos(pA, q*N.x)
>>> pathway = LinearPathway(pA, pB)
Now create a symbol ``k`` to describe the spring's stiffness and
instantiate a force actuator that produces a (contractile) force
proportional to both the spring's stiffness and the pathway's length.
Note that actuator classes use the sign convention that expansile
forces are positive, so for a spring to produce a contractile force the
spring force needs to be calculated as the negative for the stiffness
multiplied by the length.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import ForceActuator
>>> stiffness = symbols('k')
>>> spring_force = -stiffness*pathway.length
>>> spring = ForceActuator(spring_force, pathway)
The forces produced by the spring can be generated in the list of loads
form that ``KanesMethod`` (and other equations of motion methods)
requires by calling the ``to_loads`` method.
>>> spring.to_loads()
[(pA, k*q(t)*N.x), (pB, - k*q(t)*N.x)]
A simple linear damper can be modeled in a similar way. Create another
symbol ``c`` to describe the dampers damping coefficient. This time
instantiate a force actuator that produces a force proportional to both
the damper's damping coefficient and the pathway's extension velocity.
Note that the damping force is negative as it acts in the opposite
direction to which the damper is changing in length.
>>> damping_coefficient = symbols('c')
>>> damping_force = -damping_coefficient*pathway.extension_velocity
>>> damper = ForceActuator(damping_force, pathway)
Again, the forces produces by the damper can be generated by calling
the ``to_loads`` method.
>>> damper.to_loads()
[(pA, c*Derivative(q(t), t)*N.x), (pB, - c*Derivative(q(t), t)*N.x)]
"""
return self.pathway.to_loads(self.force)
def __repr__(self):
"""Representation of a ``ForceActuator``."""
return f'{self.__class__.__name__}({self.force}, {self.pathway})'
| ForceActuator |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0010_add_time_fields.py | {
"start": 156,
"end": 1519
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("core", "0009_historicaluserprofile"),
]
operations = [
migrations.AlterModelOptions(
name="userprofile",
options={"get_latest_by": "modified"},
),
migrations.AddField(
model_name="historicaluserprofile",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, null=True, verbose_name="created"
),
),
migrations.AddField(
model_name="historicaluserprofile",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, null=True, verbose_name="modified"
),
),
migrations.AddField(
model_name="userprofile",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, null=True, verbose_name="created"
),
),
migrations.AddField(
model_name="userprofile",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, null=True, verbose_name="modified"
),
),
]
| Migration |
python | google__python-fire | fire/console/platforms.py | {
"start": 12551,
"end": 16487
} | class ____(object):
"""Class to validate the Python version we are using.
The Cloud SDK officially supports Python 2.7.
However, many commands do work with Python 2.6, so we don't error out when
users are using this (we consider it sometimes "compatible" but not
"supported").
"""
# See class docstring for descriptions of what these mean
MIN_REQUIRED_PY2_VERSION = (2, 6)
MIN_SUPPORTED_PY2_VERSION = (2, 7)
MIN_SUPPORTED_PY3_VERSION = (3, 4)
ENV_VAR_MESSAGE = """\
If you have a compatible Python interpreter installed, you can use it by setting
the CLOUDSDK_PYTHON environment variable to point to it.
"""
def __init__(self, version=None):
if version:
self.version = version
elif hasattr(sys, 'version_info'):
self.version = sys.version_info[:2]
else:
self.version = None
def SupportedVersionMessage(self, allow_py3):
if allow_py3:
return 'Please use Python version {0}.{1}.x or {2}.{3} and up.'.format(
PythonVersion.MIN_SUPPORTED_PY2_VERSION[0],
PythonVersion.MIN_SUPPORTED_PY2_VERSION[1],
PythonVersion.MIN_SUPPORTED_PY3_VERSION[0],
PythonVersion.MIN_SUPPORTED_PY3_VERSION[1])
else:
return 'Please use Python version {0}.{1}.x.'.format(
PythonVersion.MIN_SUPPORTED_PY2_VERSION[0],
PythonVersion.MIN_SUPPORTED_PY2_VERSION[1])
def IsCompatible(self, allow_py3=False, raise_exception=False):
"""Ensure that the Python version we are using is compatible.
This will print an error message if not compatible.
Compatible versions are 2.6 and 2.7 and > 3.4 if allow_py3 is True.
We don't guarantee support for 2.6 so we want to warn about it.
Args:
allow_py3: bool, True if we should allow a Python 3 interpreter to run
gcloud. If False, this returns an error for Python 3.
raise_exception: bool, True to raise an exception rather than printing
the error and exiting.
Raises:
Error: If not compatible and raise_exception is True.
Returns:
bool, True if the version is valid, False otherwise.
"""
error = None
if not self.version:
# We don't know the version, not a good sign.
error = ('ERROR: Your current version of Python is not compatible with '
'the Google Cloud SDK. {0}\n'
.format(self.SupportedVersionMessage(allow_py3)))
else:
if self.version[0] < 3:
# Python 2 Mode
if self.version < PythonVersion.MIN_REQUIRED_PY2_VERSION:
error = ('ERROR: Python {0}.{1} is not compatible with the Google '
'Cloud SDK. {2}\n'
.format(self.version[0], self.version[1],
self.SupportedVersionMessage(allow_py3)))
else:
# Python 3 Mode
if not allow_py3:
error = ('ERROR: Python 3 and later is not compatible with the '
'Google Cloud SDK. {0}\n'
.format(self.SupportedVersionMessage(allow_py3)))
elif self.version < PythonVersion.MIN_SUPPORTED_PY3_VERSION:
error = ('ERROR: Python {0}.{1} is not compatible with the Google '
'Cloud SDK. {2}\n'
.format(self.version[0], self.version[1],
self.SupportedVersionMessage(allow_py3)))
if error:
if raise_exception:
raise Error(error)
sys.stderr.write(error)
sys.stderr.write(PythonVersion.ENV_VAR_MESSAGE)
return False
# Warn that 2.6 might not work.
if (self.version >= self.MIN_REQUIRED_PY2_VERSION and
self.version < self.MIN_SUPPORTED_PY2_VERSION):
sys.stderr.write("""\
WARNING: Python 2.6.x is no longer officially supported by the Google Cloud SDK
and may not function correctly. {0}
{1}""".format(self.SupportedVersionMessage(allow_py3),
PythonVersion.ENV_VAR_MESSAGE))
return True
| PythonVersion |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/cli/lazy_group.py | {
"start": 213,
"end": 1805
} | class ____(click.Group):
"""
A click Group that can lazily load subcommands.
"""
def __init__(self, *args: Any, lazy_subcommands: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# lazy_subcommands is a map of the form:
#
# {command-name} -> {module-name}.{command-object-name}
#
self.lazy_subcommands = lazy_subcommands or {}
def list_commands(self, ctx: click.Context) -> List[str]:
base = super().list_commands(ctx)
lazy = sorted(self.lazy_subcommands.keys())
return base + lazy
def get_command(self, ctx: click.Context, cmd_name: str) -> Optional[click.Command]:
if cmd_name in self.lazy_subcommands:
return self._lazy_load(cmd_name)
return super().get_command(ctx, cmd_name)
def _lazy_load(self, cmd_name: str) -> click.Command:
# lazily loading a command, first get the module name and attribute name
import_path = self.lazy_subcommands[cmd_name]
modname, cmd_object_name = import_path.rsplit(".", 1)
# do the import
mod = importlib.import_module(modname)
# get the Command object from that module
cmd_object = getattr(mod, cmd_object_name)
# check the result to make debugging easier
if not isinstance(cmd_object, click.Command):
print(f"{cmd_object} is of instance {type(cmd_object)}")
raise ValueError(f"Lazy loading of {import_path} failed by returning " "a non-command object")
return cmd_object
| LazyGroup |
python | Textualize__textual | src/textual/demo/home.py | {
"start": 4505,
"end": 7009
} | class ____(Vertical):
"""Widget to get and display GitHub star count."""
DEFAULT_CSS = """
StarCount {
dock: top;
height: 6;
border-bottom: hkey $background;
border-top: hkey $background;
layout: horizontal;
background: $boost;
padding: 0 1;
color: $text-warning;
#stars { align: center top; }
#forks { align: right top; }
Label { text-style: bold; color: $foreground; }
LoadingIndicator { background: transparent !important; }
Digits { width: auto; margin-right: 1; }
Label { margin-right: 1; }
align: center top;
&>Horizontal { max-width: 100;}
}
"""
stars = reactive(25251, recompose=True)
forks = reactive(776, recompose=True)
@work
async def get_stars(self):
"""Worker to get stars from GitHub API."""
if not HTTPX_AVAILABLE:
self.notify(
"Install httpx to update stars from the GitHub API.\n\n$ [b]pip install httpx[/b]",
title="GitHub Stars",
)
return
self.loading = True
try:
await asyncio.sleep(1) # Time to admire the loading indicator
async with httpx.AsyncClient() as client:
repository_json = (
await client.get("https://api.github.com/repos/textualize/textual")
).json()
self.stars = repository_json["stargazers_count"]
self.forks = repository_json["forks"]
except Exception:
self.notify(
"Unable to update star count (maybe rate-limited)",
title="GitHub stars",
severity="error",
)
self.loading = False
def compose(self) -> ComposeResult:
with Horizontal():
with Vertical(id="version"):
yield Label("Version")
yield Digits(version("textual"))
with Vertical(id="stars"):
yield Label("GitHub ★")
stars = f"{self.stars / 1000:.1f}K"
yield Digits(stars).with_tooltip(f"{self.stars} GitHub stars")
with Vertical(id="forks"):
yield Label("Forks")
yield Digits(str(self.forks)).with_tooltip(f"{self.forks} Forks")
def on_mount(self) -> None:
self.tooltip = "Click to refresh"
self.get_stars()
def on_click(self) -> None:
self.get_stars()
| StarCount |
python | pytorch__pytorch | test/onnx/model_defs/srresnet.py | {
"start": 975,
"end": 1440
} | class ____(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.upscaling_conv = nn.Conv2d(
n_filters, 4 * n_filters, kernel_size=3, padding=1
)
self.upscaling_shuffler = nn.PixelShuffle(2)
self.upscaling = nn.PReLU(n_filters)
_initialize_orthogonal(self.upscaling_conv)
def forward(self, x):
return self.upscaling(self.upscaling_shuffler(self.upscaling_conv(x)))
| UpscaleBlock |
python | pytorch__pytorch | test/run_test.py | {
"start": 4149,
"end": 65760
} | class ____(list):
def __init__(self, *args, **kwargs):
super().__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
FSDP_TEST = [test for test in TESTS if test.startswith("distributed/fsdp")]
WINDOWS_BLOCKLIST = [
"distributed/nn/jit/test_instantiator",
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
"distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"distributed/pipeline/sync/skip/test_api",
"distributed/pipeline/sync/skip/test_gpipe",
"distributed/pipeline/sync/skip/test_inspect_skip_layout",
"distributed/pipeline/sync/skip/test_leak",
"distributed/pipeline/sync/skip/test_portal",
"distributed/pipeline/sync/skip/test_stash_pop",
"distributed/pipeline/sync/skip/test_tracker",
"distributed/pipeline/sync/skip/test_verify_skippables",
"distributed/pipeline/sync/test_balance",
"distributed/pipeline/sync/test_bugs",
"distributed/pipeline/sync/test_checkpoint",
"distributed/pipeline/sync/test_copy",
"distributed/pipeline/sync/test_deferred_batch_norm",
"distributed/pipeline/sync/test_dependency",
"distributed/pipeline/sync/test_inplace",
"distributed/pipeline/sync/test_microbatch",
"distributed/pipeline/sync/test_phony",
"distributed/pipeline/sync/test_pipe",
"distributed/pipeline/sync/test_pipeline",
"distributed/pipeline/sync/test_stream",
"distributed/pipeline/sync/test_transparency",
"distributed/pipeline/sync/test_worker",
"distributed/elastic/agent/server/test/api_test",
"distributed/elastic/multiprocessing/api_test",
"distributed/_shard/checkpoint/test_checkpoint"
"distributed/_shard/checkpoint/test_file_system_checkpoint"
"distributed/_shard/sharding_spec/test_sharding_spec",
"distributed/_shard/sharding_plan/test_sharding_plan",
"distributed/_shard/sharded_tensor/test_sharded_tensor",
"distributed/_shard/sharded_tensor/test_sharded_tensor_reshard",
"distributed/_shard/sharded_tensor/ops/test_embedding",
"distributed/_shard/sharded_tensor/ops/test_embedding_bag",
"distributed/_shard/sharded_tensor/ops/test_binary_cmp",
"distributed/_shard/sharded_tensor/ops/test_init",
"distributed/_shard/sharded_optim/test_sharded_optim",
] + FSDP_TEST
ROCM_BLOCKLIST = [
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
"distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"inductor/test_max_autotune", # taking excessive time, many tests >30 min
"test_determination",
"test_jit_legacy",
"test_cuda_nvml_based_avail",
"test_jit_cuda_fuser",
"test_openreg",
]
S390X_BLOCKLIST = [
# these tests fail due to various reasons
"dynamo/test_misc",
"inductor/test_cpu_repro",
"inductor/test_cpu_select_algorithm",
"inductor/test_torchinductor_codegen_dynamic_shapes",
"lazy/test_meta_kernel",
"onnx/test_utility_funs",
"profiler/test_profiler",
"test_jit",
"dynamo/test_utils",
"test_nn",
# these tests run long and fail in addition to that
"dynamo/test_dynamic_shapes",
"test_quantization",
"inductor/test_torchinductor",
"inductor/test_torchinductor_dynamic_shapes",
"inductor/test_torchinductor_opinfo",
# these tests fail when cuda is not available
"inductor/test_aot_inductor",
"inductor/test_best_config",
"inductor/test_cudacodecache",
"inductor/test_inductor_utils",
"inductor/test_inplacing_pass",
"inductor/test_kernel_benchmark",
"inductor/test_max_autotune",
"inductor/test_move_constructors_to_gpu",
"inductor/test_multi_kernel",
"inductor/test_pattern_matcher",
"inductor/test_perf",
"inductor/test_select_algorithm",
"inductor/test_snode_runtime",
"inductor/test_triton_wrapper",
# these tests fail when mkldnn is not available
"inductor/test_custom_post_grad_passes",
"inductor/test_mkldnn_pattern_matcher",
"test_metal",
# lacks quantization support
"onnx/test_models_quantized_onnxruntime",
"onnx/test_pytorch_onnx_onnxruntime",
# sysctl -n hw.memsize is not available
"test_mps",
# https://github.com/pytorch/pytorch/issues/102078
"test_decomp",
# https://github.com/pytorch/pytorch/issues/146698
"test_model_exports_to_core_aten",
# runs very long, skip for now
"inductor/test_layout_optim",
"test_fx",
# some false errors
"doctests",
# new failures to investigate and fix
"test_tensorboard",
# onnx + protobuf failure, see
# https://github.com/protocolbuffers/protobuf/issues/22104
"dynamo/test_backends",
"dynamo/test_modules",
"inductor/test_config",
"test_public_bindings",
"test_testing",
# depend on z3-solver
"fx/test_z3_gradual_types",
"test_proxy_tensor",
"test_openreg",
]
XPU_BLOCKLIST = [
"test_autograd",
"profiler/test_memory_profiler",
"test_openreg",
]
XPU_TEST = [
"test_xpu",
]
# The tests inside these files should never be run in parallel with each other
RUN_PARALLEL_BLOCKLIST = [
"test_extension_utils",
"test_cpp_extensions_jit",
"test_cpp_extensions_stream_and_event",
"test_cpp_extensions_mtia_backend",
"test_jit_disabled",
"test_mobile_optimizer",
"test_multiprocessing",
"test_multiprocessing_spawn",
"test_namedtuple_return_api",
"test_openreg",
"test_overrides",
"test_show_pickle",
"test_tensorexpr",
"test_cuda_primary_ctx",
"test_cuda_trace",
"inductor/test_benchmark_fusion",
"test_cuda_nvml_based_avail",
# temporarily sets a global config
"test_autograd_fallback",
"inductor/test_compiler_bisector",
"test_privateuseone_python_backend",
] + FSDP_TEST
# Test files that should always be run serially with other test files,
# but it's okay if the tests inside them are run in parallel with each other.
CI_SERIAL_LIST = [
"test_nn",
"test_fake_tensor",
"test_cpp_api_parity",
"test_reductions",
"test_fx_backends",
"test_cpp_extensions_jit",
"test_torch",
"test_tensor_creation_ops",
"test_dispatch",
"test_python_dispatch", # torch.library creation and deletion must be serialized
"test_spectral_ops", # Cause CUDA illegal memory access https://github.com/pytorch/pytorch/issues/88916
"nn/test_pooling",
"nn/test_convolution", # Doesn't respect set_per_process_memory_fraction, results in OOM for other tests in slow gradcheck
"distributions/test_distributions",
"test_fx", # gets SIGKILL
"functorch/test_memory_efficient_fusion", # Cause CUDA OOM on ROCm
"test_utils", # OOM
"test_sort_and_select", # OOM
"test_backward_compatible_arguments", # OOM
"test_autocast", # OOM
"test_native_mha", # OOM
"test_module_hooks", # OOM
"inductor/test_max_autotune",
"inductor/test_cutlass_backend", # slow due to many nvcc compilation steps,
"inductor/test_flex_attention", # OOM
]
# A subset of onnx tests that cannot run in parallel due to high memory usage.
ONNX_SERIAL_LIST = [
"onnx/test_models",
"onnx/test_models_quantized_onnxruntime",
"onnx/test_models_onnxruntime",
"onnx/test_custom_ops",
"onnx/test_utility_funs",
]
# A subset of our TEST list that validates PyTorch's ops, modules, and autograd function as expected
CORE_TEST_LIST = [
"test_autograd",
"test_autograd_fallback",
"test_modules",
"test_nn",
"test_ops",
"test_ops_gradients",
"test_ops_fwd_gradients",
"test_ops_jit",
"test_torch",
]
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
num_gpus = torch.cuda.device_count()
DISTRIBUTED_TESTS_CONFIG["test"] = {"WORLD_SIZE": "1"}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG["mpi"] = {
"WORLD_SIZE": "3",
}
if dist.is_nccl_available() and num_gpus > 0:
DISTRIBUTED_TESTS_CONFIG["nccl"] = {
"WORLD_SIZE": f"{num_gpus}",
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG["gloo"] = {
# TODO: retire testing gloo with CUDA
"WORLD_SIZE": f"{num_gpus if num_gpus > 0 else 3}",
}
del num_gpus
# Test with UCC backend is deprecated.
# See https://github.com/pytorch/pytorch/pull/137161
# if dist.is_ucc_available():
# DISTRIBUTED_TESTS_CONFIG["ucc"] = {
# "WORLD_SIZE": f"{torch.cuda.device_count()}",
# "UCX_TLS": "tcp,cuda",
# "UCC_TLS": "nccl,ucp,cuda",
# "UCC_TL_UCP_TUNE": "cuda:0", # don't use UCP TL on CUDA as it is not well supported
# "UCC_EC_CUDA_USE_COOPERATIVE_LAUNCH": "n", # CI nodes (M60) fail if it is on
# }
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {
getattr(signal, n): n for n in dir(signal) if n.startswith("SIG") and "_" not in n
}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
"test_jit_profiling",
"test_jit_legacy",
"test_jit_fuser_legacy",
]
INDUCTOR_TESTS = [test for test in TESTS if test.startswith(INDUCTOR_TEST_PREFIX)]
DISTRIBUTED_TESTS = [test for test in TESTS if test.startswith(DISTRIBUTED_TEST_PREFIX)]
TORCH_EXPORT_TESTS = [test for test in TESTS if test.startswith("export")]
AOT_DISPATCH_TESTS = [
test for test in TESTS if test.startswith("functorch/test_aotdispatch")
]
FUNCTORCH_TESTS = [test for test in TESTS if test.startswith("functorch")]
DYNAMO_CORE_TESTS = [test for test in TESTS if test.startswith("dynamo")]
ONNX_TESTS = [test for test in TESTS if test.startswith("onnx")]
QUANTIZATION_TESTS = [test for test in TESTS if test.startswith("test_quantization")]
def _is_cpp_test(test):
# Note: tests underneath cpp_extensions are different from other cpp tests
# in that they utilize the usual python test infrastructure.
return test.startswith(CPP_TEST_PREFIX) and not test.startswith("cpp_extensions")
CPP_TESTS = [test for test in TESTS if _is_cpp_test(test)]
TESTS_REQUIRING_LAPACK = [
"distributions/test_constraints",
"distributions/test_distributions",
]
# These are just the slowest ones, this isn't an exhaustive list.
TESTS_NOT_USING_GRADCHECK = [
# Note that you should use skipIfSlowGradcheckEnv if you do not wish to
# skip all the tests in that file, e.g. test_mps
"doctests",
"test_meta",
"test_hub",
"test_fx",
"test_decomp",
"test_cpp_extensions_jit",
"test_jit",
"test_matmul_cuda",
"test_ops",
"test_ops_jit",
"dynamo/test_recompile_ux",
"inductor/test_compiled_optimizers",
"inductor/test_cutlass_backend",
"inductor/test_max_autotune",
"inductor/test_select_algorithm",
"inductor/test_smoke",
"test_quantization",
]
def print_to_stderr(message):
print(message, file=sys.stderr)
def get_executable_command(options, disable_coverage=False, is_cpp_test=False):
if options.coverage and not disable_coverage:
if not is_cpp_test:
executable = ["coverage", "run", "--parallel-mode", "--source=torch"]
else:
# TODO: C++ with coverage is not yet supported
executable = []
else:
if not is_cpp_test:
executable = [sys.executable, "-bb"]
else:
executable = ["pytest"]
return executable
def run_test(
    test_module: ShardedTest,
    test_directory,
    options,
    launcher_cmd=None,
    extra_unittest_args=None,
    env=None,
    print_log=True,
) -> int:
    """Execute a single (sharded) test module and return its exit code.

    Builds the launcher command (python/pytest/coverage plus unittest or
    pytest flags), applies CI-specific behavior (slow/disabled-test imports,
    log piping, timeouts), then either runs the command once or delegates to
    ``run_test_retries`` for per-test retrying.

    Args:
        test_module: the sharded test to run.
        test_directory: directory the test command is executed from.
        options: parsed command-line options (see ``parse_args``).
        launcher_cmd: optional command prefix (e.g. mpiexec) prepended to
            the executable.
        extra_unittest_args: extra args appended to the unittest/pytest args.
        env: environment for the child process; defaults to a copy of
            ``os.environ``.
        print_log: when logs are piped to a file, whether to print that file
            after the run.

    Returns:
        The subprocess return code (0 on success; pytest's "no tests
        collected" code 5 is mapped to 0).
    """
    scribe_token = os.getenv("SCRIBE_GRAPHQL_ACCESS_TOKEN", "")
    if scribe_token:
        print_to_stderr("SCRIBE_GRAPHQL_ACCESS_TOKEN is set")
    else:
        print_to_stderr("SCRIBE_GRAPHQL_ACCESS_TOKEN is NOT set")
    env = env or os.environ.copy()
    maybe_set_hip_visible_devies()
    unittest_args = options.additional_args.copy()
    test_file = test_module.name
    stepcurrent_key = test_file
    is_distributed_test = test_file.startswith(DISTRIBUTED_TEST_PREFIX)
    is_cpp_test = _is_cpp_test(test_file)
    # NB: Rerun disabled tests depends on pytest-flakefinder and it doesn't work with
    # pytest-cpp atm. We also don't have support to disable C++ test yet, so it's ok
    # to just return successfully here
    if is_cpp_test and RERUN_DISABLED_TESTS:
        print_to_stderr(
            "Skipping C++ tests when running under RERUN_DISABLED_TESTS mode"
        )
        return 0
    # The random suffix makes the stepcurrent cache key unique per invocation.
    if is_cpp_test:
        stepcurrent_key = f"{test_file}_{os.urandom(8).hex()}"
    else:
        unittest_args.extend(
            [
                f"--shard-id={test_module.shard}",
                f"--num-shards={test_module.num_shards}",
            ]
        )
        stepcurrent_key = f"{test_file}_{test_module.shard}_{os.urandom(8).hex()}"
    if options.verbose:
        unittest_args.append(f"-{'v' * options.verbose}")  # in case of pytest
    if test_file in RUN_PARALLEL_BLOCKLIST:
        unittest_args = [
            arg for arg in unittest_args if not arg.startswith("--run-parallel")
        ]
    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        unittest_args.extend(extra_unittest_args)
    # If using pytest, replace -f with equivalent -x
    if options.pytest:
        unittest_args.extend(
            get_pytest_args(
                options,
                is_cpp_test=is_cpp_test,
                is_distributed_test=is_distributed_test,
            )
        )
        unittest_args.extend(test_module.get_pytest_args())
        replacement = {"-f": "-x", "-dist=loadfile": "--dist=loadfile"}
        unittest_args = [replacement.get(arg, arg) for arg in unittest_args]
    if options.showlocals:
        if options.pytest:
            unittest_args.extend(["--showlocals", "--tb=long", "--color=yes"])
        else:
            unittest_args.append("--locals")
    # NB: These features are not available for C++ tests, but there is little incentive
    # to implement it because we have never seen a flaky C++ test before.
    if IS_CI and not is_cpp_test:
        ci_args = ["--import-slow-tests", "--import-disabled-tests"]
        if RERUN_DISABLED_TESTS:
            ci_args.append("--rerun-disabled-tests")
        # use the downloaded test cases configuration, not supported in pytest
        unittest_args.extend(ci_args)
    if test_file in PYTEST_SKIP_RETRIES:
        if not options.pytest:
            raise RuntimeError(
                "A test running without pytest cannot skip retries using "
                "the PYTEST_SKIP_RETRIES set."
            )
        unittest_args = [arg for arg in unittest_args if "--reruns" not in arg]
    # Extra arguments are not supported with pytest
    executable = get_executable_command(options, is_cpp_test=is_cpp_test)
    if not executable:
        # If there is no eligible executable returning here, it means an unsupported
        # case such as coverage for C++ test. So just returning ok makes sense
        return 0
    if is_cpp_test:
        # C++ tests are not the regular test directory
        if CPP_TESTS_DIR:
            cpp_test = os.path.join(
                CPP_TESTS_DIR,
                test_file.replace(f"{CPP_TEST_PREFIX}/", ""),
            )
        else:
            cpp_test = os.path.join(
                Path(test_directory).parent,
                CPP_TEST_PATH,
                test_file.replace(f"{CPP_TEST_PREFIX}/", ""),
            )
        argv = [
            cpp_test if sys.platform != "win32" else cpp_test + ".exe"
        ] + unittest_args
    else:
        # Can't call `python -m unittest test_*` here because it doesn't run code
        # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
        argv = [test_file + ".py"] + unittest_args
    os.makedirs(REPO_ROOT / "test" / "test-reports", exist_ok=True)
    if options.pipe_logs:
        log_fd, log_path = tempfile.mkstemp(
            dir=REPO_ROOT / "test" / "test-reports",
            prefix=f"{sanitize_file_name(str(test_module))}_",
            suffix="_toprint.log",
        )
        os.close(log_fd)
    command = (launcher_cmd or []) + executable + argv
    should_retry = (
        "--subprocess" not in command
        and not RERUN_DISABLED_TESTS
        and not is_cpp_test
        and "-n" not in command
    )
    # Chained conditional expression; reads as: no timeout unless enabled,
    # then THRESHOLD*6 when IS_SLOW, else THRESHOLD*3 for retryable sharded
    # tests with known times or for C++ tests, else no timeout.
    timeout = (
        None
        if not options.enable_timeout
        else THRESHOLD * 6
        if IS_SLOW
        else THRESHOLD * 3
        if should_retry
        and isinstance(test_module, ShardedTest)
        and test_module.time is not None
        else THRESHOLD * 3
        if is_cpp_test
        else None
    )
    print_to_stderr(f"Executing {command} ... [{datetime.now()}]")
    with ExitStack() as stack:
        output = None
        if options.pipe_logs:
            output = stack.enter_context(open(log_path, "w"))
        if should_retry:
            ret_code, was_rerun = run_test_retries(
                command,
                test_directory,
                env,
                timeout,
                stepcurrent_key,
                output,
                options.continue_through_error,
                test_file,
                options,
            )
        else:
            command.extend([f"--sc={stepcurrent_key}", "--print-items"])
            ret_code, was_rerun = retry_shell(
                command,
                test_directory,
                stdout=output,
                stderr=output,
                env=env,
                timeout=timeout,
                retries=0,
            )
            # Pytest return code 5 means no test is collected. Exit code 4 is
            # returned when the binary is not a C++ test executable, but 4 can
            # also be returned if the file fails before running any tests. All
            # binary files under build/bin that are not C++ test at the time of
            # this writing have been excluded and new ones should be added to
            # the list of exclusions in tools/testing/discover_tests.py
            ret_code = 0 if ret_code == 5 else ret_code
    if options.pipe_logs and print_log:
        handle_log_file(
            test_module, log_path, failed=(ret_code != 0), was_rerun=was_rerun
        )
    return ret_code
def install_cpp_extensions(extensions_dir, env=os.environ):
    """Build and install the C++ extension found under *extensions_dir*.

    Installs via pip with ``--root ./install`` so the package lands in a
    throwaway tree next to the sources instead of the real site-packages.

    Note: the ``env`` default binds the live ``os.environ`` mapping at
    definition time, so later environment changes are still visible.

    Returns:
        ``(install_directory, 0)`` on success, or ``(None, return_code)``
        when the pip build fails.
    """
    # Wipe the build folder, if it exists already
    build_dir = os.path.join(extensions_dir, "build")
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)

    # Build the test cpp extensions modules
    cmd = [
        sys.executable,
        "-m",
        "pip",
        "install",
        "--no-build-isolation",
        ".",
        "--root",
        "./install",
    ]
    return_code = shell(cmd, cwd=extensions_dir, env=env)
    if return_code != 0:
        return None, return_code

    # Get the site-packages directory prepared for PYTHONPATH
    # (strip the drive so the path can be re-rooted under ./install)
    platlib_path = sysconfig.get_paths()["platlib"]
    platlib_rel = os.path.relpath(
        platlib_path, os.path.splitdrive(platlib_path)[0] + os.sep
    )
    install_directory = os.path.join(extensions_dir, "install", platlib_rel)
    assert install_directory, "install_directory must not be empty"
    return install_directory, 0
@contextlib.contextmanager
def extend_python_path(install_directories):
    """Temporarily prepend *install_directories* to the PYTHONPATH env var.

    Fixes two defects in the previous version:
    - when PYTHONPATH was previously unset (or empty), joining with "" left a
      trailing empty entry, which Python interprets as the current directory;
    - on exit, a previously-unset PYTHONPATH was "restored" to "" instead of
      being removed again.

    Args:
        install_directories: directory paths to put ahead of any existing
            PYTHONPATH entries.
    """
    original = os.environ.get("PYTHONPATH")
    # Only append the old value when it is non-empty: an empty PYTHONPATH
    # component means "current directory" and must not be introduced here.
    parts = list(install_directories) + ([original] if original else [])
    try:
        os.environ["PYTHONPATH"] = os.pathsep.join(parts)
        yield
    finally:
        if original is None:
            os.environ.pop("PYTHONPATH", None)
        else:
            os.environ["PYTHONPATH"] = original
def try_set_cpp_stack_traces(env, command, set=True):
    """Toggle TORCH_SHOW_CPP_STACKTRACES in *env* and return it.

    Used to print full C++ stack traces during retries. ``command`` is
    accepted but unused; ``set`` keeps its builtin-shadowing name because
    callers pass it as a keyword argument.
    """
    result = env or {}
    result["TORCH_SHOW_CPP_STACKTRACES"] = "1" if set else "0"
    return result
def run_test_retries(
    command,
    test_directory,
    env,
    timeout,
    stepcurrent_key,
    output,
    continue_through_error,
    test_file,
    options,
):
    """Run *command* with per-test retry/skip logic driven by stepcurrent.

    Returns a ``(ret_code, was_rerun)`` tuple; ``ret_code`` is 1 if any test
    failed consistently (3 times), otherwise the last shell return code.
    """
    # Run the test with -x to stop at first failure. Rerun the test by itself.
    # If it succeeds, move on to the rest of the tests in a new process. If it
    # still fails, see below
    #
    # If continue through error is not set, then we fail fast.
    #
    # If continue through error is set, then we skip that test, and keep going.
    # Basically if the same test fails 3 times in a row, skip the test on the
    # next run, but still fail in the end. I take advantage of the value saved
    # in stepcurrent to keep track of the most recently run test (which is the
    # one that failed if there was a failure).
    def print_to_file(s):
        # All progress reporting goes to the (possibly piped) output stream.
        print(s, file=output, flush=True)

    num_failures = defaultdict(int)

    def read_pytest_cache(key: str) -> Any:
        # Reads a value the stepcurrent pytest plugin persisted for this run;
        # returns None when the cache file does not exist.
        cache_file = (
            REPO_ROOT / ".pytest_cache/v/cache/stepcurrent" / stepcurrent_key / key
        )
        try:
            with open(cache_file) as f:
                return f.read()
        except FileNotFoundError:
            return None

    print_items = ["--print-items"]
    sc_command = f"--sc={stepcurrent_key}"
    while True:
        ret_code, _ = retry_shell(
            command + [sc_command] + print_items,
            test_directory,
            stdout=output,
            stderr=output,
            env=env,
            timeout=timeout,
            retries=0,  # no retries here, we do it ourselves, this is because it handles timeout exceptions well
        )
        ret_code = 0 if ret_code == 5 else ret_code
        if ret_code == 0 and not sc_command.startswith("--rs="):
            break  # Got to the end of the test suite successfully
        signal_name = f" ({SIGNALS_TO_NAMES_DICT[-ret_code]})" if ret_code < 0 else ""
        print_to_file(f"Got exit code {ret_code}{signal_name}")

        # Read what just failed/ran
        try:
            current_failure = read_pytest_cache("lastrun")
            if current_failure is None:
                raise FileNotFoundError
            if current_failure == "null":
                current_failure = f"'{test_file}'"
        except FileNotFoundError:
            print_to_file(
                "No stepcurrent file found. Either pytest didn't get to run (e.g. import error)"
                + " or file got deleted (contact dev infra)"
            )
            break

        env = try_set_cpp_stack_traces(env, command, set=False)
        if ret_code != 0:
            num_failures[current_failure] += 1

        if ret_code == 0:
            # Rerunning the previously failing test succeeded, so now we can
            # skip it and move on
            sc_command = f"--scs={stepcurrent_key}"
            print_to_file(
                "Test succeeded in new process, continuing with the rest of the tests"
            )
        elif num_failures[current_failure] >= 3:
            # This is for log classifier so it can prioritize consistently
            # failing tests instead of reruns. [1:-1] to remove quotes
            print_to_file(f"FAILED CONSISTENTLY: {current_failure[1:-1]}")
            if (
                read_pytest_cache("made_failing_xml") == "false"
                and IS_CI
                and options.upload_artifacts_while_running
            ):
                upload_adhoc_failure_json(test_file, current_failure[1:-1])
            if not continue_through_error:
                print_to_file("Stopping at first consistent failure")
                break
            sc_command = f"--scs={stepcurrent_key}"
            print_to_file(
                "Test failed consistently, "
                "continuing with the rest of the tests due to continue-through-error being set"
            )
        else:
            env = try_set_cpp_stack_traces(env, command, set=True)
            sc_command = f"--rs={stepcurrent_key}"
            print_to_file("Retrying single test...")
        print_items = []  # do not continue printing them, massive waste of space

    consistent_failures = [x[1:-1] for x in num_failures if num_failures[x] >= 3]
    flaky_failures = [x[1:-1] for x in num_failures if 0 < num_failures[x] < 3]
    if len(flaky_failures) > 0:
        print_to_file(
            "The following tests failed and then succeeded when run in a new process"
            + f"{flaky_failures}",
        )
    if len(consistent_failures) > 0:
        print_to_file(f"The following tests failed consistently: {consistent_failures}")
        return 1, True
    return ret_code, any(x > 0 for x in num_failures.values())
def run_test_with_subprocess(test_module, test_directory, options):
    """Run *test_module* with the --subprocess flag (per-test isolation)."""
    extra_args = ["--subprocess"]
    return run_test(
        test_module, test_directory, options, extra_unittest_args=extra_args
    )
def _test_cpp_extensions_aot(test_directory, options, use_ninja):
    """Ahead-of-time build the cpp_extensions test packages, then run the
    matching test_cpp_extensions_aot_{ninja,no_ninja} module.

    Builds happen into a local ./install root; the resulting site-packages
    directories are put on PYTHONPATH for the duration of the test run.
    Returns the test's exit code (1 if ninja was requested but unavailable).
    """
    if use_ninja:
        try:
            from torch.utils import cpp_extension

            cpp_extension.verify_ninja_availability()
        except RuntimeError:
            print_to_stderr(CPP_EXTENSIONS_ERROR)
            return 1

    # Wipe the build folder, if it exists already
    cpp_extensions_test_dir = os.path.join(test_directory, "cpp_extensions")
    cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, "build")
    if os.path.exists(cpp_extensions_test_build_dir):
        shutil.rmtree(cpp_extensions_test_build_dir)

    # Build the test cpp extensions modules
    shell_env = os.environ.copy()
    shell_env["USE_NINJA"] = str(1 if use_ninja else 0)
    install_cmd = [
        sys.executable,
        "-m",
        "pip",
        "install",
        "--no-build-isolation",
        ".",
        "--root",
        "./install",
    ]
    wheel_cmd = [sys.executable, "-m", "build", "--wheel", "--no-isolation"]
    return_code = shell(install_cmd, cwd=cpp_extensions_test_dir, env=shell_env)
    if return_code != 0:
        return return_code
    if sys.platform != "win32":
        exts_to_build = [
            (install_cmd, "no_python_abi_suffix_test"),
        ]
        if TEST_CUDA or TEST_XPU:
            exts_to_build.append((wheel_cmd, "python_agnostic_extension"))
        if TEST_CUDA:
            exts_to_build.append((install_cmd, "libtorch_agnostic_2_9_extension"))
            exts_to_build.append((install_cmd, "libtorch_agnostic_2_10_extension"))
        for cmd, extension_dir in exts_to_build:
            return_code = shell(
                cmd,
                cwd=os.path.join(cpp_extensions_test_dir, extension_dir),
                env=shell_env,
            )
            if return_code != 0:
                return return_code
    from shutil import copyfile

    os.environ["USE_NINJA"] = shell_env["USE_NINJA"]
    # Copy the test file to a ninja/no_ninja-specific name so the report is
    # attributed to the right variant.
    test_module = "test_cpp_extensions_aot" + ("_ninja" if use_ninja else "_no_ninja")
    copyfile(
        test_directory + "/test_cpp_extensions_aot.py",
        test_directory + "/" + test_module + ".py",
    )
    try:
        cpp_extensions = os.path.join(test_directory, "cpp_extensions")
        install_directories = []
        # install directory is the one that is named site-packages
        for root, directories, _ in os.walk(os.path.join(cpp_extensions, "install")):
            for directory in directories:
                if "-packages" in directory:
                    install_directories.append(os.path.join(root, directory))

        for extension_name in [
            "libtorch_agnostic_2_9_extension",
            "libtorch_agnostic_2_10_extension",
        ]:
            for root, directories, _ in os.walk(
                os.path.join(cpp_extensions, extension_name, "install")
            ):
                for directory in directories:
                    if "-packages" in directory:
                        install_directories.append(os.path.join(root, directory))

        with extend_python_path(install_directories):
            return run_test(ShardedTest(test_module, 1, 1), test_directory, options)
    finally:
        # Clean up the copied test file and the USE_NINJA env var.
        if os.path.exists(test_directory + "/" + test_module + ".py"):
            os.remove(test_directory + "/" + test_module + ".py")
        os.environ.pop("USE_NINJA")
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
    """Run the AOT cpp-extensions tests with the ninja build backend."""
    use_ninja = True
    return _test_cpp_extensions_aot(test_directory, options, use_ninja=use_ninja)


def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
    """Run the AOT cpp-extensions tests without the ninja build backend."""
    use_ninja = False
    return _test_cpp_extensions_aot(test_directory, options, use_ninja=use_ninja)
def test_autoload_enable(test_module, test_directory, options):
    """Run the backend-autoload test with autoloading turned on."""
    return _test_autoload(test_directory, options, enable=True)


def test_autoload_disable(test_module, test_directory, options):
    """Run the backend-autoload test with autoloading turned off."""
    return _test_autoload(test_directory, options, enable=False)
def _test_autoload(test_directory, options, enable=True):
    """Build the cpp_extensions package, then run test_autoload.py with
    TORCH_DEVICE_BACKEND_AUTOLOAD set to 1 (enable) or 0 (disable).

    Returns the build return code on build failure, otherwise the test's
    return code. The env var is always removed afterwards.
    """
    cpp_extensions_test_dir = os.path.join(test_directory, "cpp_extensions")
    install_directory, return_code = install_cpp_extensions(cpp_extensions_test_dir)
    if return_code != 0:
        return return_code

    try:
        os.environ["TORCH_DEVICE_BACKEND_AUTOLOAD"] = str(int(enable))

        with extend_python_path([install_directory]):
            cmd = [sys.executable, "test_autoload.py"]
            return_code = shell(cmd, cwd=test_directory, env=os.environ)
            return return_code
    finally:
        os.environ.pop("TORCH_DEVICE_BACKEND_AUTOLOAD")
# test_openreg is designed to run all tests under torch_openreg, which
# is an torch backend similar to CUDA or MPS and implemented by using
# third-party accelerator integration mechanism. Therefore, if all the
# tests under torch_openreg are passing, it can means that the mechanism
# mentioned above is working as expected.
def test_openreg(test_module, test_directory, options):
    """Build the torch_openreg extension and run its unittest suite."""
    openreg_dir = os.path.join(
        test_directory, "cpp_extensions", "open_registration_extension", "torch_openreg"
    )

    install_dir, return_code = install_cpp_extensions(openreg_dir)
    if return_code != 0:
        return return_code

    # The tests are discovered with plain unittest rather than run_test.
    with extend_python_path([install_dir]):
        cmd = [
            sys.executable,
            "-m",
            "unittest",
            "discover",
            "-s",
            os.path.join(openreg_dir, "tests"),
            "-v",
        ]
        return shell(cmd, cwd=test_directory, env=os.environ)
def test_distributed(test_module, test_directory, options):
    """Run *test_module* once per (backend, init_method) combination.

    For each backend in DISTRIBUTED_TESTS_CONFIG the test is run with both
    file and env init methods (env only on Windows; gloo only on Windows;
    mpi only when mpiexec is available). Environment variables configure the
    child process and are restored after each run. Returns the first nonzero
    return code, or 0 when every combination passes.
    """
    mpi_available = shutil.which("mpiexec")
    if options.verbose and not mpi_available:
        print_to_stderr("MPI not available -- MPI backend tests will be skipped")
    config = DISTRIBUTED_TESTS_CONFIG
    for backend, env_vars in config.items():
        if sys.platform == "win32" and backend != "gloo":
            continue
        if backend == "mpi" and not mpi_available:
            continue
        for with_init_file in {True, False}:
            if sys.platform == "win32" and not with_init_file:
                continue
            tmp_dir = tempfile.mkdtemp()
            init_method = "file" if with_init_file else "env"
            if options.verbose:
                with_init = f"with {init_method} init_method"
                print_to_stderr(
                    f"Running distributed tests for the {backend} backend {with_init}"
                )
            # Snapshot the environment so per-combination changes can be
            # rolled back in the finally block.
            old_environ = dict(os.environ)
            os.environ["TEMP_DIR"] = tmp_dir
            os.environ["BACKEND"] = backend
            os.environ.update(env_vars)
            report_tag = f"dist-{backend}" if backend != "test" else ""
            report_tag += f"-init-{init_method}"
            os.environ["TEST_REPORT_SOURCE_OVERRIDE"] = report_tag
            try:
                os.mkdir(os.path.join(tmp_dir, "barrier"))
                os.mkdir(os.path.join(tmp_dir, "test_dir"))
                if backend == "mpi":
                    # test mpiexec for --noprefix option
                    with open(os.devnull, "w") as devnull:
                        allowrunasroot_opt = (
                            "--allow-run-as-root"
                            if subprocess.call(
                                'mpiexec --allow-run-as-root -n 1 bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )
                        noprefix_opt = (
                            "--noprefix"
                            if subprocess.call(
                                f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""',
                                shell=True,
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                            )
                            == 0
                            else ""
                        )

                    mpiexec = ["mpiexec", "-n", "3", noprefix_opt, allowrunasroot_opt]

                    return_code = run_test(
                        test_module, test_directory, options, launcher_cmd=mpiexec
                    )
                else:
                    return_code = run_test(
                        test_module,
                        test_directory,
                        options,
                        extra_unittest_args=["--subprocess"],
                    )
                if return_code != 0:
                    return return_code
            finally:
                shutil.rmtree(tmp_dir)
                os.environ.clear()
                os.environ.update(old_environ)
    return 0
def run_doctests(test_module, test_directory, options):
    """
    Assumes the incoming test module is called doctest, and simply executes the
    xdoctest runner on the torch library itself.

    Feature-conditional doctest groups (cuda, lapack, onnx, ...) are toggled
    via TORCH_DOCTEST_* environment variables; entries set to "auto" are
    resolved by probing the current build. Returns 1 if any doctest failed,
    else 0.
    """
    import xdoctest

    pkgpath = Path(torch.__file__).parent

    exclude_module_list = ["torch._vendor.*"]
    enabled = {
        # TODO: expose these options to the user
        # For now disable all feature-conditional tests
        # 'lapack': 'auto',
        # 'cuda': 'auto',
        # 'cuda1': 'auto',
        # 'qengine': 'auto',
        "lapack": 0,
        "cuda": 0,
        "cuda1": 0,
        "qengine": 0,
        "autograd_profiler": 0,
        "cpp_ext": 0,
        "monitor": 0,
        "onnx": "auto",
    }

    # Resolve "auto" based on a test to determine if the feature is available.
    if enabled["cuda"] == "auto" and torch.cuda.is_available():
        enabled["cuda"] = True

    if (
        enabled["cuda1"] == "auto"
        and torch.cuda.is_available()
        and torch.cuda.device_count() > 1
    ):
        enabled["cuda1"] = True

    if enabled["lapack"] == "auto" and torch._C.has_lapack:
        enabled["lapack"] = True

    if enabled["qengine"] == "auto":
        try:
            # Is there a better check if quantization is enabled?
            import torch.ao.nn.quantized as nnq  # NOQA: F401

            torch.backends.quantized.engine = "qnnpack"
            torch.backends.quantized.engine = "fbgemm"
        except (ImportError, RuntimeError):
            ...
        else:
            enabled["qengine"] = True

    if enabled["onnx"] == "auto":
        try:
            import onnx  # NOQA: F401
            import onnxruntime  # NOQA: F401
            import onnxscript  # NOQA: F401
        except ImportError:
            exclude_module_list.append("torch.onnx.*")
            enabled["onnx"] = False
        else:
            enabled["onnx"] = True

    # Set doctest environment variables
    if enabled["cuda"]:
        os.environ["TORCH_DOCTEST_CUDA"] = "1"

    if enabled["cuda1"]:
        os.environ["TORCH_DOCTEST_CUDA1"] = "1"

    if enabled["lapack"]:
        os.environ["TORCH_DOCTEST_LAPACK"] = "1"

    if enabled["qengine"]:
        os.environ["TORCH_DOCTEST_QENGINE"] = "1"

    if enabled["autograd_profiler"]:
        os.environ["TORCH_DOCTEST_AUTOGRAD_PROFILER"] = "1"

    if enabled["cpp_ext"]:
        os.environ["TORCH_DOCTEST_CPP_EXT"] = "1"

    if enabled["monitor"]:
        os.environ["TORCH_DOCTEST_MONITOR"] = "1"

    if enabled["onnx"]:
        os.environ["TORCH_DOCTEST_ONNX"] = "1"

    if torch.mps.is_available():
        os.environ["TORCH_DOCTEST_MPS"] = "1"

    if torch.distributed.is_available():
        os.environ["TORCH_DOCTEST_DISTRIBUTED"] = "1"

    if 0:
        # TODO: could try to enable some of these
        os.environ["TORCH_DOCTEST_QUANTIZED_DYNAMIC"] = "1"
        os.environ["TORCH_DOCTEST_ANOMALY"] = "1"
        os.environ["TORCH_DOCTEST_AUTOGRAD"] = "1"
        os.environ["TORCH_DOCTEST_HUB"] = "1"
        os.environ["TORCH_DOCTEST_DATALOADER"] = "1"
        os.environ["TORCH_DOCTEST_FUTURES"] = "1"

    # NOTE(review): this shadows the Path-valued pkgpath above with a str
    # version; only this one is passed (via os.fspath) to xdoctest below.
    pkgpath = os.path.dirname(torch.__file__)

    xdoctest_config = {
        "global_exec": r"\n".join(
            [
                "from torch import nn",
                "import torch.nn.functional as F",
                "import torch",
            ]
        ),
        "analysis": "static",  # set to "auto" to test doctests in compiled modules
        "style": "google",
        "options": "+IGNORE_WHITESPACE",
    }
    xdoctest_verbose = max(1, options.verbose)
    run_summary = xdoctest.runner.doctest_module(
        os.fspath(pkgpath),
        config=xdoctest_config,
        verbose=xdoctest_verbose,
        command=options.xdoctest_command,
        argv=[],
        exclude=exclude_module_list,
    )
    result = 1 if run_summary.get("n_failed", 0) else 0
    return result
def sanitize_file_name(file: str):
    """Make *file* safe to embed in a log-file name.

    Both path-separator styles become '.', and spaces become '_'.
    """
    translation = str.maketrans({"\\": ".", "/": ".", " ": "_"})
    return file.translate(translation)
def handle_log_file(
    test: ShardedTest, file_path: str, failed: bool, was_rerun: bool
) -> None:
    """Archive a test's piped log file and print it (or a summary) to stderr.

    The temporary log at *file_path* is renamed into test/test-reports with a
    sanitized, randomized name. On a clean pass with no reruns only the
    "Running ... items in this shard" lines are echoed; otherwise the whole
    log is printed.
    """
    test = str(test)
    with open(file_path, errors="ignore") as f:
        full_text = f.read()

    new_file = "test/test-reports/" + sanitize_file_name(
        f"{test}_{os.urandom(8).hex()}_.log"
    )
    os.rename(file_path, REPO_ROOT / new_file)

    if not failed and not was_rerun and "=== RERUNS ===" not in full_text:
        # If success + no retries (idk how else to check for test level retries
        # other than reparse xml), print only what tests ran
        print_to_stderr(
            f"\n{test} was successful, full logs can be found in artifacts with path {new_file}"
        )
        for line in full_text.splitlines():
            if re.search("Running .* items in this shard:", line):
                print_to_stderr(line.rstrip())
        print_to_stderr("")
        return

    # otherwise: print entire file
    print_to_stderr(f"\nPRINTING LOG FILE of {test} ({new_file})")
    print_to_stderr(full_text)
    print_to_stderr(f"FINISHED PRINTING LOG FILE of {test} ({new_file})\n")
def get_pytest_args(options, is_cpp_test=False, is_distributed_test=False):
    """Assemble the pytest command-line arguments for one test invocation.

    The rerun policy depends on the mode: distributed tests never rerun,
    rerun-disabled-tests mode uses the flake finder, and everything else
    retries a failure twice and stops at the first unrecovered failure.
    """
    if is_distributed_test:
        # Distributed tests do not support rerun, see https://github.com/pytorch/pytorch/issues/162978
        rerun_options = ["-x", "--reruns=0"]
    elif RERUN_DISABLED_TESTS:
        # ASAN tests are too slow, so running them x50 will cause the jobs to timeout after
        # 3+ hours. So, let's opt for less number of reruns. We need at least 150 instances of the
        # test every 2 weeks to satisfy the SQL query (15 x 14 = 210).
        flake_runs = 15 if TEST_WITH_ASAN else 50
        # When under rerun-disabled-tests mode, run the same tests multiple times to determine their
        # flakiness status. Default to 50 re-runs
        rerun_options = ["--flake-finder", f"--flake-runs={flake_runs}"]
    else:
        # When under the normal mode, retry a failed test 2 more times. -x means stop at the first
        # failure
        rerun_options = ["-x", "--reruns=2"]

    pytest_args = ["-vv", "-rfEX"]
    if is_cpp_test:
        # Use pytest-xdist to run C++ tests in parallel as running them sequentially using run_test
        # is much slower than running them directly
        pytest_args += ["-n", str(NUM_PROCS)]
        if TEST_SAVE_XML:
            # Add the option to generate XML test report here as C++ tests
            # won't go into common_utils
            pytest_args += ["--junit-xml-reruns", get_report_path(pytest=True)]
    else:
        # C++ tests need to be run with pytest directly, not via python
        # We have a custom pytest shard that conflicts with the normal plugin
        pytest_args += ["-p", "no:xdist", "--use-pytest"]

    if options.pytest_k_expr:
        pytest_args += ["-k", options.pytest_k_expr]

    pytest_args += rerun_options
    return pytest_args
def run_ci_sanity_check(test: ShardedTest, test_directory, options):
    """Run the CI sanity-check test and verify that it FAILS as intended.

    Returns 0 when the test fails with exit code 1 (the expected outcome);
    returns 1 otherwise. The logs/reports it produces are deleted so the
    deliberate failure doesn't pollute CI artifacts.
    """
    assert test.name == "test_ci_sanity_check_fail", (
        f"This handler only works for test_ci_sanity_check_fail, got {test.name}"
    )
    ret_code = run_test(test, test_directory, options, print_log=False)
    # This test should fail
    if ret_code != 1:
        return 1
    test_reports_dir = str(REPO_ROOT / "test/test-reports")
    # Delete the log files and xmls generated by the test
    for file in glob.glob(f"{test_reports_dir}/{test.name}*.log"):
        os.remove(file)
    for dirname in glob.glob(f"{test_reports_dir}/**/{test.name}"):
        shutil.rmtree(dirname)
    return 0
# Dispatch table mapping test files to bespoke runner functions (subprocess
# isolation, distributed setup, doctests, AOT extension builds, ...). Files
# not listed here go through plain run_test.
CUSTOM_HANDLERS = {
    "test_cuda_primary_ctx": run_test_with_subprocess,
    "test_cuda_nvml_based_avail": run_test_with_subprocess,
    "test_cuda_trace": run_test_with_subprocess,
    "test_cpp_extensions_aot_no_ninja": test_cpp_extensions_aot_no_ninja,
    "test_cpp_extensions_aot_ninja": test_cpp_extensions_aot_ninja,
    "distributed/test_distributed_spawn": test_distributed,
    "distributed/algorithms/quantization/test_quantization": test_distributed,
    "distributed/test_c10d_nccl": run_test_with_subprocess,
    "distributed/test_c10d_gloo": run_test_with_subprocess,
    "distributed/test_c10d_ucc": run_test_with_subprocess,
    "distributed/test_c10d_common": run_test_with_subprocess,
    "distributed/test_c10d_spawn_gloo": run_test_with_subprocess,
    "distributed/test_c10d_spawn_nccl": run_test_with_subprocess,
    "distributed/test_c10d_spawn_ucc": run_test_with_subprocess,
    "distributed/test_store": run_test_with_subprocess,
    "distributed/test_pg_wrapper": run_test_with_subprocess,
    "distributed/rpc/test_faulty_agent": run_test_with_subprocess,
    "distributed/rpc/test_tensorpipe_agent": run_test_with_subprocess,
    "distributed/rpc/test_share_memory": run_test_with_subprocess,
    "distributed/rpc/cuda/test_tensorpipe_agent": run_test_with_subprocess,
    "doctests": run_doctests,
    "test_ci_sanity_check_fail": run_ci_sanity_check,
    "test_autoload_enable": test_autoload_enable,
    "test_autoload_disable": test_autoload_disable,
    "test_openreg": test_openreg,
}

# Test files for which run_test strips any pytest --reruns flags.
PYTEST_SKIP_RETRIES = {"test_public_bindings"}
def parse_args():
    """Parse the run_test.py command line.

    Unrecognized arguments (optionally after a bare ``--``) are collected
    into ``args.additional_args`` and forwarded to the individual test
    invocations.

    Returns:
        argparse.Namespace with all options plus ``additional_args``.
    """
    parser = argparse.ArgumentParser(
        description="Run the PyTorch unit test suite",
        epilog="where TESTS is any of: {}".format(", ".join(TESTS)),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Print verbose information and test-by-test results",
    )
    parser.add_argument(
        "--showlocals",
        action=argparse.BooleanOptionalAction,
        default=strtobool(os.environ.get("TEST_SHOWLOCALS", "False")),
        help="Show local variables in tracebacks (default: True)",
    )
    # NOTE(review): several options below repeat the same option string twice
    # (e.g. "--jit", "--jit") — presumably leftovers from removed short
    # aliases; harmless but worth cleaning up.
    parser.add_argument("--jit", "--jit", action="store_true", help="run all jit tests")
    parser.add_argument(
        "--distributed-tests",
        "--distributed-tests",
        action="store_true",
        help="Run all distributed tests",
    )
    parser.add_argument(
        "--include-dynamo-core-tests",
        "--include-dynamo-core-tests",
        action="store_true",
        help=(
            "If this flag is present, we will only run dynamo tests. "
            "If this flag is not present, we will run all tests "
            "(including dynamo tests)."
        ),
    )
    parser.add_argument(
        "--functorch",
        "--functorch",
        action="store_true",
        help=(
            "If this flag is present, we will only run functorch tests. "
            "If this flag is not present, we will run all tests "
            "(including functorch tests)."
        ),
    )
    parser.add_argument(
        "--einops",
        "--einops",
        action="store_true",
        help=(
            "If this flag is present, we will only run einops tests. "
            "If this flag is not present, we will run all tests "
            "(including einops tests)."
        ),
    )
    parser.add_argument(
        "--mps",
        "--mps",
        action="store_true",
        help=("If this flag is present, we will only run test_mps and test_metal"),
    )
    parser.add_argument(
        "--xpu",
        "--xpu",
        action="store_true",
        help=("If this flag is present, we will run xpu tests except XPU_BLOCK_LIST"),
    )
    parser.add_argument(
        "--cpp",
        "--cpp",
        action="store_true",
        help=("If this flag is present, we will only run C++ tests"),
    )
    parser.add_argument(
        "-core",
        "--core",
        action="store_true",
        help="Only run core tests, or tests that validate PyTorch's ops, modules,"
        "and autograd. They are defined by CORE_TEST_LIST.",
    )
    parser.add_argument(
        "--onnx",
        "--onnx",
        action="store_true",
        help=(
            "Only run ONNX tests, or tests that validate PyTorch's ONNX export. "
            "If this flag is not present, we will exclude ONNX tests."
        ),
    )
    parser.add_argument(
        "-k",
        "--pytest-k-expr",
        default="",
        help="Pass to pytest as its -k expr argument",
    )
    parser.add_argument(
        "-c",
        "--coverage",
        action="store_true",
        help="enable coverage",
        default=PYTORCH_COLLECT_COVERAGE,
    )
    parser.add_argument(
        "-i",
        "--include",
        nargs="+",
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar="TESTS",
        help="select a set of tests to include (defaults to ALL tests)."
        " tests must be a part of the TESTS list defined in run_test.py",
    )
    parser.add_argument(
        "-x",
        "--exclude",
        nargs="+",
        choices=TESTS,
        metavar="TESTS",
        default=[],
        help="select a set of tests to exclude",
    )
    parser.add_argument(
        "--ignore-win-blocklist",
        action="store_true",
        help="always run blocklisted windows tests",
    )
    # NS: Disable target determination until it can be made more reliable
    # parser.add_argument(
    #     "--determine-from",
    #     help="File of affected source filenames to determine which tests to run.",
    # )
    parser.add_argument(
        "--continue-through-error",
        "--keep-going",
        action="store_true",
        help="Runs the full test suite despite one of the tests failing",
        default=strtobool(os.environ.get("CONTINUE_THROUGH_ERROR", "False")),
    )
    parser.add_argument(
        "--pipe-logs",
        action="store_true",
        help="Print logs to output file while running tests. True if in CI and env var is not set",
        default=IS_CI and not strtobool(os.environ.get("VERBOSE_TEST_LOGS", "False")),
    )
    parser.add_argument(
        "--enable-timeout",
        action="store_true",
        help="Set a timeout based on the test times json file. Only works if there are test times available",
        default=IS_CI and not strtobool(os.environ.get("NO_TEST_TIMEOUT", "False")),
    )
    parser.add_argument(
        "--enable-td",
        action="store_true",
        help="Enables removing tests based on TD",
        default=IS_CI
        and get_pr_number() is not None
        and not strtobool(os.environ.get("NO_TD", "False"))
        and not IS_MACOS
        and "xpu" not in BUILD_ENVIRONMENT
        and "onnx" not in BUILD_ENVIRONMENT
        and os.environ.get("GITHUB_WORKFLOW", "slow")
        in ("trunk", "pull", "rocm", "rocm-mi300"),
    )
    parser.add_argument(
        "--shard",
        nargs=2,
        type=int,
        help="runs a shard of the tests (taking into account other selections), e.g., "
        "--shard 2 3 will break up the selected tests into 3 shards and run the tests "
        "in the 2nd shard (the first number should not exceed the second)",
    )
    parser.add_argument(
        "--exclude-jit-executor",
        action="store_true",
        help="exclude tests that are run for a specific jit config",
    )
    parser.add_argument(
        "--exclude-torch-export-tests",
        action="store_true",
        help="exclude torch export tests",
    )
    parser.add_argument(
        "--exclude-aot-dispatch-tests",
        action="store_true",
        help="exclude aot dispatch tests",
    )
    parser.add_argument(
        "--exclude-distributed-tests",
        action="store_true",
        help="exclude distributed tests",
    )
    parser.add_argument(
        "--exclude-inductor-tests",
        action="store_true",
        help="exclude inductor tests",
    )
    parser.add_argument(
        "--exclude-quantization-tests",
        action="store_true",
        help="exclude quantization tests",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Only list the test that will run.",
    )
    parser.add_argument(
        "--xdoctest-command",
        default="all",
        help=(
            "Control the specific doctest action. "
            "Use 'list' to simply parse doctests and check syntax. "
            "Use 'all' to execute all doctests or specify a specific "
            "doctest to run"
        ),
    )
    parser.add_argument(
        "--no-translation-validation",
        action="store_false",
        help="Run tests without translation validation.",
    )
    parser.add_argument(
        "--upload-artifacts-while-running",
        action="store_true",
        default=IS_CI,
    )
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--dynamo",
        action="store_true",
        help="Run tests with TorchDynamo+EagerBackend turned on",
    )
    group.add_argument(
        "--inductor",
        action="store_true",
        help="Run tests with TorchInductor turned on",
    )
    args, extra = parser.parse_known_args()
    if "--" in extra:
        extra.remove("--")
    args.additional_args = extra
    return args
def exclude_tests(
    exclude_list, selected_tests, exclude_message=None, exact_match=False
):
    """Remove excluded entries from *selected_tests* (mutated in place).

    Each entry of *exclude_list* removes exact matches; unless *exact_match*
    is set, any test whose name starts with the entry is removed too. When
    *exclude_message* is provided, each removal is reported to stderr.

    Returns the same (mutated) *selected_tests* list for convenience.
    """
    for pattern in exclude_list:
        # Snapshot so removal during iteration is safe.
        for candidate in selected_tests[:]:
            matched = candidate == pattern or (
                not exact_match and candidate.startswith(pattern)
            )
            if not matched:
                continue
            if exclude_message is not None:
                print_to_stderr(f"Excluding {candidate} {exclude_message}")
            selected_tests.remove(candidate)
    return selected_tests
def must_serial(file: Union[str, ShardedTest]) -> bool:
    """Decide whether *file* must run serially rather than in the parallel pool.

    A test is serialized when a global env/config switch forces it, when it
    is a distributed test, when it appears on one of the known serial lists,
    or when there is only one worker process anyway.
    """
    if isinstance(file, ShardedTest):
        file = file.name
    if os.getenv("PYTORCH_TEST_RUN_EVERYTHING_IN_SERIAL", "0") == "1":
        return True
    if DISTRIBUTED_TEST_PREFIX in os.getenv("TEST_CONFIG", ""):
        return True
    if DISTRIBUTED_TEST_PREFIX in file:
        return True
    serial_collections = (
        CUSTOM_HANDLERS,
        RUN_PARALLEL_BLOCKLIST,
        CI_SERIAL_LIST,
        JIT_EXECUTOR_TESTS,
        ONNX_SERIAL_LIST,
    )
    if any(file in collection for collection in serial_collections):
        return True
    return NUM_PROCS == 1
def can_run_in_pytest(test):
    """Whether pytest may be used (global env opt-out; *test* is unused)."""
    return os.environ.get("PYTORCH_TEST_DO_NOT_USE_PYTEST", "0") == "0"
def get_selected_tests(options) -> list[str]:
selected_tests = options.include
# filter if there's JIT only and distributed only test options
if options.jit:
selected_tests = list(
filter(lambda test_name: "jit" in test_name, selected_tests)
)
if options.distributed_tests:
selected_tests = list(
filter(lambda test_name: test_name in DISTRIBUTED_TESTS, selected_tests)
)
# Filter to only run core tests when --core option is specified
if options.core:
selected_tests = list(
filter(lambda test_name: test_name in CORE_TEST_LIST, selected_tests)
)
# Filter to only run dynamo tests when --include-dynamo-core-tests option is specified
if options.include_dynamo_core_tests:
selected_tests = list(
filter(lambda test_name: test_name in DYNAMO_CORE_TESTS, selected_tests)
)
# Filter to only run functorch tests when --functorch option is specified
if options.functorch:
selected_tests = list(
filter(lambda test_name: test_name in FUNCTORCH_TESTS, selected_tests)
)
# Filter to only run einops tests when --einops option is specified
if options.einops:
selected_tests = list(
filter(
lambda test_name: test_name.startswith("dynamo/test_einops"),
selected_tests,
)
)
if options.cpp:
selected_tests = list(
filter(lambda test_name: test_name in CPP_TESTS, selected_tests)
)
else:
# Exclude all C++ tests otherwise as they are still handled differently
# than Python test at the moment
options.exclude.extend(CPP_TESTS)
if options.mps:
selected_tests = [
"test_mps",
"test_metal",
"test_modules",
"nn/test_convolution",
"nn/test_dropout",
"nn/test_pooling",
"test_view_ops",
"test_nn",
"inductor/test_mps_basic",
"inductor/test_torchinductor",
"inductor/test_aot_inductor",
"inductor/test_torchinductor_dynamic_shapes",
]
else:
# Exclude all mps tests otherwise
options.exclude.extend(["test_mps", "test_metal"])
if options.xpu:
selected_tests = exclude_tests(XPU_BLOCKLIST, selected_tests, "on XPU")
else:
# Exclude all xpu specific tests otherwise
options.exclude.extend(XPU_TEST)
# Filter to only run onnx tests when --onnx option is specified
onnx_tests = [tname for tname in selected_tests if tname in ONNX_TESTS]
if options.onnx:
selected_tests = onnx_tests
else:
# Exclude all onnx tests otherwise
options.exclude.extend(onnx_tests)
# process exclusion
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
if options.exclude_distributed_tests:
options.exclude.extend(DISTRIBUTED_TESTS)
if options.exclude_inductor_tests:
options.exclude.extend(INDUCTOR_TESTS)
if options.exclude_torch_export_tests:
options.exclude.extend(TORCH_EXPORT_TESTS)
if options.exclude_aot_dispatch_tests:
options.exclude.extend(AOT_DISPATCH_TESTS)
if options.exclude_quantization_tests:
options.exclude.extend(QUANTIZATION_TESTS)
# these tests failing in CUDA 11.6 temporary disabling. issue https://github.com/pytorch/pytorch/issues/75375
if torch.version.cuda is not None:
options.exclude.extend(["distributions/test_constraints"])
# these tests failing in Python 3.12 temporarily disabling
if sys.version_info >= (3, 12):
options.exclude.extend(
[
"functorch/test_dims",
"functorch/test_rearrange",
"functorch/test_parsing",
"functorch/test_memory_efficient_fusion",
"torch_np/numpy_tests/core/test_multiarray",
]
)
if sys.version_info[:2] < (3, 13) or sys.version_info[:2] >= (3, 14):
# Skip tests for older Python versions as they may use syntax or features
# not supported in those versions
options.exclude.extend(
[test for test in selected_tests if test.startswith("dynamo/cpython/3_13/")]
)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == "win32" and not options.ignore_win_blocklist:
target_arch = os.environ.get("VSCMD_ARG_TGT_ARCH")
if target_arch != "x64":
WINDOWS_BLOCKLIST.append("cpp_extensions_aot_no_ninja")
WINDOWS_BLOCKLIST.append("cpp_extensions_aot_ninja")
WINDOWS_BLOCKLIST.append("cpp_extensions_jit")
WINDOWS_BLOCKLIST.append("jit")
WINDOWS_BLOCKLIST.append("jit_fuser")
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, "on Windows")
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, "on ROCm")
elif IS_S390X:
selected_tests = exclude_tests(S390X_BLOCKLIST, selected_tests, "on s390x")
selected_tests = exclude_tests(
DISTRIBUTED_TESTS,
selected_tests,
"Skip distributed tests on s390x",
)
# skip all distributed tests if distributed package is not available.
if not dist.is_available():
selected_tests = exclude_tests(
DISTRIBUTED_TESTS,
selected_tests,
"PyTorch is built without distributed support.",
)
# skip tests that require LAPACK when it's not available
if not torch._C.has_lapack:
selected_tests = exclude_tests(
TESTS_REQUIRING_LAPACK,
selected_tests,
"PyTorch is built without LAPACK support.",
)
if TEST_WITH_SLOW_GRADCHECK:
selected_tests = exclude_tests(
TESTS_NOT_USING_GRADCHECK,
selected_tests,
"Running in slow gradcheck mode, skipping tests that don't use gradcheck.",
exact_match=True,
)
selected_tests = [parse_test_module(x) for x in selected_tests]
return selected_tests
def load_test_times_from_file(file: str) -> dict[str, Any]:
# Load previous test times to make sharding decisions
path = os.path.join(str(REPO_ROOT), file)
if not os.path.exists(path):
print_to_stderr(
f"::warning:: Failed to find test times file `{path}`. Using round robin sharding."
)
return {}
with open(path) as f:
test_times_file = cast(dict[str, Any], json.load(f))
job_name = os.environ.get("JOB_NAME")
if job_name is None or job_name == "":
# If job name isn't available, use build environment as a backup
job_name = os.environ.get("BUILD_ENVIRONMENT")
else:
job_name = job_name.split(" / test (")[0]
test_config = os.environ.get("TEST_CONFIG")
if test_config in test_times_file.get(job_name, {}):
print_to_stderr("Found test times from artifacts")
return test_times_file[job_name][test_config]
elif test_config in test_times_file["default"]:
print_to_stderr(
f"::warning:: Gathered no stats from artifacts for {job_name} build env"
f" and {test_config} test config. Using default job name and {test_config} test config instead."
)
return test_times_file["default"][test_config]
else:
print_to_stderr(
f"::warning:: Gathered no stats from artifacts for job name {job_name} build env"
f" and {test_config} test config. Using default job name and default test config instead."
)
return test_times_file["default"]["default"]
def load_test_file_times(
file: str = ADDITIONAL_CI_FILES_FOLDER / TEST_TIMES_FILE,
) -> dict[str, float]:
return cast(dict[str, float], load_test_times_from_file(file))
def load_test_class_times(
file: str = ADDITIONAL_CI_FILES_FOLDER / TEST_CLASS_TIMES_FILE,
) -> dict[str, dict[str, float]]:
return cast(dict[str, dict[str, float]], load_test_times_from_file(file))
def get_sharding_opts(options) -> tuple[int, int]:
which_shard, num_shards = 1, 1
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, (
"Selected shard must be less than or equal to total number of shards"
)
return (which_shard, num_shards)
def do_sharding(
options,
selected_tests: Sequence[TestRun],
test_file_times: dict[str, float],
test_class_times: dict[str, dict[str, float]],
sort_by_time: bool = True,
) -> tuple[float, list[ShardedTest]]:
which_shard, num_shards = get_sharding_opts(options)
# Do sharding
shards = calculate_shards(
num_shards,
selected_tests,
test_file_times,
test_class_times=test_class_times,
must_serial=must_serial,
sort_by_time=sort_by_time,
)
return shards[which_shard - 1]
| TestChoices |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 10854,
"end": 11048
} | class ____(PrefectBaseModel):
"""Filter by `TaskRun.id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of task run ids to include"
)
| TaskRunFilterId |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 6123,
"end": 6319
} | class ____(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`.
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
| CsvHeader |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 24707,
"end": 25195
} | class ____(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
when the only supported forms in the spec are "1.0".
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ("v1.0",)
| W29 |
python | huggingface__transformers | src/transformers/models/oneformer/convert_to_hf_oneformer.py | {
"start": 1928,
"end": 3141
} | class ____:
def __init__(self, to_track: dict):
"""This class "tracks" a python dictionary by keeping track of which item is accessed.
Args:
to_track (Dict): The dictionary we wish to track
"""
self.to_track = to_track
self._seen: set[str] = set()
def __getitem__(self, key: str) -> Any:
return self.to_track[key]
def __setitem__(self, key: str, item: Any):
self._seen.add(key)
self.to_track[key] = item
def diff(self) -> list[str]:
"""This method returns a set difference between the keys in the tracked state dict and the one we have access so far.
This is an effective method to check if we have update all the keys
Returns:
list[str]: List of keys not yet updated
"""
return set(self.to_track.keys()) - self._seen
def copy(self) -> dict:
# proxy the call to the internal dictionary
return self.to_track.copy()
# Image to verify the result
def prepare_img():
url = "https://praeclarumjj3.github.io/files/coco.jpeg"
img_data = requests.get(url, stream=True).raw
im = Image.open(img_data)
return im
@dataclass
| TrackedStateDict |
python | chardet__chardet | chardet/latin1prober.py | {
"start": 3834,
"end": 5314
} | class ____(CharSetProber):
def __init__(self) -> None:
super().__init__()
self._last_char_class = OTH
self._freq_counter: List[int] = []
self.reset()
def reset(self) -> None:
self._last_char_class = OTH
self._freq_counter = [0] * FREQ_CAT_NUM
super().reset()
@property
def charset_name(self) -> str:
return "ISO-8859-1"
@property
def language(self) -> str:
return ""
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
byte_str = self.remove_xml_tags(byte_str)
for c in byte_str:
char_class = Latin1_CharToClass[c]
freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
if freq == 0:
self._state = ProbingState.NOT_ME
break
self._freq_counter[freq] += 1
self._last_char_class = char_class
return self.state
def get_confidence(self) -> float:
if self.state == ProbingState.NOT_ME:
return 0.01
total = sum(self._freq_counter)
confidence = (
0.0
if total < 0.01
else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
)
confidence = max(confidence, 0.0)
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence *= 0.73
return confidence
| Latin1Prober |
python | pytorch__pytorch | test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py | {
"start": 734,
"end": 5298
} | class ____(ShardedTensorTestBase):
"""Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor"""
seed = 42
def get_random_tensors(
self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0
):
pg1 = _get_default_group() if pg1 is None else pg1
pg2 = _get_default_group() if pg2 is None else pg2
torch.manual_seed(TestShardedTensorBinaryOps.seed)
st1 = sharded_tensor.rand(spec1, sizes, process_group=pg1)
torch.manual_seed(TestShardedTensorBinaryOps.seed + seed_offset)
st2 = sharded_tensor.rand(spec2, sizes, process_group=pg2)
TestShardedTensorBinaryOps.seed += 1
return st1, st2
def get_gpu_specs(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
alt_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:1/cuda:1",
"rank:0/cuda:0",
"rank:3/cuda:3",
"rank:2/cuda:2",
],
)
return spec, alt_spec
def _test_common_failures(self, cmp_op):
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
if self.rank == 0:
torch.nn.init.uniform_(st1.local_shards()[0].tensor)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 5)
self.assertFalse(cmp_op(st1, st2))
st1, st2 = self.get_random_tensors(spec, alt_spec, 10, 10)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.zeros(spec, 10, 10)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 10, dtype=torch.double)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 10, requires_grad=True)
self.assertFalse(cmp_op(st1, st2))
cpu_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
st1 = sharded_tensor.ones(cpu_spec, 10, 10)
st2 = sharded_tensor.ones(cpu_spec, 10, 10, pin_memory=True)
self.assertFalse(cmp_op(st1, st2))
pg = dist.new_group([1, 0, 3, 2])
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, pg2=pg)
with self.assertRaisesRegex(
RuntimeError, "All distributed tensors should use the same ProcessGroup"
):
cmp_op(st1, st2)
pg = dist.new_group([0, 1, 2, 3])
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, pg2=pg)
with self.assertRaisesRegex(
RuntimeError, "All distributed tensors should use the same ProcessGroup"
):
cmp_op(st1, st2)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal_tensor_specs(self):
self._test_common_failures(torch.equal)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal(self):
"""Test torch.equal(ShardedTensor, ShardedTensor)"""
spec, _ = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.equal(st1, st2))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose_tensor_specs(self):
self._test_common_failures(torch.allclose)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose(self):
"""Test torch.allclose(ShardedTensor, ShardedTensor)"""
spec, _ = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.allclose(st1, st2))
self.assertTrue(torch.allclose(st1, st2, atol=0))
# compare different arrays
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, seed_offset=1)
self.assertFalse(torch.allclose(st1, st2))
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
if __name__ == "__main__":
run_tests()
| TestShardedTensorBinaryOps |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_a_non_bot_user_agent.py | {
"start": 1899,
"end": 9140
} | class ____(ColumnMapExpectation):
"""Expect column values to be non-bot user agents."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"user_agents": [
"Mozilla/5.0 (Linux; Android 7.0; SM-G892A Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/60.0.3112.107 Mobile Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
"Mozilla/5.0 (iPhone9,3; U; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1",
"Mozilla/5.0 (Linux; Android 5.0.2; LG-V410/V41020c Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/34.0.1847.118 Safari/537.36",
"Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
"Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36"
"Mozilla/5.0 (compatible; Bingbot/2.0; +http://www.bing.com/bingbot.htm)",
]
},
"tests": [
{
"title": "positive_test_with_mostly",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "user_agents", "mostly": 0.9},
"out": {
"success": True,
},
},
{
"title": "negative_test_with_mostly",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "user_agents", "mostly": 1.0},
"out": {
"success": False,
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental"], # Tags for this Expectation in the gallery
"contributors": ["@ktshannon"],
"requirements": ["user_agents"],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.equal_non_bot_user_agent"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToBeANonBotUserAgent().print_diagnostic_checklist()
| ExpectColumnValuesToBeANonBotUserAgent |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 7837,
"end": 8832
} | class ____(DataType):
"""Base class for numeric data types."""
@classmethod
def max(cls) -> pl.Expr:
"""
Return a literal expression representing the maximum value of this data type.
Examples
--------
>>> pl.select(pl.Int8.max() == 127)
shape: (1, 1)
┌─────────┐
│ literal │
│ --- │
│ bool │
╞═════════╡
│ true │
└─────────┘
"""
return pl.Expr._from_pyexpr(plr._get_dtype_max(cls))
@classmethod
def min(cls) -> pl.Expr:
"""
Return a literal expression representing the minimum value of this data type.
Examples
--------
>>> pl.select(pl.Int8.min() == -128)
shape: (1, 1)
┌─────────┐
│ literal │
│ --- │
│ bool │
╞═════════╡
│ true │
└─────────┘
"""
return pl.Expr._from_pyexpr(plr._get_dtype_min(cls))
| NumericType |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 108427,
"end": 109084
} | class ____(BaseModel):
distribution: Optional[str] = Field(default=None, description="")
distribution_version: Optional[str] = Field(default=None, description="")
is_docker: bool = Field(..., description="")
cores: Optional[int] = Field(default=None, description="")
ram_size: Optional[int] = Field(default=None, description="")
disk_size: Optional[int] = Field(default=None, description="")
cpu_flags: str = Field(..., description="")
cpu_endian: Optional["CpuEndian"] = Field(default=None, description="")
gpu_devices: Optional[List["GpuDeviceTelemetry"]] = Field(default=None, description="")
| RunningEnvironmentTelemetry |
python | has2k1__plotnine | plotnine/positions/position_fill.py | {
"start": 45,
"end": 160
} | class ____(position_stack):
"""
Normalise stacked objects to unit height
"""
fill = True
| position_fill |
python | getsentry__sentry | src/sentry/plugins/utils.py | {
"start": 140,
"end": 436
} | class ____(IssueTrackingPlugin2):
"""This is only used in tests."""
slug = "issuetrackingplugin2"
feature_descriptions = [
FeatureDescription(
"""
Create issues
""",
IntegrationFeatures.ISSUE_BASIC,
)
]
| TestIssuePlugin2 |
python | fsspec__filesystem_spec | fsspec/implementations/tests/local/local_test.py | {
"start": 194,
"end": 267
} | class ____(abstract.AbstractGetTests, LocalFixtures):
pass
| TestLocalGet |
python | kamyu104__LeetCode-Solutions | Python/find-minimum-diameter-after-merging-two-trees.py | {
"start": 1530,
"end": 2597
} | class ____(object):
def minimumDiameterAfterMerge(self, edges1, edges2):
"""
:type edges1: List[List[int]]
:type edges2: List[List[int]]
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//2
def tree_diameter(edges):
def dfs(u, p):
mx = 0
for v in adj[u]:
if v == p:
continue
curr = dfs(v, u)
result[0] = max(result[0], mx+(curr+1))
mx = max(mx, curr+1)
return mx
adj = [[] for _ in range(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
result = [0]
dfs(0, -1)
return result[0]
d1 = tree_diameter(edges1)
d2 = tree_diameter(edges2)
return max(ceil_divide(d1, 2)+1+ceil_divide(d2, 2), d1, d2)
# Time: O(n + m)
# Space: O(n + m)
# bfs, tree dp, tree diameter
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 2111,
"end": 8045
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
# Test unary ops with optional dtype arg.
@parameterized.parameters(
("WeakTensor", WeakTensor),
("Python", WeakTensor),
("NumPy", tensor.Tensor),
("Tensor", tensor.Tensor),
)
def testReduceAllDims(self, input_type, result_type):
test_input = _convert_to_input_type(
[[1, 2, 3], [4, 5, 6]], input_type, dtypes.int32
)
with test_util.device(use_gpu=True):
res = math_ops.reduce_sum(test_input)
self.assertIsInstance(res, result_type)
self.assertEqual(self.evaluate(res), 21)
def testReduceExtendType(self):
test_in = np.random.randn(1000, 1000).astype(np.float32)
in_f32 = _get_weak_tensor(test_in, dtypes.float32)
in_bfl6 = math_ops.cast(test_in, dtypes.bfloat16)
out_f32 = self.evaluate(math_ops.reduce_sum(in_f32))
out_bf16 = self.evaluate(math_ops.reduce_sum(in_bfl6))
expected = math_ops.cast(out_f32, dtypes.bfloat16)
self.assertAllClose(out_bf16, expected, 1e-3)
def testCountNonzero(self):
# simple case
x = _get_weak_tensor([[0, -2, 0], [4, 0, 0]], dtypes.int32)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 2)
# boolean input
x = math_ops.not_equal(x, 0)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 2)
# would overflow if int8 would be used for internal calculations
x = 2 * np.ones(512, dtype=np.int8)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 512)
@parameterized.parameters(
("WeakTensor", WeakTensor),
("Python", WeakTensor),
("NumPy", tensor.Tensor),
("Tensor", tensor.Tensor),
)
def testReduceExplicitAxes(self, input_type, result_type):
x = _convert_to_input_type([[1, 2, 3], [4, 5, 6]], input_type, dtypes.int32)
with test_util.device(use_gpu=True):
for axis in (0, -2):
res = math_ops.reduce_sum(x, axis=axis)
self.assertIsInstance(res, result_type)
self.assertAllEqual(res, [5, 7, 9])
for axis in (1, -1):
res = math_ops.reduce_sum(x, axis=axis)
self.assertIsInstance(res, result_type)
self.assertAllEqual(res, [6, 15])
for axis in (None, (0, 1), (1, 0), (-1, 0), (0, -1), (-2, 1), (1, -2),
(-1, -2), (-2, -1)):
res = math_ops.reduce_sum(x, axis=axis)
self.assertIsInstance(res, result_type)
self.assertEqual(self.evaluate(res), 21)
def testReduceInvalidAxis(self):
if context.executing_eagerly():
# The shape check is in run a graph construction time. In eager mode,
# it misses the check, magically return result given wrong shape.
return
x = _get_weak_tensor([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegex(ValueError, "must be at most rank 1"):
math_ops.reduce_sum(x, axis)
def testReduceVar(self):
x = _get_weak_tensor([[0, 0, 0], [0, 0, 0]], dtype=dtypes.float32)
self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0)
self.assertAllClose(
self.evaluate(math_ops.reduce_variance(x, axis=0)), [0, 0, 0])
x = _get_weak_tensor([[1, 2, 1, 1], [1, 1, 0, 1]])
with self.assertRaisesRegex(TypeError, "must be either real or complex"):
math_ops.reduce_variance(x)
x = _get_weak_tensor([[1.0, 2.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0]])
self.assertEqual(self.evaluate(math_ops.reduce_variance(x)), 0.25)
x_np = np.array([[1, 2, 1, 1], [1, 1, 0, 1]], "float32")
self.assertEqual(np.var(x_np), 0.25)
self.assertEqual(self.evaluate(math_ops.reduce_variance(x_np)), 0.25)
x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
[]])
self.assertAllClose(math_ops.reduce_variance(x, axis=0), [0., 16., 1., 0.])
def testReduceVarComplex(self):
# Ensure that complex values are handled to be consistent with numpy
complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
(np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
(np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
for y, dtype in complex_ys:
y_result = math_ops.reduce_variance(y)
self.assertEqual(np.var(y), 1.0)
self.assertEqual(self.evaluate(y_result), 1.0)
self.assertEqual(y_result.dtype, dtype)
def testReduceStd(self):
x = _get_weak_tensor([[0, 0, 0], [0, 0, 0]], dtypes.float32)
self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
self.assertAllClose(
self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])
x = _get_weak_tensor([[1, 2, 1, 1], [1, 1, 0, 1]])
with self.assertRaisesRegex(TypeError, "must be either real or complex"):
math_ops.reduce_std(x)
x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
res = math_ops.reduce_std(x)
self.assertEqual(self.evaluate(res), 0.5)
self.assertIsInstance(res, WeakTensor)
x_np = np.array(x)
self.assertEqual(np.std(x_np), 0.5)
self.assertEqual(self.evaluate(math_ops.reduce_std(x_np)), 0.5)
self.assertIsInstance(math_ops.reduce_std(x_np), tensor.Tensor)
x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
[]])
self.assertAllClose(math_ops.reduce_std(x, axis=0), [0., 4., 1., 0.])
def testReduceStdComplex(self):
# Ensure that complex values are handled to be consistent with numpy
complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
(np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
(np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
for y, dtype in complex_ys:
y_result = math_ops.reduce_std(y)
self.assertEqual(np.std(y), 1.0)
self.assertEqual(self.evaluate(y_result), 1.0)
self.assertEqual(y_result.dtype, dtype)
@test_util.run_all_in_graph_and_eager_modes
| ReduceTest |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_storage_version_list.py | {
"start": 383,
"end": 7137
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1alpha1StorageVersion]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1StorageVersionList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1alpha1StorageVersionList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1StorageVersionList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1StorageVersionList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1StorageVersionList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1alpha1StorageVersionList. # noqa: E501
Items holds a list of StorageVersion # noqa: E501
:return: The items of this V1alpha1StorageVersionList. # noqa: E501
:rtype: list[V1alpha1StorageVersion]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1alpha1StorageVersionList.
Items holds a list of StorageVersion # noqa: E501
:param items: The items of this V1alpha1StorageVersionList. # noqa: E501
:type: list[V1alpha1StorageVersion]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1alpha1StorageVersionList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1StorageVersionList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1StorageVersionList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1StorageVersionList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1StorageVersionList. # noqa: E501
:return: The metadata of this V1alpha1StorageVersionList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1StorageVersionList.
:param metadata: The metadata of this V1alpha1StorageVersionList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1StorageVersionList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1StorageVersionList):
return True
return self.to_dict() != other.to_dict()
| V1alpha1StorageVersionList |
python | huggingface__transformers | src/transformers/models/cohere2/modeling_cohere2.py | {
"start": 2047,
"end": 5098
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Cohere2Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Cohere2Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat()
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
| Cohere2RotaryEmbedding |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI029.py | {
"start": 300,
"end": 417
} | class ____:
def __repr__(self) -> str:
...
def __str__(self) -> builtins.str:
...
| ShouldRemove |
python | joke2k__faker | faker/providers/person/es_ES/__init__.py | {
"start": 72,
"end": 39494
} | class ____(PersonProvider):
formats_male: Tuple[str, ...] = (
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{prefix}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{first_name_male}} {{first_name_male}} {{last_name}} {{last_name}}",
)
formats_female: Tuple[str, ...] = (
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{prefix}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{first_name_female}} {{first_name_female}} {{last_name}} {{last_name}}",
)
formats: Tuple[str, ...] = formats_male + formats_female
# 477 male first names, alphabetically.
# Source: Álvaro Mondéjar Rubio <mondejar1994@gmail.com>
first_names_male: Tuple[str, ...] = (
"Aarón",
"Abel",
"Abilio",
"Abraham",
"Adalberto",
"Adelardo",
"Adolfo",
"Adrián",
"Adán",
"Agapito",
"Agustín",
"Aitor",
"Albano",
"Albert",
"Alberto",
"Albino",
"Alcides",
"Ale",
"Alejandro",
"Alejo",
"Alex",
"Alfonso",
"Alfredo",
"Alonso",
"Amado",
"Amador",
"Amancio",
"Amando",
"Amaro",
"Ambrosio",
"Amor",
"Américo",
"Amílcar",
"Anacleto",
"Anastasio",
"Andrés",
"Andrés Felipe",
"Angelino",
"Anselmo",
"Antonio",
"Aníbal",
"Apolinar",
"Ariel",
"Aristides",
"Armando",
"Arsenio",
"Artemio",
"Arturo",
"Asdrubal",
"Atilio",
"Augusto",
"Aureliano",
"Aurelio",
"Baldomero",
"Balduino",
"Baltasar",
"Bartolomé",
"Basilio",
"Baudelio",
"Bautista",
"Benigno",
"Benito",
"Benjamín",
"Bernabé",
"Bernardino",
"Bernardo",
"Berto",
"Blas",
"Bonifacio",
"Borja",
"Bruno",
"Buenaventura",
"Calisto",
"Calixto",
"Camilo",
"Candelario",
"Carlito",
"Carlos",
"Carmelo",
"Casemiro",
"Cayetano",
"Cebrián",
"Cecilio",
"Ceferino",
"Celestino",
"Celso",
"Cesar",
"Che",
"Chema",
"Chucho",
"Chus",
"Chuy",
"Cipriano",
"Ciriaco",
"Cirino",
"Ciro",
"Ciríaco",
"Claudio",
"Clemente",
"Cleto",
"Clímaco",
"Conrado",
"Cornelio",
"Cosme",
"Cristian",
"Cristian",
"Cristóbal",
"Cruz",
"Curro",
"Custodio",
"Cándido",
"César",
"Damián",
"Dan",
"Dani",
"Daniel",
"Danilo",
"Darío",
"David",
"Demetrio",
"Desiderio",
"Diego",
"Dimas",
"Dionisio",
"Domingo",
"Donato",
"Duilio",
"Edelmiro",
"Edgardo",
"Edmundo",
"Edu",
"Eduardo",
"Efraín",
"Eladio",
"Eleuterio",
"Eligio",
"Eliseo",
"Eloy",
"Elpidio",
"Elías",
"Emigdio",
"Emiliano",
"Emilio",
"Enrique",
"Epifanio",
"Erasmo",
"Eric",
"Ernesto",
"Espiridión",
"Esteban",
"Eugenio",
"Eusebio",
"Eustaquio",
"Eutimio",
"Eutropio",
"Evaristo",
"Ezequiel",
"Fabio",
"Fabián",
"Fabricio",
"Faustino",
"Fausto",
"Federico",
"Feliciano",
"Felipe",
"Felix",
"Fermín",
"Fernando",
"Fidel",
"Fito",
"Flavio",
"Florencio",
"Florentino",
"Fortunato",
"Francisco",
"Francisco Javier",
"Francisco Jose",
"Fulgencio",
"Félix",
"Gabino",
"Gabriel",
"Galo",
"Gaspar",
"Gastón",
"Geraldo",
"Gerardo",
"Germán",
"Gervasio",
"Gerónimo",
"Gil",
"Gilberto",
"Glauco",
"Godofredo",
"Gonzalo",
"Goyo",
"Graciano",
"Gregorio",
"Guadalupe",
"Guillermo",
"Guiomar",
"Gustavo",
"Haroldo",
"Hector",
"Heliodoro",
"Heraclio",
"Herberto",
"Heriberto",
"Hermenegildo",
"Herminio",
"Hernando",
"Hernán",
"Hilario",
"Hipólito",
"Horacio",
"Hugo",
"Humberto",
"Héctor",
"Ibán",
"Ignacio",
"Iker",
"Ildefonso",
"Inocencio",
"Isaac",
"Isaías",
"Isidoro",
"Isidro",
"Ismael",
"Iván",
"Jacinto",
"Jacobo",
"Jafet",
"Jaime",
"Javi",
"Javier",
"Jenaro",
"Jeremías",
"Jerónimo",
"Jesús",
"Joan",
"Joaquín",
"Joel",
"Jonatan",
"Jordi",
"Jordán",
"Jorge",
"Jose",
"Jose Angel",
"Jose Antonio",
"Jose Carlos",
"Jose Francisco",
"Jose Ignacio",
"Jose Luis",
"Jose Manuel",
"Jose Miguel",
"Jose Ramón",
"Josep",
"Josué",
"José",
"José Antonio",
"José Luis",
"José Manuel",
"José Mari",
"José María",
"José Ángel",
"Juan",
"Juan Antonio",
"Juan Bautista",
"Juan Carlos",
"Juan Francisco",
"Juan José",
"Juan Luis",
"Juan Manuel",
"Juan Pablo",
"Juanito",
"Julio",
"Julio César",
"Julián",
"Kike",
"Lalo",
"Leandro",
"Leocadio",
"Leonardo",
"Leoncio",
"Leonel",
"Leopoldo",
"León",
"Lino",
"Lisandro",
"Lope",
"Lorenzo",
"Loreto",
"Lucas",
"Lucho",
"Luciano",
"Lucio",
"Luis",
"Luis Miguel",
"Luis Ángel",
"Lupe",
"Luís",
"Lázaro",
"Macario",
"Manolo",
"Manu",
"Manuel",
"Marc",
"Marcelino",
"Marcelo",
"Marcial",
"Marciano",
"Marcio",
"Marco",
"Marcos",
"Mariano",
"Marino",
"Mario",
"Martin",
"Martín",
"María",
"Mateo",
"Matías",
"Mauricio",
"Maxi",
"Maximiano",
"Maximiliano",
"Maximino",
"Melchor",
"Miguel",
"Miguel Ángel",
"Modesto",
"Mohamed",
"Moisés",
"Moreno",
"Máximo",
"Nacho",
"Nacio",
"Nando",
"Narciso",
"Natalio",
"Natanael",
"Nazaret",
"Nazario",
"Nicanor",
"Nico",
"Nicodemo",
"Nicolás",
"Nilo",
"Norberto",
"Noé",
"Néstor",
"Octavio",
"Olegario",
"Omar",
"Onofre",
"Osvaldo",
"Ovidio",
"Pablo",
"Paco",
"Pancho",
"Pascual",
"Pastor",
"Patricio",
"Paulino",
"Pedro",
"Pelayo",
"Pepe",
"Pepito",
"Plinio",
"Plácido",
"Poncio",
"Porfirio",
"Primitivo",
"Prudencio",
"Pánfilo",
"Pío",
"Quique",
"Quirino",
"Rafa",
"Rafael",
"Raimundo",
"Ramiro",
"Ramón",
"Raúl",
"Reinaldo",
"Remigio",
"Renato",
"René",
"Reyes",
"Reynaldo",
"Ricardo",
"Rico",
"Roberto",
"Rodolfo",
"Rodrigo",
"Rogelio",
"Rolando",
"Roldán",
"Román",
"Roque",
"Rosario",
"Rosendo",
"Ruben",
"Rubén",
"Rufino",
"Ruperto",
"Ruy",
"Régulo",
"Rómulo",
"Sabas",
"Salomón",
"Salvador",
"Samu",
"Samuel",
"Sancho",
"Sandalio",
"Santiago",
"Santos",
"Saturnino",
"Sebastian",
"Sebastián",
"Segismundo",
"Sergio",
"Seve",
"Severiano",
"Severino",
"Severo",
"Sigfrido",
"Silvestre",
"Silvio",
"Simón",
"Sosimo",
"Tadeo",
"Telmo",
"Teo",
"Teobaldo",
"Teodoro",
"Teodosio",
"Teófilo",
"Tiburcio",
"Timoteo",
"Tito",
"Tomás",
"Toni",
"Toribio",
"Toño",
"Trinidad",
"Tristán",
"Ulises",
"Urbano",
"Valentín",
"Valerio",
"Valero",
"Vasco",
"Venceslás",
"Vicente",
"Victor",
"Victor Manuel",
"Victoriano",
"Victorino",
"Vidal",
"Vinicio",
"Virgilio",
"Vito",
"Víctor",
"Wilfredo",
"Wálter",
"Xavier",
"Yago",
"Zacarías",
"Álvaro",
"Ángel",
"Édgar",
"Íñigo",
"Óscar",
)
# 477 female first names, alphabetically.
# Source: Álvaro Mondéjar Rubio <mondejar1994@gmail.com>
first_names_female: Tuple[str, ...] = (
"Abigaíl",
"Abril",
"Adela",
"Adelaida",
"Adelia",
"Adelina",
"Adora",
"Adoración",
"Adriana",
"Agustina",
"Ainara",
"Ainoa",
"Aitana",
"Alba",
"Albina",
"Ale",
"Alejandra",
"Alexandra",
"Alicia",
"Alma",
"Almudena",
"Alondra",
"Amada",
"Amalia",
"Amanda",
"Amarilis",
"Amaya",
"Amelia",
"Amor",
"Amparo",
"América",
"Ana",
"Ana Belén",
"Ana Sofía",
"Anabel",
"Anastasia",
"Andrea",
"Angelina",
"Angelita",
"Angélica",
"Ani",
"Anita",
"Anna",
"Anselma",
"Antonia",
"Anunciación",
"Apolonia",
"Araceli",
"Arcelia",
"Ariadna",
"Ariel",
"Armida",
"Aroa",
"Aránzazu",
"Ascensión",
"Asunción",
"Aura",
"Aurelia",
"Aurora",
"Azahar",
"Azahara",
"Azeneth",
"Azucena",
"Beatriz",
"Begoña",
"Belen",
"Belén",
"Benigna",
"Benita",
"Bernarda",
"Bernardita",
"Berta",
"Bibiana",
"Bienvenida",
"Blanca",
"Brunilda",
"Brígida",
"Bárbara",
"Calista",
"Calixta",
"Camila",
"Candela",
"Candelaria",
"Candelas",
"Caridad",
"Carina",
"Carla",
"Carlota",
"Carmela",
"Carmelita",
"Carmen",
"Carmina",
"Carolina",
"Casandra",
"Catalina",
"Cayetana",
"Cecilia",
"Celestina",
"Celia",
"Charo",
"Chelo",
"Chita",
"Chus",
"Cintia",
"Clara",
"Clarisa",
"Claudia",
"Clementina",
"Cloe",
"Clotilde",
"Concepción",
"Concha",
"Constanza",
"Consuela",
"Consuelo",
"Coral",
"Corona",
"Crescencia",
"Cristina",
"Cruz",
"Custodia",
"Cándida",
"Dafne",
"Dalila",
"Daniela",
"Delfina",
"Delia",
"Diana",
"Dionisia",
"Dolores",
"Dominga",
"Domitila",
"Dora",
"Dorita",
"Dorotea",
"Dulce",
"Débora",
"Edelmira",
"Elba",
"Elena",
"Eli",
"Eliana",
"Eligia",
"Elisa",
"Elisabet",
"Elodia",
"Eloísa",
"Elvira",
"Ema",
"Emelina",
"Emilia",
"Emiliana",
"Emma",
"Emperatriz",
"Encarna",
"Encarnacion",
"Encarnación",
"Encarnita",
"Esmeralda",
"Esperanza",
"Estefanía",
"Estela",
"Ester",
"Esther",
"Estrella",
"Etelvina",
"Eufemia",
"Eugenia",
"Eulalia",
"Eusebia",
"Eva",
"Eva María",
"Evangelina",
"Evelia",
"Evita",
"Fabiana",
"Fabiola",
"Fanny",
"Febe",
"Felicia",
"Feliciana",
"Felicidad",
"Felipa",
"Felisa",
"Fernanda",
"Fidela",
"Filomena",
"Flavia",
"Flor",
"Flora",
"Florencia",
"Florentina",
"Florina",
"Florinda",
"Fortunata",
"Francisca",
"Fátima",
"Gabriela",
"Gala",
"Gema",
"Genoveva",
"Georgina",
"Gertrudis",
"Gisela",
"Gloria",
"Gracia",
"Graciana",
"Graciela",
"Griselda",
"Guadalupe",
"Guiomar",
"Haydée",
"Herminia",
"Hilda",
"Hortensia",
"Ignacia",
"Ileana",
"Imelda",
"Inmaculada",
"Inés",
"Irene",
"Iris",
"Irma",
"Isa",
"Isabel",
"Isabela",
"Isaura",
"Isidora",
"Itziar",
"Jacinta",
"Javiera",
"Jennifer",
"Jenny",
"Jessica",
"Jesusa",
"Jimena",
"Joaquina",
"Jordana",
"Josefa",
"Josefina",
"José",
"Jovita",
"Juana",
"Juanita",
"Judith",
"Julia",
"Juliana",
"Julie",
"Julieta",
"Lara",
"Laura",
"Leandra",
"Leire",
"Leocadia",
"Leonor",
"Leticia",
"Leyre",
"Lidia",
"Ligia",
"Lilia",
"Liliana",
"Lina",
"Loida",
"Lola",
"Lorena",
"Lorenza",
"Loreto",
"Lourdes",
"Luciana",
"Lucila",
"Lucía",
"Luisa",
"Luisina",
"Luna",
"Lupe",
"Lupita",
"Luz",
"Macarena",
"Macaria",
"Magdalena",
"Maite",
"Malena",
"Mamen",
"Manola",
"Manu",
"Manuela",
"Manuelita",
"Mar",
"Marcela",
"Marcia",
"Margarita",
"Mariana",
"Marianela",
"Maribel",
"Maricela",
"Maricruz",
"Marina",
"Marisa",
"Marisela",
"Marisol",
"Maristela",
"Marita",
"Marta",
"Martina",
"Martirio",
"María",
"María Belén",
"María Carmen",
"María Cristina",
"María Del Carmen",
"María Dolores",
"María Fernanda",
"María Jesús",
"María José",
"María Luisa",
"María Manuela",
"María Pilar",
"María Teresa",
"María Ángeles",
"Matilde",
"Maura",
"Maxi",
"Mayte",
"Melania",
"Melisa",
"Mercedes",
"Merche",
"Micaela",
"Miguela",
"Milagros",
"Mireia",
"Miriam",
"Mirta",
"Modesta",
"Montserrat",
"Morena",
"Máxima",
"Mónica",
"Nadia",
"Narcisa",
"Natalia",
"Natividad",
"Nayara",
"Nazaret",
"Nerea",
"Nereida",
"Nicolasa",
"Nidia",
"Nieves",
"Nilda",
"Noa",
"Noelia",
"Noemí",
"Nuria",
"Nydia",
"Nélida",
"Obdulia",
"Octavia",
"Odalis",
"Odalys",
"Ofelia",
"Olalla",
"Olga",
"Olimpia",
"Olivia",
"Oriana",
"Otilia",
"Paca",
"Pacífica",
"Palmira",
"Paloma",
"Paola",
"Pascuala",
"Pastora",
"Patricia",
"Paula",
"Paulina",
"Paz",
"Pepita",
"Perla",
"Perlita",
"Petrona",
"Piedad",
"Pilar",
"Pili",
"Primitiva",
"Priscila",
"Prudencia",
"Purificación",
"Pía",
"Rafaela",
"Ramona",
"Raquel",
"Rebeca",
"Regina",
"Reina",
"Remedios",
"Renata",
"Reyes",
"Reyna",
"Ricarda",
"Rita",
"Roberta",
"Rocío",
"Rosa",
"Rosa María",
"Rosalina",
"Rosalinda",
"Rosalva",
"Rosalía",
"Rosario",
"Rosaura",
"Rosenda",
"Roxana",
"Rufina",
"Ruperta",
"Ruth",
"Sabina",
"Salomé",
"Salud",
"Samanta",
"Sandra",
"Sara",
"Sarita",
"Saturnina",
"Selena",
"Serafina",
"Silvia",
"Socorro",
"Sofía",
"Sol",
"Soledad",
"Sonia",
"Soraya",
"Susana",
"Susanita",
"Tamara",
"Tania",
"Tatiana",
"Tecla",
"Teodora",
"Tere",
"Teresa",
"Teresita",
"Teófila",
"Tomasa",
"Trini",
"Trinidad",
"Valentina",
"Valeria",
"Vanesa",
"Vera",
"Verónica",
"Vicenta",
"Victoria",
"Vilma",
"Violeta",
"Virginia",
"Visitación",
"Viviana",
"Ximena",
"Xiomara",
"Yaiza",
"Yolanda",
"Yésica",
"Yéssica",
"Zaida",
"Zaira",
"Zoraida",
"África",
"Ágata",
"Águeda",
"Ámbar",
"Ángela",
"Ángeles",
"Áurea",
"Íngrid",
"Úrsula",
)
first_names = first_names_male + first_names_female
last_names = (
"Abad",
"Abascal",
"Abella",
"Abellán",
"Abril",
"Acedo",
"Acero",
"Acevedo",
"Acosta",
"Acuña",
"Adadia",
"Adán",
"Aguado",
"Agudo",
"Aguilar",
"Aguilera",
"Aguiló",
"Aguirre",
"Agullo",
"Agustí",
"Agustín",
"Alarcón",
"Alba",
"Alberdi",
"Albero",
"Alberola",
"Alberto",
"Alcalde",
"Alcalá",
"Alcaraz",
"Alcolea",
"Alcántara",
"Alcázar",
"Alegre",
"Alegria",
"Alemany",
"Alemán",
"Alfaro",
"Alfonso",
"Aliaga",
"Aller",
"Almagro",
"Almansa",
"Almazán",
"Almeida",
"Alonso",
"Alsina",
"Alvarado",
"Alvarez",
"Amador",
"Amat",
"Amaya",
"Amigó",
"Amo",
"Amor",
"Amores",
"Amorós",
"Anaya",
"Andrade",
"Andres",
"Andreu",
"Andrés",
"Anglada",
"Anguita",
"Angulo",
"Antón",
"Antúnez",
"Aparicio",
"Aragonés",
"Aragón",
"Aramburu",
"Arana",
"Aranda",
"Araujo",
"Arce",
"Arco",
"Arcos",
"Arellano",
"Arenas",
"Arias",
"Ariza",
"Ariño",
"Arjona",
"Armas",
"Armengol",
"Arnaiz",
"Arnal",
"Arnau",
"Aroca",
"Arranz",
"Arregui",
"Arribas",
"Arrieta",
"Arroyo",
"Arteaga",
"Artigas",
"Arévalo",
"Asenjo",
"Asensio",
"Atienza",
"Avilés",
"Ayala",
"Ayllón",
"Ayuso",
"Azcona",
"Aznar",
"Azorin",
"Badía",
"Baena",
"Baeza",
"Balaguer",
"Ballester",
"Ballesteros",
"Baquero",
"Barba",
"Barbero",
"Barberá",
"Barceló",
"Barco",
"Barragán",
"Barral",
"Barranco",
"Barreda",
"Barrena",
"Barrera",
"Barriga",
"Barrio",
"Barrios",
"Barros",
"Barroso",
"Bartolomé",
"Baró",
"Barón",
"Bas",
"Bastida",
"Batalla",
"Batlle",
"Bautista",
"Bauzà",
"Bayo",
"Bayona",
"Bayón",
"Baños",
"Becerra",
"Bejarano",
"Belda",
"Bellido",
"Bello",
"Belmonte",
"Beltran",
"Beltrán",
"Benavent",
"Benavente",
"Benavides",
"Benet",
"Benitez",
"Benito",
"Benítez",
"Berenguer",
"Bermejo",
"Bermudez",
"Bermúdez",
"Bernad",
"Bernal",
"Bernat",
"Berrocal",
"Bertrán",
"Bilbao",
"Blanca",
"Blanch",
"Blanco",
"Blanes",
"Blasco",
"Blazquez",
"Blázquez",
"Boada",
"Boix",
"Bolaños",
"Bonet",
"Bonilla",
"Borja",
"Borrego",
"Borrell",
"Borrás",
"Bosch",
"Botella",
"Bou",
"Bravo",
"Briones",
"Bru",
"Buendía",
"Bueno",
"Burgos",
"Busquets",
"Bustamante",
"Bustos",
"Báez",
"Bárcena",
"Caballero",
"Cabanillas",
"Cabañas",
"Cabello",
"Cabeza",
"Cabezas",
"Cabo",
"Cabrera",
"Cabrero",
"Cadenas",
"Cal",
"Calatayud",
"Calderon",
"Calderón",
"Calleja",
"Calvet",
"Calvo",
"Calzada",
"Camacho",
"Camino",
"Campillo",
"Campo",
"Campos",
"Campoy",
"Camps",
"Canales",
"Canals",
"Canet",
"Cano",
"Cantero",
"Cantón",
"Caparrós",
"Capdevila",
"Carbajo",
"Carballo",
"Carbonell",
"Carbó",
"Cardona",
"Carlos",
"Carmona",
"Carnero",
"Caro",
"Carpio",
"Carranza",
"Carrasco",
"Carrera",
"Carreras",
"Carretero",
"Carreño",
"Carrillo",
"Carrión",
"Carro",
"Carvajal",
"Casado",
"Casal",
"Casals",
"Casanova",
"Casanovas",
"Casares",
"Casas",
"Cases",
"Castañeda",
"Castejón",
"Castell",
"Castellanos",
"Castells",
"Castelló",
"Castilla",
"Castillo",
"Castrillo",
"Castro",
"Catalá",
"Catalán",
"Cazorla",
"Cañas",
"Cañellas",
"Cañete",
"Cañizares",
"Cepeda",
"Cerdá",
"Cerdán",
"Cerezo",
"Cerro",
"Cervantes",
"Cervera",
"Chacón",
"Chamorro",
"Chaparro",
"Chaves",
"Checa",
"Chico",
"Cid",
"Cifuentes",
"Cisneros",
"Clavero",
"Clemente",
"Cobo",
"Cobos",
"Coca",
"Codina",
"Coello",
"Coll",
"Collado",
"Colom",
"Coloma",
"Colomer",
"Comas",
"Company",
"Conde",
"Conesa",
"Contreras",
"Corbacho",
"Cordero",
"Cornejo",
"Corominas",
"Coronado",
"Corral",
"Correa",
"Cortes",
"Cortina",
"Cortés",
"Costa",
"Crespi",
"Crespo",
"Criado",
"Cruz",
"Cuadrado",
"Cuenca",
"Cuervo",
"Cuesta",
"Cueto",
"Cuevas",
"Cuéllar",
"Cáceres",
"Cámara",
"Cánovas",
"Cárdenas",
"Céspedes",
"Córdoba",
"Cózar",
"Dalmau",
"Daza",
"Delgado",
"Diaz",
"Diego",
"Diez",
"Diéguez",
"Domingo",
"Dominguez",
"Doménech",
"Domínguez",
"Donaire",
"Donoso",
"Duarte",
"Dueñas",
"Duque",
"Duran",
"Durán",
"Dávila",
"Díaz",
"Díez",
"Echevarría",
"Echeverría",
"Egea",
"Elorza",
"Elías",
"Enríquez",
"Escalona",
"Escamilla",
"Escobar",
"Escolano",
"Escribano",
"Escrivá",
"Escudero",
"Espada",
"Esparza",
"España",
"Español",
"Espejo",
"Espinosa",
"Esteban",
"Esteve",
"Estevez",
"Estrada",
"Estévez",
"Exposito",
"Expósito",
"Fabra",
"Fabregat",
"Fajardo",
"Falcó",
"Falcón",
"Farré",
"Feijoo",
"Feliu",
"Fernandez",
"Fernández",
"Ferrando",
"Ferrer",
"Ferrera",
"Ferreras",
"Ferrero",
"Ferrán",
"Ferrández",
"Ferrándiz",
"Figueras",
"Figueroa",
"Figuerola",
"Fiol",
"Flor",
"Flores",
"Folch",
"Fonseca",
"Font",
"Fortuny",
"Franch",
"Francisco",
"Franco",
"Frutos",
"Frías",
"Fuente",
"Fuentes",
"Fuertes",
"Fuster",
"Fábregas",
"Gabaldón",
"Galan",
"Galiano",
"Galindo",
"Gallardo",
"Gallart",
"Gallego",
"Gallo",
"Galvez",
"Galván",
"Galán",
"Garay",
"Garcia",
"Garcés",
"García",
"Gargallo",
"Garmendia",
"Garrido",
"Garriga",
"Garzón",
"Gascón",
"Gaya",
"Gelabert",
"Gibert",
"Gil",
"Gilabert",
"Gimenez",
"Gimeno",
"Giménez",
"Giner",
"Giralt",
"Girona",
"Girón",
"Gisbert",
"Godoy",
"Goicoechea",
"Gomez",
"Gomila",
"Gomis",
"Gonzalez",
"Gonzalo",
"González",
"Gordillo",
"Goñi",
"Gracia",
"Granados",
"Grande",
"Gras",
"Grau",
"Gual",
"Guardia",
"Guardiola",
"Guerra",
"Guerrero",
"Guijarro",
"Guillen",
"Guillén",
"Guitart",
"Gutierrez",
"Gutiérrez",
"Guzman",
"Guzmán",
"Gálvez",
"Gámez",
"Gárate",
"Gómez",
"Haro",
"Heras",
"Heredia",
"Hernandez",
"Hernando",
"Hernández",
"Herranz",
"Herrera",
"Herrero",
"Hervia",
"Hervás",
"Hidalgo",
"Hierro",
"Higueras",
"Hoyos",
"Hoz",
"Huerta",
"Huertas",
"Huguet",
"Hurtado",
"Ibarra",
"Ibañez",
"Iborra",
"Ibáñez",
"Iglesia",
"Iglesias",
"Infante",
"Iniesta",
"Iriarte",
"Isern",
"Izaguirre",
"Izquierdo",
"Iñiguez",
"Jara",
"Jaume",
"Jaén",
"Jerez",
"Jimenez",
"Jiménez",
"Jordá",
"Jordán",
"Jove",
"Jover",
"Juan",
"Juliá",
"Julián",
"Jurado",
"Juárez",
"Jáuregui",
"Jódar",
"Lago",
"Laguna",
"Lamas",
"Landa",
"Lara",
"Larrañaga",
"Larrea",
"Lasa",
"Lastra",
"Leal",
"Ledesma",
"Leiva",
"Leon",
"Lerma",
"León",
"Lillo",
"Linares",
"Llabrés",
"Lladó",
"Llamas",
"Llano",
"Llanos",
"Lledó",
"Llobet",
"Llopis",
"Llorens",
"Llorente",
"Lloret",
"Lluch",
"Lobato",
"Lobo",
"Lopez",
"Lorenzo",
"Losa",
"Losada",
"Lozano",
"Lucas",
"Lucena",
"Luján",
"Lumbreras",
"Luna",
"Luque",
"Luz",
"Luís",
"López",
"Machado",
"Macias",
"Macías",
"Madrid",
"Madrigal",
"Maestre",
"Maldonado",
"Malo",
"Mancebo",
"Manjón",
"Manrique",
"Manso",
"Manuel",
"Manzanares",
"Manzano",
"Marco",
"Marcos",
"Marin",
"Mariscal",
"Mariño",
"Marquez",
"Marqués",
"Marti",
"Martin",
"Martinez",
"Martorell",
"Martí",
"Martín",
"Martínez",
"Marí",
"Marín",
"Mas",
"Mascaró",
"Mata",
"Matas",
"Mate",
"Mateo",
"Mateos",
"Mateu",
"Mayo",
"Mayol",
"Mayoral",
"Maza",
"Medina",
"Melero",
"Meléndez",
"Mena",
"Mendez",
"Mendizábal",
"Mendoza",
"Menendez",
"Menéndez",
"Mercader",
"Merino",
"Mesa",
"Miguel",
"Milla",
"Millán",
"Mir",
"Miralles",
"Miranda",
"Miró",
"Moles",
"Molina",
"Moliner",
"Molins",
"Moll",
"Monreal",
"Montalbán",
"Montaña",
"Montenegro",
"Montero",
"Montes",
"Montesinos",
"Montoya",
"Montserrat",
"Mora",
"Moraleda",
"Morales",
"Morante",
"Morata",
"Morcillo",
"Morell",
"Moreno",
"Morera",
"Morillo",
"Morán",
"Mosquera",
"Moya",
"Mulet",
"Mur",
"Murcia",
"Murillo",
"Muro",
"Muñoz",
"Mármol",
"Márquez",
"Méndez",
"Mínguez",
"Múgica",
"Múñiz",
"Nadal",
"Naranjo",
"Narváez",
"Navarrete",
"Navarro",
"Navas",
"Nebot",
"Neira",
"Nevado",
"Nicolau",
"Nicolás",
"Nieto",
"Niño",
"Nogueira",
"Noguera",
"Nogués",
"Noriega",
"Novoa",
"Nuñez",
"Núñez",
"Ocaña",
"Ochoa",
"Ojeda",
"Oliva",
"Olivares",
"Oliver",
"Olivera",
"Oliveras",
"Olivé",
"Oller",
"Olmedo",
"Olmo",
"Ordóñez",
"Orozco",
"Ortega",
"Ortiz",
"Ortuño",
"Osorio",
"Osuna",
"Otero",
"Pablo",
"Pacheco",
"Padilla",
"Pagès",
"Palacio",
"Palacios",
"Palau",
"Pallarès",
"Palma",
"Palmer",
"Palomar",
"Palomares",
"Palomino",
"Palomo",
"Paniagua",
"Pardo",
"Paredes",
"Pareja",
"Parejo",
"Parra",
"Pascual",
"Pastor",
"Patiño",
"Pavón",
"Paz",
"Pazos",
"Pedraza",
"Pedrero",
"Pedro",
"Pedrosa",
"Peinado",
"Peiró",
"Pelayo",
"Pellicer",
"Peláez",
"Pera",
"Peral",
"Perales",
"Peralta",
"Perea",
"Pereira",
"Perelló",
"Perera",
"Perez",
"Peña",
"Peñalver",
"Peñas",
"Pi",
"Pina",
"Pineda",
"Pinedo",
"Pinilla",
"Pino",
"Pinto",
"Pintor",
"Piquer",
"Pizarro",
"Piña",
"Piñeiro",
"Piñol",
"Pla",
"Plana",
"Planas",
"Plaza",
"Pol",
"Polo",
"Pomares",
"Pombo",
"Ponce",
"Pons",
"Pont",
"Porcel",
"Porras",
"Porta",
"Portero",
"Portillo",
"Posada",
"Pou",
"Poza",
"Pozo",
"Pozuelo",
"Prada",
"Prado",
"Prat",
"Prats",
"Priego",
"Prieto",
"Puente",
"Puerta",
"Puga",
"Puig",
"Pujadas",
"Pujol",
"Pulido",
"Páez",
"Pérez",
"Quero",
"Querol",
"Quesada",
"Quevedo",
"Quintana",
"Quintanilla",
"Quintero",
"Quiroga",
"Quirós",
"Ramirez",
"Ramis",
"Ramos",
"Ramírez",
"Ramón",
"Raya",
"Real",
"Rebollo",
"Recio",
"Redondo",
"Reguera",
"Reig",
"Reina",
"Requena",
"Revilla",
"Rey",
"Reyes",
"Riba",
"Ribas",
"Ribera",
"Ribes",
"Ricart",
"Rico",
"Riera",
"Rincón",
"Rios",
"Ripoll",
"Riquelme",
"Rius",
"Rivas",
"Rivera",
"Rivero",
"Robledo",
"Robles",
"Roca",
"Rocamora",
"Rocha",
"Roda",
"Rodrigo",
"Rodriguez",
"Rodríguez",
"Roig",
"Rojas",
"Roldan",
"Roldán",
"Roma",
"Roman",
"Romero",
"Romeu",
"Román",
"Ropero",
"Ros",
"Rosa",
"Rosado",
"Rosales",
"Rosell",
"Roselló",
"Rosselló",
"Roura",
"Rovira",
"Royo",
"Rozas",
"Ruano",
"Rubio",
"Rueda",
"Ruiz",
"Río",
"Ríos",
"Ródenas",
"Saavedra",
"Sabater",
"Sacristán",
"Saez",
"Sainz",
"Sala",
"Salamanca",
"Salas",
"Salazar",
"Salcedo",
"Saldaña",
"Sales",
"Salgado",
"Salinas",
"Salmerón",
"Salom",
"Salvador",
"Salvà",
"Samper",
"Sanabria",
"Sanchez",
"Sancho",
"Sandoval",
"Sanjuan",
"Sanmartín",
"Sanmiguel",
"Sans",
"Santamaria",
"Santamaría",
"Santana",
"Santiago",
"Santos",
"Sanz",
"Sarabia",
"Sarmiento",
"Sastre",
"Saura",
"Sebastián",
"Seco",
"Sedano",
"Segarra",
"Segovia",
"Segura",
"Seguí",
"Serna",
"Serra",
"Serrano",
"Sevilla",
"Sevillano",
"Sierra",
"Silva",
"Simó",
"Sobrino",
"Sola",
"Solana",
"Solano",
"Soler",
"Solera",
"Solsona",
"Solé",
"Solís",
"Somoza",
"Soria",
"Soriano",
"Sosa",
"Sotelo",
"Soto",
"Suarez",
"Sureda",
"Suárez",
"Sáenz",
"Sáez",
"Sánchez",
"Taboada",
"Talavera",
"Tamarit",
"Tamayo",
"Tapia",
"Tejada",
"Tejedor",
"Tejera",
"Tejero",
"Tello",
"Tena",
"Tenorio",
"Terrón",
"Teruel",
"Tirado",
"Toledo",
"Tolosa",
"Tomas",
"Tomás",
"Tomé",
"Tormo",
"Toro",
"Torralba",
"Torre",
"Torrecilla",
"Torrens",
"Torrent",
"Torrents",
"Torres",
"Torrijos",
"Tovar",
"Trillo",
"Trujillo",
"Tudela",
"Tur",
"Téllez",
"Ugarte",
"Ureña",
"Uriarte",
"Uribe",
"Urrutia",
"Uría",
"Valbuena",
"Valcárcel",
"Valderrama",
"Valdés",
"Valencia",
"Valenciano",
"Valentín",
"Valenzuela",
"Valera",
"Valero",
"Vall",
"Valle",
"Vallejo",
"Valls",
"Vallés",
"Valverde",
"Vaquero",
"Vara",
"Varela",
"Vargas",
"Vazquez",
"Vega",
"Velasco",
"Velázquez",
"Vendrell",
"Vera",
"Verdejo",
"Verdugo",
"Verdú",
"Vergara",
"Viana",
"Vicens",
"Vicente",
"Vidal",
"Vigil",
"Vila",
"Vilalta",
"Vilanova",
"Vilaplana",
"Vilar",
"Villa",
"Villalba",
"Villalobos",
"Villalonga",
"Villanueva",
"Villar",
"Villaverde",
"Villegas",
"Villena",
"Vives",
"Vizcaíno",
"Viña",
"Viñas",
"Vázquez",
"Vélez",
"Yuste",
"Yáñez",
"Zabala",
"Zabaleta",
"Zamora",
"Zamorano",
"Zapata",
"Zaragoza",
"Zorrilla",
"Zurita",
"Águila",
"Álamo",
"Álvarez",
"Álvaro",
"Ángel",
"Ávila",
)
prefixes = ("de", "del")
| Provider |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 23737,
"end": 24530
} | class ____(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
_no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"]
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaGroupedExpertsGemm):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
@auto_docstring
| AriaTextPreTrainedModel |
python | doocs__leetcode | solution/0000-0099/0045.Jump Game II/Solution.py | {
"start": 0,
"end": 258
} | class ____:
def jump(self, nums: List[int]) -> int:
ans = mx = last = 0
for i, x in enumerate(nums[:-1]):
mx = max(mx, i + x)
if last == i:
ans += 1
last = mx
return ans
| Solution |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 107015,
"end": 107139
} | class ____(PackageError):
"""Raised when someone tries perform an invalid operation on a package."""
| InvalidPackageOpError |
python | pytorch__pytorch | torch/ao/nn/quantized/dynamic/modules/rnn.py | {
"start": 1621,
"end": 2400
} | class ____(torch.nn.Module):
def __init__(self, param):
super().__init__()
self.param = param
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "param"] = self.param
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.param = state_dict[prefix + "param"]
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
| PackedParameter |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 7454,
"end": 7665
} | class ____(Event):
name: str = "log_metric"
@classmethod
def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None:
return {"synchronous": arguments.get("synchronous")}
| LogMetricEvent |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 10019,
"end": 10675
} | class ____(Exception):
"""
Raised when a Dag cannot be deserialized.
This exception should be raised using exception chaining:
`raise DeserializationError(dag_id) from original_exception`
"""
def __init__(self, dag_id: str | None = None, message: str | None = None):
self.dag_id = dag_id
if message:
# Use custom message if provided
super().__init__(message)
elif dag_id is None:
super().__init__("Missing Dag ID in serialized Dag")
else:
super().__init__(f"An unexpected error occurred while trying to deserialize Dag '{dag_id}'")
| DeserializationError |
python | PyCQA__pylint | doc/data/messages/u/using-final-decorator-in-unsupported-version/bad.py | {
"start": 84,
"end": 197
} | class ____(Animal):
@final # [using-final-decorator-in-unsupported-version]
def lay_egg(self): ...
| Playtypus |
python | ray-project__ray | rllib/models/tf/visionnet.py | {
"start": 450,
"end": 10646
} | class ____(TFModelV2):
"""Generic vision network implemented in ModelV2 API.
An additional post-conv fully connected stack can be added and configured
via the config keys:
`post_fcnet_hiddens`: Dense layer sizes after the Conv2D stack.
`post_fcnet_activation`: Activation function to use for this FC stack.
"""
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: int,
model_config: ModelConfigDict,
name: str,
):
if not model_config.get("conv_filters"):
model_config["conv_filters"] = get_filter_config(obs_space.shape)
super(VisionNetwork, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
activation = get_activation_fn(
self.model_config.get("conv_activation"), framework="tf"
)
filters = self.model_config["conv_filters"]
assert len(filters) > 0, "Must provide at least 1 entry in `conv_filters`!"
# Post FC net config.
post_fcnet_hiddens = model_config.get("post_fcnet_hiddens", [])
post_fcnet_activation = get_activation_fn(
model_config.get("post_fcnet_activation"), framework="tf"
)
no_final_linear = self.model_config.get("no_final_linear")
vf_share_layers = self.model_config.get("vf_share_layers")
input_shape = obs_space.shape
self.data_format = "channels_last"
inputs = tf.keras.layers.Input(shape=input_shape, name="observations")
last_layer = inputs
# Whether the last layer is the output of a Flattened (rather than
# a n x (1,1) Conv2D).
self.last_layer_is_flattened = False
# Build the action layers
for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
last_layer = tf.keras.layers.Conv2D(
out_size,
kernel,
strides=stride
if isinstance(stride, (list, tuple))
else (stride, stride),
activation=activation,
padding="same",
data_format="channels_last",
name="conv{}".format(i),
)(last_layer)
out_size, kernel, stride = filters[-1]
# No final linear: Last layer has activation function and exits with
# num_outputs nodes (this could be a 1x1 conv or a FC layer, depending
# on `post_fcnet_...` settings).
if no_final_linear and num_outputs:
last_layer = tf.keras.layers.Conv2D(
out_size if post_fcnet_hiddens else num_outputs,
kernel,
strides=stride
if isinstance(stride, (list, tuple))
else (stride, stride),
activation=activation,
padding="valid",
data_format="channels_last",
name="conv_out",
)(last_layer)
# Add (optional) post-fc-stack after last Conv2D layer.
layer_sizes = post_fcnet_hiddens[:-1] + (
[num_outputs] if post_fcnet_hiddens else []
)
feature_out = last_layer
for i, out_size in enumerate(layer_sizes):
feature_out = last_layer
last_layer = tf.keras.layers.Dense(
out_size,
name="post_fcnet_{}".format(i),
activation=post_fcnet_activation,
kernel_initializer=normc_initializer(1.0),
)(last_layer)
# Finish network normally (w/o overriding last layer size with
# `num_outputs`), then add another linear one of size `num_outputs`.
else:
last_layer = tf.keras.layers.Conv2D(
out_size,
kernel,
strides=stride
if isinstance(stride, (list, tuple))
else (stride, stride),
activation=activation,
padding="valid",
data_format="channels_last",
name="conv{}".format(len(filters)),
)(last_layer)
# num_outputs defined. Use that to create an exact
# `num_output`-sized (1,1)-Conv2D.
if num_outputs:
if post_fcnet_hiddens:
last_cnn = last_layer = tf.keras.layers.Conv2D(
post_fcnet_hiddens[0],
[1, 1],
activation=post_fcnet_activation,
padding="same",
data_format="channels_last",
name="conv_out",
)(last_layer)
# Add (optional) post-fc-stack after last Conv2D layer.
for i, out_size in enumerate(
post_fcnet_hiddens[1:] + [num_outputs]
):
feature_out = last_layer
last_layer = tf.keras.layers.Dense(
out_size,
name="post_fcnet_{}".format(i + 1),
activation=post_fcnet_activation
if i < len(post_fcnet_hiddens) - 1
else None,
kernel_initializer=normc_initializer(1.0),
)(last_layer)
else:
feature_out = last_layer
last_cnn = last_layer = tf.keras.layers.Conv2D(
num_outputs,
[1, 1],
activation=None,
padding="same",
data_format="channels_last",
name="conv_out",
)(last_layer)
if last_cnn.shape[1] != 1 or last_cnn.shape[2] != 1:
raise ValueError(
"Given `conv_filters` ({}) do not result in a [B, 1, "
"1, {} (`num_outputs`)] shape (but in {})! Please "
"adjust your Conv2D stack such that the dims 1 and 2 "
"are both 1.".format(
self.model_config["conv_filters"],
self.num_outputs,
list(last_cnn.shape),
)
)
# num_outputs not known -> Flatten, then set self.num_outputs
# to the resulting number of nodes.
else:
self.last_layer_is_flattened = True
last_layer = tf.keras.layers.Flatten(data_format="channels_last")(
last_layer
)
# Add (optional) post-fc-stack after last Conv2D layer.
for i, out_size in enumerate(post_fcnet_hiddens):
last_layer = tf.keras.layers.Dense(
out_size,
name="post_fcnet_{}".format(i),
activation=post_fcnet_activation,
kernel_initializer=normc_initializer(1.0),
)(last_layer)
feature_out = last_layer
self.num_outputs = last_layer.shape[1]
logits_out = last_layer
# Build the value layers
if vf_share_layers:
if not self.last_layer_is_flattened:
feature_out = tf.keras.layers.Lambda(
lambda x: tf.squeeze(x, axis=[1, 2])
)(feature_out)
value_out = tf.keras.layers.Dense(
1,
name="value_out",
activation=None,
kernel_initializer=normc_initializer(0.01),
)(feature_out)
else:
# build a parallel set of hidden layers for the value net
last_layer = inputs
for i, (out_size, kernel, stride) in enumerate(filters[:-1], 1):
last_layer = tf.keras.layers.Conv2D(
out_size,
kernel,
strides=stride
if isinstance(stride, (list, tuple))
else (stride, stride),
activation=activation,
padding="same",
data_format="channels_last",
name="conv_value_{}".format(i),
)(last_layer)
out_size, kernel, stride = filters[-1]
last_layer = tf.keras.layers.Conv2D(
out_size,
kernel,
strides=stride
if isinstance(stride, (list, tuple))
else (stride, stride),
activation=activation,
padding="valid",
data_format="channels_last",
name="conv_value_{}".format(len(filters)),
)(last_layer)
last_layer = tf.keras.layers.Conv2D(
1,
[1, 1],
activation=None,
padding="same",
data_format="channels_last",
name="conv_value_out",
)(last_layer)
value_out = tf.keras.layers.Lambda(lambda x: tf.squeeze(x, axis=[1, 2]))(
last_layer
)
self.base_model = tf.keras.Model(inputs, [logits_out, value_out])
def forward(
self,
input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType,
) -> (TensorType, List[TensorType]):
obs = input_dict["obs"]
if self.data_format == "channels_first":
obs = tf.transpose(obs, [0, 2, 3, 1])
# Explicit cast to float32 needed in eager.
model_out, self._value_out = self.base_model(tf.cast(obs, tf.float32))
# Our last layer is already flat.
if self.last_layer_is_flattened:
return model_out, state
# Last layer is a n x [1,1] Conv2D -> Flatten.
else:
return tf.squeeze(model_out, axis=[1, 2]), state
def value_function(self) -> TensorType:
return tf.reshape(self._value_out, [-1])
| VisionNetwork |
python | celery__celery | t/unit/worker/test_strategy.py | {
"start": 11578,
"end": 12408
} | class ____:
def setup_method(self):
self.message = Mock(name='message', headers={"custom": "header"})
self.body = {
'args': (1,),
'kwargs': {'foo': 'baz'},
'utc': False,
'taskset': '123',
}
def test_retries_default_value(self):
_, headers, _, _ = hybrid_to_proto2(self.message, self.body)
assert headers.get('retries') == 0
def test_retries_custom_value(self):
_custom_value = 3
self.body['retries'] = _custom_value
_, headers, _, _ = hybrid_to_proto2(self.message, self.body)
assert headers.get('retries') == _custom_value
def test_custom_headers(self):
_, headers, _, _ = hybrid_to_proto2(self.message, self.body)
assert headers.get("custom") == "header"
| test_hybrid_to_proto2 |
python | encode__django-rest-framework | tests/test_prefetch_related.py | {
"start": 527,
"end": 2607
} | class ____(TestCase):
def setUp(self):
self.user = User.objects.create(username='tom', email='tom@example.com')
self.groups = [Group.objects.create(name='a'), Group.objects.create(name='b')]
self.user.groups.set(self.groups)
def test_prefetch_related_updates(self):
view = UserUpdate.as_view()
pk = self.user.pk
groups_pk = self.groups[0].pk
request = factory.put('/', {'username': 'new', 'groups': [groups_pk]}, format='json')
response = view(request, pk=pk)
assert User.objects.get(pk=pk).groups.count() == 1
expected = {
'id': pk,
'username': 'new',
'groups': [1],
'email': 'tom@example.com'
}
assert response.data == expected
def test_prefetch_related_excluding_instance_from_original_queryset(self):
"""
Regression test for https://github.com/encode/django-rest-framework/issues/4661
"""
view = UserUpdate.as_view()
pk = self.user.pk
groups_pk = self.groups[0].pk
request = factory.put('/', {'username': 'exclude', 'groups': [groups_pk]}, format='json')
response = view(request, pk=pk)
assert User.objects.get(pk=pk).groups.count() == 1
expected = {
'id': pk,
'username': 'exclude',
'groups': [1],
'email': 'tom@example.com'
}
assert response.data == expected
def test_can_update_without_queryset_on_class_view(self):
class UserUpdateWithoutQuerySet(generics.UpdateAPIView):
serializer_class = UserSerializer
def get_object(self):
return User.objects.get(pk=self.kwargs['pk'])
request = factory.patch('/', {'username': 'new'})
response = UserUpdateWithoutQuerySet.as_view()(request, pk=self.user.pk)
assert response.data['id'] == self.user.id
assert response.data['username'] == 'new'
self.user.refresh_from_db()
assert self.user.username == 'new'
| TestPrefetchRelatedUpdates |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.