language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | readthedocs__readthedocs.org | readthedocs/search/faceted_search.py | {
"start": 702,
"end": 7779
} | class ____(FacetedSearch):
"""Custom wrapper around FacetedSearch."""
# Search for both 'and' and 'or' operators.
# The score of and should be higher as it satisfies both or and and.
operators = ["and", "or"]
# Sources to be excluded from results.
excludes = []
_highlight_options = {
"encoder": "html",
"number_of_fragments": 1,
"pre_tags": ["<span>"],
"post_tags": ["</span>"],
}
def __init__(
self,
query=None,
filters=None,
projects=None,
aggregate_results=True,
use_advanced_query=True,
**kwargs,
):
"""
Custom wrapper around FacetedSearch.
:param string query: Query to search for
:param dict filters: Filters to be used with the query.
:param projects: A dictionary of project slugs mapped to a `VersionData` object.
Or a list of project slugs.
Results are filter with these values.
:param use_advanced_query: If `True` forces to always use
`SimpleQueryString` for the text query object.
:param bool aggregate_results: If results should be aggregated,
this is returning the number of results within other facets.
:param bool use_advanced_query: Always use SimpleQueryString.
Set this to `False` to use the experimental fuzzy search.
"""
self.use_advanced_query = use_advanced_query
self.aggregate_results = aggregate_results
self.projects = projects or {}
# Hack a fix to our broken connection pooling
# This creates a new connection on every request,
# but actually works :)
log.debug("Hacking Elastic to fix search connection pooling")
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL["default"])
filters = filters or {}
# We may receive invalid filters
valid_filters = {k: v for k, v in filters.items() if k in self.facets}
super().__init__(query=query, filters=valid_filters, **kwargs)
def _get_queries(self, *, query, fields):
"""
Get a list of query objects according to the query.
If the query is a single term we try to match partial words and substrings
(available only with the DEFAULT_TO_FUZZY_SEARCH feature flag),
otherwise we use the SimpleQueryString query.
"""
get_queries_function = (
self._get_single_term_queries if self._is_single_term(query) else self._get_text_queries
)
return get_queries_function(
query=query,
fields=fields,
)
def _get_text_queries(self, *, query, fields):
"""
Returns a list of query objects according to the query.
SimpleQueryString provides a syntax to let advanced users manipulate
the results explicitly.
We need to search for both "and" and "or" operators.
The score of "and" should be higher as it satisfies both "or" and "and".
For valid options, see:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html # noqa
"""
queries = []
is_advanced_query = self.use_advanced_query or self._is_advanced_query(query)
for operator in self.operators:
if is_advanced_query:
# See all valid options at:
# https://www.elastic.co/docs/reference/query-languages/query-dsl/query-dsl-simple-query-string-query.
query_string = SimpleQueryString(
query=query,
fields=fields,
default_operator=operator,
# Restrict fuzziness to avoid timeouts with complex queries.
fuzzy_prefix_length=1,
fuzzy_max_expansions=15,
)
else:
query_string = self._get_fuzzy_query(
query=query,
fields=fields,
operator=operator,
)
queries.append(query_string)
return queries
def _get_single_term_queries(self, query, fields):
"""
Returns a list of query objects for fuzzy and partial results.
We need to search for both "and" and "or" operators.
The score of "and" should be higher as it satisfies both "or" and "and".
We use the Wildcard query with the query suffixed by ``*`` to match substrings.
For valid options, see:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html # noqa
.. note::
Doing a prefix **and** suffix search is slow on big indexes like ours.
"""
query_string = self._get_fuzzy_query(
query=query,
fields=fields,
)
queries = [query_string]
for field in fields:
# Remove boosting from the field,
field = re.sub(r"\^.*$", "", field)
kwargs = {
field: {"value": f"{query}*"},
}
queries.append(Wildcard(**kwargs))
return queries
def _get_fuzzy_query(self, *, query, fields, operator="or"):
"""
Returns a query object used for fuzzy results.
For valid options, see:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
"""
return MultiMatch(
query=query,
fields=fields,
operator=operator,
fuzziness="AUTO:4,6",
prefix_length=1,
)
def _is_single_term(self, query):
"""
Check if the query is a single term.
A query is a single term if it is a single word,
if it doesn't contain the syntax from a simple query string,
and if `self.use_advanced_query` is False.
"""
is_single_term = (
not self.use_advanced_query
and query
and len(query.split()) <= 1
and not self._is_advanced_query(query)
)
return is_single_term
def _is_advanced_query(self, query):
"""
Check if query looks like to be using the syntax from a simple query string.
.. note::
We don't check if the syntax is valid.
The tokens used aren't very common in a normal query, so checking if
the query contains any of them should be enough to determinate if
it's an advanced query.
Simple query syntax:
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html#simple-query-string-syntax
"""
tokens = {"+", "|", "-", '"', "*", "(", ")", "~"}
query_tokens = set(query)
return not tokens.isdisjoint(query_tokens)
def aggregate(self, search):
"""Overridden to decide if we should aggregate or not."""
if self.aggregate_results:
super().aggregate(search)
| RTDFacetedSearch |
python | sqlalchemy__sqlalchemy | test/sql/test_selectable.py | {
"start": 65264,
"end": 69094
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""test anonymous_fromclause for aliases.
In 1.4 this function is only for ORM internal use. The public version
join.alias() is deprecated.
"""
__dialect__ = "default"
def test_flat_ok_on_non_join(self):
a = table("a", column("a"))
s = a.select()
self.assert_compile(
s.alias(flat=True).select(),
"SELECT anon_1.a FROM (SELECT a.a AS a FROM a) AS anon_1",
)
def test_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_standalone_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(),
"SELECT a.a AS a_a, b.b AS b_b FROM a JOIN b ON a.a = b.b",
)
def test_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_join_standalone_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b",
)
def test_join_alias_name_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
self.assert_compile(
a.join(b, a.c.a == b.c.b)._anonymous_fromclause(
name="foo", flat=True
),
"a AS foo_a JOIN b AS foo_b ON foo_a.a = foo_b.b",
)
def test_composed_join_alias_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
# note in 1.4 the flat=True flag now descends into the whole join,
# as it should
self.assert_compile(
j1.join(j2, b.c.b == c.c.c)._anonymous_fromclause(flat=True),
"a AS a_1 JOIN b AS b_1 ON a_1.a = b_1.b JOIN "
"(c AS c_1 JOIN d AS d_1 ON c_1.c = d_1.d) "
"ON b_1.b = c_1.c",
)
def test_composed_join_alias_name_flat(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
j1.join(j2, b.c.b == c.c.c)._anonymous_fromclause(
name="foo", flat=True
),
"a AS foo_a JOIN b AS foo_b ON foo_a.a = foo_b.b JOIN "
"(c AS foo_c JOIN d AS foo_d ON foo_c.c = foo_d.d) "
"ON foo_b.b = foo_c.c",
)
def test_composed_join_alias(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
d = table("d", column("d"))
j1 = a.join(b, a.c.a == b.c.b)
j2 = c.join(d, c.c.c == d.c.d)
self.assert_compile(
select(j1.join(j2, b.c.b == c.c.c)._anonymous_fromclause()),
"SELECT anon_1.a_a, anon_1.b_b, anon_1.c_c, anon_1.d_d "
"FROM (SELECT a.a AS a_a, b.b AS b_b, c.c AS c_c, d.d AS d_d "
"FROM a JOIN b ON a.a = b.b "
"JOIN (c JOIN d ON c.c = d.d) ON b.b = c.c) AS anon_1",
)
| JoinAnonymizingTest |
python | django__django | tests/admin_views/tests.py | {
"start": 4889,
"end": 9715
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 1",
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
title="Article 2",
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.color1 = Color.objects.create(value="Red", warm=True)
cls.color2 = Color.objects.create(value="Orange", warm=True)
cls.color3 = Color.objects.create(value="Blue", warm=False)
cls.color4 = Color.objects.create(value="Green", warm=False)
cls.fab1 = Fabric.objects.create(surface="x")
cls.fab2 = Fabric.objects.create(surface="y")
cls.fab3 = Fabric.objects.create(surface="plain")
cls.b1 = Book.objects.create(name="Book 1")
cls.b2 = Book.objects.create(name="Book 2")
cls.pro1 = Promo.objects.create(name="Promo 1", book=cls.b1)
cls.pro1 = Promo.objects.create(name="Promo 2", book=cls.b2)
cls.chap1 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b1
)
cls.chap3 = Chapter.objects.create(
title="Chapter 1", content="[ insert contents here ]", book=cls.b2
)
cls.chap4 = Chapter.objects.create(
title="Chapter 2", content="[ insert contents here ]", book=cls.b2
)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra="ChapterXtra1 1")
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra="ChapterXtra1 2")
Actor.objects.create(name="Palin", age=27)
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
# there is no title in database, give one here or formset will
# fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
def setUp(self):
self.client.force_login(self.superuser)
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(
response.content.index(text1.encode()),
response.content.index(text2.encode()),
(failing_msg or "") + "\nResponse:\n" + response.text,
)
| AdminViewBasicTestCase |
python | pandas-dev__pandas | asv_bench/benchmarks/categoricals.py | {
"start": 5349,
"end": 6506
} | class ____:
def setup(self):
N = 10**5
ncats = 15
self.s_str = pd.Series(np.random.randint(0, ncats, size=N).astype(str))
self.s_str_cat = pd.Series(self.s_str, dtype="category")
with warnings.catch_warnings(record=True):
str_cat_type = pd.CategoricalDtype(set(self.s_str), ordered=True)
self.s_str_cat_ordered = self.s_str.astype(str_cat_type)
self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
self.s_int_cat = pd.Series(self.s_int, dtype="category")
with warnings.catch_warnings(record=True):
int_cat_type = pd.CategoricalDtype(set(self.s_int), ordered=True)
self.s_int_cat_ordered = self.s_int.astype(int_cat_type)
def time_rank_string(self):
self.s_str.rank()
def time_rank_string_cat(self):
self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
self.s_str_cat_ordered.rank()
def time_rank_int(self):
self.s_int.rank()
def time_rank_int_cat(self):
self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
| Rank |
python | huggingface__transformers | src/transformers/models/convnextv2/modeling_convnextv2.py | {
"start": 4693,
"end": 5722
} | class ____(nn.Module):
"""This class is comparable to (and inspired by) the SwinEmbeddings class
found in src/transformers/models/swin/modeling_swin.py.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = nn.Conv2d(
config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
)
self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
self.num_channels = config.num_channels
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.patch_embeddings(pixel_values)
embeddings = self.layernorm(embeddings)
return embeddings
| ConvNextV2Embeddings |
python | keras-team__keras | keras/src/quantizers/gptq_test.py | {
"start": 3202,
"end": 20002
} | class ____(testing.TestCase):
def test_initialization_with_dense_layer(self):
mock_layer = _get_test_layer("Dense", kernel_shape=(64, 128))
gptq_instance = GPTQ(mock_layer)
self.assertEqual(gptq_instance.rows, 64)
self.assertEqual(gptq_instance.columns, 128)
self.assertEqual(gptq_instance.hessian.shape, (64, 64))
def test_initialization_with_einsumdense_3d(self):
mock_layer = _get_test_layer("EinsumDense", kernel_shape=(64, 4, 32))
gptq_instance = GPTQ(mock_layer)
self.assertEqual(gptq_instance.rows, 64)
self.assertEqual(gptq_instance.columns, 4 * 32)
self.assertEqual(gptq_instance.hessian.shape, (64, 64))
def test_update_hessian(self):
dense = _get_test_layer("Dense", kernel_shape=(16, 32))
dense_gptq = GPTQ(dense)
rng = np.random.default_rng(seed=42)
batch1 = rng.standard_normal(size=(8, 16)).astype("float32")
dense_gptq.update_hessian_with_batch(batch1)
self.assertEqual(dense_gptq.num_samples, 8)
H1 = dense_gptq.hessian
batch2 = rng.standard_normal(size=(4, 16)).astype("float32")
dense_gptq.update_hessian_with_batch(batch2)
self.assertEqual(dense_gptq.num_samples, 12)
H2 = dense_gptq.hessian
self.assertNotAllClose(H1, H2)
def test_gptq_on_single_layer(self):
rng = np.random.default_rng(seed=42)
dense = _get_test_layer("Dense", kernel_shape=(16, 32))
config = GPTQConfig(
dataset=None,
tokenizer=None,
weight_bits=4,
symmetric=False,
group_size=-1,
)
dense.quantize("gptq", config=config)
dense_gptq = GPTQ(
dense,
config,
)
calibration_data = rng.standard_normal(size=(128, 16)).astype("float32")
dense_gptq.update_hessian_with_batch(calibration_data)
dense_gptq.quantize_and_correct_layer()
self.assertEqual(backend.standardize_dtype(dense.kernel.dtype), "uint8")
dense_gptq.free()
self.assertIsNone(getattr(dense_gptq, "hessian", None))
self.assertIsNone(getattr(dense_gptq, "layer", None))
def test_unsupported_layer_error(self):
unsupported_layer = _get_test_layer("Unsupported", kernel_shape=None)
with self.assertRaisesRegex(TypeError, "Unsupported layer type"):
GPTQ(unsupported_layer)
def test_update_hessian_invalid_input(self):
rng = np.random.default_rng(seed=42)
dense = _get_test_layer("Dense", kernel_shape=(16, 32))
gptq_instance = GPTQ(dense)
with self.assertRaisesRegex(ValueError, "cannot be None"):
gptq_instance.update_hessian_with_batch(None)
with self.assertRaisesRegex(ValueError, "cannot be empty"):
gptq_instance.update_hessian_with_batch(np.empty((0, 16)))
with self.assertRaisesRegex(ValueError, "match input features"):
bad_input = rng.standard_normal(size=(8, 99))
gptq_instance.update_hessian_with_batch(bad_input)
def test_streaming_equals_big_batch(self):
"""Tests that streaming updates match big batch updates."""
# dummy inputs
x = ops.array(np.random.randn(100, 7), "float32")
# One-shot hessian update
layer_1 = layers.Dense(5, use_bias=False)
layer_1.build(input_shape=(None, 7))
g1 = GPTQ(layer_1)
g1.update_hessian_with_batch(x)
# Streamed hessian update
layer_2 = layers.Dense(5, use_bias=False)
layer_2.build(input_shape=(None, 7))
g2 = GPTQ(layer_2)
g2.update_hessian_with_batch(x[:50])
g2.update_hessian_with_batch(x[50:])
# Both the one-shot and streamed hessian updates should match
self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
def test_hessian_matches_closed_form(self):
"""Tests that the Hessian matches the closed-form solution."""
x = ops.array(np.random.randn(128, 7), "float32")
layer = layers.Dense(5, use_bias=False)
layer.build((None, 7))
g = GPTQ(layer)
g.update_hessian_with_batch(x)
expected = ops.multiply(
ops.divide(2.0, x.shape[0]), ops.matmul(ops.transpose(x), x)
)
self.assertAllClose(g.hessian, expected, rtol=1e-6, atol=1e-6)
def test_higher_rank_inputs_are_reshaped(self):
"""Tests that higher-rank inputs are reshaped correctly."""
# x: [batch, time, feat]
x = ops.array(np.random.randn(10, 4, 7), "float32")
x_flat = ops.reshape(x, (-1, ops.shape(x)[-1]))
layer1 = layers.Dense(5, use_bias=False)
layer1.build((None, 7))
g1 = GPTQ(layer1)
g1.update_hessian_with_batch(x)
layer2 = layers.Dense(5, use_bias=False)
layer2.build((None, 7))
g2 = GPTQ(layer2)
g2.update_hessian_with_batch(x_flat)
self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
def test_raises_on_feature_mismatch(self):
x = ops.array(np.random.randn(8, 7), "float32")
layer = layers.Dense(5, use_bias=False)
layer.build((None, 6)) # wrong in_features
g = GPTQ(layer)
with self.assertRaisesRegex(ValueError, "do not match input features"):
g.update_hessian_with_batch(x)
with self.assertRaisesRegex(ValueError, "cannot be None"):
g.update_hessian_with_batch(None)
with self.assertRaisesRegex(ValueError, "cannot be empty"):
g.update_hessian_with_batch(
ops.array(np.empty((0, 7), dtype="float32"))
)
def test_num_samples_accumulates_correctly(self):
"""Tests that the number of samples is accumulated correctly when
streaming updates are used."""
x = ops.array(np.random.randn(64, 7), "float32")
layer = layers.Dense(5, use_bias=False)
layer.build((None, 7))
g = GPTQ(layer)
g.update_hessian_with_batch(x[:5])
g.update_hessian_with_batch(x[5:30])
g.update_hessian_with_batch(x[30:])
self.assertEqual(g.num_samples, 64)
def test_numeric_stability_large_values(self):
"""Tests numeric stability of hessian update with large input values."""
x = ops.multiply(ops.array(np.random.randn(32, 7), "float32"), 1e6)
layer = layers.Dense(5, use_bias=False)
layer.build((None, 7))
g = GPTQ(layer)
g.update_hessian_with_batch(x)
# Should be finite and symmetric
self.assertTrue(ops.all(ops.isfinite(g.hessian)))
self.assertTrue(ops.all(ops.equal(g.hessian, ops.transpose(g.hessian))))
def test_einsumdense_2d_kernel_hessian_shape(self):
x = layers.Input((7,))
y = layers.EinsumDense("ab,bc->ac", output_shape=(5,))(x)
model = keras.Model(x, y)
einsum_dense_layer = next(
l for l in model.layers if isinstance(l, layers.EinsumDense)
)
g = GPTQ(einsum_dense_layer)
# should infer rows==7
self.assertEqual(ops.shape(g.hessian), (7, 7))
def test_einsumdense_3d_kernel_streaming_equals_big_batch(self):
"""Tests that streaming updates to the Hessian are equivalent to a big
batch update."""
# Construct a tiny attention-like einsum with 3D kernel
x = layers.Input((7,))
qkv = layers.EinsumDense("bf,fhk->bhk", output_shape=(2, 3))(
x
) # heads=2, head_dim=3
model = keras.Model(x, qkv)
einsum_dense_layer = next(
l for l in model.layers if isinstance(l, layers.EinsumDense)
)
x = ops.array(np.random.randn(50, 7), "float32")
g1 = GPTQ(einsum_dense_layer)
g1.update_hessian_with_batch(x)
g2 = GPTQ(einsum_dense_layer)
g2.update_hessian_with_batch(x[:20])
g2.update_hessian_with_batch(x[20:])
self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)
def test_identity_inv_hessian_matches_direct_quantization(self):
"""Tests that the matrix quantization without error correction
matches the direct implementation."""
in_features, out_features = 16, 8
weights = ops.reshape(
ops.linspace(
-0.9, 1.1, in_features * out_features, dtype="float32"
),
(in_features, out_features),
)
weights_transpose = ops.transpose(weights)
# inverse_hessian = identity; no cross-feature correction
# (since all off-diagonal elements are zero), which means
# there is no interaction between different features
inverse_hessian = ops.eye(in_features, dtype="float32")
quantized_weights, scale_map, zero_map, g_idx = gptq_quantize_matrix(
weights_transpose,
inverse_hessian,
blocksize=128,
group_size=1, # per-column quantization
activation_order=False,
compute_scale_zero=_compute_scale_zero,
)
dequantized_weights = dequantize_with_sz_map(
quantized_weights, scale_map, zero_map, g_idx
)
# Compare function output with columnwise direct application
# of quantization.
out = ops.zeros_like(weights_transpose)
for j in range(ops.shape(weights_transpose)[1]):
column = weights_transpose[:, j : j + 1]
scale, zero, maxq = _compute_scale_zero(column)
quantized_col = quantize_with_zero_point(column, scale, zero, maxq)
dequantized = dequantize_with_zero_point(quantized_col, scale, zero)
out = ops.slice_update(
out, (0, j), ops.expand_dims(dequantized[:, 0], 1)
)
self.assertAllClose(dequantized_weights, out, atol=1e-6)
def test_activation_order_produces_equivalent_weights(self):
"""
Tests that quantizing with `activation_order=True` yields the same
final weights as `activation_order=False`, because the internal
permutation should be undone.
"""
# Set up shared inputs and a non-trivial permutation.
in_features, out_features = 8, 6
initial_weights = ops.array(
np.random.randn(in_features, out_features), "float32"
)
# Generate a Hessian that creates a non-trivial permutation.
hessian_diag = ops.random.shuffle(
ops.linspace(10.0, 1.0, in_features, dtype="float32")
)
hessian_matrix = ops.diag(hessian_diag)
# Sanity check: ensure the permutation is not the identity.
perm = _stable_permutation(hessian_diag)
self.assertFalse(ops.all(ops.equal(perm, ops.arange(in_features))))
def create_and_quantize(use_activation_order):
layer = layers.Dense(out_features, use_bias=False)
layer.build((None, in_features))
layer.set_weights([ops.copy(initial_weights)])
config = GPTQConfig(
dataset=None,
tokenizer=None,
group_size=-1,
activation_order=use_activation_order,
)
layer.quantize("gptq", config=config)
quantizer = GPTQ(layer, config)
quantizer.hessian = hessian_matrix
quantizer.quantize_and_correct_layer()
return layer
# Quantize two layers, one with and one without activation ordering.
ordered_layer = create_and_quantize(use_activation_order=True)
unordered_layer = create_and_quantize(use_activation_order=False)
self.assertAllClose(
ordered_layer.get_weights()[0],
unordered_layer.get_weights()[0],
msg="Weights should be identical as the permutation is undone.",
)
def _compute_scale_zero(x, **_):
# Per-column asymmetric int4 example
# scale = (max-min)/maxq, zero = round(-min/scale)
maxq = 15.0
xmin = ops.min(x, axis=0, keepdims=True)
xmax = ops.max(x, axis=0, keepdims=True)
scale = ops.divide(ops.subtract(xmax, xmin), ops.add(maxq, 1e-8))
zero = ops.round(ops.divide(ops.negative(xmin), ops.add(scale, 1e-8)))
return scale, zero, maxq
def _get_sequence_classifier():
"""Transformer-based sequence classifier
tokens -> Embedding -> Transformer -> GAP -> Dense(num_classes).
"""
embed_dim = 32
num_heads = 4
ff_dim = 32
class SimpleTransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, **kwargs):
super().__init__(**kwargs)
self.att = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim // num_heads
)
self.ffn = models.Sequential(
[
layers.Dense(ff_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs):
attention_output = self.att(inputs, inputs)
out1 = self.layernorm1(inputs + attention_output)
ffn_output = self.ffn(out1)
return self.layernorm2(out1 + ffn_output)
inputs = layers.Input(shape=(SEQ_LEN,), dtype="int32")
x = layers.Embedding(VOCAB_SIZE, embed_dim)(inputs)
x = SimpleTransformerBlock(embed_dim, num_heads, ff_dim)(x)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(NUM_CLASSES)(x)
return models.Model(inputs, outputs)
def _get_simple_model():
return models.Sequential([layers.Dense(10, input_shape=(5,))])
def _mean_kl(p, q):
# Add small epsilon for numerical stability
eps = 1e-8
p = ops.clip(p, eps, 1.0)
q = ops.clip(q, eps, 1.0)
# Compute KL divergence
# D_KL(P || Q) = sum(P * log(P / Q))
return ops.mean(
ops.sum(ops.multiply(p, ops.subtract(ops.log(p), ops.log(q))), axis=-1)
)
def _top1_match_rate(a_logits, b_logits):
"""Calculates the top-1 match rate between two sets of logits.
Formula: T = 1/N * sum(1{argmax(a_i) == argmax(b_i)})
"""
return ops.mean(
ops.equal(ops.argmax(a_logits, axis=-1), ops.argmax(b_logits, axis=-1))
)
DATASETS = {
"string_dataset": lambda: _string_dataset(
CALIBRATION_TEXT, NUM_SAMPLES, SEQ_LEN
),
"token_dataset": lambda: _token_dataset(NUM_SAMPLES, SEQ_LEN),
}
CONFIGS = {
"default": {},
"per_channel": {"group_size": -1, "per_channel": True},
"act_order": {"activation_order": True},
"symmetric": {"symmetric": True},
"group_wise": {"group_size": 8},
"group_wise_act_order": {"group_size": 8, "activation_order": True},
"symmetric_act_order": {"symmetric": True, "activation_order": True},
"symmetric_per_channel": {"symmetric": True, "per_channel": True},
"group_wise_symmetric_8bit": {
"group_size": 8,
"symmetric": True,
"weight_bits": 8,
},
}
def _pad_or_trim_1d(ids, length):
"""Pads or trims a 1D array to a specified length."""
ids = ops.ravel(ops.array(ids, "int64"))
if len(ids) < length:
ids = ops.concatenate(
[ids, ops.zeros(length - len(ids), dtype=ids.dtype)]
)
else:
ids = ids[:length]
return ids
def _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN):
"""Tokenizes strings to char-IDs or passes through int arrays;
outputs shape (1, seq_len)."""
def _tok(x):
if isinstance(x, str):
ids = ops.convert_to_tensor(
np.fromiter((ord(c) % vocab_size for c in x), dtype=np.int64)
)
else:
ids = np.asarray(x, dtype=np.int64)
ids = _pad_or_trim_1d(ids, seq_len)
return ids[None, :]
_tok.tokenize = _tok
return _tok
def _string_dataset(
long_text, num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN
):
"""Yields string slices"""
rng = np.random.default_rng(seed=0)
L = max(1, len(long_text) - sequence_length)
for _ in range(num_samples):
start = rng.integers(0, L) if L > 1 else 0
yield long_text[start : start + sequence_length]
def _token_dataset(
num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN, vocab_size=VOCAB_SIZE
):
"""Yields tokenized samples."""
rng = np.random.default_rng(seed=0)
for _ in range(num_samples):
yield rng.integers(
low=0, high=vocab_size, size=(1, sequence_length), dtype=np.int64
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="torch gives low accuracy on CI, but works well locally",
)
| GPTQTest |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 18870,
"end": 20386
} | class ____:
"""Test th_TH company provider methods"""
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in ThThCompanyProvider.company_suffixes
def test_company_limited_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_limited_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.company_limited_prefixes
def test_company_limited_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_limited_suffix()
assert isinstance(suffix, str)
assert suffix in ThThCompanyProvider.company_limited_suffixes
def test_nonprofit_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.nonprofit_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.nonprofit_prefixes
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
| TestThTh |
python | django__django | tests/delete/models.py | {
"start": 560,
"end": 630
} | class ____(models.Model):
t = models.ForeignKey(T, models.CASCADE)
| U |
python | pytorch__pytorch | torch/ao/quantization/pt2e/_affine_quantization.py | {
"start": 33819,
"end": 35231
} | class ____(AffineQuantizedObserverBase):
def __init__(
self,
mapping_type: MappingType,
target_dtype: torch.dtype,
granularity: Granularity,
quant_min: int | None = None,
quant_max: int | None = None,
eps: float | None = None,
is_dynamic=False,
scale_dtype: torch.dtype | None = None,
zero_point_dtype: torch.dtype | None = None,
preserve_zero: bool = True,
zero_point_domain: ZeroPointDomain | None = ZeroPointDomain.INT,
# there could be some extra args that's ignored
**kwargs,
):
self.is_dynamic = is_dynamic
super().__init__(
mapping_type=mapping_type,
target_dtype=target_dtype,
granularity=granularity,
quant_min=quant_min,
quant_max=quant_max,
eps=eps,
scale_dtype=scale_dtype,
zero_point_dtype=zero_point_dtype,
preserve_zero=preserve_zero,
zero_point_domain=zero_point_domain,
)
def forward(self, input):
self.block_size = get_block_size(input.shape, self.granularity)
self.original_dtype = input.dtype
return input
def calculate_qparams(self):
raise Exception( # noqa: TRY002
"calculate_qparams should not be called for PlaceholderObserver"
)
| AffineQuantizedPlaceholderObserver |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 3713,
"end": 4924
} | class ____(wtforms.Form):
__params__ = ["name", "public_email"]
name = wtforms.StringField(
validators=[
wtforms.validators.Length(
max=100,
message=_(
"The name is too long. "
"Choose a name with 100 characters or less."
),
)
]
)
public_email = wtforms.SelectField(choices=[("", "Not displayed")])
def __init__(self, *args, user_service, user_id, **kwargs):
super().__init__(*args, **kwargs)
self.user_service = user_service
self.user_id = user_id
user = user_service.get_user(user_id)
self.public_email.choices.extend(
[(e.email, e.email) for e in user.emails if e.verified]
)
def validate_public_email(self, field):
if field.data:
user = self.user_service.get_user(self.user_id)
verified_emails = [e.email for e in user.emails if e.verified]
if field.data not in verified_emails:
raise wtforms.validators.ValidationError(
f"{field.data} is not a verified email for {user.username}"
)
| SaveAccountForm |
python | pennersr__django-allauth | allauth/socialaccount/providers/saml/views.py | {
"start": 7147,
"end": 7359
} | class ____(SAMLViewMixin, BaseLoginView):
def get_provider(self):
app = self.get_app(self.kwargs["organization_slug"])
return app.get_provider(self.request)
login = LoginView.as_view()
| LoginView |
python | numba__numba | numba/tests/test_refop_pruning.py | {
"start": 4629,
"end": 5950
} | class ____(TestCase):
def setUp(self):
warnings.simplefilter('error', NumbaInvalidConfigWarning)
def tearDown(self):
warnings.resetwarnings()
def test_warn_invalid_flags(self):
with set_refprune_flags('abc,per_bb,cde'):
with self.assertWarns(NumbaInvalidConfigWarning) as cm:
optval = _parse_refprune_flags()
self.assertEqual(len(cm.warnings), 2)
self.assertIn('abc', str(cm.warnings[0].message))
self.assertIn('cde', str(cm.warnings[1].message))
self.assertEqual(optval, llvm.RefPruneSubpasses.PER_BB)
def test_valid_flag(self):
with set_refprune_flags('per_bb, diamond, fanout,fanout_raise'):
optval = _parse_refprune_flags()
self.assertEqual(optval, llvm.RefPruneSubpasses.ALL)
def test_the_all_flag(self):
with set_refprune_flags('all'):
optval = _parse_refprune_flags()
self.assertEqual(optval, llvm.RefPruneSubpasses.ALL)
def test_some_flags(self):
with set_refprune_flags('per_bb, fanout'):
optval = _parse_refprune_flags()
enumcls = llvm.RefPruneSubpasses
self.assertEqual(optval, enumcls.PER_BB | enumcls.FANOUT)
if __name__ == "__main__":
unittest.main()
| TestRefPruneFlags |
python | PyCQA__pylint | tests/functional/s/super/super_with_arguments.py | {
"start": 365,
"end": 872
} | class ____(Foo):
def __init__(self):
super(InvalidSuperCall.__class__, self).__init__()
def method_accepting_cls(cls, self):
# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls`, and `self` to `super()` is what's required.
super(cls, self).__init__()
| InvalidSuperCall |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6459,
"end": 6542
} | class ____(ResampleReduction):
how = "nunique"
fill_value = 0
| ResampleNUnique |
python | Textualize__textual | src/textual/_context.py | {
"start": 299,
"end": 1028
} | class ____(RuntimeError):
"""Runtime error raised if we try to retrieve the active app when there is none."""
active_app: ContextVar["App[Any]"] = ContextVar("active_app")
active_message_pump: ContextVar["MessagePump"] = ContextVar("active_message_pump")
prevent_message_types_stack: ContextVar[list[set[type[Message]]]] = ContextVar(
"prevent_message_types_stack"
)
visible_screen_stack: ContextVar[list[Screen[object]]] = ContextVar(
"visible_screen_stack"
)
"""A stack of visible screens (with background alpha < 1), used in the screen render process."""
message_hook: ContextVar[Callable[[Message], None]] = ContextVar("message_hook")
"""A callable that accepts a message. Used by App.run_test."""
| NoActiveAppError |
python | pydantic__pydantic | pydantic-core/tests/test_config.py | {
"start": 1070,
"end": 5158
} | class ____:
# this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
def test_on_model_class():
v = SchemaValidator(
cs.model_schema(
cls=MyModel,
config=CoreConfig(str_max_length=5),
schema=cs.model_fields_schema(fields={'f': cs.model_field(schema=cs.str_schema())}),
)
)
assert 'max_length:Some(5)' in plain_repr(v)
assert v.isinstance_python({'f': 'test'}) is True
assert v.isinstance_python({'f': 'test long'}) is False
def test_field_priority_model():
v = SchemaValidator(
cs.model_schema(
cls=MyModel,
config=CoreConfig(str_max_length=10),
schema=cs.model_fields_schema(fields={'f': cs.model_field(schema=cs.str_schema(max_length=5))}),
)
)
assert 'max_length:Some(5)' in plain_repr(v)
assert v.isinstance_python({'f': 'test'}) is True
assert v.isinstance_python({'f': 'test long'}) is False
@pytest.mark.parametrize(
'config,float_field_schema,input_value,expected',
[
(
CoreConfig(),
cs.float_schema(),
{'x': 'nan'},
IsInstance(MyModel) & HasAttributes(x=FunctionCheck(math.isnan)),
),
(
CoreConfig(allow_inf_nan=True),
cs.float_schema(),
{'x': 'nan'},
IsInstance(MyModel) & HasAttributes(x=FunctionCheck(math.isnan)),
),
(
CoreConfig(allow_inf_nan=False),
cs.float_schema(),
{'x': 'nan'},
Err('Input should be a finite number [type=finite_number,'),
),
# field `allow_inf_nan` (if set) should have priority over global config
(
CoreConfig(allow_inf_nan=True),
cs.float_schema(allow_inf_nan=False),
{'x': 'nan'},
Err('Input should be a finite number [type=finite_number,'),
),
(
CoreConfig(allow_inf_nan=False),
cs.float_schema(allow_inf_nan=True),
{'x': 'nan'},
IsInstance(MyModel) & HasAttributes(x=FunctionCheck(math.isnan)),
),
],
ids=repr,
)
def test_allow_inf_nan(config: CoreConfig, float_field_schema, input_value, expected):
v = SchemaValidator(
cs.model_schema(
cls=MyModel,
schema=cs.model_fields_schema(fields={'x': cs.model_field(schema=float_field_schema)}),
config=config,
)
)
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=re.escape(expected.message)):
v.validate_python(input_value)
else:
output_dict = v.validate_python(input_value)
assert output_dict == expected
@pytest.mark.parametrize(
'config,input_str',
(
(CoreConfig(), 'type=string_type, input_value=123, input_type=int'),
(CoreConfig(hide_input_in_errors=False), 'type=string_type, input_value=123, input_type=int'),
(CoreConfig(hide_input_in_errors=True), 'type=string_type'),
),
)
def test_hide_input_in_errors(config, input_str):
v = SchemaValidator(
cs.model_schema(
cls=MyModel, schema=cs.model_fields_schema(fields={'f': cs.model_field(schema=cs.str_schema())})
),
config=config,
)
with pytest.raises(ValidationError, match=re.escape(f'Input should be a valid string [{input_str}]')):
assert v.validate_python({'f': 123})
def test_cache_strings():
v = SchemaValidator(cs.str_schema())
assert 'cache_strings=True' in plain_repr(v)
v = SchemaValidator(cs.str_schema(), config=CoreConfig(cache_strings=True))
assert 'cache_strings=True' in plain_repr(v)
v = SchemaValidator(cs.str_schema(), config=CoreConfig(cache_strings=False))
assert 'cache_strings=False' in plain_repr(v)
v = SchemaValidator(cs.str_schema(), config=CoreConfig(cache_strings='keys'))
assert "cache_strings='keys'" in plain_repr(v)
| MyModel |
python | scrapy__scrapy | tests/test_command_crawl.py | {
"start": 854,
"end": 1383
} | class ____(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('It works!')
return
yield
"""
log = self.get_log(spider_code, proj_path)
assert "[myspider] DEBUG: It works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
assert "Spider closed (finished)" in log
def test_output(self, proj_path: Path) -> None:
spider_code = """
import scrapy
| MySpider |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 83970,
"end": 84085
} | class ____(BaseModel):
type: Literal[
"mmap",
] = Field(..., description="")
| PayloadStorageTypeOneOf2 |
python | spack__spack | lib/spack/spack/database.py | {
"start": 8390,
"end": 8589
} | class ____:
def __getattr__(self, name):
raise ForbiddenLockError(f"Cannot access attribute '{name}' of lock")
def __reduce__(self):
return ForbiddenLock, tuple()
| ForbiddenLock |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/progress.py | {
"start": 36519,
"end": 60376
} | class ____(JupyterMixin):
"""Renders an auto-updating progress bar(s).
Args:
console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
refresh_per_second (Optional[float], optional): Number of times per second to refresh the progress information or None to use default (10). Defaults to None.
speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
transient: (bool, optional): Clear the progress on exit. Defaults to False.
redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
disable (bool, optional): Disable progress display. Defaults to False
expand (bool, optional): Expand tasks table to fit width. Defaults to False.
"""
def __init__(
self,
*columns: Union[str, ProgressColumn],
console: Optional[Console] = None,
auto_refresh: bool = True,
refresh_per_second: float = 10,
speed_estimate_period: float = 30.0,
transient: bool = False,
redirect_stdout: bool = True,
redirect_stderr: bool = True,
get_time: Optional[GetTimeCallable] = None,
disable: bool = False,
expand: bool = False,
) -> None:
assert refresh_per_second > 0, "refresh_per_second must be > 0"
self._lock = RLock()
self.columns = columns or self.get_default_columns()
self.speed_estimate_period = speed_estimate_period
self.disable = disable
self.expand = expand
self._tasks: Dict[TaskID, Task] = {}
self._task_index: TaskID = TaskID(0)
self.live = Live(
console=console or get_console(),
auto_refresh=auto_refresh,
refresh_per_second=refresh_per_second,
transient=transient,
redirect_stdout=redirect_stdout,
redirect_stderr=redirect_stderr,
get_renderable=self.get_renderable,
)
self.get_time = get_time or self.console.get_time
self.print = self.console.print
self.log = self.console.log
@classmethod
def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
"""Get the default columns used for a new Progress instance:
- a text column for the description (TextColumn)
- the bar itself (BarColumn)
- a text column showing completion percentage (TextColumn)
- an estimated-time-remaining column (TimeRemainingColumn)
If the Progress instance is created without passing a columns argument,
the default columns defined here will be used.
You can also create a Progress instance using custom columns before
and/or after the defaults, as in this example:
progress = Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
"Elapsed:",
TimeElapsedColumn(),
)
This code shows the creation of a Progress display, containing
a spinner to the left, the default columns, and a labeled elapsed
time column.
"""
return (
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TaskProgressColumn(),
TimeRemainingColumn(),
)
@property
def console(self) -> Console:
return self.live.console
@property
def tasks(self) -> List[Task]:
"""Get a list of Task instances."""
with self._lock:
return list(self._tasks.values())
@property
def task_ids(self) -> List[TaskID]:
"""A list of task IDs."""
with self._lock:
return list(self._tasks.keys())
@property
def finished(self) -> bool:
"""Check if all tasks have been completed."""
with self._lock:
if not self._tasks:
return True
return all(task.finished for task in self._tasks.values())
def start(self) -> None:
"""Start the progress display."""
if not self.disable:
self.live.start(refresh=True)
def stop(self) -> None:
"""Stop the progress display."""
self.live.stop()
if not self.console.is_interactive and not self.console.is_jupyter:
self.console.print()
def __enter__(self) -> Self:
self.start()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
def track(
self,
sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
total: Optional[float] = None,
completed: int = 0,
task_id: Optional[TaskID] = None,
description: str = "Working...",
update_period: float = 0.1,
) -> Iterable[ProgressType]:
"""Track progress by iterating over a sequence.
Args:
sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress.
total: (float, optional): Total number of steps. Default is len(sequence).
completed (int, optional): Number of steps completed so far. Defaults to 0.
task_id: (TaskID): Task to track. Default is new task.
description: (str, optional): Description of task, if new task is created.
update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
Returns:
Iterable[ProgressType]: An iterable of values taken from the provided sequence.
"""
if total is None:
total = float(length_hint(sequence)) or None
if task_id is None:
task_id = self.add_task(description, total=total, completed=completed)
else:
self.update(task_id, total=total, completed=completed)
if self.live.auto_refresh:
with _TrackThread(self, task_id, update_period) as track_thread:
for value in sequence:
yield value
track_thread.completed += 1
else:
advance = self.advance
refresh = self.refresh
for value in sequence:
yield value
advance(task_id, 1)
refresh()
def wrap_file(
self,
file: BinaryIO,
total: Optional[int] = None,
*,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
"""Track progress file reading from a binary file.
Args:
file (BinaryIO): A file-like object opened in binary mode.
total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When no total value can be extracted from the arguments or the task.
"""
# attempt to recover the total from the task
total_bytes: Optional[float] = None
if total is not None:
total_bytes = total
elif task_id is not None:
with self._lock:
total_bytes = self._tasks[task_id].total
if total_bytes is None:
raise ValueError(
f"unable to get the total number of bytes, please specify 'total'"
)
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total_bytes)
else:
self.update(task_id, total=total_bytes)
return _Reader(file, self, task_id, close_handle=False)
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Literal["rb"],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> BinaryIO:
pass
@typing.overload
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["r"], Literal["rt"]],
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> TextIO:
pass
def open(
self,
file: Union[str, "PathLike[str]", bytes],
mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
*,
total: Optional[int] = None,
task_id: Optional[TaskID] = None,
description: str = "Reading...",
) -> Union[BinaryIO, TextIO]:
"""Track progress while reading from a binary file.
Args:
path (Union[str, PathLike[str]]): The path to the file to read.
mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
buffering (int): The buffering strategy to use, see :func:`io.open`.
encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When an invalid mode is given.
"""
# normalize the mode (always rb, rt)
_mode = "".join(sorted(mode, reverse=False))
if _mode not in ("br", "rt", "r"):
raise ValueError(f"invalid mode {mode!r}")
# patch buffering to provide the same behaviour as the builtin `open`
line_buffering = buffering == 1
if _mode == "br" and buffering == 1:
warnings.warn(
"line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
RuntimeWarning,
)
buffering = -1
elif _mode in ("rt", "r"):
if buffering == 0:
raise ValueError("can't have unbuffered text I/O")
elif buffering == 1:
buffering = -1
# attempt to get the total with `os.stat`
if total is None:
total = stat(file).st_size
# update total of task or create new task
if task_id is None:
task_id = self.add_task(description, total=total)
else:
self.update(task_id, total=total)
# open the file in binary mode,
handle = io.open(file, "rb", buffering=buffering)
reader = _Reader(handle, self, task_id, close_handle=True)
# wrap the reader in a `TextIOWrapper` if text mode
if mode in ("r", "rt"):
return io.TextIOWrapper(
reader,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
)
return reader
def start_task(self, task_id: TaskID) -> None:
"""Start a task.
Starts a task (used when calculating elapsed time). You may need to call this manually,
if you called ``add_task`` with ``start=False``.
Args:
task_id (TaskID): ID of task.
"""
with self._lock:
task = self._tasks[task_id]
if task.start_time is None:
task.start_time = self.get_time()
def stop_task(self, task_id: TaskID) -> None:
"""Stop a task.
This will freeze the elapsed time on the task.
Args:
task_id (TaskID): ID of task.
"""
with self._lock:
task = self._tasks[task_id]
current_time = self.get_time()
if task.start_time is None:
task.start_time = current_time
task.stop_time = current_time
def update(
self,
task_id: TaskID,
*,
total: Optional[float] = None,
completed: Optional[float] = None,
advance: Optional[float] = None,
description: Optional[str] = None,
visible: Optional[bool] = None,
refresh: bool = False,
**fields: Any,
) -> None:
"""Update information associated with a task.
Args:
task_id (TaskID): Task id (returned by add_task).
total (float, optional): Updates task.total if not None.
completed (float, optional): Updates task.completed if not None.
advance (float, optional): Add a value to task.completed if not None.
description (str, optional): Change task description if not None.
visible (bool, optional): Set visible flag if not None.
refresh (bool): Force a refresh of progress information. Default is False.
**fields (Any): Additional data fields required for rendering.
"""
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
if total is not None and total != task.total:
task.total = total
task._reset()
if advance is not None:
task.completed += advance
if completed is not None:
task.completed = completed
if description is not None:
task.description = description
if visible is not None:
task.visible = visible
task.fields.update(fields)
update_completed = task.completed - completed_start
current_time = self.get_time()
old_sample_time = current_time - self.speed_estimate_period
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
if update_completed > 0:
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
if refresh:
self.refresh()
def reset(
self,
task_id: TaskID,
*,
start: bool = True,
total: Optional[float] = None,
completed: int = 0,
visible: Optional[bool] = None,
description: Optional[str] = None,
**fields: Any,
) -> None:
"""Reset a task so completed is 0 and the clock is reset.
Args:
task_id (TaskID): ID of task.
start (bool, optional): Start the task after reset. Defaults to True.
total (float, optional): New total steps in task, or None to use current total. Defaults to None.
completed (int, optional): Number of steps completed. Defaults to 0.
visible (bool, optional): Enable display of the task. Defaults to True.
description (str, optional): Change task description if not None. Defaults to None.
**fields (str): Additional data fields required for rendering.
"""
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
task._reset()
task.start_time = current_time if start else None
if total is not None:
task.total = total
task.completed = completed
if visible is not None:
task.visible = visible
if fields:
task.fields = fields
if description is not None:
task.description = description
task.finished_time = None
self.refresh()
def advance(self, task_id: TaskID, advance: float = 1) -> None:
"""Advance task by a number of steps.
Args:
task_id (TaskID): ID of task.
advance (float): Number of steps to advance. Default is 1.
"""
current_time = self.get_time()
with self._lock:
task = self._tasks[task_id]
completed_start = task.completed
task.completed += advance
update_completed = task.completed - completed_start
old_sample_time = current_time - self.speed_estimate_period
_progress = task._progress
popleft = _progress.popleft
while _progress and _progress[0].timestamp < old_sample_time:
popleft()
while len(_progress) > 1000:
popleft()
_progress.append(ProgressSample(current_time, update_completed))
if (
task.total is not None
and task.completed >= task.total
and task.finished_time is None
):
task.finished_time = task.elapsed
task.finished_speed = task.speed
def refresh(self) -> None:
"""Refresh (render) the progress information."""
if not self.disable and self.live.is_started:
self.live.refresh()
def get_renderable(self) -> RenderableType:
"""Get a renderable for the progress display."""
renderable = Group(*self.get_renderables())
return renderable
def get_renderables(self) -> Iterable[RenderableType]:
"""Get a number of renderables for the progress display."""
table = self.make_tasks_table(self.tasks)
yield table
def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
"""Get a table to render the Progress display.
Args:
tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
Returns:
Table: A table instance.
"""
table_columns = (
(
Column(no_wrap=True)
if isinstance(_column, str)
else _column.get_table_column().copy()
)
for _column in self.columns
)
table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
for task in tasks:
if task.visible:
table.add_row(
*(
(
column.format(task=task)
if isinstance(column, str)
else column(task)
)
for column in self.columns
)
)
return table
def __rich__(self) -> RenderableType:
"""Makes the Progress class itself renderable."""
with self._lock:
return self.get_renderable()
def add_task(
self,
description: str,
start: bool = True,
total: Optional[float] = 100.0,
completed: int = 0,
visible: bool = True,
**fields: Any,
) -> TaskID:
"""Add a new 'task' to the Progress display.
Args:
description (str): A description of the task.
start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
you will need to call `start` manually. Defaults to True.
total (float, optional): Number of total steps in the progress if known.
Set to None to render a pulsing animation. Defaults to 100.
completed (int, optional): Number of steps completed so far. Defaults to 0.
visible (bool, optional): Enable display of the task. Defaults to True.
**fields (str): Additional data fields required for rendering.
Returns:
TaskID: An ID you can use when calling `update`.
"""
with self._lock:
task = Task(
self._task_index,
description,
total,
completed,
visible=visible,
fields=fields,
_get_time=self.get_time,
_lock=self._lock,
)
self._tasks[self._task_index] = task
if start:
self.start_task(self._task_index)
new_task_index = self._task_index
self._task_index = TaskID(int(self._task_index) + 1)
self.refresh()
return new_task_index
def remove_task(self, task_id: TaskID) -> None:
"""Delete a task if it exists.
Args:
task_id (TaskID): A task ID.
"""
with self._lock:
del self._tasks[task_id]
if __name__ == "__main__": # pragma: no coverage
import random
import time
from .panel import Panel
from .rule import Rule
from .syntax import Syntax
from .table import Table
syntax = Syntax(
'''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value''',
"python",
line_numbers=True,
)
table = Table("foo", "bar", "baz")
table.add_row("1", "2", "3")
progress_renderables = [
"Text may be printed while the progress bars are rendering.",
Panel("In fact, [i]any[/i] renderable will work"),
"Such as [magenta]tables[/]...",
table,
"Pretty printed structures...",
{"type": "example", "text": "Pretty printed"},
"Syntax...",
syntax,
Rule("Give it a try!"),
]
from itertools import cycle
examples = cycle(progress_renderables)
console = Console(record=True)
with Progress(
SpinnerColumn(),
*Progress.get_default_columns(),
TimeElapsedColumn(),
console=console,
transient=False,
) as progress:
task1 = progress.add_task("[red]Downloading", total=1000)
task2 = progress.add_task("[green]Processing", total=1000)
task3 = progress.add_task("[yellow]Thinking", total=None)
while not progress.finished:
progress.update(task1, advance=0.5)
progress.update(task2, advance=0.3)
time.sleep(0.01)
if random.randint(0, 100) < 1:
progress.log(next(examples))
| Progress |
python | openai__openai-python | src/openai/types/responses/response_reasoning_summary_part_added_event.py | {
"start": 223,
"end": 400
} | class ____(BaseModel):
text: str
"""The text of the summary part."""
type: Literal["summary_text"]
"""The type of the summary part. Always `summary_text`."""
| Part |
python | optuna__optuna | optuna/progress_bar.py | {
"start": 403,
"end": 729
} | class ____(logging.StreamHandler):
def emit(self, record: Any) -> None:
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
| _TqdmLoggingHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-commcare/source_commcare/source.py | {
"start": 7778,
"end": 10408
} | class ____(IncrementalStream):
"""
docs: https://www.commcarehq.org/a/[domain]/api/[version]/form/
"""
cursor_field = "indexed_on"
primary_key = "id"
def __init__(self, start_date, app_id, name, xmlns, schema, **kwargs):
super().__init__(**kwargs)
self.app_id = app_id
self._cursor_value = datetime.strptime(start_date, "%Y-%m-%dT%H:%M:%SZ")
self.streamname = name
self.xmlns = xmlns
self.schema = schema
@property
def name(self):
return self.streamname
def get_json_schema(self):
return self.schema
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "form"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
# if self.cursor_field in self.state else self.initial_date
ix = self.state[self.cursor_field]
params = {
"format": "json",
"app_id": self.app_id,
"indexed_on_start": ix.strftime(self.dateformat),
"order_by": "indexed_on",
"limit": "1000",
"xmlns": self.xmlns,
}
if next_page_token:
params.update(next_page_token)
return params
def read_records(self, *args, **kwargs) -> Iterable[Mapping[str, Any]]:
upd = {"streamname": self.streamname, "xmlns": self.xmlns}
for record in super().read_records(*args, **kwargs):
self._cursor_value = datetime.strptime(record[self.cursor_field], self.dateformat)
CommcareStream.forms.add(record["id"])
form = record["form"]
form.update(upd)
# Append Z to make it timezone aware
form.update({"id": record["id"], "indexed_on": record["indexed_on"] + "Z"})
newform = self.scrubUnwantedFields(form)
yield flatten(newform)
if self._cursor_value.microsecond == 0:
# Airbyte converts the cursor_field value (datetime) to string when it saves the state and
# our state setter parses the saved state with a format that contains microseconds
# self._cursor_value must have non-zero microseconds for the formatting and parsing to work correctly.
# This issue would also occur if an incoming record had a timestamp with zero microseconds
self._cursor_value = self._cursor_value.replace(microsecond=10)
# Source
| Form |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 6179,
"end": 7540
} | class ____(HandlerBase):
"""
A legend handler that shows *numpoints* points in the legend entry.
"""
def __init__(self, marker_pad=0.3, numpoints=None, **kwargs):
"""
Parameters
----------
marker_pad : float
Padding between points in legend entry.
numpoints : int
Number of points to show in legend entry.
**kwargs
Keyword arguments forwarded to `.HandlerBase`.
"""
super().__init__(**kwargs)
self._numpoints = numpoints
self._marker_pad = marker_pad
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.numpoints
else:
return self._numpoints
def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
numpoints = self.get_numpoints(legend)
if numpoints > 1:
# we put some pad here to compensate the size of the marker
pad = self._marker_pad * fontsize
xdata = np.linspace(-xdescent + pad,
-xdescent + width - pad,
numpoints)
xdata_marker = xdata
else:
xdata = [-xdescent, -xdescent + width]
xdata_marker = [-xdescent + 0.5 * width]
return xdata, xdata_marker
| HandlerNpoints |
python | numba__numba | numba/cuda/tests/cudapy/test_vectorize_decor.py | {
"start": 1548,
"end": 1842
} | class ____(BaseVectorizeNopythonArg, CUDATestCase):
def test_target_cuda_nopython(self):
warnings = ["nopython kwarg for cuda target is redundant"]
self._test_target_nopython('cuda', warnings)
@skip_on_cudasim('ufunc API unsupported in the simulator')
| TestVectorizeNopythonArg |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_add_column_with_notnull_db_default_app/migrations/0002_add_field_notnull_db_default.py | {
"start": 153,
"end": 475
} | class ____(CheckedMigration):
dependencies = [
("good_flow_add_column_with_notnull_db_default_app", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="testtable",
name="field",
field=models.IntegerField(db_default=0),
),
]
| Migration |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 54782,
"end": 56753
} | class ____(BaseModel):
"""Configuration for sparse vector index."""
model_config = {"arbitrary_types_allowed": True, "extra": "forbid"}
# TODO(Sanket): Change this to the appropriate sparse ef and use a default here.
embedding_function: Optional[Any] = None
source_key: Optional[
str
] = None # key to source the sparse vector from (accepts str or Key)
bm25: Optional[bool] = None
@field_validator("source_key", mode="before")
@classmethod
def validate_source_key_field(cls, v: Any) -> Optional[str]:
"""Convert Key objects to strings automatically. Accepts both str and Key types."""
if v is None:
return None
# Import Key at runtime to avoid circular import
from chromadb.execution.expression.operator import Key as KeyType
if isinstance(v, KeyType):
v = v.name # Extract string from Key
elif isinstance(v, str):
pass # Already a string
else:
raise ValueError(f"source_key must be str or Key, got {type(v).__name__}")
# Validate: only #document is allowed if key starts with #
if v.startswith("#") and v != "#document":
raise ValueError(
"source_key cannot begin with '#'. "
"The only valid key starting with '#' is Key.DOCUMENT or '#document'."
)
return v # type: ignore[no-any-return]
@field_validator("embedding_function", mode="before")
@classmethod
def validate_embedding_function_field(cls, v: Any) -> Any:
# Validate sparse vector function for sparse vector index
if v is None:
return v
if callable(v):
# Use the sparse vector function validation
validate_sparse_embedding_function(v)
return v
raise ValueError(
"embedding_function must be a callable SparseEmbeddingFunction or None"
)
| SparseVectorIndexConfig |
python | google__pytype | pytype/tests/test_import1.py | {
"start": 378,
"end": 47780
} | class ____(test_base.BaseTest):
"""Tests for import."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.builder = imports_map_loader.ImportsMapBuilder(FakeOptions())
def build_imports_map(self, path):
return self.builder.build_from_file(path)
def test_basic_import(self):
ty = self.Infer("""
import sys
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
""",
)
def test_basic_import2(self):
ty = self.Infer(
"""
import bad_import # doesn't exist
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
bad_import = ... # type: Any
""",
)
def test_from_import_smoke(self):
self.assertNoCrash(
self.Check,
"""
from sys import exit
from path.to.module import bar, baz
""",
)
def test_long_from(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/my_module.pyi"),
"def foo() -> str: ...",
)
ty = self.Infer(
"""
from path.to import my_module
def foo():
return my_module.foo()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from path.to import my_module
def foo() -> str: ...
""",
)
def test_star_import_smoke(self):
self.Check("""
from sys import *
""")
def test_star_import_unknown_smoke(self):
self.assertNoCrash(
self.Check,
"""
from unknown_module import *
""",
)
def test_star_import(self):
with test_utils.Tempdir() as d:
d.create_file(
"my_module.pyi",
"""
def f() -> str: ...
class A:
pass
a = ... # type: A
""",
)
ty = self.Infer(
"""
from my_module import *
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
A = ... # type: Type[my_module.A]
a = ... # type: my_module.A
def f() -> str: ...
""",
)
def test_star_import_any(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", DEFAULT_PYI)
ty = self.Infer(
"""
from a import *
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def __getattr__(name) -> Any: ...
""",
)
def test_star_import_in_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
class X: ...
""",
)
d.create_file(
"b.pyi",
"""
from a import *
class Y(X): ...
""",
)
ty = self.Infer(
"""
from b import *
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
import b
from typing import Type
X = ... # type: Type[a.X]
Y = ... # type: Type[b.Y]
""",
)
def test_bad_star_import(self):
ty, _ = self.InferWithErrors("""
from nonsense import * # import-error
from other_nonsense import * # import-error
x = foo.bar()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def __getattr__(name) -> Any: ...
x = ... # type: Any
""",
)
def test_path_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/my_module.pyi"),
"def qqsv() -> str: ...",
)
d.create_file(file_utils.replace_separator("path/to/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("path/__init__.pyi"), "")
ty = self.Infer(
"""
import path.to.my_module
def foo():
return path.to.my_module.qqsv()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import path
def foo() -> str: ...
""",
)
def test_path_import2(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/my_module.pyi"),
"def qqsv() -> str: ...",
)
d.create_file(file_utils.replace_separator("path/to/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("path/__init__.pyi"), "")
ty = self.Infer(
"""
import nonexistent_path.to.my_module # doesn't exist
def foo():
return path.to.my_module.qqsv()
""",
report_errors=False,
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
nonexistent_path = ... # type: Any
def foo() -> Any: ...
""",
)
def test_import_all(self):
self.assertNoCrash(
self.Check,
"""
from module import *
from path.to.module import *
""",
)
def test_assign_member(self):
self.Check("""
import sys
sys.path = []
""")
def test_return_module(self):
ty = self.Infer("""
import sys
def f():
return sys
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
def f() -> module: ...
""",
)
def test_match_module(self):
ty = self.Infer("""
import sys
def f():
if getattr(sys, "foobar"):
return list({sys: sys}.keys())[0]
else:
return sys
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
def f() -> module: ...
""",
)
def test_sys(self):
ty = self.Infer("""
import sys
def f():
return sys.path
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
import sys
def f() -> List[str]: ...
""",
)
def test_from_sys_import(self):
ty = self.Infer("""
from sys import path
def f():
return path
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
path = ... # type: List[str]
def f() -> List[str]: ...
""",
)
def test_stdlib(self):
ty = self.Infer("""
import datetime
def f():
return datetime.timedelta().total_seconds()
""")
self.assertTypesMatchPytd(
ty,
"""
import datetime
def f() -> float: ...
""",
)
def test_import_pytd(self):
with test_utils.Tempdir() as d:
d.create_file(
"other_file.pyi",
"""
def f() -> int: ...
""",
)
d.create_file(
"main.py",
"""
from other_file import f
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
""",
)
def test_import_pytd2(self):
with test_utils.Tempdir() as d:
d.create_file(
"other_file.pyi",
"""
def f() -> int: ...
""",
)
d.create_file(
"main.py",
"""
from other_file import f
def g():
return f()
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
def g() -> int: ...
""",
)
def test_import_directory(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("sub/other_file.pyi"),
"def f() -> int: ...",
)
d.create_file(
file_utils.replace_separator("sub/bar/baz.pyi"),
"def g() -> float: ...",
)
d.create_file(file_utils.replace_separator("sub/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("sub/bar/__init__.pyi"), "")
d.create_file(
"main.py",
"""
from sub import other_file
import sub.bar.baz
from sub.bar.baz import g
def h():
return other_file.f()
def i():
return g()
def j():
return sub.bar.baz.g()
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
import sub # from 'import sub.bar.baz'
from sub import other_file
def g() -> float: ...
def h() -> int: ...
def i() -> float: ...
def j() -> float: ...
""",
)
def test_import_init(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("sub/__init__.pyi"),
"""
def f() -> int: ...
""",
)
d.create_file(
"main.py",
"""
from sub import f
def g():
return f()
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
def g() -> int: ...
""",
)
def test_import_name(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
pass
def f() -> A: ...
""",
)
d.create_file(
"main.py",
"""
from foo import f
def g():
return f()
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
def f() -> foo.A: ...
def g() -> foo.A: ...
""",
)
def test_deep_dependency(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", "x = ... # type: bar.Bar")
d.create_file(
"bar.pyi",
"""
class Bar:
def bar(self) -> int: ...
""",
)
d.create_file(
"main.py",
"""
from foo import x
def f():
return x.bar()
""",
)
ty = self.InferFromFile(filename=d["main.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: bar.Bar
def f() -> int: ...
""",
)
def test_relative_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/baz.pyi"),
"""x = ... # type: int""",
)
d.create_file(
file_utils.replace_separator("foo/bar.py"),
"""
from . import baz
def f():
return baz.x
""",
)
d.create_file(file_utils.replace_separator("foo/__init__.pyi"), "")
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("foo/bar.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import baz
def f() -> int: ...
""",
)
def test_dot_package(self):
# This tests up one level: note that the test file (foo.py)
# is tested in the context of the up-level director "up1".
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("up1/foo.py"),
"""
from .bar import x
""",
)
d.create_file(
file_utils.replace_separator("up1/bar.pyi"),
"""x = ... # type: int""",
)
d.create_file(file_utils.replace_separator("up1/__init__.pyi"), "")
d.create_file("__init__.pyi", "")
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("up1/foo.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_dot_dot_package(self):
# Similar to testDotPackage, except two levels
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("up2/baz/foo.py"),
"""
from ..bar import x
""",
)
d.create_file(
file_utils.replace_separator("up2/bar.pyi"),
"""x = ... # type: int""",
)
d.create_file("__init__.pyi", "")
d.create_file(file_utils.replace_separator("up2/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("up2/baz/__init__.pyi"), "")
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("up2/baz/foo.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_dot_package_no_init(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.py",
"""
from .bar import x
""",
)
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(filename=d["foo.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_dot_dot_packag_no_init(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("baz/foo.py"),
"""
from ..bar import x
""",
)
d.create_file("bar.pyi", """x = ... # type: int""")
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("baz/foo.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: int
""",
)
def test_dot_dot(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/baz.pyi"),
"""x = ... # type: int""",
)
d.create_file(
file_utils.replace_separator("foo/deep/bar.py"),
"""
from .. import baz
def f():
return baz.x
""",
)
d.create_file(file_utils.replace_separator("foo/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("foo/deep/__init__.pyi"), "")
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("foo/deep/bar.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import baz
def f() -> int: ...
""",
)
def test_dot_dot_package_in_pyi(self):
# Similar to testDotDotPackage, except for a pyi file.
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("up2/baz/foo.pyi"),
"""
from ..bar import X
""",
)
d.create_file(file_utils.replace_separator("up2/bar.pyi"), "class X: ...")
d.create_file(
"top.py",
"""
from up2.baz.foo import X
x = X()
""",
)
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
import up2.bar
X = ... # type: Type[up2.bar.X]
x = ... # type: up2.bar.X
""",
)
def test_dot_dot_in_pyi(self):
# Similar to testDotDot except in a pyi file.
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/baz.pyi"), "x: int")
d.create_file(
file_utils.replace_separator("foo/deep/bar.py"),
"""
from .. import baz
a = baz.x
""",
)
ty = self.InferFromFile(
filename=d[file_utils.replace_separator("foo/deep/bar.py")],
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import baz
a: int
""",
)
def test_too_many_dots_in_package_in_pyi(self):
# Trying to go up more directories than the package path contains
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("up/foo.pyi"), "from ..bar import X"
)
d.create_file(file_utils.replace_separator("up/bar.pyi"), "class X: ...")
_, err = self.InferWithErrors(
"from up.foo import X # pyi-error[e]", pythonpath=[d.path]
)
self.assertErrorRegexes(
err, {"e": r"Cannot resolve relative import \.\.bar"}
)
def test_from_dot_in_pyi(self):
# from . import module
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/a.pyi"), "class X: ...")
d.create_file(
file_utils.replace_separator("foo/b.pyi"),
"""
from . import a
Y = a.X""",
)
d.create_file(
"top.py",
"""
import foo.b
x = foo.b.Y() """,
)
ty = self.InferFromFile(filename=d["top.py"], pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
import foo
x = ... # type: foo.a.X
""",
)
def test_unused_from_dot_in_pyi(self):
# A `from . import module` that does not subsequently use the module should
# not raise an unreplaced NamedType error.
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/a.pyi"), "class X: ...")
d.create_file(
file_utils.replace_separator("foo/b.pyi"), "from . import a"
)
self.Check("import foo.b", pythonpath=[d.path])
def test_file_import1(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/some/module.pyi"),
"def foo(x:int) -> str: ...",
)
d.create_file(
file_utils.replace_separator("path/to/some/__init__.pyi"), ""
)
d.create_file(file_utils.replace_separator("path/to/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("path/__init__.pyi"), "")
ty = self.Infer(
"""
import path.to.some.module
def my_foo(x):
return path.to.some.module.foo(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import path
def my_foo(x) -> str: ...
""",
)
def test_file_import2(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/some/module.pyi"),
"def foo(x:int) -> str: ...",
)
d.create_file(
file_utils.replace_separator("path/to/some/__init__.pyi"), ""
)
d.create_file(file_utils.replace_separator("path/to/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("path/__init__.pyi"), "")
ty = self.Infer(
"""
from path.to.some import module
def my_foo(x):
return module.foo(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from path.to.some import module
def my_foo(x) -> str: ...
""",
)
@test_base.skip("flaky")
def test_solve_for_imported(self):
ty = self.Infer("""
import StringIO
def my_foo(x):
return x.read()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Union
StringIO = ... # type: module
def my_foo(x: Union[StringIO.StringIO[object], typing.IO[object],
typing.BinaryIO, typing.TextIO]) -> Any
""",
)
def test_import_builtins(self):
ty = self.Infer("""
import builtins as __builtin__
def f():
return __builtin__.int()
""")
self.assertTypesMatchPytd(
ty,
"""
import builtins as __builtin__
def f() -> int: ...
""",
)
def test_imported_method_as_class_attribute(self):
ty = self.Infer("""
import os
class Foo:
kill = os.kill
""")
self.assertTypesMatchPytd(
ty,
"""
import os
class Foo:
def kill(pid: int, signal: int, /) -> None: ...
""",
)
def test_match_against_imported(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo:
pass
class Bar:
def f1(self, x: Foo) -> Baz: ...
class Baz:
pass
""",
)
ty = self.Infer(
"""
import foo
def f(x, y):
return x.f1(y)
def g(x):
return x.f1(foo.Foo())
class FooSub(foo.Foo):
pass
def h(x):
return x.f1(FooSub())
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
import foo
def f(x, y) -> Any: ...
def g(x) -> Any: ...
def h(x) -> Any: ...
class FooSub(foo.Foo):
pass
""",
)
def test_imported_constants(self):
with test_utils.Tempdir() as d:
d.create_file(
"module.pyi",
"""
x = ... # type: int
class Foo:
x = ... # type: float
""",
)
ty = self.Infer(
"""
import module
def f():
return module.x
def g():
return module.Foo().x
def h():
return module.Foo.x
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import module
def f() -> int: ...
def g() -> float: ...
def h() -> float: ...
""",
)
def test_circular(self):
with test_utils.Tempdir() as d:
d.create_file(
"x.pyi",
"""
class X:
pass
y = ... # type: y.Y
z = ... # type: z.Z
""",
)
d.create_file(
"y.pyi",
"""
class Y:
pass
x = ... # type: x.X
""",
)
d.create_file(
"z.pyi",
"""
class Z:
pass
x = ... # type: x.X
""",
)
ty = self.Infer(
"""
import x
xx = x.X()
yy = x.y
zz = x.z
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import x
xx = ... # type: x.X
yy = ... # type: y.Y
zz = ... # type: z.Z
""",
)
def test_reimport(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from collections import OrderedDict as MyOrderedDict
""",
)
ty = self.Infer(
"""
import foo
d = foo.MyOrderedDict()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import OrderedDict
d = ... # type: OrderedDict[nothing, nothing]
""",
)
def test_import_function(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import SupportsFloat
def pow(__x: SupportsFloat, __y: SupportsFloat) -> float: ...
""",
)
d.create_file(
"bar.pyi",
"""
from foo import pow as mypow
""",
)
ty = self.Infer(
"""
import bar
d = bar.mypow
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import SupportsFloat
import bar
def d(__x: SupportsFloat, __y: SupportsFloat) -> float: ...
""",
)
def test_import_constant(self):
with test_utils.Tempdir() as d:
d.create_file(
"mymath.pyi",
"""
from math import pi as half_tau
""",
)
ty = self.Infer(
"""
import mymath
from mymath import half_tau as x
y = mymath.half_tau
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import mymath
x = ... # type: float
y = ... # type: float
""",
)
def test_import_map(self):
with test_utils.Tempdir() as d:
foo_filename = d.create_file(
"foo.pyi",
"""
bar = ... # type: int
""",
)
imports_map_filename = d.create_file(
"imports_map.txt",
"""
foo %s
""" % foo_filename,
)
imports_map = self.build_imports_map(imports_map_filename)
ty = self.Infer(
"""
from foo import bar
""",
imports_map=imports_map,
pythonpath=[""],
)
self.assertTypesMatchPytd(
ty,
"""
bar = ... # type: int
""",
)
def test_import_resolve_on_dummy(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", DEFAULT_PYI)
d.create_file(
"b.pyi",
"""
from a import Foo
def f(x: Foo) -> Foo: ...
""",
)
ty = self.Infer(
"""
import b
foo = b.Foo()
bar = b.f(foo)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import b
from typing import Any
foo = ... # type: Any
bar = ... # type: Any
""",
)
def test_two_level(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
+++ /&* unparsable *&/ +++
""",
)
d.create_file(
"b.pyi",
"""
import a
class B(a.A):
pass
""",
)
_, errors = self.InferWithErrors(
"""
import b # pyi-error[e]
x = b.B()
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"a\.pyi"})
def test_subdir_and_module_with_same_name_as_package(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("pkg/__init__.pyi"),
"""
from pkg.pkg.pkg import *
from pkg.bar import *""",
)
d.create_file(
file_utils.replace_separator("pkg/pkg/pkg.pyi"),
"""
class X: pass""",
)
d.create_file(
file_utils.replace_separator("pkg/bar.pyi"),
"""
class Y: pass""",
)
ty = self.Infer(
"""
import pkg
a = pkg.X()
b = pkg.Y()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import pkg
a = ... # type: pkg.pkg.pkg.X
b = ... # type: pkg.bar.Y
""",
)
def test_redefined_builtin(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
object = ... # type: Any
def f(x) -> Any: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f("")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
import foo
x = ... # type: Any
""",
)
def test_redefined_builtin2(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class object:
def foo(self) -> None: ...
def f(x: object) -> object: ...
""",
)
ty, _ = self.InferWithErrors(
"""
import foo
x = foo.f(foo.object())
y = foo.f(foo.object())
foo.f(object()) # wrong-arg-types
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x = ... # type: foo.object
y = ... # type: foo.object
""",
)
def test_no_fail_on_bad_symbol_lookup(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: FooBar) -> FooBar: ...
""",
)
self.assertNoCrash(
self.Check,
"""
import foo
""",
pythonpath=[d.path],
)
@test_base.skip("instantiating 'type' should use 'Type[Any]', not 'Any'")
def test_import_type_factory(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
def factory() -> type: ...
""",
)
ty = self.Infer(
"""
import a
A = a.factory()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
A = ... # type: type
""",
)
def test_get_bad_submodule_as_attribute(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("foo/bar.pyi"), "nonsense")
self.assertNoCrash(
self.Check,
"""
import foo
x = foo.bar
""",
pythonpath=[d.path],
)
def test_ignored_import(self):
ty = self.Infer("""
import sys # type: ignore
import foobar # type: ignore
from os import path # type: ignore
a = sys.rumplestiltskin
b = sys.stderr
c = foobar.rumplestiltskin
d = path.curdir
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
sys = ... # type: Any
foobar = ... # type: Any
path = ... # type: Any
a = ... # type: Any
b = ... # type: Any
c = ... # type: Any
d = ... # type: Any
""",
)
def test_attribute_on_module(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
foo = ... # type: int
""",
)
_, errors = self.InferWithErrors(
"""
from a import foo, bar # import-error[e1]
import a
a.baz # module-attr[e2]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e1": r"bar", "e2": r"baz"})
def test_from_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/b.pyi"),
"""
from foo import c
class bar(c.X): ...
""",
)
d.create_file(
file_utils.replace_separator("foo/c.pyi"),
"""
class X: ...
""",
)
self.Check(
"""
from foo import b
class Foo(b.bar):
pass
""",
pythonpath=[d.path],
)
def test_submodule_lookup(self):
# Tests a common Blaze pattern: when mod/__init__.py and mod/submod.py are
# in the same target, they are analyzed twice, and we should not use the
# first-pass __init__.pyi to look up types for the second pass, as the
# former contains a 'submod: Any' entry that masks the actual submodule.
# The "%s" is used to silence the import error from the first pass.
init_py = """
from mod import submod%s
X = submod.X
"""
submod_py = """
class X:
pass
"""
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__"
)
submod_pyi_1 = self.Infer(submod_py, module_name="mod.submod")
with test_utils.Tempdir() as d:
init_path = d.create_file(
file_utils.replace_separator("mod/__init__.pyi"),
pytd_utils.Print(init_pyi_1),
)
submod_path = d.create_file(
file_utils.replace_separator("mod/submod.pyi"),
pytd_utils.Print(submod_pyi_1),
)
imports_info = d.create_file(
"imports_info",
f"""
{file_utils.replace_separator('mod/__init__')} {init_path}
{file_utils.replace_separator('mod/submod')} {submod_path}
""",
)
imports_map = self.build_imports_map(imports_info)
init_pyi = self.Infer(
init_py % "", imports_map=imports_map, module_name="mod.__init__"
)
self.assertTypesMatchPytd(
init_pyi,
"""
from mod import submod
from typing import Type
X: Type[mod.submod.X]
""",
)
def test_circular_dep(self):
# This test imitates how analyze_project handles circular dependencies.
# See https://github.com/google/pytype/issues/760. In the test, the circular
# dep is between a module's __init__.py and a submodule to make it harder
# for pytype to distinguish this case from test_submodule_lookup.
# "%s" is used to silence import errors from the first-pass analysis.
submod_py = """
from mod import Y%s
class X:
pass
"""
init_py = """
import typing
if typing.TYPE_CHECKING:
from mod.submod import X%s
class Y:
def __init__(self, x):
# type: ('X') -> None
pass
"""
submod_pyi_1, _ = self.InferWithErrors(
submod_py % " # import-error", module_name="mod.submod"
)
init_pyi_1, _ = self.InferWithErrors(
init_py % " # import-error", module_name="mod.__init__"
)
with test_utils.Tempdir() as d:
submod_path = d.create_file(
file_utils.replace_separator("mod/submod.pyi"),
pytd_utils.Print(submod_pyi_1),
)
init_path = d.create_file(
file_utils.replace_separator("mod/__init__.pyi"),
pytd_utils.Print(init_pyi_1),
)
imports_info = d.create_file(
"imports_info",
f"""
{file_utils.replace_separator('mod/submod')} {submod_path}
{file_utils.replace_separator('mod/__init__')} {init_path}
""",
)
imports_map = self.build_imports_map(imports_info)
submod_pyi = self.Infer(
submod_py % "", imports_map=imports_map, module_name="mod.submod"
)
with open(submod_path, "w") as f:
f.write(pytd_utils.Print(submod_pyi))
init_pyi = self.Infer(
init_py % "", imports_map=imports_map, module_name="mod.__init__"
)
self.assertTypesMatchPytd(
init_pyi,
"""
import mod.submod
import typing
from typing import Type
X: Type[mod.submod.X]
class Y:
def __init__(self, x: X) -> None: ...
""",
)
def test_mutual_imports(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("pkg/a.pyi"),
"""
from typing import TypeVar, Generic, List
from .b import Foo
T = TypeVar('T')
class Bar(Foo, List[T], Generic[T]): ...
class Baz(List[T], Generic[T]): ...
""",
)
d.create_file(
file_utils.replace_separator("pkg/b.pyi"),
"""
from typing import TypeVar, Generic
from .a import Baz
T = TypeVar('T')
class Foo(): ...
class Quux(Baz[T], Generic[T]): ...
""",
)
ty = self.Infer("""from pkg.a import *""", pythonpath=[d.path])
self.assertTypesMatchPytd(
ty,
"""
import pkg.a
import pkg.b
from typing import Type, TypeVar
Bar = ... # type: Type[pkg.a.Bar]
Baz = ... # type: Type[pkg.a.Baz]
Foo = ... # type: Type[pkg.b.Foo]
T = TypeVar('T')
""",
)
def test_module_reexports_and_aliases(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("pkg/a.pyi"),
"""
from pkg import b as c
from pkg.b import e as f
import pkg.d as x
import pkg.g # should not cause unused import errors
""",
)
d.create_file(
file_utils.replace_separator("pkg/b.pyi"),
"""
class X: ...
class e: ...
""",
)
d.create_file(
file_utils.replace_separator("pkg/d.pyi"),
"""
class Y: ...
""",
)
d.create_file(
file_utils.replace_separator("pkg/g.pyi"),
"""
class Z: ...
""",
)
ty = self.Infer(
"""
import pkg.a
s = pkg.a.c.X()
t = pkg.a.f()
u = pkg.a.x
v = u.Y()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import pkg
from pkg import d as u
s = ... # type: pkg.b.X
t = ... # type: pkg.b.e
v = ... # type: u.Y
""",
)
def test_import_package_as_alias(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", "class A: ...")
d.create_file(
"b.pyi",
"""
import a as _a
f: _a.A
""",
)
self.Check(
"""
import b
c = b.f
""",
pythonpath=[d.path],
)
def test_import_package_alias_name_conflict(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file(
"b.pyi",
"""
import a as _a
class a:
A: int
x = _a.A
y = a.A
""",
)
ty = self.Infer(
"""
import b
x = b.x
y = b.y
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import b
x: str
y: int
""",
)
def test_import_package_alias_name_conflict2(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file(
"c.pyi",
"""
import a as _a
import b as a
x = _a.A
y = a.A
""",
)
ty = self.Infer(
"""
import c
x = c.x
y = c.y
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import c
x: str
y: int
""",
)
def test_import_package_alias_name_conflict3(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", "A: str")
d.create_file("b.pyi", "A: int")
d.create_file(
"c.pyi",
"""
import b as a
import a as _a
x = _a.A
y = a.A
""",
)
ty = self.Infer(
"""
import c
x = c.x
y = c.y
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import c
x: str
y: int
""",
)
def test_module_class_conflict(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/bar.pyi"), DEFAULT_PYI)
ty = self.Infer(
"""
from foo import bar
class foo:
def __new__(cls):
return object.__new__(cls)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import bar
from typing import TypeVar
_Tfoo = TypeVar("_Tfoo", bound=foo)
class foo:
def __new__(cls: type[_Tfoo]) -> _Tfoo: ...
""",
)
def test_class_alias(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/bar.pyi"), DEFAULT_PYI)
ty = self.Infer(
"""
from foo import bar
class foo:
pass
baz = foo
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import bar
from typing import Type
class foo: ...
baz: Type[foo]
""",
)
def test_relative_star_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/bar.pyi"), "from .baz.qux import *"
)
d.create_file(
file_utils.replace_separator("foo/baz/qux.pyi"),
"v = ... # type: int",
)
ty = self.Infer(
"""
from foo.bar import *
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
v = ... # type: int
""",
)
def test_relative_star_import2(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/bar/baz.pyi"),
"from ..bar.qux import *",
)
d.create_file(
file_utils.replace_separator("foo/bar/qux.pyi"),
"v = ... # type: int",
)
ty = self.Infer(
"""
from foo.bar.baz import *
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
v = ... # type: int
""",
)
def test_unimported_submodule_failure(self):
"""Fail when accessing a submodule we haven't imported."""
self.options.tweak(strict_import=True)
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("sub/bar/baz.pyi"), "class A: ..."
)
d.create_file(
file_utils.replace_separator("sub/bar/quux.pyi"), "class B: ..."
)
d.create_file(file_utils.replace_separator("sub/__init__.pyi"), "")
d.create_file(file_utils.replace_separator("sub/bar/__init__.pyi"), "")
_, errors = self.InferWithErrors(
"""
import sub.bar.baz
x = sub.bar.baz.A()
y = sub.bar.quux.B() # module-attr[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"quux.*sub\.bar"})
def test_submodule_attribute_error(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("package/__init__.pyi"),
"submodule: module",
)
d.create_file(file_utils.replace_separator("package/submodule.pyi"), "")
self.CheckWithErrors(
"""
from package import submodule
submodule.asd # module-attr
""",
pythonpath=[d.path],
)
def test_init_only_submodule(self):
"""Test a submodule without its own stub file."""
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("package/__init__.pyi"),
"submodule: module",
)
self.Check(
"""
from package import submodule
submodule.asd
""",
pythonpath=[d.path],
)
def test_import_alias(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/__init__.pyi"), "")
d.create_file(
file_utils.replace_separator("foo/bar.pyi"),
"""
from foo import baz as qux
X = qux.X
""",
)
d.create_file(file_utils.replace_separator("foo/baz.pyi"), "X = str")
self.Check("from foo import bar", pythonpath=[d.path])
def test_subpackage(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/__init__.pyi"),
"from .bar import baz as baz",
)
d.create_file(file_utils.replace_separator("foo/bar/baz.pyi"), "v: str")
ty = self.Infer(
"""
import foo
v = foo.baz.v
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
v: str
""",
)
def test_attr_and_module(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/__init__.pyi"), "class X: ..."
)
d.create_file(file_utils.replace_separator("foo/bar.pyi"), "v: str")
d.create_file(
"other.pyi",
"""
from foo import X as X
from foo import bar as bar
""",
)
ty = self.Infer(
"""
import other
X = other.X
v = other.bar.v
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
import foo
import other
X: Type[foo.X]
v: str
""",
)
def test_submodule_imports_info(self):
# Tests that the presence of a submodule in imports_info doesn't prevent
# pytype from finding attributes in a module's __init__ file.
with test_utils.Tempdir() as d:
empty = d.create_file("empty.pyi")
imports_info = d.create_file(
"imports_info", f"email/_header_value_parser {empty}"
)
imports_map = self.build_imports_map(imports_info)
self.Check(
"""
from email import message_from_bytes
""",
imports_map=imports_map,
)
def test_directory_module_clash(self):
with test_utils.Tempdir() as d:
foo = d.create_file("foo.pyi", "x: int")
foo_bar = d.create_file(
file_utils.replace_separator("foo/bar.pyi"), "y: str"
)
imports_info = d.create_file(
"imports_info",
f"""
foo {foo}
{file_utils.replace_separator('foo/bar')} {foo_bar}
""",
)
imports_map = self.build_imports_map(imports_info)
# When both foo.py and a foo/ package exist, the latter shadows the
# former, so `import foo` gets you the (empty) foo/__init__.py.
self.CheckWithErrors(
"""
import foo
x = foo.x # module-attr
""",
imports_map=imports_map,
)
def test_missing_submodule(self):
with test_utils.Tempdir() as d:
foo = d.create_file(
file_utils.replace_separator("foo/__init__.pyi"),
"import bar.baz as baz",
)
foo_bar = d.create_file(
file_utils.replace_separator("foo/bar.pyi"), "y: str"
)
imports_info = d.create_file(
file_utils.replace_separator("imports_info"),
f"""
foo {foo}
{file_utils.replace_separator('foo/bar')} {foo_bar}
""",
)
imports_map = self.build_imports_map(imports_info)
self.CheckWithErrors(
"""
from foo import baz # import-error
""",
imports_map=imports_map,
)
def test_module_prefix_alias(self):
with test_utils.Tempdir() as d:
foo_bar = d.create_file(
file_utils.replace_separator("foo/bar.pyi"),
"""
import foo as _foo
x: _foo.baz.X
""",
)
foo_baz = d.create_file(
file_utils.replace_separator("foo/baz.pyi"), "class X: ..."
)
imports_info = d.create_file(
file_utils.replace_separator("imports_info"),
f"""
{file_utils.replace_separator('foo/bar')} {foo_bar}
{file_utils.replace_separator('foo/baz')} {foo_baz}
""",
)
imports_map = self.build_imports_map(imports_info)
self.Check(
"""
from foo import bar
""",
imports_map=imports_map,
)
if __name__ == "__main__":
test_base.main()
| ImportTest |
python | sqlalchemy__sqlalchemy | test/ext/test_serializer.py | {
"start": 10006,
"end": 11815
} | class ____(
AssertsCompiledSQL, fixtures.DeclarativeMappedTest
):
__dialect__ = "default"
run_create_tables = None
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
global TestTable
class TestTable(Base):
__tablename__ = "test"
id = Column(Integer, primary_key=True, autoincrement=True)
_some_id = Column("some_id", String)
some_primary_id = column_property(
func.left(_some_id, 6).cast(Integer)
)
def test_deserailize_colprop(self):
TestTable = self.classes.TestTable
s = scoped_session(sessionmaker())
expr = s.query(TestTable).filter(TestTable.some_primary_id == 123456)
expr2 = serializer.loads(serializer.dumps(expr), TestTable.metadata, s)
# note in the original, the same bound parameter is used twice
self.assert_compile(
expr,
"SELECT "
"CAST(left(test.some_id, :left_1) AS INTEGER) AS anon_1, "
"test.id AS test_id, test.some_id AS test_some_id FROM test WHERE "
"CAST(left(test.some_id, :left_1) AS INTEGER) = :param_1",
checkparams={"left_1": 6, "param_1": 123456},
)
# in the deserialized, it's two separate parameter objects which
# need to have different anonymous names. they still have
# the same value however
self.assert_compile(
expr2,
"SELECT CAST(left(test.some_id, :left_1) AS INTEGER) AS anon_1, "
"test.id AS test_id, test.some_id AS test_some_id FROM test WHERE "
"CAST(left(test.some_id, :left_2) AS INTEGER) = :param_1",
checkparams={"left_1": 6, "left_2": 6, "param_1": 123456},
)
| ColumnPropertyWParamTest |
python | python__mypy | mypy/nodes.py | {
"start": 146168,
"end": 148652
} | class ____(SymbolNode):
"""Temporary symbol node that will later become a real SymbolNode.
These are only present during semantic analysis when using the new
semantic analyzer. These are created if some essential dependencies
of a definition are not yet complete.
A typical use is for names imported from a module which is still
incomplete (within an import cycle):
from m import f # Initially may create PlaceholderNode
This is particularly important if the imported shadows a name from
an enclosing scope or builtins:
from m import int # Placeholder avoids mixups with builtins.int
Another case where this is useful is when there is another definition
or assignment:
from m import f
def f() -> None: ...
In the above example, the presence of PlaceholderNode allows us to
handle the second definition as a redefinition.
They are also used to create PlaceholderType instances for types
that refer to incomplete types. Example:
class C(Sequence[C]): ...
We create a PlaceholderNode (with becomes_typeinfo=True) for C so
that the type C in Sequence[C] can be bound.
Attributes:
fullname: Full name of the PlaceholderNode.
node: AST node that contains the definition that caused this to
be created. This is useful for tracking order of incomplete definitions
and for debugging.
becomes_typeinfo: If True, this refers something that could later
become a TypeInfo. It can't be used with type variables, in
particular, as this would cause issues with class type variable
detection.
The long-term purpose of placeholder nodes/types is to evolve into
something that can support general recursive types.
"""
__slots__ = ("_fullname", "node", "becomes_typeinfo")
def __init__(
self, fullname: str, node: Node, line: int, *, becomes_typeinfo: bool = False
) -> None:
self._fullname = fullname
self.node = node
self.becomes_typeinfo = becomes_typeinfo
self.line = line
@property
def name(self) -> str:
return self._fullname.split(".")[-1]
@property
def fullname(self) -> str:
return self._fullname
def serialize(self) -> JsonDict:
assert False, "PlaceholderNode can't be serialized"
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_placeholder_node(self)
| PlaceholderNode |
python | pytorch__pytorch | test/onnx/pytorch_test_common.py | {
"start": 607,
"end": 11582
} | class ____(Enum):
TORCH_NN_MODULE = auto()
TORCH_EXPORT_EXPORTEDPROGRAM = auto()
def _skipper(condition, reason):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return f(*args, **kwargs)
return wrapper
return decorator
skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(), "CUDA is not available")
skipIfTravis = _skipper(lambda: os.getenv("TRAVIS"), "Skip In Travis")
skipIfNoBFloat16Cuda = _skipper(
lambda: not torch.cuda.is_bf16_supported(), "BFloat16 CUDA is not available"
)
skipIfQuantizationBackendQNNPack = _skipper(
lambda: torch.backends.quantized.engine == "qnnpack",
"Not compatible with QNNPack quantization backend",
)
# skips tests for all versions below min_opset_version.
# add this wrapper to prevent running the test for opset_versions
# smaller than `min_opset_version`.
def skipIfUnsupportedMinOpsetVersion(min_opset_version):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version < min_opset_version:
raise unittest.SkipTest(
f"Unsupported opset_version: {self.opset_version} < {min_opset_version}"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
# skips tests for all versions above max_opset_version.
# add this wrapper to prevent running the test for opset_versions
# higher than `max_opset_version`.
def skipIfUnsupportedMaxOpsetVersion(max_opset_version):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version > max_opset_version:
raise unittest.SkipTest(
f"Unsupported opset_version: {self.opset_version} > {max_opset_version}"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
# skips tests for all opset versions.
def skipForAllOpsetVersions():
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version:
raise unittest.SkipTest(
"Skip verify test for unsupported opset_version"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipTraceTest(skip_before_opset_version: Optional[int] = None, reason: str = ""):
"""Skip tracing test for opset version less than skip_before_opset_version.
Args:
skip_before_opset_version: The opset version before which to skip tracing test.
If None, tracing test is always skipped.
reason: The reason for skipping tracing test.
Returns:
A decorator for skipping tracing test.
"""
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if skip_before_opset_version is not None:
self.skip_this_opset = self.opset_version < skip_before_opset_version
else:
self.skip_this_opset = True
if self.skip_this_opset and not self.is_script:
raise unittest.SkipTest(f"Skip verify test for torch trace. {reason}")
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipScriptTest(skip_before_opset_version: Optional[int] = None, reason: str = ""):
"""Skip scripting test for opset version less than skip_before_opset_version.
Args:
skip_before_opset_version: The opset version before which to skip scripting test.
If None, scripting test is always skipped.
reason: The reason for skipping scripting test.
Returns:
A decorator for skipping scripting test.
"""
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if skip_before_opset_version is not None:
self.skip_this_opset = self.opset_version < skip_before_opset_version
else:
self.skip_this_opset = True
if self.skip_this_opset and self.is_script:
raise unittest.SkipTest(f"Skip verify test for TorchScript. {reason}")
return func(self, *args, **kwargs)
return wrapper
return skip_dec
# NOTE: This decorator is currently unused, but we may want to use it in the future when
# we have more tests that are not supported in released ORT.
def skip_min_ort_version(reason: str, version: str, dynamic_only: bool = False):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if (
packaging.version.parse(self.ort_version).release
< packaging.version.parse(version).release
):
if dynamic_only and not self.dynamic_shapes:
return func(self, *args, **kwargs)
raise unittest.SkipTest(
f"ONNX Runtime version: {version} is older than required version {version}. "
f"Reason: {reason}."
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def xfail_dynamic_fx_test(
error_message: str,
model_type: Optional[TorchModelType] = None,
reason: Optional[str] = None,
):
"""Xfail dynamic exporting test.
Args:
reason: The reason for xfailing dynamic exporting test.
model_type (TorchModelType): The model type to xfail dynamic exporting test for.
When None, model type is not used to xfail dynamic tests.
Returns:
A decorator for xfailing dynamic exporting test.
"""
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.dynamic_shapes and (
not model_type or self.model_type == model_type
):
return xfail(error_message, reason)(func)(self, *args, **kwargs)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skip_dynamic_fx_test(reason: str, model_type: TorchModelType = None):
"""Skip dynamic exporting test.
Args:
reason: The reason for skipping dynamic exporting test.
model_type (TorchModelType): The model type to skip dynamic exporting test for.
When None, model type is not used to skip dynamic tests.
Returns:
A decorator for skipping dynamic exporting test.
"""
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.dynamic_shapes and (
not model_type or self.model_type == model_type
):
raise unittest.SkipTest(
f"Skip verify dynamic shapes test for FX. {reason}"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skip_in_ci(reason: str):
"""Skip test in CI.
Args:
reason: The reason for skipping test in CI.
Returns:
A decorator for skipping test in CI.
"""
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if os.getenv("CI"):
raise unittest.SkipTest(f"Skip test in CI. {reason}")
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def xfail(error_message: str, reason: Optional[str] = None):
"""Expect failure.
Args:
reason: The reason for expected failure.
Returns:
A decorator for expecting test failure.
"""
def wrapper(func):
@functools.wraps(func)
def inner(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except Exception as e:
assert error_message in str(e), (
f"Expected error message: {error_message} NOT in {str(e)}"
)
pytest.xfail(reason if reason else f"Expected failure: {error_message}")
else:
pytest.fail("Unexpected success!")
return inner
return wrapper
# skips tests for opset_versions listed in unsupported_opset_versions.
# if the PyTorch test cannot be run for a specific version, add this wrapper
# (for example, an op was modified but the change is not supported in PyTorch)
def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
def skip_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.opset_version in unsupported_opset_versions:
raise unittest.SkipTest(
"Skip verify test for unsupported opset_version"
)
return func(self, *args, **kwargs)
return wrapper
return skip_dec
def skipShapeChecking(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.check_shape = False
return func(self, *args, **kwargs)
return wrapper
def skipDtypeChecking(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.check_dtype = False
return func(self, *args, **kwargs)
return wrapper
def xfail_if_model_type_is_exportedprogram(
error_message: str, reason: Optional[str] = None
):
"""xfail test with models using ExportedProgram as input.
Args:
error_message: The error message to raise when the test is xfailed.
reason: The reason for xfail the ONNX export test.
Returns:
A decorator for xfail tests.
"""
def xfail_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.model_type == TorchModelType.TORCH_EXPORT_EXPORTEDPROGRAM:
return xfail(error_message, reason)(func)(self, *args, **kwargs)
return func(self, *args, **kwargs)
return wrapper
return xfail_dec
def xfail_if_model_type_is_not_exportedprogram(
error_message: str, reason: Optional[str] = None
):
"""xfail test without models using ExportedProgram as input.
Args:
reason: The reason for xfail the ONNX export test.
Returns:
A decorator for xfail tests.
"""
def xfail_dec(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.model_type != TorchModelType.TORCH_EXPORT_EXPORTEDPROGRAM:
return xfail(error_message, reason)(func)(self, *args, **kwargs)
return func(self, *args, **kwargs)
return wrapper
return xfail_dec
def flatten(x):
return tuple(function._iter_filter(lambda o: isinstance(o, torch.Tensor))(x))
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
| TorchModelType |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 27507,
"end": 29959
} | class ____(DefinedFunction):
r"""
Two-argument Inverse error function. The erf2inv function is defined as:
.. math ::
\mathrm{erf2}(x, w) = y \quad \Rightarrow \quad \mathrm{erf2inv}(x, y) = w
Examples
========
>>> from sympy import erf2inv, oo
>>> from sympy.abc import x, y
Several special values are known:
>>> erf2inv(0, 0)
0
>>> erf2inv(1, 0)
1
>>> erf2inv(0, 1)
oo
>>> erf2inv(0, y)
erfinv(y)
>>> erf2inv(oo, y)
erfcinv(-y)
Differentiation with respect to $x$ and $y$ is supported:
>>> from sympy import diff
>>> diff(erf2inv(x, y), x)
exp(-x**2 + erf2inv(x, y)**2)
>>> diff(erf2inv(x, y), y)
sqrt(pi)*exp(erf2inv(x, y)**2)/2
See Also
========
erf: Gaussian error function.
erfc: Complementary error function.
erfi: Imaginary error function.
erf2: Two-argument error function.
erfinv: Inverse error function.
erfcinv: Inverse complementary error function.
References
==========
.. [1] https://functions.wolfram.com/GammaBetaErf/InverseErf2/
"""
def fdiff(self, argindex):
x, y = self.args
if argindex == 1:
return exp(self.func(x,y)**2-x**2)
elif argindex == 2:
return sqrt(pi)*S.Half*exp(self.func(x,y)**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, x, y):
if x is S.NaN or y is S.NaN:
return S.NaN
elif x.is_zero and y.is_zero:
return S.Zero
elif x.is_zero and y is S.One:
return S.Infinity
elif x is S.One and y.is_zero:
return S.One
elif x.is_zero:
return erfinv(y)
elif x is S.Infinity:
return erfcinv(-y)
elif y.is_zero:
return x
elif y is S.Infinity:
return erfinv(x)
if x.is_zero:
if y.is_zero:
return S.Zero
else:
return erfinv(y)
if y.is_zero:
return x
def _eval_is_zero(self):
x, y = self.args
if x.is_zero and y.is_zero:
return True
###############################################################################
#################### EXPONENTIAL INTEGRALS ####################################
###############################################################################
| erf2inv |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 10014,
"end": 11263
} | class ____(GreatExpectationsError):
def __init__(self, module_name, package_name, class_name) -> None:
# noinspection PyUnresolvedReferences
module_spec: Optional[importlib.machinery.ModuleSpec] = importlib.util.find_spec(
module_name, package=package_name
)
if not module_spec:
if not package_name:
package_name = ""
self.message = f"""No module named "{package_name + module_name}" could be found in the repository. \
Please make sure that the file, corresponding to this package and module, exists and that dynamic loading of code \
modules, templates, and assets is supported in your execution environment. This error is unrecoverable.
""" # noqa: E501 # FIXME CoP
else:
self.message = f"""The module "{module_name}" exists; however, the system is unable to create an instance \
of the class "{class_name}", searched for inside this module. Please make sure that the class named "{class_name}" is \
properly defined inside its intended module and declared correctly by the calling entity. This error is unrecoverable.
""" # noqa: E501 # FIXME CoP
super().__init__(self.message)
| ClassInstantiationError |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/types.py | {
"start": 2055,
"end": 2288
} | class ____(int, Enum):
"""Enum representing each status type for a run in dbt Cloud's ontology."""
QUEUED = 1
STARTING = 2
RUNNING = 3
SUCCESS = 10
ERROR = 20
CANCELLED = 30
@record
| DbtCloudJobRunStatusType |
python | kamyu104__LeetCode-Solutions | Python/maximal-rectangle.py | {
"start": 31,
"end": 925
} | class ____(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
def largestRectangleArea(heights):
stk, result, i = [-1], 0, 0
for i in xrange(len(heights)+1):
while stk[-1] != -1 and (i == len(heights) or heights[stk[-1]] >= heights[i]):
result = max(result, heights[stk.pop()]*((i-1)-stk[-1]))
stk.append(i)
return result
if not matrix:
return 0
result = 0
heights = [0]*len(matrix[0])
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
heights[j] = heights[j] + 1 if matrix[i][j] == '1' else 0
result = max(result, largestRectangleArea(heights))
return result
# Time: O(n^2)
# Space: O(n)
# DP solution.
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/text_delta.py | {
"start": 185,
"end": 260
} | class ____(BaseModel):
text: str
type: Literal["text_delta"]
| TextDelta |
python | xlwings__xlwings | xlwings/pro/_xlcalamine.py | {
"start": 2578,
"end": 3233
} | class ____(base_classes.App):
_next_pid = -1
def __init__(self, apps, add_book=True, **kwargs):
self.apps = apps
self._pid = App._next_pid
App._next_pid -= 1
self._books = Books(self)
if add_book:
self._books.add()
def kill(self):
self.apps._apps.remove(self)
self.apps = None
@property
def engine(self):
return engine
@property
def books(self):
return self._books
@property
def pid(self):
return self._pid
@property
def visible(self):
return True
def activate(self, steal_focus=None):
pass
| App |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataflow.py | {
"start": 1832,
"end": 2180
} | class ____(Enum):
"""
Helper enum for choosing what to do if job is already running.
IgnoreJob - do not check if running
FinishIfRunning - finish current dag run with no action
WaitForRun - wait for job to finish and then continue with new job
"""
IgnoreJob = 1
FinishIfRunning = 2
WaitForRun = 3
| CheckJobRunning |
python | doocs__leetcode | solution/1900-1999/1915.Number of Wonderful Substrings/Solution.py | {
"start": 0,
"end": 331
} | class ____:
def wonderfulSubstrings(self, word: str) -> int:
cnt = Counter({0: 1})
ans = st = 0
for c in word:
st ^= 1 << (ord(c) - ord("a"))
ans += cnt[st]
for i in range(10):
ans += cnt[st ^ (1 << i)]
cnt[st] += 1
return ans
| Solution |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 82988,
"end": 83720
} | class ____(LinprogIPTests):
options = {"sparse": False}
# see https://github.com/scipy/scipy/issues/20216 for skip reason
@pytest.mark.skipif(
sys.platform == 'darwin',
reason="Fails on some macOS builds for reason not relevant to test"
)
def test_bug_6139(self):
super().test_bug_6139()
if has_cholmod:
class TestLinprogIPSparseCholmod(LinprogIPTests):
options = {"sparse": True, "cholesky": True}
if has_umfpack:
class TestLinprogIPSparseUmfpack(LinprogIPTests):
options = {"sparse": True, "cholesky": False}
def test_network_flow_limited_capacity(self):
pytest.skip("Failing due to numerical issues on some platforms.")
| TestLinprogIPDense |
python | walkccc__LeetCode | solutions/2325. Decode the Message/2325.py | {
"start": 0,
"end": 301
} | class ____:
def decodeMessage(self, key: str, message: str) -> str:
keyToActual = {' ': ' '}
currChar = 'a'
for c in key:
if c not in keyToActual:
keyToActual[c] = currChar
currChar = chr(ord(currChar) + 1)
return ''.join(keyToActual[c] for c in message)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/copy-list-with-random-pointer.py | {
"start": 1759,
"end": 2209
} | class ____(object):
def copyRandomList(self, head):
"""
:type head: RandomListNode
:rtype: RandomListNode
"""
clone = defaultdict(lambda: Node(0))
clone[None] = None
cur = head
while cur:
clone[cur].val = cur.val
clone[cur].next = clone[cur.next]
clone[cur].random = clone[cur.random]
cur = cur.next
return clone[head]
| Solution3 |
python | apache__airflow | airflow-core/src/airflow/jobs/triggerer_job_runner.py | {
"start": 30270,
"end": 44902
} | class ____:
"""
Runtime environment for all triggers.
Mainly runs inside its own process, where it hands control off to an asyncio
event loop. All communication between this and it's (sync) supervisor is done via sockets
"""
# Maps trigger IDs to their running tasks and other info
triggers: dict[int, TriggerDetails]
# Cache for looking up triggers by classpath
trigger_cache: dict[str, type[BaseTrigger]]
# Inbound queue of new triggers
to_create: deque[workloads.RunTrigger]
# Inbound queue of deleted triggers
to_cancel: deque[int]
# Outbound queue of events
events: deque[tuple[int, events.TriggerEvent]]
# Outbound queue of failed triggers
failed_triggers: deque[tuple[int, BaseException | None]]
# Should-we-stop flag
# TODO: set this in a sig-int handler
stop: bool = False
# TODO: connect this to the parent process
log: FilteringBoundLogger = structlog.get_logger()
comms_decoder: TriggerCommsDecoder
def __init__(self):
super().__init__()
self.triggers = {}
self.trigger_cache = {}
self.to_create = deque()
self.to_cancel = deque()
self.events = deque()
self.failed_triggers = deque()
self.job_id = None
def run(self):
"""Sync entrypoint - just run a run in an async loop."""
asyncio.run(self.arun())
async def arun(self):
"""
Run trigger addition/deletion/cleanup; main (asynchronous) logic loop.
Actual triggers run in their own separate coroutines.
"""
# Make sure comms are initialized before allowing any Triggers to run
await self.init_comms()
watchdog = asyncio.create_task(self.block_watchdog())
last_status = time.monotonic()
try:
while not self.stop:
# Raise exceptions from the tasks
if watchdog.done():
watchdog.result()
# Run core logic
finished_ids = await self.cleanup_finished_triggers()
# This also loads the triggers we need to create or cancel
await self.sync_state_to_supervisor(finished_ids)
await self.create_triggers()
await self.cancel_triggers()
# Sleep for a bit
await asyncio.sleep(1)
# Every minute, log status
if (now := time.monotonic()) - last_status >= 60:
watchers = len([trigger for trigger in self.triggers.values() if trigger["is_watcher"]])
triggers = len(self.triggers) - watchers
self.log.info("%i triggers currently running", triggers)
self.log.info("%i watchers currently running", watchers)
last_status = now
except Exception:
with suppress(BrokenPipeError):
await log.aexception("Trigger runner failed")
self.stop = True
raise
# Wait for supporting tasks to complete
await watchdog
async def init_comms(self):
"""
Set up the communications pipe between this process and the supervisor.
This also sets up the SUPERVISOR_COMMS so that TaskSDK code can work as expected too (but that will
need to be wrapped in an ``sync_to_async()`` call)
"""
from airflow.sdk.execution_time import task_runner
# Yes, we read and write to stdin! It's a socket, not a normal stdin.
reader, writer = await asyncio.open_connection(sock=socket(fileno=0))
self.comms_decoder = TriggerCommsDecoder(
async_writer=writer,
async_reader=reader,
)
task_runner.SUPERVISOR_COMMS = self.comms_decoder
msg = await self.comms_decoder._aget_response(expect_id=0)
if not isinstance(msg, messages.StartTriggerer):
raise RuntimeError(f"Required first message to be a messages.StartTriggerer, it was {msg}")
async def create_triggers(self):
"""Drain the to_create queue and create all new triggers that have been requested in the DB."""
while self.to_create:
await asyncio.sleep(0)
workload = self.to_create.popleft()
trigger_id = workload.id
if trigger_id in self.triggers:
self.log.warning("Trigger %s had insertion attempted twice", trigger_id)
continue
try:
trigger_class = self.get_trigger_by_classpath(workload.classpath)
except BaseException as e:
# Either the trigger code or the path to it is bad. Fail the trigger.
self.log.error("Trigger failed to load code", error=e, classpath=workload.classpath)
self.failed_triggers.append((trigger_id, e))
continue
# Loading the trigger class could have been expensive. Lets give other things a chance to run!
await asyncio.sleep(0)
try:
from airflow.serialization.serialized_objects import smart_decode_trigger_kwargs
# Decrypt and clean trigger kwargs before for execution
# Note: We only clean up serialization artifacts (__var, __type keys) here,
# not in `_decrypt_kwargs` because it is used during hash comparison in
# add_asset_trigger_references and could lead to adverse effects like hash mismatches
# that could cause None values in collections.
kw = Trigger._decrypt_kwargs(workload.encrypted_kwargs)
deserialised_kwargs = {k: smart_decode_trigger_kwargs(v) for k, v in kw.items()}
trigger_instance = trigger_class(**deserialised_kwargs)
except TypeError as err:
self.log.error("Trigger failed to inflate", error=err)
self.failed_triggers.append((trigger_id, err))
continue
trigger_instance.trigger_id = trigger_id
trigger_instance.triggerer_job_id = self.job_id
trigger_instance.task_instance = ti = workload.ti
trigger_instance.timeout_after = workload.timeout_after
trigger_name = (
f"{ti.dag_id}/{ti.run_id}/{ti.task_id}/{ti.map_index}/{ti.try_number} (ID {trigger_id})"
if ti
else f"ID {trigger_id}"
)
self.triggers[trigger_id] = {
"task": asyncio.create_task(
self.run_trigger(trigger_id, trigger_instance, workload.timeout_after), name=trigger_name
),
"is_watcher": isinstance(trigger_instance, events.BaseEventTrigger),
"name": trigger_name,
"events": 0,
}
async def cancel_triggers(self):
"""
Drain the to_cancel queue and ensure all triggers that are not in the DB are cancelled.
This allows the cleanup job to delete them.
"""
while self.to_cancel:
trigger_id = self.to_cancel.popleft()
if trigger_id in self.triggers:
# We only delete if it did not exit already
self.triggers[trigger_id]["task"].cancel()
await asyncio.sleep(0)
async def cleanup_finished_triggers(self) -> list[int]:
"""
Go through all trigger tasks (coroutines) and clean up entries for ones that have exited.
Optionally warn users if the exit was not normal.
"""
finished_ids: list[int] = []
for trigger_id, details in list(self.triggers.items()):
if details["task"].done():
finished_ids.append(trigger_id)
# Check to see if it exited for good reasons
saved_exc = None
try:
result = details["task"].result()
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
# These are "expected" exceptions and we stop processing here
# If we don't, then the system requesting a trigger be removed -
# which turns into CancelledError - results in a failure.
del self.triggers[trigger_id]
continue
except BaseException as e:
# This is potentially bad, so log it.
self.log.exception(
"Trigger %s exited with error %s", details["name"], e, trigger_id=trigger_id
)
saved_exc = e
else:
# See if they foolishly returned a TriggerEvent
if isinstance(result, events.TriggerEvent):
self.log.error(
"Trigger returned a TriggerEvent rather than yielding it",
trigger=details["name"],
trigger_id=trigger_id,
)
# See if this exited without sending an event, in which case
# any task instances depending on it need to be failed
if details["events"] == 0:
self.log.error(
"Trigger exited without sending an event. Dependent tasks will be failed.",
name=details["name"],
trigger_id=trigger_id,
)
# TODO: better formatting of the exception?
self.failed_triggers.append((trigger_id, saved_exc))
del self.triggers[trigger_id]
await asyncio.sleep(0)
return finished_ids
async def sync_state_to_supervisor(self, finished_ids: list[int]):
# Copy out of our dequeues in threadsafe manner to sync state with parent
events_to_send = []
while self.events:
data = self.events.popleft()
events_to_send.append(data)
failures_to_send = []
while self.failed_triggers:
id, exc = self.failed_triggers.popleft()
tb = format_exception(type(exc), exc, exc.__traceback__) if exc else None
failures_to_send.append((id, tb))
msg = messages.TriggerStateChanges(
events=events_to_send, finished=finished_ids, failures=failures_to_send
)
if not events_to_send:
msg.events = None
if not failures_to_send:
msg.failures = None
if not finished_ids:
msg.finished = None
# Tell the monitor that we've finished triggers so it can update things
try:
resp = await self.comms_decoder.asend(msg)
except asyncio.IncompleteReadError:
if task := asyncio.current_task():
task.cancel("EOF - shutting down")
return
raise
if not isinstance(resp, messages.TriggerStateSync):
raise RuntimeError(f"Expected to get a TriggerStateSync message, instead we got {type(msg)}")
self.to_create.extend(resp.to_create)
self.to_cancel.extend(resp.to_cancel)
async def block_watchdog(self):
"""
Watchdog loop that detects blocking (badly-written) triggers.
Triggers should be well-behaved async coroutines and await whenever
they need to wait; this loop tries to run every 100ms to see if
there are badly-written triggers taking longer than that and blocking
the event loop.
Unfortunately, we can't tell what trigger is blocking things, but
we can at least detect the top-level problem.
"""
while not self.stop:
last_run = time.monotonic()
await asyncio.sleep(0.1)
# We allow a generous amount of buffer room for now, since it might
# be a busy event loop.
time_elapsed = time.monotonic() - last_run
if time_elapsed > 0.2:
await self.log.ainfo(
"Triggerer's async thread was blocked for %.2f seconds, "
"likely by a badly-written trigger. Set PYTHONASYNCIODEBUG=1 "
"to get more information on overrunning coroutines.",
time_elapsed,
)
Stats.incr("triggers.blocked_main_thread")
async def run_trigger(self, trigger_id: int, trigger: BaseTrigger, timeout_after: datetime | None = None):
"""Run a trigger (they are async generators) and push their events into our outbound event deque."""
if not os.environ.get("AIRFLOW_DISABLE_GREENBACK_PORTAL", "").lower() == "true":
import greenback
await greenback.ensure_portal()
bind_log_contextvars(trigger_id=trigger_id)
name = self.triggers[trigger_id]["name"]
self.log.info("trigger %s starting", name)
try:
async for event in trigger.run():
await self.log.ainfo(
"Trigger fired event", name=self.triggers[trigger_id]["name"], result=event
)
self.triggers[trigger_id]["events"] += 1
self.events.append((trigger_id, event))
except asyncio.CancelledError:
# We get cancelled by the scheduler changing the task state. But if we do lets give a nice error
# message about it
if timeout := timeout_after:
timeout = timeout.replace(tzinfo=timezone.utc) if not timeout.tzinfo else timeout
if timeout < timezone.utcnow():
await self.log.aerror("Trigger cancelled due to timeout")
raise
finally:
# CancelledError will get injected when we're stopped - which is
# fine, the cleanup process will understand that, but we want to
# allow triggers a chance to cleanup, either in that case or if
# they exit cleanly. Exception from cleanup methods are ignored.
with suppress(Exception):
await trigger.cleanup()
await self.log.ainfo("trigger completed", name=name)
def get_trigger_by_classpath(self, classpath: str) -> type[BaseTrigger]:
"""
Get a trigger class by its classpath ("path.to.module.classname").
Uses a cache dictionary to speed up lookups after the first time.
"""
if classpath not in self.trigger_cache:
self.trigger_cache[classpath] = import_string(classpath)
return self.trigger_cache[classpath]
| TriggerRunner |
python | pytorch__pytorch | test/onnx/model_defs/rnn_model_with_packed_sequence.py | {
"start": 1117,
"end": 1626
} | class ____(nn.Module):
def __init__(self, model, batch_first):
super().__init__()
self.model = model
self.batch_first = batch_first
def forward(self, input, hx, seq_lengths):
input = rnn_utils.pack_padded_sequence(input, seq_lengths, self.batch_first)
rets = self.model(input, hx)
ret, rets = rets[0], rets[1:]
ret, _ = rnn_utils.pad_packed_sequence(ret, self.batch_first)
return list([ret] + list(rets))
| RnnModelWithPackedSequenceWithState |
python | run-llama__llama_index | llama-index-core/llama_index/core/chat_ui/models/artifact.py | {
"start": 496,
"end": 639
} | class ____(BaseModel):
created_at: Optional[int] = None
type: ArtifactType
data: Union[CodeArtifactData, DocumentArtifactData]
| Artifact |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 2508,
"end": 3996
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.create_product_set.return_value = {}
op = CloudVisionCreateProductSetOperator(
location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id="id"
)
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_product_set.assert_called_once_with(
location=LOCATION_TEST,
product_set=PRODUCTSET_TEST,
product_set_id=None,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_already_exists(self, mock_hook):
mock_hook.return_value.create_product_set.side_effect = AlreadyExists(message="")
# Exception AlreadyExists not raised, caught in the operator's execute() - idempotence
op = CloudVisionCreateProductSetOperator(
location=LOCATION_TEST,
product_set=PRODUCTSET_TEST,
product_set_id=PRODUCTSET_ID_TEST,
project_id="mock-project-id",
task_id="id",
)
result = op.execute(None)
assert result == PRODUCTSET_ID_TEST
| TestCloudVisionProductSetCreate |
python | walkccc__LeetCode | solutions/333. Largest BST Subtree/333.py | {
"start": 60,
"end": 202
} | class ____:
mn: int # the minimum value in the subtree
mx: int # the maximum value in the subtree
size: int # the size of the subtree
| T |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 39602,
"end": 40145
} | class ____(serializers.ModelSerializer):
"""
Stripped version of the OrganizationSerializer to be used when listing projects.
This serializer is used to avoid leaking information about the organization through a public project.
Instead of checking if user has access to the organization, we just show the name and slug.
"""
_links = OrganizationLinksSerializer(source="*")
class Meta:
model = Organization
fields = (
"slug",
"_links",
)
| RelatedOrganizationSerializer |
python | huggingface__transformers | src/transformers/data/data_collator.py | {
"start": 21111,
"end": 27499
} | class ____:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`], *optional*):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
tokenizer: PreTrainedTokenizerBase
model: Any | None = None
padding: bool | str | PaddingStrategy = True
max_length: int | None = None
pad_to_multiple_of: int | None = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
label_name = "label" if "label" in features[0] else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
# reconvert list[None] to None if necessary
# this might occur when we pass {..., "labels": None}
if labels is not None and all(label is None for label in labels):
labels = None
non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
# run through tokenizer without labels to ensure no side effects
batch = pad_without_fast_tokenizer_warning(
self.tokenizer,
non_labels_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# we have to pad the labels manually as we cannot rely on `tokenizer.pad` and we need them to be of the same length to return tensors
no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD
if labels is not None:
if no_padding:
if isinstance(features[0][label_name], list):
batch["labels"] = list(labels)
else:
batch["labels"] = [np.concatenate([label, []]) for label in labels]
else:
max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None
max_label_length = max(len(l) for l in labels) if not max_padding else self.max_length
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
if isinstance(features[0][label_name], list):
batch["labels"] = [
label + [self.label_pad_token_id] * (max_label_length - len(label))
if padding_side == "right"
else [self.label_pad_token_id] * (max_label_length - len(label)) + label
for label in labels
]
else:
batch["labels"] = [
np.concatenate(
[
label,
np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64),
]
)
if padding_side == "right"
else np.concatenate(
[
np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64),
label,
]
)
for label in labels
]
# reintroduce side effects via tokenizer that return respective datatypes for the `return_tensors` argument
if batch.get("labels", None) is not None:
if return_tensors == "pt":
import torch
batch["labels"] = torch.tensor(batch["labels"], dtype=torch.int64)
else:
batch["labels"] = np.array(batch["labels"], dtype=np.int64)
else:
batch["labels"] = None
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch["labels"])
batch["decoder_input_ids"] = decoder_input_ids
return batch
@dataclass
| DataCollatorForSeq2Seq |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 12610,
"end": 13093
} | class ____(TypedDict, total=False):
approval_request_id: Required[str]
"""The ID of the approval request being answered."""
approve: Required[bool]
"""Whether the request was approved."""
type: Required[Literal["mcp_approval_response"]]
"""The type of the item. Always `mcp_approval_response`."""
id: Optional[str]
"""The unique ID of the approval response"""
reason: Optional[str]
"""Optional reason for the decision."""
| McpApprovalResponse |
python | psf__black | tests/data/cases/stub.py | {
"start": 602,
"end": 742
} | class ____:
class dirty: ...
class little: ...
class secret:
def who_has_to_know(self): ...
def verse(self): ...
| Nested |
python | sympy__sympy | sympy/core/tests/test_expr.py | {
"start": 2334,
"end": 4367
} | class ____:
"""
Minimal implementation of a number that works with SymPy.
If one has a Number class (e.g. Sage Integer, or some other custom class)
that one wants to work well with SymPy, one has to implement at least the
methods of this class DummyNumber, resp. its subclasses I5 and F1_1.
Basically, one just needs to implement either __int__() or __float__() and
then one needs to make sure that the class works with Python integers and
with itself.
"""
number: int | float
def __radd__(self, a):
if isinstance(a, (int, float)):
return a + self.number
return NotImplemented
def __add__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number + a
return NotImplemented
def __rsub__(self, a):
if isinstance(a, (int, float)):
return a - self.number
return NotImplemented
def __sub__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number - a
return NotImplemented
def __rmul__(self, a):
if isinstance(a, (int, float)):
return a * self.number
return NotImplemented
def __mul__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number * a
return NotImplemented
def __rtruediv__(self, a):
if isinstance(a, (int, float)):
return a / self.number
return NotImplemented
def __truediv__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number / a
return NotImplemented
def __rpow__(self, a):
if isinstance(a, (int, float)):
return a ** self.number
return NotImplemented
def __pow__(self, a):
if isinstance(a, (int, float, DummyNumber)):
return self.number ** a
return NotImplemented
def __pos__(self):
return self.number
def __neg__(self):
return - self.number
| DummyNumber |
python | pytorch__pytorch | test/optim/test_optim.py | {
"start": 3815,
"end": 27375
} | class ____(TestCase):
def test_sgd(self):
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
mbuff = torch.rand(10, requires_grad=True, dtype=torch.float64)
state = {"momentum_buffer": mbuff}
gradcheck(
_diff_fn,
(
p,
grad,
state,
SGD,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_adam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
Adam,
{"lr": 0.9, "differentiable": True, "amsgrad": True},
*state.values(),
),
)
def test_rmsprop(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["step"] = torch.zeros((), dtype=torch.float64)
state["square_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["momentum_buffer"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# This can cause issues with large values and nan due to sqrt ops
state["grad_avg"] = 1e-2 * torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
RMSprop,
{
"lr": 0.9,
"maximize": True,
"momentum": 0.9,
"differentiable": True,
"centered": True,
"weight_decay": 0.1,
},
*state.values(),
),
)
def test_adadelta(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["square_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["acc_delta"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
Adadelta,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
def test_adagrad(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["sum"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
Adagrad,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
def test_adamax(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_inf"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
Adamax,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
@skipIfTorchDynamo(
"The inplace mu update fails with dynamo, "
"since this is only happening when differentiable is enabled, skipping for now"
)
def test_asgd(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` `eta` & `mu` are not continuous variables (even though we define them as floats)
# and so they shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["eta"] = torch.tensor(0.9, requires_grad=False, dtype=torch.float64)
state["mu"] = torch.tensor(1.0, requires_grad=False, dtype=torch.float64)
state["ax"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
ASGD,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_rprop(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["prev"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["step_size"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
Rprop,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_adamw(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
AdamW,
{"lr": 0.9, "differentiable": True, "amsgrad": True},
*state.values(),
),
)
def test_nadam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["mu_product"] = torch.tensor(1.0, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
NAdam,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
NAdam,
{"lr": 0.9, "decoupled_weight_decay": True, "differentiable": True},
*state.values(),
),
)
def test_radam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
RAdam,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
RAdam,
{
"lr": 0.9,
"weight_decay": 0.1,
"decoupled_weight_decay": True,
"differentiable": True,
},
*state.values(),
),
)
def test_adam_differentiable_lr(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
kwargs: dict[str, Any] = {"lr": lr, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
Adam,
kwargs, # includes lr
*state.values(),
*kwargs.values(),
),
)
def test_adam_differentiable_weight_decay(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.999, requires_grad=True, dtype=torch.float64)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
kwargs: dict[str, Any] = {"weight_decay": weight_decay, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
Adam,
kwargs, # includes weight_decay
*state.values(),
*kwargs.values(),
),
)
def test_adam_differentiable_betas(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor([0.001], requires_grad=True, dtype=torch.float64)
betas = (
torch.tensor(0.9, requires_grad=True, dtype=torch.float64),
torch.tensor(0.999, requires_grad=True, dtype=torch.float64),
)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# Have to pass in beta1 and beta2 separately
# so they're passed in as Tensors (not a tuple) and recognized by gradcheck.
# In the test, this is called: kwargs.update({betas: (beta1, beta2)})
kwargs: dict[str, Any] = {
"beta1": betas[0],
"beta2": betas[1],
"lr": lr,
"differentiable": True,
}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
Adam,
kwargs, # includes betas
*state.values(),
*kwargs.values(),
),
)
def test_adam_differentiable_all_hyperparams(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.999, requires_grad=True, dtype=torch.float64)
betas = (
torch.tensor(0.9, requires_grad=True, dtype=torch.float64),
torch.tensor(0.999, requires_grad=True, dtype=torch.float64),
)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# Have to pass in beta1 and beta2 separately
# so they're passed in as Tensors (not a tuple) and recognized by gradcheck.
# In the test, this is called: kwargs.update({betas: (beta1, beta2)})
kwargs: dict[str, Any] = {
"lr": lr,
"weight_decay": weight_decay,
"beta1": betas[0],
"beta2": betas[1],
"differentiable": True,
}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
Adam,
kwargs, # includes betas
*state.values(),
*kwargs.values(),
),
)
def test_adamw_differentiable_lr(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
kwargs: dict[str, Any] = {"lr": lr, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
AdamW,
kwargs, # includes lr
*state.values(),
*kwargs.values(),
),
)
def test_adamw_differentiable_weight_decay(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.999, requires_grad=True, dtype=torch.float64)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
kwargs: dict[str, Any] = {"weight_decay": weight_decay, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
AdamW,
kwargs, # includes weight_decay
*state.values(),
*kwargs.values(),
),
)
def test_adamw_differentiable_betas(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
betas = (
torch.tensor(0.9, requires_grad=True, dtype=torch.float64),
torch.tensor(0.999, requires_grad=True, dtype=torch.float64),
)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# Have to pass in beta1 and beta2 separately
# so they're passed in as Tensors (not a tuple) and recognized by gradcheck.
# In the test, this is called: kwargs.update({betas: (beta1, beta2)})
kwargs: dict[str, Any] = {
"beta1": betas[0],
"beta2": betas[1],
"differentiable": True,
}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
AdamW,
kwargs, # includes betas
*state.values(),
*kwargs.values(),
),
)
def test_adamw_differentiable_all_hyperparams(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.999, requires_grad=True, dtype=torch.float64)
betas = (
torch.tensor(0.9, requires_grad=True, dtype=torch.float64),
torch.tensor(0.999, requires_grad=True, dtype=torch.float64),
)
state = {}
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# Have to pass in beta1 and beta2 separately
# so they're passed in as Tensors (not a tuple) and recognized by gradcheck.
# In the test, this is called: kwargs.update({betas: (beta1, beta2)})
kwargs: dict[str, Any] = {
"lr": lr,
"weight_decay": weight_decay,
"beta1": betas[0],
"beta2": betas[1],
"differentiable": True,
}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
AdamW,
kwargs, # includes betas
*state.values(),
*kwargs.values(),
),
)
def test_differentiable_lr(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
mbuff = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
state = {"momentum_buffer": mbuff}
kwargs: dict[str, Any] = {"lr": lr, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
SGD,
kwargs, # includes lr
*state.values(),
*kwargs.values(),
),
)
def test_differentiable_weight_decay(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.9, requires_grad=True, dtype=torch.float64)
mbuff = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
state = {"momentum_buffer": mbuff}
kwargs: dict[str, Any] = {"weight_decay": weight_decay, "differentiable": True}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
SGD,
kwargs, # includes weight_decay
*state.values(),
*kwargs.values(),
),
)
def test_differentiable_weight_decay_and_lr(self):
params = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
weight_decay = torch.tensor(0.9, requires_grad=True, dtype=torch.float64)
lr = torch.tensor(0.001, requires_grad=True, dtype=torch.float64)
mbuff = torch.rand_like(params, requires_grad=True, dtype=torch.float64)
state = {"momentum_buffer": mbuff}
kwargs: dict[str, Any] = {
"weight_decay": weight_decay,
"lr": lr,
"differentiable": True,
}
gradcheck(
_multistep_backprop_diff_hyperparams_fn,
(
params,
grad,
state,
SGD,
kwargs, # includes lr & weight_decay
*state.values(),
*kwargs.values(),
),
)
if __name__ == "__main__":
print("These tests should be run through test/test_optim.py instead")
| TestDifferentiableOptimizer |
python | huggingface__transformers | tests/models/sam/test_processing_sam.py | {
"start": 1126,
"end": 6685
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = SamProcessor
def prepare_mask_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
mask_inputs = [np.random.randint(255, size=(30, 400), dtype=np.uint8)]
mask_inputs = [Image.fromarray(x) for x in mask_inputs]
return mask_inputs
def test_chat_template_save_loading(self):
self.skipTest("SamProcessor does not have a tokenizer")
def test_image_processor_defaults_preserved_by_image_kwargs(self):
self.skipTest("SamProcessor does not have a tokenizer")
def test_kwargs_overrides_default_image_processor_kwargs(self):
self.skipTest("SamProcessor does not have a tokenizer")
def test_kwargs_overrides_default_tokenizer_kwargs(self):
self.skipTest("SamProcessor does not have a tokenizer")
def test_tokenizer_defaults_preserved_by_kwargs(self):
self.skipTest("SamProcessor does not have a tokenizer")
def test_image_processor_no_masks(self):
image_processor = self.get_component("image_processor")
processor = SamProcessor(image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="pt")
input_processor = processor(images=image_input, return_tensors="pt")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
for image in input_feat_extract.pixel_values:
self.assertEqual(image.shape, (3, 1024, 1024))
for original_size in input_feat_extract.original_sizes:
np.testing.assert_array_equal(original_size, np.array([30, 400]))
for reshaped_input_size in input_feat_extract.reshaped_input_sizes:
np.testing.assert_array_equal(
reshaped_input_size, np.array([77, 1024])
) # reshaped_input_size value is before padding
def test_image_processor_with_masks(self):
image_processor = self.get_component("image_processor")
processor = SamProcessor(image_processor=image_processor)
image_input = self.prepare_image_inputs()
mask_input = self.prepare_mask_inputs()
input_feat_extract = image_processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
input_processor = processor(images=image_input, segmentation_maps=mask_input, return_tensors="pt")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
for label in input_feat_extract.labels:
self.assertEqual(label.shape, (256, 256))
@require_torch
def test_post_process_masks(self):
image_processor = self.get_component("image_processor")
processor = SamProcessor(image_processor=image_processor)
dummy_masks = [torch.ones((1, 3, 5, 5))]
original_sizes = [[1764, 2646]]
reshaped_input_size = [[683, 1024]]
masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
masks = processor.post_process_masks(
dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
# should also work with np
dummy_masks = [np.ones((1, 3, 5, 5))]
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
dummy_masks = [[1, 0], [0, 1]]
with self.assertRaises(TypeError):
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
def test_rle_encoding(self):
"""
Test the run-length encoding function.
"""
# Test that a mask of all zeros returns a single run [height * width].
input_mask = torch.zeros((1, 2, 2), dtype=torch.long) # shape: 1 x 2 x 2
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-zero mask, we expect a single run of length 4:
self.assertEqual(rle[0]["counts"], [4])
# Test that a mask of all ones returns [0, height * width].
input_mask = torch.ones((1, 2, 2), dtype=torch.long) # shape: 1 x 2 x 2
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
# For a 2x2 all-one mask, we expect two runs: [0, 4].
self.assertEqual(rle[0]["counts"], [0, 4])
# Test a mask with mixed 0s and 1s to ensure the run-length encoding is correct.
# Example mask:
# Row 0: [0, 1]
# Row 1: [1, 1]
# This is shape (1, 2, 2).
# Flattened in Fortran order -> [0, 1, 1, 1].
# The RLE for [0,1,1,1] is [1, 3].
input_mask = torch.tensor([[[0, 1], [1, 1]]], dtype=torch.long)
rle = _mask_to_rle_pytorch(input_mask)
self.assertEqual(len(rle), 1)
self.assertEqual(rle[0]["size"], [2, 2])
self.assertEqual(rle[0]["counts"], [1, 3]) # 1 zero, followed by 3 ones
| SamProcessorTest |
python | numpy__numpy | numpy/_core/tests/test_getlimits.py | {
"start": 2651,
"end": 3285
} | class ____:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
assert_iinfo_equal(iinfo(dt1), iinfo(dt2))
assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np._core.sctypes['uint']
for T in types:
with np.errstate(over="ignore"):
max_calculated = T(0) - T(1)
assert_equal(iinfo(T).max, max_calculated)
| TestIinfo |
python | getsentry__sentry | src/sentry/shared_integrations/exceptions/__init__.py | {
"start": 2827,
"end": 2905
} | class ____(Protocol):
@property
def url(self) -> str: ...
| _RequestHasUrl |
python | pytest-dev__pytest-xdist | testing/acceptance_test.py | {
"start": 53994,
"end": 56476
} | class ____:
@pytest.fixture
def fake_request(self) -> pytest.FixtureRequest:
class FakeOption:
def __init__(self) -> None:
self.dist = "load"
class FakeConfig:
def __init__(self) -> None:
self.workerinput = {"workerid": "gw5"}
self.option = FakeOption()
class FakeRequest:
def __init__(self) -> None:
self.config = FakeConfig()
return cast(pytest.FixtureRequest, FakeRequest())
def test_is_xdist_worker(self, fake_request: pytest.FixtureRequest) -> None:
assert xdist.is_xdist_worker(fake_request)
del fake_request.config.workerinput # type: ignore[attr-defined]
assert not xdist.is_xdist_worker(fake_request)
def test_is_xdist_controller(self, fake_request: pytest.FixtureRequest) -> None:
assert not xdist.is_xdist_master(fake_request)
assert not xdist.is_xdist_controller(fake_request)
del fake_request.config.workerinput # type: ignore[attr-defined]
assert xdist.is_xdist_master(fake_request)
assert xdist.is_xdist_controller(fake_request)
fake_request.config.option.dist = "no"
assert not xdist.is_xdist_master(fake_request)
assert not xdist.is_xdist_controller(fake_request)
def test_get_xdist_worker_id(self, fake_request: pytest.FixtureRequest) -> None:
assert xdist.get_xdist_worker_id(fake_request) == "gw5"
del fake_request.config.workerinput # type: ignore[attr-defined]
assert xdist.get_xdist_worker_id(fake_request) == "master"
def test_collection_crash(pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
assert 0
"""
)
result = pytester.runpytest(p1, "-n1")
assert result.ret == 1
result.stdout.fnmatch_lines(
[
"created: 1/1 worker",
"1 worker [[]0 items[]]",
"*_ ERROR collecting test_collection_crash.py _*",
"E assert 0",
"*= 1 error in *",
]
)
def test_dist_in_addopts(pytester: pytest.Pytester) -> None:
"""Users can set a default distribution in the configuration file (#789)."""
pytester.makepyfile(
"""
def test():
pass
"""
)
pytester.makeini(
"""
[pytest]
addopts = --dist loadscope
"""
)
result = pytester.runpytest()
assert result.ret == 0
| TestAPI |
python | oauthlib__oauthlib | tests/oauth1/rfc5849/test_client.py | {
"start": 1318,
"end": 3551
} | class ____(TestCase):
def test_convert_to_unicode_resource_owner(self):
client = Client('client-key',
resource_owner_key=b'owner key')
self.assertNotIsInstance(client.resource_owner_key, bytes)
self.assertEqual(client.resource_owner_key, 'owner key')
def test_give_explicit_timestamp(self):
client = Client('client-key', timestamp='1')
params = dict(client.get_oauth_params(Request('http://example.com')))
self.assertEqual(params['oauth_timestamp'], '1')
def test_give_explicit_nonce(self):
client = Client('client-key', nonce='1')
params = dict(client.get_oauth_params(Request('http://example.com')))
self.assertEqual(params['oauth_nonce'], '1')
def test_decoding(self):
client = Client('client_key', decoding='utf-8')
uri, headers, body = client.sign('http://a.b/path?query',
http_method='POST', body='a=b',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertIsInstance(uri, bytes)
self.assertIsInstance(body, bytes)
for k, v in headers.items():
self.assertIsInstance(k, bytes)
self.assertIsInstance(v, bytes)
def test_hmac_sha1(self):
client = Client('client_key')
# instance is using the correct signer method
self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA1],
client.SIGNATURE_METHODS[client.signature_method])
def test_hmac_sha256(self):
client = Client('client_key', signature_method=SIGNATURE_HMAC_SHA256)
# instance is using the correct signer method
self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA256],
client.SIGNATURE_METHODS[client.signature_method])
def test_rsa(self):
client = Client('client_key', signature_method=SIGNATURE_RSA)
# instance is using the correct signer method
self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_RSA],
client.SIGNATURE_METHODS[client.signature_method])
# don't need an RSA key to instantiate
self.assertIsNone(client.rsa_key)
| ClientConstructorTests |
python | django-extensions__django-extensions | django_extensions/management/jobs.py | {
"start": 661,
"end": 711
} | class ____(BaseJob):
when = "monthly"
| MonthlyJob |
python | openai__openai-python | src/openai/resources/embeddings.py | {
"start": 904,
"end": 6141
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> EmbeddingsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return EmbeddingsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> EmbeddingsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return EmbeddingsWithStreamingResponse(self)
def create(
self,
*,
input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]],
model: Union[str, EmbeddingModel],
dimensions: int | Omit = omit,
encoding_format: Literal["float", "base64"] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> CreateEmbeddingResponse:
"""
Creates an embedding vector representing the input text.
Args:
input: Input text to embed, encoded as a string or array of tokens. To embed multiple
inputs in a single request, pass an array of strings or array of token arrays.
The input must not exceed the max input tokens for the model (8192 tokens for
all embedding models), cannot be an empty string, and any array must be 2048
dimensions or less.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens. In addition to the per-input token limit, all embedding
models enforce a maximum of 300,000 tokens summed across all inputs in a single
request.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
encoding_format: The format to return the embeddings in. Can be either `float` or
[`base64`](https://pypi.org/project/pybase64/).
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
params = {
"input": input,
"model": model,
"user": user,
"dimensions": dimensions,
"encoding_format": encoding_format,
}
if not is_given(encoding_format):
params["encoding_format"] = "base64"
def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
if is_given(encoding_format):
# don't modify the response object if a user explicitly asked for a format
return obj
if not obj.data:
raise ValueError("No embedding data received")
for embedding in obj.data:
data = cast(object, embedding.embedding)
if not isinstance(data, str):
continue
if not has_numpy():
# use array for base64 optimisation
embedding.embedding = array.array("f", base64.b64decode(data)).tolist()
else:
embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call]
base64.b64decode(data), dtype="float32"
).tolist()
return obj
return self._post(
"/embeddings",
body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
cast_to=CreateEmbeddingResponse,
)
| Embeddings |
python | getsentry__sentry | src/sentry/notifications/notifications/activity/archive.py | {
"start": 149,
"end": 411
} | class ____(GroupActivityNotification):
metrics_key = "archived_activity"
title = "Archived Issue"
def get_description(self) -> tuple[str, str | None, Mapping[str, Any]]:
return "{author} archived {an issue}", None, {}
| ArchiveActivityNotification |
python | PrefectHQ__prefect | tests/server/services/test_loop_service.py | {
"start": 488,
"end": 4115
} | class ____(LoopService):
def service_settings(cls) -> ServicesBaseSetting:
return ServicesBaseSetting(enabled=True)
async def run_once(self) -> None:
pass
async def test_loop_service_seconds_can_be_set_at_init():
l1 = ExampleService()
assert l1.loop_seconds == 60
l2 = ExampleService(loop_seconds=100)
assert l2.loop_seconds == 100
async def test_service_name_from_class():
assert ExampleService().name == "ExampleService"
async def test_loop_service_run_once():
class MyService(ExampleService):
loop_seconds = 0
counter = 0
async def run_once(self):
assert self._should_stop is False
assert self._is_running is True
self.counter += 1
if self.counter == 3:
await self.stop(block=False)
# run the service
service = MyService()
await service.start()
assert service.counter == 3
assert service._is_running is False
async def test_loop_service_loops_kwarg():
class MyService(ExampleService):
loop_seconds = 0
counter = 0
async def run_once(self):
assert self._should_stop is False
assert self._is_running is True
self.counter += 1
# run the service
service = MyService()
await service.start(loops=3)
assert service.counter == 3
assert service._is_running is False
async def test_loop_service_run_multiple_times():
class MyService(ExampleService):
loop_seconds = 0
counter = 0
async def run_once(self):
self.counter += 1
# run the service
service = MyService()
await service.start(loops=3)
await service.start(loops=2)
await service.start(loops=1)
assert service.counter == 6
async def test_loop_service_calls_on_start_on_stop_once():
class MyService(ExampleService):
state = []
loop_seconds = 0
async def _on_start(self):
self.state.append("_on_start")
await super()._on_start()
async def run_once(self):
pass
async def _on_stop(self):
self.state.append("_on_stop")
await super()._on_stop()
service = MyService()
await service.start(loops=3)
assert service.state == ["_on_start", "_on_stop"]
async def test_early_stop():
"""Test that stop criterion is evaluated without waiting for loop_seconds"""
LOOP_INTERVAL = 120
service = ExampleService(loop_seconds=LOOP_INTERVAL)
asyncio.create_task(service.start())
# yield to let the service start
await asyncio.sleep(0.1)
assert service._is_running is True
assert service._should_stop is False
dt = datetime.now(timezone.utc)
await service.stop()
dt2 = datetime.now(timezone.utc)
assert service._should_stop is True
assert service._is_running is False
assert dt2 - dt < timedelta(seconds=LOOP_INTERVAL)
async def test_stop_block_escapes_deadlock(caplog):
"""Test that calling a blocking stop inside the service eventually returns"""
LOOP_INTERVAL = 0.1
class MyService(ExampleService):
loop_seconds = LOOP_INTERVAL
async def run_once(self):
# calling a blocking stop inside run_once should create a deadlock
await self.stop(block=True)
service = MyService()
asyncio.create_task(service.start())
# sleep for longer than one loop interval
await asyncio.sleep(LOOP_INTERVAL * 5)
assert service._is_running is False
assert "`stop(block=True)` was called on MyService but" in caplog.text
| ExampleService |
python | Pylons__pyramid | tests/test_registry.py | {
"start": 2859,
"end": 10675
} | class ____(unittest.TestCase):
def _getTargetClass(slf):
from pyramid.registry import Introspector
return Introspector
def _makeOne(self):
return self._getTargetClass()()
def test_conformance(self):
from zope.interface.verify import verifyClass, verifyObject
from pyramid.interfaces import IIntrospector
verifyClass(IIntrospector, self._getTargetClass())
verifyObject(IIntrospector, self._makeOne())
def test_add(self):
inst = self._makeOne()
intr = DummyIntrospectable()
inst.add(intr)
self.assertEqual(intr.order, 0)
category = {'discriminator': intr, 'discriminator_hash': intr}
self.assertEqual(inst._categories, {'category': category})
def test_get_success(self):
inst = self._makeOne()
intr = DummyIntrospectable()
inst.add(intr)
self.assertEqual(inst.get('category', 'discriminator'), intr)
def test_get_success_byhash(self):
inst = self._makeOne()
intr = DummyIntrospectable()
inst.add(intr)
self.assertEqual(inst.get('category', 'discriminator_hash'), intr)
def test_get_fail(self):
inst = self._makeOne()
intr = DummyIntrospectable()
inst.add(intr)
self.assertEqual(inst.get('category', 'wontexist', 'foo'), 'foo')
def test_get_category(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr2)
inst.add(intr)
expected = [
{'introspectable': intr2, 'related': []},
{'introspectable': intr, 'related': []},
]
self.assertEqual(inst.get_category('category'), expected)
def test_get_category_returns_default_on_miss(self):
inst = self._makeOne()
self.assertEqual(inst.get_category('category', '123'), '123')
def test_get_category_with_sortkey(self):
import operator
inst = self._makeOne()
intr = DummyIntrospectable()
intr.foo = 2
intr2 = DummyIntrospectable()
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
intr2.foo = 1
inst.add(intr)
inst.add(intr2)
expected = [
{'introspectable': intr2, 'related': []},
{'introspectable': intr, 'related': []},
]
self.assertEqual(
inst.get_category('category', sort_key=operator.attrgetter('foo')),
expected,
)
def test_categorized(self):
import operator
inst = self._makeOne()
intr = DummyIntrospectable()
intr.foo = 2
intr2 = DummyIntrospectable()
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
intr2.foo = 1
inst.add(intr)
inst.add(intr2)
expected = [
(
'category',
[
{'introspectable': intr2, 'related': []},
{'introspectable': intr, 'related': []},
],
)
]
self.assertEqual(
inst.categorized(sort_key=operator.attrgetter('foo')), expected
)
def test_categories(self):
inst = self._makeOne()
inst._categories['a'] = 1
inst._categories['b'] = 2
self.assertEqual(list(inst.categories()), ['a', 'b'])
def test_remove(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.category_name = 'category2'
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr)
inst.add(intr2)
inst.relate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
inst.remove('category', 'discriminator')
self.assertEqual(
inst._categories,
{
'category': {},
'category2': {
'discriminator2': intr2,
'discriminator2_hash': intr2,
},
},
)
self.assertEqual(inst._refs.get(intr), None)
self.assertEqual(inst._refs[intr2], [])
def test_remove_fail(self):
inst = self._makeOne()
self.assertEqual(inst.remove('a', 'b'), None)
def test_relate(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.category_name = 'category2'
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr)
inst.add(intr2)
inst.relate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
self.assertEqual(
inst._categories,
{
'category': {
'discriminator': intr,
'discriminator_hash': intr,
},
'category2': {
'discriminator2': intr2,
'discriminator2_hash': intr2,
},
},
)
self.assertEqual(inst._refs[intr], [intr2])
self.assertEqual(inst._refs[intr2], [intr])
def test_relate_fail(self):
inst = self._makeOne()
intr = DummyIntrospectable()
inst.add(intr)
self.assertRaises(
KeyError,
inst.relate,
('category', 'discriminator'),
('category2', 'discriminator2'),
)
def test_unrelate(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.category_name = 'category2'
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr)
inst.add(intr2)
inst.relate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
inst.unrelate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
self.assertEqual(
inst._categories,
{
'category': {
'discriminator': intr,
'discriminator_hash': intr,
},
'category2': {
'discriminator2': intr2,
'discriminator2_hash': intr2,
},
},
)
self.assertEqual(inst._refs[intr], [])
self.assertEqual(inst._refs[intr2], [])
def test_related(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.category_name = 'category2'
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr)
inst.add(intr2)
inst.relate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
self.assertEqual(inst.related(intr), [intr2])
def test_related_fail(self):
inst = self._makeOne()
intr = DummyIntrospectable()
intr2 = DummyIntrospectable()
intr2.category_name = 'category2'
intr2.discriminator = 'discriminator2'
intr2.discriminator_hash = 'discriminator2_hash'
inst.add(intr)
inst.add(intr2)
inst.relate(
('category', 'discriminator'), ('category2', 'discriminator2')
)
del inst._categories['category']
self.assertRaises(KeyError, inst.related, intr)
| TestIntrospector |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/device.py | {
"start": 1480,
"end": 3191
} | class ____(forms.Form):
user_code = forms.CharField(required=True)
def clean_user_code(self):
"""
Performs validation on the user_code provided by the user and adds to the cleaned_data dict
the "device_grant" object associated with the user_code, which is useful to process the
response in the DeviceUserCodeView.
It can raise one of the following ValidationErrors, with the associated codes:
* incorrect_user_code: if a device grant associated with the user_code does not exist
* expired_user_code: if the device grant associated with the user_code has expired
* user_code_already_used: if the device grant associated with the user_code has been already
approved or denied. The only accepted state of the device grant is AUTHORIZATION_PENDING.
"""
cleaned_data = super().clean()
user_code: str = cleaned_data["user_code"]
try:
device_grant: DeviceGrant = get_device_grant_model().objects.get(user_code=user_code)
except DeviceGrant.DoesNotExist:
raise ValidationError("Incorrect user code", code="incorrect_user_code")
if device_grant.is_expired():
raise ValidationError("Expired user code", code="expired_user_code")
# User of device has already made their decision for this device.
if device_grant.status != device_grant.AUTHORIZATION_PENDING:
raise ValidationError("User code has already been used", code="user_code_already_used")
# Make the device_grant available to the View, saving one additional db call.
cleaned_data["device_grant"] = device_grant
return user_code
| DeviceGrantForm |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/unit_tests/test_component_decl.py | {
"start": 1045,
"end": 3881
} | class ____(dg.Component):
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
raise NotImplementedError("Not implemented")
def test_component_loader_decl(component_tree: MockComponentTree):
my_component = MyComponent()
decl = ComponentLoaderDecl(
context=component_tree.decl_load_context,
path=ComponentPath.from_path(Path(__file__).parent),
component_node_fn=lambda context: my_component,
)
component_tree.set_root_decl(decl)
assert component_tree.load_root_component() == my_component
def test_composite_python_decl(component_tree: MockComponentTree):
my_component = MyComponent()
loader_decl = ComponentLoaderDecl(
context=component_tree.decl_load_context,
path=ComponentPath.from_path(Path(__file__).parent, "my_component"),
component_node_fn=lambda context: my_component,
)
decl = PythonFileDecl(
path=ComponentPath.from_path(Path(__file__).parent),
context=component_tree.decl_load_context,
decls={"my_component": loader_decl},
)
component_tree.set_root_decl(decl)
loaded_component = component_tree.load_root_component()
assert isinstance(loaded_component, PythonFileComponent)
assert loaded_component.components["my_component"] == my_component
def test_defs_folder_decl(component_tree: MockComponentTree):
my_component = MyComponent()
loader_decl = ComponentLoaderDecl(
context=component_tree.decl_load_context,
path=ComponentPath.from_path(Path(__file__).parent / "my_component"),
component_node_fn=lambda context: my_component,
)
my_other_component = MyComponent()
my_other_loader_decl = ComponentLoaderDecl(
context=component_tree.decl_load_context,
path=ComponentPath.from_path(Path(__file__).parent / "my_other_component"),
component_node_fn=lambda context: my_other_component,
)
defs_path = Path(__file__).parent
decl = DefsFolderDecl(
context=component_tree.decl_load_context,
path=ComponentPath.from_path(defs_path),
children={
defs_path / "my_component": loader_decl,
defs_path / "my_other_component": my_other_loader_decl,
},
source_tree=None,
component_file_model=None,
)
component_tree.set_root_decl(decl)
loaded_component = component_tree.load_root_component()
assert isinstance(loaded_component, dg.DefsFolderComponent)
assert loaded_component.children[defs_path / "my_component"] == my_component
assert component_tree.find_decl_at_path(defs_path) == decl
assert component_tree.find_decl_at_path(defs_path / "my_component") == loader_decl
assert (
component_tree.find_decl_at_path(defs_path / "my_other_component") == my_other_loader_decl
)
| MyComponent |
python | eth-brownie__brownie | brownie/network/web3.py | {
"start": 839,
"end": 8373
} | class ____(_Web3):
"""Brownie Web3 subclass"""
def __init__(self) -> None:
super().__init__(HTTPProvider("null"))
self.provider = None
self._mainnet_w3: Optional[_Web3] = None
self._genesis_hash: Optional[HexStr] = None
self._chain_uri: Optional[str] = None
self._custom_middleware: Set = set()
self._supports_traces = None
self._chain_id: Optional[int] = None
def _remove_middlewares(self) -> None:
for middleware in self._custom_middleware:
try:
self.middleware_onion.remove(middleware)
except ValueError:
pass
middleware.uninstall()
self._custom_middleware.clear()
def connect(self, uri: str, timeout: int = 30) -> None:
"""Connects to a provider"""
self._remove_middlewares()
self.provider = None
uri = _expand_environment_vars(uri)
try:
if Path(uri).exists():
self.provider = IPCProvider(uri, timeout=timeout)
except OSError:
pass
if self.provider is None:
if uri.startswith("ws"):
self.provider = WebsocketProvider(uri, {"close_timeout": timeout})
elif uri.startswith("http"):
self.provider = HTTPProvider(uri, {"timeout": timeout})
else:
raise ValueError(
"Unknown URI - must be a path to an IPC socket, a websocket "
"beginning with 'ws' or a URL beginning with 'http'"
)
try:
if self.isConnected():
self.reset_middlewares()
except Exception:
# checking an invalid connection sometimes raises on windows systems
pass
def reset_middlewares(self) -> None:
"""
Uninstall and reinject all custom middlewares.
"""
if self.provider is None:
raise ConnectionError("web3 is not currently connected")
self._remove_middlewares()
middleware_layers = get_middlewares(self, CONFIG.network_type)
# middlewares with a layer below zero are injected
to_inject = sorted((i for i in middleware_layers if i < 0), reverse=True)
for layer in to_inject:
for obj in middleware_layers[layer]:
middleware = obj(self)
self.middleware_onion.inject(middleware, layer=0)
self._custom_middleware.add(middleware)
# middlewares with a layer of zero or greater are added
to_add = sorted(i for i in middleware_layers if i >= 0)
for layer in to_add:
for obj in middleware_layers[layer]:
middleware = obj(self)
self.middleware_onion.add(middleware)
self._custom_middleware.add(middleware)
def disconnect(self) -> None:
"""Disconnects from a provider"""
if self.provider:
self.provider = None
self._genesis_hash = None
self._chain_uri = None
self._supports_traces = None
self._chain_id = None
self._remove_middlewares()
def is_connected(self) -> bool:
return super().is_connected() if self.provider else False
def isConnected(self) -> bool:
# retained to avoid breaking an interface explicitly defined in brownie
return self.is_connected()
@property
def supports_traces(self) -> bool:
if not self.provider:
return False
# Send a malformed request to `debug_traceTransaction`. If the error code
# returned is -32601 "endpoint does not exist/is not available" we know
# traces are not possible. Any other error code means the endpoint is open.
if self._supports_traces is None:
try:
response = self.provider.make_request("debug_traceTransaction", [])
self._supports_traces = response["error"]["code"] != -32601
except HTTPError:
self._supports_traces = False
return self._supports_traces
@property
def _mainnet(self) -> _Web3:
# a web3 instance connected to the mainnet
if self.is_connected() and CONFIG.active_network["id"] == "mainnet":
return self
try:
mainnet = CONFIG.networks["mainnet"]
except KeyError:
raise MainnetUndefined("No 'mainnet' network defined") from None
if not self._mainnet_w3:
uri = _expand_environment_vars(mainnet["host"])
self._mainnet_w3 = _Web3(HTTPProvider(uri))
return self._mainnet_w3
@property
def genesis_hash(self) -> HexStr:
"""The genesis hash of the currently active network."""
if self.provider is None:
raise ConnectionError("web3 is not currently connected")
if self._genesis_hash is None:
# removeprefix is used for compatibility with both hexbytes<1 and >=1
self._genesis_hash = HexStr(self.eth.get_block(0)["hash"].hex().removeprefix("0x"))
return self._genesis_hash
@property
def chain_uri(self) -> str:
if self.provider is None:
raise ConnectionError("web3 is not currently connected")
if self.genesis_hash not in _chain_uri_cache:
block_number = max(self.eth.block_number - 16, 0)
# removeprefix is used for compatibility with both hexbytes<1 and >=1
block_hash = self.eth.get_block(block_number)["hash"].hex().removeprefix("0x")
chain_uri = f"blockchain://{self.genesis_hash}/block/{block_hash}"
_chain_uri_cache[self.genesis_hash] = chain_uri
return _chain_uri_cache[self.genesis_hash]
@property
def chain_id(self) -> int:
# chain ID is needed each time we a sign a transaction, however we
# cache it after the first request to avoid redundant RPC calls
if self.provider is None:
raise ConnectionError("web3 is not currently connected")
if self._chain_id is None:
self._chain_id = self.eth.chain_id
return self._chain_id
def _expand_environment_vars(uri: str) -> str:
if "$" not in uri:
return uri
expanded = os.path.expandvars(uri)
if uri != expanded:
return expanded
raise ValueError(f"Unable to expand environment variable in host setting: '{uri}'")
def _get_path() -> Path:
return _get_data_folder().joinpath("ens.json")
def _resolve_address(domain: str) -> ChecksumAddress:
# convert ENS domain to address
if not isinstance(domain, str) or "." not in domain:
return to_address(domain)
domain = domain.lower()
if domain not in _ens_cache or time.time() - _ens_cache[domain][1] > 86400:
try:
ns = ENS.from_web3(web3._mainnet)
except MainnetUndefined as e:
raise MainnetUndefined(f"Cannot resolve ENS address - {e}") from None
address = ns.address(domain)
_ens_cache[domain] = [address, int(time.time())]
with _get_path().open("w") as fp:
ujson_dump(_ens_cache, fp)
if _ens_cache[domain][0] is None:
raise UnsetENSName(f"ENS domain '{domain}' is not set")
return _ens_cache[domain][0]
web3 = Web3()
web3.eth.set_gas_price_strategy(rpc_gas_price_strategy)
try:
with _get_path().open() as fp:
_ens_cache: Dict = ujson_load(fp)
except (FileNotFoundError, JSONDecodeError):
_ens_cache = {}
| Web3 |
python | getsentry__sentry | tests/sentry/relocation/api/endpoints/artifacts/test_details.py | {
"start": 6491,
"end": 9966
} | class ____(GetRelocationArtifactDetailsTest):
def setUp(self) -> None:
super().setUp()
dir = f"runs/{self.relocation.uuid}"
self.relocation_storage = get_relocation_storage()
# These files are unencrypted, so just save the file name as the content for testing
# purposes.
self.relocation_storage.save(
f"{dir}/somedir/file.json", StringIO(f'"{dir}/somedir/file.json"')
)
@override_options({"staff.ga-rollout": True})
def test_bad_unprivileged_user(self) -> None:
self.login_as(user=self.owner, superuser=False, staff=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), "somedir", "file.json", status_code=403)
def test_bad_superuser_disabled(self) -> None:
self.add_user_permission(self.superuser, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.superuser, superuser=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), "somedir", "file.json", status_code=403)
@override_options({"staff.ga-rollout": True})
def test_bad_staff_disabled(self) -> None:
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=False)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), "somedir", "file.json", status_code=403)
def test_bad_has_superuser_but_no_relocation_admin_permission(self) -> None:
self.login_as(user=self.superuser, superuser=True)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
response = self.get_error_response(
str(does_not_exist_uuid), "somedir", "file.json", status_code=403
)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
@override_options({"staff.ga-rollout": True})
def test_bad_has_staff_but_no_relocation_admin_permission(self) -> None:
self.login_as(user=self.staff_user, staff=True)
# Ensures we don't reveal existence info to improperly authenticated users.
does_not_exist_uuid = uuid4().hex
response = self.get_error_response(
str(does_not_exist_uuid), "somedir", "file.json", status_code=403
)
assert response.data.get("detail") == ERR_NEED_RELOCATION_ADMIN
@override_options({"staff.ga-rollout": True})
def test_bad_relocation_not_found(self) -> None:
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=True)
does_not_exist_uuid = uuid4().hex
self.get_error_response(str(does_not_exist_uuid), "somedir", "file.json", status_code=404)
@override_options({"staff.ga-rollout": True})
def test_bad_file_not_found(self) -> None:
self.add_user_permission(self.staff_user, RELOCATION_ADMIN_PERMISSION)
self.login_as(user=self.staff_user, staff=True)
self.get_error_response(
str(self.relocation.uuid), "nonexistent", "file.json", status_code=404
)
| GetRelocationArtifactDetailsBadTest |
python | spack__spack | lib/spack/spack/util/elf.py | {
"start": 1128,
"end": 1452
} | class ____:
MAGIC = b"\x7fELF"
CLASS32 = 1
CLASS64 = 2
DATA2LSB = 1
DATA2MSB = 2
ET_EXEC = 2
ET_DYN = 3
PT_LOAD = 1
PT_DYNAMIC = 2
PT_INTERP = 3
DT_NULL = 0
DT_NEEDED = 1
DT_STRTAB = 5
DT_SONAME = 14
DT_RPATH = 15
DT_RUNPATH = 29
SHT_STRTAB = 3
| ELF_CONSTANTS |
python | realpython__materials | fastapi-python-web-apis/main.py | {
"start": 963,
"end": 1027
} | class ____(BaseModel):
message: str
item: str
| ItemResponse |
python | redis__redis-py | tests/test_asyncio/test_multidb/test_failover.py | {
"start": 2261,
"end": 5837
} | class ____:
@pytest.mark.asyncio
@pytest.mark.parametrize(
"mock_db",
[
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
],
indirect=True,
)
async def test_execute_returns_valid_database_with_failover_attempts(
self, mock_db, mock_fs
):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
mock_db,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
for i in range(failover_attempts + 1):
try:
database = await executor.execute()
assert database == mock_db
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
await asyncio.sleep(0.11)
pass
assert mock_fs.database.call_count == 4
@pytest.mark.asyncio
async def test_execute_throws_exception_on_attempts_exceed(self, mock_fs):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
with pytest.raises(NoValidDatabaseException):
for i in range(failover_attempts + 1):
try:
await executor.execute()
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
await asyncio.sleep(0.11)
pass
assert mock_fs.database.call_count == 4
@pytest.mark.asyncio
async def test_execute_throws_exception_on_attempts_does_not_exceed_delay(
self, mock_fs
):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
with pytest.raises(
TemporaryUnavailableException,
match=(
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
),
):
for i in range(failover_attempts + 1):
try:
await executor.execute()
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
if i == failover_attempts:
raise e
assert mock_fs.database.call_count == 4
| TestDefaultStrategyExecutor |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_pii.py | {
"start": 8081,
"end": 9353
} | class ____:
"""Test mask strategy."""
def test_mask_email(self):
middleware = PIIMiddleware("email", strategy="mask")
state = {"messages": [HumanMessage("Email: user@example.com")]}
result = middleware.before_model(state, None)
assert result is not None
content = result["messages"][0].content
assert "user@****.com" in content
assert "user@example.com" not in content
def test_mask_credit_card(self):
middleware = PIIMiddleware("credit_card", strategy="mask")
# Valid test card
state = {"messages": [HumanMessage("Card: 4532015112830366")]}
result = middleware.before_model(state, None)
assert result is not None
content = result["messages"][0].content
assert "0366" in content # Last 4 digits visible
assert "4532015112830366" not in content
def test_mask_ip(self):
middleware = PIIMiddleware("ip", strategy="mask")
state = {"messages": [HumanMessage("IP: 192.168.1.100")]}
result = middleware.before_model(state, None)
assert result is not None
content = result["messages"][0].content
assert "*.*.*.100" in content
assert "192.168.1.100" not in content
| TestMaskStrategy |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_claim_spec.py | {
"start": 383,
"end": 3414
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'devices': 'V1DeviceClaim'
}
attribute_map = {
'devices': 'devices'
}
def __init__(self, devices=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceClaimSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._devices = None
self.discriminator = None
if devices is not None:
self.devices = devices
@property
def devices(self):
"""Gets the devices of this V1ResourceClaimSpec. # noqa: E501
:return: The devices of this V1ResourceClaimSpec. # noqa: E501
:rtype: V1DeviceClaim
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1ResourceClaimSpec.
:param devices: The devices of this V1ResourceClaimSpec. # noqa: E501
:type: V1DeviceClaim
"""
self._devices = devices
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceClaimSpec):
return True
return self.to_dict() != other.to_dict()
| V1ResourceClaimSpec |
python | ray-project__ray | python/ray/autoscaler/v2/tests/util.py | {
"start": 423,
"end": 1015
} | class ____:
def __init__(self, logger) -> None:
self._logs = defaultdict(list)
self._logger = logger
def info(self, s):
self._logger.info(s)
self._logs["info"].append(s)
def warning(self, s):
self._logger.warning(s)
self._logs["warning"].append(s)
def error(self, s):
self._logger.error(s)
self._logs["error"].append(s)
def debug(self, s):
self._logger.debug(s)
self._logs["debug"].append(s)
def get_logs(self, level: str) -> List[str]:
return self._logs[level]
| MockEventLogger |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/unicode_script_op_test.py | {
"start": 2016,
"end": 2774
} | class ____(test.Benchmark):
# Generate some random-ish input by jumping around in unicode characters
def _generateBenchmarkInput(self, size):
chars = []
i = 0
offset = 0
continuity_size = 20
while i < size:
chars.append(ord("a") + offset)
i += 1
offset += 1
if i % continuity_size == 0:
offset += 100
if offset > 0x1F940:
offset = 0
return chars
def benchmark_unicode_script(self):
with session.Session(config=benchmark.benchmark_config()) as sess:
chars = self._generateBenchmarkInput(1000000)
script = string_ops.unicode_script(chars)
self.run_op_benchmark(sess, script.op, min_iters=100)
if __name__ == "__main__":
test.main()
| UnicodeScriptBenchmarks |
python | django__django | tests/schema/models.py | {
"start": 3988,
"end": 4142
} | class ____(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
class Meta:
apps = new_apps
| Tag |
python | huggingface__transformers | examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py | {
"start": 10353,
"end": 32972
} | class ____:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: AutoProcessor
padding: Union[bool, str] = "longest"
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{"input_values": feature["input_values"]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
labels_batch = self.processor.pad(
labels=label_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch["labels"] = labels
if "attention_mask" in batch:
batch["attention_mask"] = batch["attention_mask"].to(torch.long)
return batch
def create_vocabulary_from_data(
datasets: DatasetDict,
word_delimiter_token: Optional[str] = None,
unk_token: Optional[str] = None,
pad_token: Optional[str] = None,
):
# Given training and test labels create vocabulary
def extract_all_chars(batch):
all_text = " ".join(batch["target_text"])
vocab = list(set(all_text))
return {"vocab": [vocab], "all_text": [all_text]}
vocabs = datasets.map(
extract_all_chars,
batched=True,
batch_size=-1,
keep_in_memory=True,
remove_columns=datasets["train"].column_names,
)
# take union of all unique characters in each dataset
vocab_set = functools.reduce(
lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]),
vocabs.values(),
)
vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))}
# replace white space with delimiter token
if word_delimiter_token is not None:
vocab_dict[word_delimiter_token] = vocab_dict[" "]
del vocab_dict[" "]
# add unk and pad token
if unk_token is not None:
vocab_dict[unk_token] = len(vocab_dict)
if pad_token is not None:
vocab_dict[pad_token] = len(vocab_dict)
return vocab_dict
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_process_index) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_process_index):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# 1. First, let's load the dataset
raw_datasets = DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'."
" Make sure to set `--audio_column_name` to the correct audio column - one of"
f" {', '.join(raw_datasets['train'].column_names)}."
)
if data_args.text_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
if data_args.max_train_samples is not None:
raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
if training_args.do_eval:
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
# 2. We remove some special characters from the datasets
# that make training complicated and do not help in transcribing the speech
# E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
# that could be easily picked up by the model
chars_to_ignore_regex = (
f"[{''.join(data_args.chars_to_ignore)}]" if data_args.chars_to_ignore is not None else None
)
text_column_name = data_args.text_column_name
def remove_special_characters(batch):
if chars_to_ignore_regex is not None:
batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
else:
batch["target_text"] = batch[text_column_name].lower() + " "
return batch
with training_args.main_process_first(desc="dataset map special characters removal"):
raw_datasets = raw_datasets.map(
remove_special_characters,
remove_columns=[text_column_name],
desc="remove special characters from datasets",
)
# save special tokens for tokenizer
word_delimiter_token = data_args.word_delimiter_token
unk_token = data_args.unk_token
pad_token = data_args.pad_token
# 3. Next, let's load the config as we might need it to create
# the tokenizer
# load config
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# 4. Next, if no tokenizer file is defined,
# we create the vocabulary of the model by extracting all unique characters from
# the training and evaluation datasets
# We need to make sure that only first rank saves vocabulary
# make sure all processes wait until vocab is created
tokenizer_name_or_path = model_args.tokenizer_name_or_path
tokenizer_kwargs = {}
vocab_dict = {}
if tokenizer_name_or_path is not None:
# load vocabulary of other adapter languages so that new language can be appended
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
vocab_dict = tokenizer.vocab.copy()
if tokenizer.target_lang is None:
raise ValueError("Make sure to load a multi-lingual tokenizer with a set target language.")
if data_args.target_language in tokenizer.vocab and not data_args.overwrite_lang_vocab:
logger.info(
"Adapter language already exists."
" Skipping vocabulary creating. If you want to create a new vocabulary"
f" for {data_args.target_language} make sure to add '--overwrite_lang_vocab'"
)
else:
tokenizer_name_or_path = None
if tokenizer_name_or_path is None:
# save vocab in training output dir
tokenizer_name_or_path = training_args.output_dir
vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
with training_args.main_process_first():
if os.path.isfile(vocab_file):
try:
os.remove(vocab_file)
except OSError:
# in shared file-systems it might be the case that
# two processes try to delete the vocab file at the some time
pass
with training_args.main_process_first(desc="dataset map vocabulary creation"):
if not os.path.isfile(vocab_file):
os.makedirs(tokenizer_name_or_path, exist_ok=True)
lang_dict = create_vocabulary_from_data(
raw_datasets,
word_delimiter_token=word_delimiter_token,
unk_token=unk_token,
pad_token=pad_token,
)
# if we doing adapter language training, save
# vocab with adapter language
if data_args.target_language is not None:
vocab_dict[data_args.target_language] = lang_dict
# save vocab dict to be loaded into tokenizer
with open(vocab_file, "w") as file:
json.dump(vocab_dict, file)
# if tokenizer has just been created
# it is defined by `tokenizer_class` if present in config else by `model_type`
tokenizer_kwargs = {
"config": config if config.tokenizer_class is not None else None,
"tokenizer_type": (config.model_type if config.tokenizer_class is None else None),
"unk_token": unk_token,
"pad_token": pad_token,
"word_delimiter_token": word_delimiter_token,
"target_lang": data_args.target_language,
}
# 5. Now we can instantiate the feature extractor, tokenizer and model
# Note for distributed training, the .from_pretrained methods guarantee that only
# one local process can concurrently download model & vocab.
# load feature_extractor and tokenizer
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
**tokenizer_kwargs,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# adapt config
config.update(
{
"final_dropout": model_args.final_dropout,
"mask_time_prob": model_args.mask_time_prob,
"mask_time_length": model_args.mask_time_length,
"mask_feature_prob": model_args.mask_feature_prob,
"mask_feature_length": model_args.mask_feature_length,
"gradient_checkpointing": training_args.gradient_checkpointing,
"layerdrop": model_args.layerdrop,
"ctc_loss_reduction": model_args.ctc_loss_reduction,
"pad_token_id": tokenizer.pad_token_id,
"vocab_size": len(tokenizer),
"adapter_attn_dim": model_args.adapter_attn_dim,
}
)
# create model
model = AutoModelForCTC.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
config=config,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
ignore_mismatched_sizes=True,
)
# if attn adapter is defined, freeze all non-adapter weights
if model.config.adapter_attn_dim is not None:
model.init_adapter_layers()
# first we freeze the whole base model
model.freeze_base_model()
# next we unfreeze all adapter layers
adapter_weights = model._get_adapters()
for param in adapter_weights.values():
param.requires_grad = True
# 6. Now we preprocess the datasets including loading the audio, resampling and normalization
# Thankfully, `datasets` takes care of automatically loading and resampling the audio,
# so that we just need to set the correct target sampling rate and normalize the input
# via the `feature_extractor`
# make sure that dataset decodes audio with correct sampling rate
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name,
datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate),
)
# derive max & min input length for sample rate & max duration
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
num_workers = data_args.preprocessing_num_workers
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def prepare_dataset(batch):
# load audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
batch["input_values"] = inputs.input_values[0]
batch["input_length"] = len(batch["input_values"])
# encode targets
batch["labels"] = tokenizer(batch["target_text"]).input_ids
return batch
with training_args.main_process_first(desc="dataset map preprocessing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=next(iter(raw_datasets.values())).column_names,
num_proc=num_workers,
desc="preprocess datasets",
)
def is_audio_in_length_range(length):
return length > min_input_length and length < max_input_length
# filter data that is shorter than min_input_length
vectorized_datasets = vectorized_datasets.filter(
is_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_length"],
)
# 7. Next, we can prepare the training.
# Let's use word error rate (WER) as our evaluation metric,
# instantiate a data collator and the trainer
# Define evaluation metrics during training, *i.e.* word error rate, character error rate
eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}
# for large datasets it is advised to run the preprocessing on a
# single machine first with ``args.preprocessing_only`` since there will mostly likely
# be a timeout when running the script in distributed mode.
# In a second step ``args.preprocessing_only`` can then be set to `False` to load the
# cached dataset
if data_args.preprocessing_only:
logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
return
def compute_metrics(pred):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits, axis=-1)
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred_ids)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
return metrics
# Now save everything to be able to create a single processor later
# make sure all processes wait until data is saved
with training_args.main_process_first():
# only the main process saves them
if is_main_process(training_args.local_process_index):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
try:
processor = AutoProcessor.from_pretrained(training_args.output_dir)
except (OSError, KeyError):
warnings.warn(
"Loading a processor from a feature extractor config that does not"
" include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
" attribute to your `preprocessor_config.json` file to suppress this warning: "
" `'processor_class': 'Wav2Vec2Processor'`",
FutureWarning,
)
processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
# Instantiate custom data collator
data_collator = DataCollatorCTCWithPadding(processor=processor)
# Initialize Trainer
trainer = Trainer(
model=model,
data_collator=data_collator,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
processing_class=processor,
)
# 8. Finally, we can start training
# Training
if training_args.do_train:
# use last checkpoint if exist
if os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(vectorized_datasets["train"])
)
metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
)
metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "automatic-speech-recognition",
"tags": ["automatic-speech-recognition", data_args.dataset_name, "mms"],
"dataset_args": (
f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:"
f" {data_args.eval_split_name}"
),
"dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
}
if "common_voice" in data_args.dataset_name:
kwargs["language"] = config_name
# make sure that adapter weights are saved separately
adapter_file = WAV2VEC2_ADAPTER_SAFE_FILE.format(data_args.target_language)
adapter_file = os.path.join(training_args.output_dir, adapter_file)
logger.info(f"Saving adapter weights under {adapter_file}...")
safe_save_file(model._get_adapters(), adapter_file, metadata={"format": "pt"})
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| DataCollatorCTCWithPadding |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 19306,
"end": 19509
} | class ____(BaseModel, Generic[T]):
"""Range with a lower and upper bound."""
lower_bound_gte: T | None
lower_bound_gt: T | None
upper_bound_lte: T | None
upper_bound_lt: T | None
| Range |
python | fastapi__sqlmodel | docs_src/tutorial/where/tutorial002.py | {
"start": 100,
"end": 1172
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name != "Deadpond")
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | getsentry__sentry | src/sentry/integrations/vsts_extension/integration.py | {
"start": 1627,
"end": 2248
} | class ____:
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
response = pipeline.finish_pipeline()
integration = getattr(pipeline, "integration", None)
if not integration:
return response
messages.add_message(request, messages.SUCCESS, "VSTS Extension installed.")
assert pipeline.organization is not None
return HttpResponseRedirect(
absolute_uri(
f"/settings/{pipeline.organization.slug}/integrations/vsts-extension/{integration.id}/"
)
)
| VstsExtensionFinishedView |
python | openai__openai-python | src/openai/types/fine_tuning/checkpoints/permission_retrieve_params.py | {
"start": 214,
"end": 610
} | class ____(TypedDict, total=False):
after: str
"""Identifier for the last permission ID from the previous pagination request."""
limit: int
"""Number of permissions to retrieve."""
order: Literal["ascending", "descending"]
"""The order in which to retrieve permissions."""
project_id: str
"""The ID of the project to get permissions for."""
| PermissionRetrieveParams |
python | mlflow__mlflow | mlflow/prophet/__init__.py | {
"start": 13560,
"end": 14103
} | class ____:
def __init__(self, pr_model):
self.pr_model = pr_model
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.pr_model
def predict(self, dataframe, params: dict[str, Any] | None = None):
"""
Args:
dataframe: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
return self.pr_model.predict(dataframe)
| _ProphetModelWrapper |
python | doocs__leetcode | solution/1100-1199/1147.Longest Chunked Palindrome Decomposition/Solution2.py | {
"start": 0,
"end": 898
} | class ____:
def longestDecomposition(self, text: str) -> int:
def get(l, r):
return (h[r] - h[l - 1] * p[r - l + 1]) % mod
n = len(text)
base = 131
mod = int(1e9) + 7
h = [0] * (n + 10)
p = [1] * (n + 10)
for i, c in enumerate(text):
t = ord(c) - ord('a') + 1
h[i + 1] = (h[i] * base) % mod + t
p[i + 1] = (p[i] * base) % mod
ans = 0
i, j = 0, n - 1
while i <= j:
k = 1
ok = False
while i + k - 1 < j - k + 1:
if get(i + 1, i + k) == get(j - k + 2, j + 1):
ans += 2
i += k
j -= k
ok = True
break
k += 1
if not ok:
ans += 1
break
return ans
| Solution |
python | PrefectHQ__prefect | tests/events/server/triggers/test_composite_triggers.py | {
"start": 11918,
"end": 36775
} | class ____:
@pytest.fixture
async def compound_automation_all_no_match(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires all of the triggers to be met.
"""
compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.NotExistent"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=compound_automation
)
compound_automation.created = persisted.created
compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return compound_automation
async def test_compound_automation_all_no_match_trigger_does_not_act(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_no_match: Automation,
):
"""
Ensures that when a stream of events is received,
the compound trigger is not called if the expected event is not present.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_not_called()
@pytest.fixture
async def compound_automation_all_with_match(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires all of the triggers to be met.
"""
compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=compound_automation
)
compound_automation.created = persisted.created
compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return compound_automation
async def test_compound_automation_all_match_trigger_acts(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_with_match: Automation,
):
"""
Ensures that when a stream of events is received,
the compound trigger is called if all expected events are present.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_called_once()
firing: Firing = act.call_args.args[0]
assert firing.trigger.id == compound_automation_all_with_match.trigger.id
async def test_compound_automation_all_will_not_double_fire(
self,
db: PrefectDBInterface,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_with_match: Automation,
monkeypatch: pytest.MonkeyPatch,
):
"""
Special case test for the race condition where two identical firings land at the
same time. This can't be reproduced reliably during tests, so it is mocked.
"""
# Replace the get_child_firings function with one that returns firings that
# are _not_ the one that should have just happened
async def get_child_firings(
session: AsyncSession, firing: Firing
) -> List[db.CompositeTriggerChildFiring]:
assert isinstance(
compound_automation_all_with_match.trigger, CompoundTrigger
)
return [
db.CompositeTriggerChildFiring(
child_firing=Firing(
id=uuid4(),
trigger=compound_automation_all_with_match.trigger.triggers[0],
trigger_states={TriggerState.Triggered},
triggered=now("UTC"),
)
),
db.CompositeTriggerChildFiring(
child_firing=Firing(
id=uuid4(),
trigger=compound_automation_all_with_match.trigger.triggers[1],
trigger_states={TriggerState.Triggered},
triggered=now("UTC"),
)
),
]
monkeypatch.setattr(
"prefect.server.events.triggers.get_child_firings", get_child_firings
)
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_not_called()
@pytest.fixture
async def compound_automation_all_double_nested_anys(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires any of the triggers to be met in both compound triggers.
"""
nested_compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
CompoundTrigger(
require="any",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
CompoundTrigger(
require="any",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.NonExistent"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=nested_compound_automation
)
nested_compound_automation.created = persisted.created
nested_compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return nested_compound_automation
async def test_compound_automation_all_double_nested_trigger_anys_acts(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_double_nested_anys: Automation,
):
"""
Ensures that when a stream of events is received,
the compound trigger is called if any expected events are present in both compound triggers.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_called_once()
firing: Firing = act.call_args.args[0]
assert (
firing.trigger.id == compound_automation_all_double_nested_anys.trigger.id
)
@pytest.fixture
async def compound_automation_all_double_nested_all_and_any(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires any of the triggers to be met.
"""
nested_compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
CompoundTrigger(
require="any",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.NonExistent"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=nested_compound_automation
)
nested_compound_automation.created = persisted.created
nested_compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return nested_compound_automation
async def test_compound_automation_all_double_nested_trigger_any_all_does_not_act(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_double_nested_all_and_any: Automation,
):
"""
Ensures that when a stream of events is received,
the compound trigger is not called if the expected event is not present.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.call_count == 2
firing: Firing = act.call_args.args[0]
assert (
firing.trigger.id
== compound_automation_all_double_nested_all_and_any.trigger.id
)
@pytest.fixture
async def compound_automation_all_double_nested_all_and_all_no_match(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires all of the triggers to be met.
"""
nested_compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.NonExistent"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=nested_compound_automation
)
nested_compound_automation.created = persisted.created
nested_compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return nested_compound_automation
async def test_compound_automation_all_double_nested_trigger_all_all_does_not_act(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_double_nested_all_and_all_no_match: Automation,
):
"""
Ensures that when a stream of events is received,
the compound trigger is not called if the expected event is not present.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_not_called()
@pytest.fixture
async def compound_automation_all_double_nested_events_all_and_all_with_match(
self,
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
):
"""
This automation has a compound trigger that requires all of the triggers to be met.
"""
nested_compound_automation = Automation(
name="Compound Automation",
description="",
trigger=CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Failed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
CompoundTrigger(
require="all",
within=timedelta(minutes=5),
triggers=[
EventTrigger(
expect={"prefect.flow-run.Completed"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"prefect.flow-run.Pending"},
match={"prefect.resource.id": "prefect.flow-run.*"},
posture=Posture.Reactive,
threshold=1,
),
],
),
],
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
session=automations_session, automation=nested_compound_automation
)
nested_compound_automation.created = persisted.created
nested_compound_automation.updated = persisted.updated
triggers.load_automation(persisted)
await automations_session.commit()
return nested_compound_automation
async def test_compound_automation_all_double_nested_trigger_acts(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_double_nested_events_all_and_all_with_match: Automation,
):
"""
Ensure that the compound trigger is fired when all triggers are met.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.call_count == 1
firing: Firing = act.call_args.args[0]
assert (
firing.trigger.id
== compound_automation_all_double_nested_events_all_and_all_with_match.trigger.id
)
async def test_compound_automation_all_double_nested_trigger_acts_and_queues_action(
self,
act: mock.AsyncMock,
flow_run_events: List[ReceivedEvent],
compound_automation_all_double_nested_events_all_and_all_with_match: Automation,
publish: mock.AsyncMock,
create_publisher: mock.MagicMock,
):
"""
Ensure one action is queued after the compound trigger is fired.
"""
for received_event in flow_run_events:
await triggers.reactive_evaluation(received_event)
act.assert_awaited_once()
@pytest.fixture
async def chonk_baker(
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
) -> Automation:
"""
Create a sequence automation that triggers on the following events:
- ingredients.buy
- ingredients.mix
- cake.bake
"""
sequence_automation = Automation(
name="Sequence Automation",
trigger=SequenceTrigger(
triggers=[
EventTrigger(
expect={"ingredients.buy"},
match={"prefect.resource.id": "prefect.ingredients.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"ingredients.mix"},
match={"prefect.resource.id": "prefect.ingredients.*"},
posture=Posture.Reactive,
threshold=1,
),
EventTrigger(
expect={"cake.bake"},
match={"prefect.resource.id": "prefect.cake.*"},
posture=Posture.Reactive,
threshold=1,
),
],
within=5,
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
automations_session, sequence_automation
)
sequence_automation.created = persisted.created
sequence_automation.updated = persisted.updated
triggers.load_automation(sequence_automation)
await automations_session.commit()
return sequence_automation
@pytest.fixture
async def chonk_twins(
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
) -> Automation:
"""
Create a sequence automation that triggers on the following events:
- ingredients.buy
- ingredients.buy
"""
sequence_automation = Automation(
name="Sequence Automation",
trigger=SequenceTrigger(
triggers=[
EventTrigger(
expect={"ingredients.buy"},
match={"prefect.resource.id": "prefect.ingredients.*"},
posture=Posture.Reactive,
),
EventTrigger(
expect={"ingredients.buy"},
match={"prefect.resource.id": "prefect.ingredients.*"},
posture=Posture.Reactive,
),
],
within=5,
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
automations_session, sequence_automation
)
sequence_automation.created = persisted.created
sequence_automation.updated = persisted.updated
triggers.load_automation(sequence_automation)
await automations_session.commit()
return sequence_automation
@pytest.fixture
async def chonk_buyer(
automations_session: AsyncSession,
cleared_buckets: None,
cleared_automations: None,
) -> Automation:
sequence_automation = Automation(
name="Sequence Automation",
trigger=SequenceTrigger(
triggers=[
EventTrigger(
expect={"cake.buy"},
match={"prefect.resource.id": "prefect.cake.*"},
posture=Posture.Reactive,
threshold=1,
),
],
within=5,
),
actions=[actions.DoNothing()],
)
persisted = await automations.create_automation(
automations_session, sequence_automation
)
sequence_automation.created = persisted.created
sequence_automation.updated = persisted.updated
triggers.load_automation(sequence_automation)
await automations_session.commit()
return sequence_automation
| TestCompoundTriggerAll |
python | google__pytype | pytype/overlays/typed_dict.py | {
"start": 14346,
"end": 15047
} | class ____(overlay_utils.TypingContainer):
"""typing.(Not)Required."""
_REQUIREDNESS = None
def _get_value_info(self, inner, ellipses, allowed_ellipses=frozenset()):
template, processed_inner, abstract_class = super()._get_value_info(
inner, ellipses, allowed_ellipses
)
for annotation in processed_inner:
req = _is_required(annotation)
if req not in (None, self._REQUIREDNESS):
error = "Cannot mark a TypedDict item as both Required and NotRequired"
self.ctx.errorlog.invalid_annotation(
stack=self.ctx.vm.frames, annot=self.name, details=error
)
return template, processed_inner, abstract_class
| _TypedDictItemRequiredness |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/lsp.py | {
"start": 234,
"end": 674
} | class ____:
Text = 1
Method = 2
Function = 3
Constructor = 4
Field = 5
Variable = 6
Class = 7
Interface = 8
Module = 9
Property = 10
Unit = 11
Value = 12
Enum = 13
Keyword = 14
Snippet = 15
Color = 16
File = 17
Reference = 18
Folder = 19
EnumMember = 20
Constant = 21
Struct = 22
Event = 23
Operator = 24
TypeParameter = 25
| CompletionItemKind |
python | huggingface__transformers | src/transformers/models/florence2/modeling_florence2.py | {
"start": 19375,
"end": 19684
} | class ____(PreTrainedModel):
config_class = Florence2VisionConfig
main_input_name = "pixel_values"
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_can_compile_fullgraph = True
@auto_docstring
| Florence2VisionPreTrainedModel |
python | altair-viz__altair | altair/datasets/_loader.py | {
"start": 776,
"end": 8568
} | class ____(Generic[IntoDataFrameT, IntoFrameT]):
"""
Load example datasets *remotely* from `vega-datasets`_, with caching.
A new ``Loader`` must be initialized by specifying a backend::
from altair.datasets import Loader
load = Loader.from_backend("polars")
load
Loader[polars]
.. _vega-datasets:
https://github.com/vega/vega-datasets
"""
_reader: Reader[IntoDataFrameT, IntoFrameT]
@overload
@classmethod
def from_backend(
cls, backend_name: Literal["polars"] = ..., /
) -> Loader[pl.DataFrame, pl.LazyFrame]: ...
@overload
@classmethod
def from_backend(
cls, backend_name: Literal["pandas", "pandas[pyarrow]"], /
) -> Loader[pd.DataFrame, pd.DataFrame]: ...
@overload
@classmethod
def from_backend(
cls, backend_name: Literal["pyarrow"], /
) -> Loader[pa.Table, pa.Table]: ...
@classmethod
def from_backend(
cls: type[Loader[Any, Any]], backend_name: _Backend = "polars", /
) -> Loader[Any, Any]:
"""
Initialize a new loader, with the specified backend.
Parameters
----------
backend_name
DataFrame package/config used to return data.
* *polars*: Using `polars defaults`_
* *pandas*: Using `pandas defaults`_.
* *pandas[pyarrow]*: Using ``dtype_backend="pyarrow"``
* *pyarrow*: (*Experimental*)
.. warning::
Most datasets use a `JSON format not supported`_ by ``pyarrow``
Examples
--------
Using ``polars``::
from altair.datasets import Loader
load = Loader.from_backend("polars")
cars = load("cars")
type(cars)
polars.dataframe.frame.DataFrame
Using ``pandas``::
load = Loader.from_backend("pandas")
cars = load("cars")
type(cars)
pandas.core.frame.DataFrame
Using ``pandas``, backed by ``pyarrow`` dtypes::
load = Loader.from_backend("pandas[pyarrow]")
co2 = load("co2")
type(co2)
pandas.core.frame.DataFrame
co2.dtypes
Date datetime64[ns]
CO2 double[pyarrow]
adjusted CO2 double[pyarrow]
dtype: object
.. _polars defaults:
https://docs.pola.rs/api/python/stable/reference/io.html
.. _pandas defaults:
https://pandas.pydata.org/docs/reference/io.html
.. _JSON format not supported:
https://arrow.apache.org/docs/python/json.html#reading-json-files
"""
return cls.from_reader(_reader._from_backend(backend_name))
@classmethod
def from_reader(cls, reader: Reader[IntoDataFrameT, IntoFrameT], /) -> Self:
obj = cls.__new__(cls)
obj._reader = reader
return obj
def __call__(
self,
name: Dataset | LiteralString,
suffix: Extension | None = None,
/,
**kwds: Any,
) -> IntoDataFrameT:
"""
Get a remote dataset and load as tabular data.
Parameters
----------
name
Name of the dataset/`Path.stem`_.
suffix
File extension/`Path.suffix`_.
.. note::
Only needed if ``name`` is available in multiple formats.
**kwds
Arguments passed to the underlying read function.
Examples
--------
Using ``polars``::
from altair.datasets import Loader
load = Loader.from_backend("polars")
source = load("iowa_electricity")
source.columns
['year', 'source', 'net_generation']
source.head(5)
shape: (5, 3)
┌────────────┬──────────────┬────────────────┐
│ year ┆ source ┆ net_generation │
│ --- ┆ --- ┆ --- │
│ date ┆ str ┆ i64 │
╞════════════╪══════════════╪════════════════╡
│ 2001-01-01 ┆ Fossil Fuels ┆ 35361 │
│ 2002-01-01 ┆ Fossil Fuels ┆ 35991 │
│ 2003-01-01 ┆ Fossil Fuels ┆ 36234 │
│ 2004-01-01 ┆ Fossil Fuels ┆ 36205 │
│ 2005-01-01 ┆ Fossil Fuels ┆ 36883 │
└────────────┴──────────────┴────────────────┘
Using ``pandas``::
load = Loader.from_backend("pandas")
source = load("iowa_electricity")
source.columns
Index(['year', 'source', 'net_generation'], dtype='object')
source.head(5)
year source net_generation
0 2001-01-01 Fossil Fuels 35361
1 2002-01-01 Fossil Fuels 35991
2 2003-01-01 Fossil Fuels 36234
3 2004-01-01 Fossil Fuels 36205
4 2005-01-01 Fossil Fuels 36883
Using ``pyarrow``::
load = Loader.from_backend("pyarrow")
source = load("iowa_electricity")
source.column_names
['year', 'source', 'net_generation']
source.slice(0, 5)
pyarrow.Table
year: date32[day]
source: string
net_generation: int64
----
year: [[2001-01-01,2002-01-01,2003-01-01,2004-01-01,2005-01-01]]
source: [["Fossil Fuels","Fossil Fuels","Fossil Fuels","Fossil Fuels","Fossil Fuels"]]
net_generation: [[35361,35991,36234,36205,36883]]
.. _Path.stem:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.stem
.. _Path.suffix:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.suffix
"""
return self._reader.dataset(name, suffix, **kwds)
def url(
self,
name: Dataset | LiteralString,
suffix: Extension | None = None,
/,
) -> str:
"""
Return the address of a remote dataset.
Parameters
----------
name
Name of the dataset/`Path.stem`_.
suffix
File extension/`Path.suffix`_.
.. note::
Only needed if ``name`` is available in multiple formats.
.. _Path.stem:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.stem
.. _Path.suffix:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.suffix
Examples
--------
The returned url will always point to an accessible dataset::
import altair as alt
from altair.datasets import Loader
load = Loader.from_backend("polars")
load.url("cars")
"https://cdn.jsdelivr.net/npm/vega-datasets@v2.11.0/data/cars.json"
We can pass the result directly to a chart::
url = load.url("cars")
alt.Chart(url).mark_point().encode(x="Horsepower:Q", y="Miles_per_Gallon:Q")
"""
return self._reader.url(name, suffix)
@property
def cache(self) -> DatasetCache:
"""
Caching of remote dataset requests.
Configure cache path::
self.cache.path = "..."
Download the latest datasets *ahead-of-time*::
self.cache.download_all()
Remove all downloaded datasets::
self.cache.clear()
Disable caching::
self.cache.path = None
"""
return self._reader.cache
def __repr__(self) -> str:
return f"{type(self).__name__}[{self._reader._name}]"
@final
| Loader |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 333716,
"end": 347596
} | class ____(rv_continuous):
r"""An upper truncated Pareto continuous random variable.
%(before_notes)s
See Also
--------
pareto : Pareto distribution
Notes
-----
The probability density function for `truncpareto` is:
.. math::
f(x, b, c) = \frac{b}{1 - c^{-b}} \frac{1}{x^{b+1}}
for :math:`b \neq 0`, :math:`c > 1` and :math:`1 \le x \le c`.
`truncpareto` takes `b` and `c` as shape parameters for :math:`b` and
:math:`c`.
Notice that the upper truncation value :math:`c` is defined in
standardized form so that random values of an unscaled, unshifted variable
are within the range ``[1, c]``.
If ``u_r`` is the upper bound to a scaled and/or shifted variable,
then ``c = (u_r - loc) / scale``. In other words, the support of the
distribution becomes ``(scale + loc) <= x <= (c*scale + loc)`` when
`scale` and/or `loc` are provided.
The ``fit`` method assumes that :math:`b` is positive; it does not produce
good results when the data is more consistent with negative :math:`b`.
`truncpareto` can also be used to model a general power law distribution
with PDF:
.. math::
f(x; a, l, h) = \frac{a}{h^a - l^a} x^{a-1}
for :math:`a \neq 0` and :math:`0 < l < x < h`. Suppose :math:`a`,
:math:`l`, and :math:`h` are represented in code as ``a``, ``l``, and
``h``, respectively. In this case, use `truncpareto` with parameters
``b = -a``, ``c = h / l``, ``scale = l``, and ``loc = 0``.
%(after_notes)s
References
----------
.. [1] Burroughs, S. M., and Tebbens S. F.
"Upper-truncated power laws in natural systems."
Pure and Applied Geophysics 158.4 (2001): 741-757.
%(example)s
"""
def _shape_info(self):
ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
ic = _ShapeInfo("c", False, (1.0, np.inf), (False, False))
return [ib, ic]
def _argcheck(self, b, c):
return (b != 0.) & (c > 1.)
def _get_support(self, b, c):
return self.a, c
def _pdf(self, x, b, c):
# here and below, avoid int to negative int power
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return b * x**-(b+1) / (1 - 1/c**b)
def _logpdf(self, x, b, c):
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return xpx.apply_where(b > 0, (x, b, c), self._logpdf_pos_b, super()._logpdf)
def _logpdf_pos_b(self, x, b, c):
return np.log(b) - np.log(-np.expm1(-b*np.log(c))) - (b+1)*np.log(x)
def _cdf(self, x, b, c):
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return (1 - x**-b) / (1 - 1/c**b)
def _logcdf(self, x, b, c):
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return xpx.apply_where(b > 0, (x, b, c), self._logcdf_pos_b, super()._logcdf)
def _logcdf_pos_b(self, x, b, c):
return np.log1p(-x**-b) - np.log1p(-1/c**b)
def _ppf(self, q, b, c):
q, b, c = xp_promote(q, b, c, force_floating=True, xp=np)
return pow(1 - (1 - 1/c**b)*q, -1/b)
def _sf(self, x, b, c):
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return (x**-b - 1/c**b) / (1 - 1/c**b)
def _logsf(self, x, b, c):
x, b, c = xp_promote(x, b, c, force_floating=True, xp=np)
return xpx.apply_where(b > 0, (x, b, c), self._logsf_pos_b, super()._logsf)
def _logsf_pos_b(self, x, b, c):
return np.log(x**-b - 1/c**b) - np.log1p(-1/c**b)
def _isf(self, q, b, c):
q, b, c = xp_promote(q, b, c, force_floating=True, xp=np)
return pow(1/c**b + (1 - 1/c**b)*q, -1/b)
def _entropy(self, b, c):
return -(np.log(b/(1 - 1/c**b))
+ (b+1)*(np.log(c)/(c**b - 1) - 1/b))
def _munp(self, n, b, c):
n, b, c = xp_promote(n, b, c, force_floating=True, xp=np)
if (n == b).all():
return b*np.log(c) / (1 - 1/c**b)
else:
return b / (b-n) * (c**b - c**n) / (c**b - 1)
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
b, loc, scale = pareto.fit(data)
c = (max(data) - loc)/scale
return b, c, loc, scale
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop("superfit", False):
return super().fit(data, *args, **kwds)
def log_mean(x):
return np.mean(np.log(x))
def harm_mean(x):
return 1/np.mean(1/x)
def get_b(c, loc, scale):
u = (data-loc)/scale
harm_m = harm_mean(u)
log_m = log_mean(u)
quot = (harm_m-1)/log_m
return (1 - (quot-1) / (quot - (1 - 1/c)*harm_m/np.log(c)))/log_m
def get_c(loc, scale):
return (mx - loc)/scale
def get_loc(fc, fscale):
if fscale: # (fscale and fc) or (fscale and not fc)
loc = mn - fscale
return loc
if fc:
loc = (fc*mn - mx)/(fc - 1)
return loc
def get_scale(loc):
return mn - loc
# Functions used for optimisation; partial derivatives of
# the Lagrangian, set to equal 0.
def dL_dLoc(loc, b_=None):
# Partial derivative wrt location.
# Optimised upon when no parameters, or only b, are fixed.
scale = get_scale(loc)
c = get_c(loc, scale)
b = get_b(c, loc, scale) if b_ is None else b_
harm_m = harm_mean((data - loc)/scale)
return 1 - (1 + (c - 1)/(c**(b+1) - c)) * (1 - 1/(b+1)) * harm_m
def dL_dB(b, logc, logm):
# Partial derivative wrt b.
# Optimised upon whenever at least one parameter but b is fixed,
# and b is free.
return b - np.log1p(b*logc / (1 - b*logm)) / logc
def fallback(data, *args, **kwargs):
# Should any issue arise, default to the general fit method.
return super(truncpareto_gen, self).fit(data, *args, **kwargs)
parameters = _check_fit_input_parameters(self, data, args, kwds)
data, fb, fc, floc, fscale = parameters
mn, mx = data.min(), data.max()
mn_inf = np.nextafter(mn, -np.inf)
if (fb is not None
and fc is not None
and floc is not None
and fscale is not None):
raise ValueError("All parameters fixed."
"There is nothing to optimize.")
elif fc is None and floc is None and fscale is None:
if fb is None:
def cond_b(loc):
# b is positive only if this function is positive
scale = get_scale(loc)
c = get_c(loc, scale)
harm_m = harm_mean((data - loc)/scale)
return (1 + 1/(c-1)) * np.log(c) / harm_m - 1
# This gives an upper bound on loc allowing for a positive b.
# Iteratively look for a bracket for root_scalar.
mn_inf = np.nextafter(mn, -np.inf)
rbrack = mn_inf
i = 0
lbrack = rbrack - 1
while ((lbrack > -np.inf)
and (cond_b(lbrack)*cond_b(rbrack) >= 0)):
i += 1
lbrack = rbrack - np.power(2., i)
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(cond_b, bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
# Determine the MLE for loc.
# Iteratively look for a bracket for root_scalar.
rbrack = res.root - 1e-3 # grad_loc is numerically ill-behaved
lbrack = rbrack - 1
i = 0
while ((lbrack > -np.inf)
and (dL_dLoc(lbrack)*dL_dLoc(rbrack) >= 0)):
i += 1
lbrack = rbrack - np.power(2., i)
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(dL_dLoc, bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
loc = res.root
scale = get_scale(loc)
c = get_c(loc, scale)
b = get_b(c, loc, scale)
std_data = (data - loc)/scale
# The expression of b relies on b being bounded above.
up_bound_b = min(1/log_mean(std_data),
1/(harm_mean(std_data)-1))
if not (b < up_bound_b):
return fallback(data, *args, **kwds)
else:
# We know b is positive (or a FitError will be triggered)
# so we let loc get close to min(data).
rbrack = mn_inf
lbrack = mn_inf - 1
i = 0
# Iteratively look for a bracket for root_scalar.
while (lbrack > -np.inf
and (dL_dLoc(lbrack, fb)
* dL_dLoc(rbrack, fb) >= 0)):
i += 1
lbrack = rbrack - 2**i
if not lbrack > -np.inf:
return fallback(data, *args, **kwds)
res = root_scalar(dL_dLoc, (fb,),
bracket=(lbrack, rbrack))
if not res.converged:
return fallback(data, *args, **kwds)
loc = res.root
scale = get_scale(loc)
c = get_c(loc, scale)
b = fb
else:
# At least one of the parameters determining the support is fixed;
# the others then have analytical expressions from the constraints.
# The completely determined case (fixed c, loc and scale)
# has to be checked for not overflowing the support.
# If not fixed, b has to be determined numerically.
loc = floc if floc is not None else get_loc(fc, fscale)
scale = fscale or get_scale(loc)
c = fc or get_c(loc, scale)
# Unscaled, translated values should be positive when the location
# is fixed. If it is not the case, we end up with negative `scale`
# and `c`, which would trigger a FitError before exiting the
# method.
if floc is not None and data.min() - floc < 0:
raise FitDataError("truncpareto", lower=1, upper=c)
# Standardised values should be within the distribution support
# when all parameters controlling it are fixed. If it not the case,
# `fc` is overridden by `c` determined from `floc` and `fscale` when
# raising the exception.
if fc and (floc is not None) and fscale:
if data.max() > fc*fscale + floc:
raise FitDataError("truncpareto", lower=1,
upper=get_c(loc, scale))
# The other constraints should be automatically satisfied
# from the analytical expressions of the parameters.
# If fc or fscale are respectively less than one or less than 0,
# a FitError is triggered before exiting the method.
if fb is None:
std_data = (data - loc)/scale
logm = log_mean(std_data)
logc = np.log(c)
# Condition for a positive root to exist.
if not (2*logm < logc):
return fallback(data, *args, **kwds)
lbrack = 1/logm + 1/(logm - logc)
rbrack = np.nextafter(1/logm, 0)
try:
res = root_scalar(dL_dB, (logc, logm),
bracket=(lbrack, rbrack))
# we should then never get there
if not res.converged:
return fallback(data, *args, **kwds)
b = res.root
except ValueError:
b = rbrack
else:
b = fb
# The distribution requires that `scale+loc <= data <= c*scale+loc`.
# To avoid numerical issues, some tuning may be necessary.
# We adjust `scale` to satisfy the lower bound, and we adjust
# `c` to satisfy the upper bound.
if not (scale+loc) < mn:
if fscale:
loc = np.nextafter(loc, -np.inf)
else:
scale = get_scale(loc)
scale = np.nextafter(scale, 0)
if not (c*scale+loc) > mx:
c = get_c(loc, scale)
c = np.nextafter(c, np.inf)
if not (np.all(self._argcheck(b, c)) and (scale > 0)):
return fallback(data, *args, **kwds)
params_override = b, c, loc, scale
if floc is None and fscale is None:
# Based on testing in gh-16782, the following methods are only
# reliable if either `floc` or `fscale` are provided. They are
# fast, though, so might as well see if they are better than the
# generic method.
params_super = fallback(data, *args, **kwds)
nllf_override = self.nnlf(params_override, data)
nllf_super = self.nnlf(params_super, data)
if nllf_super < nllf_override:
return params_super
return params_override
truncpareto = truncpareto_gen(a=1.0, name='truncpareto')
truncpareto._support = (1.0, 'c')
| truncpareto_gen |
python | fluentpython__example-code-2e | 24-class-metaprog/bulkfood/bulkfood_v7.py | {
"start": 1737,
"end": 2102
} | class ____(model.Entity): # <1>
description = model.NonBlank()
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# end::LINEITEM_V7[]
| LineItem |
python | pypa__pip | src/pip/_vendor/rich/segment.py | {
"start": 1206,
"end": 21880
} | class ____(NamedTuple):
"""A piece of text with associated style. Segments are produced by the Console render process and
are ultimately converted in to strings to be written to the terminal.
Args:
text (str): A piece of text.
style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
control (Tuple[ControlCode], optional): Optional sequence of control codes.
Attributes:
cell_length (int): The cell length of this Segment.
"""
text: str
style: Optional[Style] = None
control: Optional[Sequence[ControlCode]] = None
@property
def cell_length(self) -> int:
"""The number of terminal cells required to display self.text.
Returns:
int: A number of cells.
"""
text, _style, control = self
return 0 if control else cell_len(text)
def __rich_repr__(self) -> Result:
yield self.text
if self.control is None:
if self.style is not None:
yield self.style
else:
yield self.style
yield self.control
def __bool__(self) -> bool:
"""Check if the segment contains text."""
return bool(self.text)
@property
def is_control(self) -> bool:
"""Check if the segment contains control codes."""
return self.control is not None
@classmethod
@lru_cache(1024 * 16)
def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
"""Split a segment in to two at a given cell position.
Note that splitting a double-width character, may result in that character turning
into two spaces.
Args:
segment (Segment): A segment to split.
cut (int): A cell position to cut on.
Returns:
A tuple of two segments.
"""
text, style, control = segment
_Segment = Segment
cell_length = segment.cell_length
if cut >= cell_length:
return segment, _Segment("", style, control)
cell_size = get_character_cell_size
pos = int((cut / cell_length) * len(text))
while True:
before = text[:pos]
cell_pos = cell_len(before)
out_by = cell_pos - cut
if not out_by:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
if out_by == -1 and cell_size(text[pos]) == 2:
return (
_Segment(text[:pos] + " ", style, control),
_Segment(" " + text[pos + 1 :], style, control),
)
if out_by == +1 and cell_size(text[pos - 1]) == 2:
return (
_Segment(text[: pos - 1] + " ", style, control),
_Segment(" " + text[pos:], style, control),
)
if cell_pos < cut:
pos += 1
else:
pos -= 1
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
"""Split segment in to two segments at the specified column.
If the cut point falls in the middle of a 2-cell wide character then it is replaced
by two spaces, to preserve the display width of the parent segment.
Args:
cut (int): Offset within the segment to cut.
Returns:
Tuple[Segment, Segment]: Two segments.
"""
text, style, control = self
assert cut >= 0
if _is_single_cell_widths(text):
# Fast path with all 1 cell characters
if cut >= len(text):
return self, Segment("", style, control)
return (
Segment(text[:cut], style, control),
Segment(text[cut:], style, control),
)
return self._split_cells(self, cut)
@classmethod
def line(cls) -> "Segment":
"""Make a new line segment."""
return cls("\n")
@classmethod
def apply_style(
cls,
segments: Iterable["Segment"],
style: Optional[Style] = None,
post_style: Optional[Style] = None,
) -> Iterable["Segment"]:
"""Apply style(s) to an iterable of segments.
Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
Args:
segments (Iterable[Segment]): Segments to process.
style (Style, optional): Base style. Defaults to None.
post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
Returns:
Iterable[Segments]: A new iterable of segments (possibly the same iterable).
"""
result_segments = segments
if style:
apply = style.__add__
result_segments = (
cls(text, None if control else apply(_style), control)
for text, _style, control in result_segments
)
if post_style:
result_segments = (
cls(
text,
(
None
if control
else (_style + post_style if _style else post_style)
),
control,
)
for text, _style, control in result_segments
)
return result_segments
@classmethod
def filter_control(
cls, segments: Iterable["Segment"], is_control: bool = False
) -> Iterable["Segment"]:
"""Filter segments by ``is_control`` attribute.
Args:
segments (Iterable[Segment]): An iterable of Segment instances.
is_control (bool, optional): is_control flag to match in search.
Returns:
Iterable[Segment]: And iterable of Segment instances.
"""
if is_control:
return filter(attrgetter("control"), segments)
else:
return filterfalse(attrgetter("control"), segments)
@classmethod
def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
"""Split a sequence of segments in to a list of lines.
Args:
segments (Iterable[Segment]): Segments potentially containing line feeds.
Yields:
Iterable[List[Segment]]: Iterable of segment lists, one per line.
"""
line: List[Segment] = []
append = line.append
for segment in segments:
if "\n" in segment.text and not segment.control:
text, style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, style))
if new_line:
yield line
line = []
append = line.append
else:
append(segment)
if line:
yield line
@classmethod
def split_and_crop_lines(
cls,
segments: Iterable["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
include_new_lines: bool = True,
) -> Iterable[List["Segment"]]:
"""Split segments in to lines, and crop lines greater than a given length.
Args:
segments (Iterable[Segment]): An iterable of segments, probably
generated from console.render.
length (int): Desired line length.
style (Style, optional): Style to use for any padding.
pad (bool): Enable padding of lines that are less than `length`.
Returns:
Iterable[List[Segment]]: An iterable of lines of segments.
"""
line: List[Segment] = []
append = line.append
adjust_line_length = cls.adjust_line_length
new_line_segment = cls("\n")
for segment in segments:
if "\n" in segment.text and not segment.control:
text, segment_style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, segment_style))
if new_line:
cropped_line = adjust_line_length(
line, length, style=style, pad=pad
)
if include_new_lines:
cropped_line.append(new_line_segment)
yield cropped_line
line.clear()
else:
append(segment)
if line:
yield adjust_line_length(line, length, style=style, pad=pad)
@classmethod
def adjust_line_length(
cls,
line: List["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
) -> List["Segment"]:
"""Adjust a line to a given width (cropping or padding as required).
Args:
segments (Iterable[Segment]): A list of segments in a single line.
length (int): The desired width of the line.
style (Style, optional): The style of padding if used (space on the end). Defaults to None.
pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
Returns:
List[Segment]: A line of segments with the desired length.
"""
line_length = sum(segment.cell_length for segment in line)
new_line: List[Segment]
if line_length < length:
if pad:
new_line = line + [cls(" " * (length - line_length), style)]
else:
new_line = line[:]
elif line_length > length:
new_line = []
append = new_line.append
line_length = 0
for segment in line:
segment_length = segment.cell_length
if line_length + segment_length < length or segment.control:
append(segment)
line_length += segment_length
else:
text, segment_style, _ = segment
text = set_cell_size(text, length - line_length)
append(cls(text, segment_style))
break
else:
new_line = line[:]
return new_line
@classmethod
def get_line_length(cls, line: List["Segment"]) -> int:
"""Get the length of list of segments.
Args:
line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters),
Returns:
int: The length of the line.
"""
_cell_len = cell_len
return sum(_cell_len(text) for text, style, control in line if not control)
@classmethod
def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
"""Get the shape (enclosing rectangle) of a list of lines.
Args:
lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
Returns:
Tuple[int, int]: Width and height in characters.
"""
get_line_length = cls.get_line_length
max_width = max(get_line_length(line) for line in lines) if lines else 0
return (max_width, len(lines))
@classmethod
def set_shape(
cls,
lines: List[List["Segment"]],
width: int,
height: Optional[int] = None,
style: Optional[Style] = None,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Set the shape of a list of lines (enclosing rectangle).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style, optional): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
_height = height or len(lines)
blank = (
[cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
)
adjust_line_length = cls.adjust_line_length
shaped_lines = lines[:_height]
shaped_lines[:] = [
adjust_line_length(line, width, style=style) for line in lines
]
if len(shaped_lines) < _height:
shaped_lines.extend([blank] * (_height - len(shaped_lines)))
return shaped_lines
@classmethod
def align_top(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns lines to top (adds extra lines to bottom as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = lines + [[blank]] * extra_lines
return lines
@classmethod
def align_bottom(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns render to bottom (adds extra lines above as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added. Defaults to None.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = [[blank]] * extra_lines + lines
return lines
@classmethod
def align_middle(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns lines to middle (adds extra lines to above and below as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
top_lines = extra_lines // 2
bottom_lines = extra_lines - top_lines
lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
return lines
@classmethod
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Simplify an iterable of segments by combining contiguous segments with the same style.
Args:
segments (Iterable[Segment]): An iterable of segments.
Returns:
Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
"""
iter_segments = iter(segments)
try:
last_segment = next(iter_segments)
except StopIteration:
return
_Segment = Segment
for segment in iter_segments:
if last_segment.style == segment.style and not segment.control:
last_segment = _Segment(
last_segment.text + segment.text, last_segment.style
)
else:
yield last_segment
last_segment = segment
yield last_segment
@classmethod
def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all links from an iterable of styles.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with link removed.
"""
for segment in segments:
if segment.control or segment.style is None:
yield segment
else:
text, style, _control = segment
yield cls(text, style.update_link(None) if style else None)
@classmethod
def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all styles from an iterable of segments.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with styles replace with None
"""
for text, _style, control in segments:
yield cls(text, None, control)
@classmethod
def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all color from an iterable of segments.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with colorless style.
"""
cache: Dict[Style, Style] = {}
for text, style, control in segments:
if style:
colorless_style = cache.get(style)
if colorless_style is None:
colorless_style = style.without_color
cache[style] = colorless_style
yield cls(text, colorless_style, control)
else:
yield cls(text, None, control)
@classmethod
def divide(
cls, segments: Iterable["Segment"], cuts: Iterable[int]
) -> Iterable[List["Segment"]]:
"""Divides an iterable of segments in to portions.
Args:
cuts (Iterable[int]): Cell positions where to divide.
Yields:
[Iterable[List[Segment]]]: An iterable of Segments in List.
"""
split_segments: List["Segment"] = []
add_segment = split_segments.append
iter_cuts = iter(cuts)
while True:
cut = next(iter_cuts, -1)
if cut == -1:
return
if cut != 0:
break
yield []
pos = 0
segments_clear = split_segments.clear
segments_copy = split_segments.copy
_cell_len = cached_cell_len
for segment in segments:
text, _style, control = segment
while text:
end_pos = pos if control else pos + _cell_len(text)
if end_pos < cut:
add_segment(segment)
pos = end_pos
break
if end_pos == cut:
add_segment(segment)
yield segments_copy()
segments_clear()
pos = end_pos
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
break
else:
before, segment = segment.split_cells(cut - pos)
text, _style, control = segment
add_segment(before)
yield segments_copy()
segments_clear()
pos = cut
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
yield segments_copy()
| Segment |
python | tensorflow__tensorflow | tensorflow/lite/python/util_test.py | {
"start": 6677,
"end": 12959
} | class ____(test_util.TensorFlowTestCase):
def testGetTensorsValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
dtype=dtypes.float32, shape=[1, 16, 16, 3])
_ = in_tensor + in_tensor
sess = session.Session()
tensors = util.get_tensors_from_tensor_names(sess.graph, ["Placeholder"])
self.assertEqual("Placeholder:0", tensors[0].name)
def testGetTensorsInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
dtype=dtypes.float32, shape=[1, 16, 16, 3])
_ = in_tensor + in_tensor
sess = session.Session()
with self.assertRaises(ValueError) as error:
util.get_tensors_from_tensor_names(sess.graph, ["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSetTensorShapeValid(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3, 5])
self.assertAllEqual([None, 3, 5], tensor.shape)
util.set_tensor_shapes([tensor], {"Placeholder": [5, 3, 5]})
self.assertAllEqual([5, 3, 5], tensor.shape)
def testSetTensorShapeNoneValid(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32)
util.set_tensor_shapes([tensor], {"Placeholder": [1, 3, 5]})
self.assertAllEqual([1, 3, 5], tensor.shape)
def testSetTensorShapeArrayInvalid(self):
# Tests set_tensor_shape where the tensor name passed in doesn't exist.
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3, 5])
self.assertAllEqual([None, 3, 5], tensor.shape)
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"invalid-input": [5, 3, 5]})
self.assertEqual(
"Invalid tensor 'invalid-input' found in tensor shapes map.",
str(error.exception))
self.assertAllEqual([None, 3, 5], tensor.shape)
def testSetTensorShapeDimensionInvalid(self):
# Tests set_tensor_shape where the shape passed in is incompatible.
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3, 5])
self.assertAllEqual([None, 3, 5], tensor.shape)
with self.assertRaises(ValueError) as error:
util.set_tensor_shapes([tensor], {"Placeholder": [1, 5, 5]})
self.assertIn("The shape of tensor 'Placeholder' cannot be changed",
str(error.exception))
self.assertAllEqual([None, 3, 5], tensor.shape)
def testSetTensorShapeEmpty(self):
with ops.Graph().as_default():
tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3, 5])
self.assertAllEqual([None, 3, 5], tensor.shape)
util.set_tensor_shapes([tensor], {})
self.assertAllEqual([None, 3, 5], tensor.shape)
def _get_keras_model(add_unquantizable_layer=False):
"""Define Sample keras model and returns it."""
# Define a pseudo MNIST dataset (as downloading the dataset on-the-fly causes
# network connection failures)
n = 10 # Number of samples
images = np.random.randint(low=0, high=255, size=[n, 28, 28], dtype=np.uint8)
labels = np.random.randint(low=0, high=9, size=(n,), dtype=np.uint8)
# Normalize the input image so that each pixel value is between 0 to 1.
images = images / 255.0
# Define TF model
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28)),
tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10)
])
if add_unquantizable_layer:
# This adds Neg op to the model which will remain as float.
model.add(tf.keras.layers.Lambda(lambda x: -x))
# Train
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"])
model.fit(
images,
labels,
epochs=1,
validation_split=0.1,
)
return model
def _generate_integer_tflite_model(quantization_type=dtypes.int8,
use_saved_model=False,
saved_model_dir=None,
add_unquantizable_layer=False):
"""Define an integer post-training quantized tflite model."""
model = _get_keras_model(add_unquantizable_layer)
if not use_saved_model:
# Convert TF Model to an Integer Quantized TFLite Model
converter = lite.TFLiteConverterV2.from_keras_model(model)
else:
tf.saved_model.save(model, saved_model_dir)
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = {lite.Optimize.DEFAULT}
def representative_dataset_gen():
for _ in range(2):
yield [
np.random.uniform(low=0, high=1, size=(1, 28, 28)).astype(np.float32)
]
converter.representative_dataset = representative_dataset_gen
if quantization_type == dtypes.int8:
converter.target_spec.supported_ops = {lite.OpsSet.TFLITE_BUILTINS_INT8}
else:
converter.target_spec.supported_ops = {
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
}
tflite_model = converter.convert()
return tflite_model
def _test_param_modify_integer_model_io_type():
"""Function to generate parameterized inputs for testing."""
params = []
str_template = "_{}{}{}{}"
map_model_type = {
"PostTraining": True,
# "DuringTraining": False,
}
map_quantize_type_to_io_types = {
tf.int8: {tf.float32, tf.int8, tf.uint8},
tf.int16: {tf.float32, tf.int16}
}
for k1, v1 in map_model_type.items():
for qtype, v2 in map_quantize_type_to_io_types.items():
qstr = "_IntegerQuantize{}".format(qtype.name.capitalize())
for itype in v2:
istr = "_Input{}".format(itype.name.capitalize())
for otype in v2:
ostr = "_Output{}".format(otype.name.capitalize())
params.append((str_template.format(k1, qstr, istr,
ostr), v1, qtype, itype, otype))
return params
| TensorFunctionsTest |
python | google__jax | jax/_src/pallas/fuser/block_spec.py | {
"start": 66822,
"end": 78657
} | class ____(Protocol):
def __call__(
self,
ctx: PushRuleContext,
block_spec: pallas_core.BlockSpec | tuple[pallas_core.BlockSpec, ...],
**params: Any,
) -> pallas_core.BlockSpec | tuple[pallas_core.BlockSpec, ...]:
...
def register_push_block_spec_rule(
prim: core.Primitive,
) -> Callable[[PushBlockSpecRuleFn], PushBlockSpecRuleFn]:
def wrapper(
f: PushBlockSpecRuleFn,
) -> PushBlockSpecRuleFn:
push_block_spec_rules[prim] = f
return f
return wrapper
def _binop_push_rule(
prim: core.Primitive,
ctx: PullRuleContext,
left_block_spec: pallas_core.BlockSpec,
right_block_spec: pallas_core.BlockSpec,
**params: Any,
) -> Sequence[pallas_core.BlockSpec]:
del prim, params
left_aval, right_aval = ctx.avals_in
assert isinstance(left_aval, core.ShapedArray)
assert isinstance(right_aval, core.ShapedArray)
if not right_aval.shape:
return left_block_spec
if not left_aval.shape:
return right_block_spec
lhs_has_block_spec = left_block_spec is not pallas_core.no_block_spec
rhs_has_block_spec = right_block_spec is not pallas_core.no_block_spec
if not (lhs_has_block_spec ^ rhs_has_block_spec):
# We can only do a push if one of the block specs is unspecified
# or they are identical.
if left_block_spec is right_block_spec:
return left_block_spec
raise ValueError('Illegal binary push. One of the block specs must be no_block_spec.')
for l, r in zip(left_aval.shape, right_aval.shape, strict=True):
if l == 1 and r != 1 and lhs_has_block_spec:
raise ValueError('Cannot propagate block spec through LHS broadcast.')
if r == 1 and l != 1 and rhs_has_block_spec:
raise ValueError('Cannot propagate block spec through RHS broadcast.')
if left_block_spec is pallas_core.no_block_spec:
return right_block_spec
if right_block_spec is pallas_core.no_block_spec:
return left_block_spec
if right_block_spec != left_block_spec:
raise ValueError('Invalid block spec')
return left_block_spec
register_binop_push_rule = lambda prim: register_push_block_spec_rule(prim)(
functools.partial(_binop_push_rule, prim)
)
register_binop_push_rule(lax.mul_p)
register_binop_push_rule(lax.add_p)
register_binop_push_rule(lax.sub_p)
register_binop_push_rule(lax.div_p)
register_binop_push_rule(lax.max_p)
register_binop_push_rule(lax.lt_p)
register_binop_push_rule(lax.eq_p)
register_binop_push_rule(lax.gt_p)
register_binop_push_rule(lax.and_p)
register_binop_push_rule(lax.pow_p)
register_binop_push_rule(ad_util.add_any_p)
def _eltwise_push_rule(
prim: core.Primitive,
ctx: PullRuleContext,
block_spec: pallas_core.BlockSpec,
**params,
) -> pallas_core.BlockSpec:
del prim, ctx, params
return block_spec
@register_push_block_spec_rule(lax.transpose_p)
def _transpose_push_rule(
ctx: PushRuleContext,
block_spec: pallas_core.BlockSpec,
*,
permutation: tuple[int, ...],
) -> pallas_core.BlockSpec:
del ctx
block_shape = block_spec.block_shape
new_shape = tuple(block_shape[i] for i in permutation)
if set(permutation[-2:]) != {permutation[-1], permutation[-2]}:
raise NotImplementedError(
'Cannot permute last two dimensions with leading dimensions.'
)
def new_index_map(*args):
original_idxs = block_spec.index_map(*args)
return tuple(original_idxs[i] for i in permutation)
return pallas_core.BlockSpec(new_shape, new_index_map)
@register_push_block_spec_rule(lax.convert_element_type_p)
def _convert_element_type_push_rule(
ctx: PushRuleContext,
block_spec: pallas_core.BlockSpec,
*,
new_dtype: jnp.dtype,
weak_type: bool,
sharding: jax.sharding.Sharding,
):
del ctx, new_dtype, weak_type, sharding
return block_spec
@register_push_block_spec_rule(lax.select_n_p)
def _select_n_push_rule(
ctx: PushRuleContext,
*args: pallas_core.BlockSpec,
):
del ctx
block_specs = [b for b in args if b is not pallas_core.no_block_spec]
assert len(block_specs) > 0
block_spec = block_specs[0]
if len(block_specs) > 1:
if any(b is not block_spec for b in block_specs):
raise NotImplementedError(
'select_n with multiple differing inputs not supported yet'
)
return block_spec
@register_push_block_spec_rule(custom_derivatives.custom_jvp_call_p)
def _custom_jvp_call_push_rule(
ctx, *block_specs, call_jaxpr: core.ClosedJaxpr, **_
):
assert not call_jaxpr.consts
return _push_block_spec_jaxpr(call_jaxpr.jaxpr, *block_specs)
@register_push_block_spec_rule(custom_derivatives.custom_vjp_call_p)
def _custom_vjp_call_push_rule(
ctx,
*block_specs,
call_jaxpr: core.ClosedJaxpr,
num_consts,
fwd_jaxpr_thunk,
bwd,
out_trees,
symbolic_zeros,
):
del ctx, num_consts, fwd_jaxpr_thunk, bwd, out_trees, symbolic_zeros
return _push_block_spec_jaxpr(call_jaxpr.jaxpr, *block_specs)
@register_push_block_spec_rule(hijax.call_hi_primitive_p)
def _custom_call_hi_primitive_push_block_spec_rule(
ctx: PullRuleContext, *block_specs, prim
):
return prim.push_block_spec_rule(ctx, block_specs)
@register_push_block_spec_rule(pjit.jit_p)
def _pjit_push_rule(ctx, *block_specs, jaxpr: core.ClosedJaxpr, **_):
assert not jaxpr.consts
return _push_block_spec_jaxpr(jaxpr.jaxpr, *block_specs)
def register_eltwise_rule(prim: core.Primitive):
register_pull_block_spec_rule(prim)(
functools.partial(_eltwise_pull_rule, prim)
)
register_usage_rule(prim)(functools.partial(_eltwise_usage_rule, prim))
register_eval_rule(prim)(functools.partial(_eltwise_eval_rule, prim))
register_push_block_spec_rule(prim)(
functools.partial(_eltwise_push_rule, prim)
)
register_eltwise_rule(lax.exp_p)
register_eltwise_rule(lax.tanh_p)
register_eltwise_rule(lax.sin_p)
register_eltwise_rule(lax.cos_p)
register_eltwise_rule(lax.sqrt_p)
register_eltwise_rule(lax.rsqrt_p)
register_eltwise_rule(lax.square_p)
register_eltwise_rule(lax.log_p)
register_eltwise_rule(lax.integer_pow_p)
@register_push_block_spec_rule(lax.reshape_p)
def _reshape_push_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    dimensions: tuple[int, ...] | None,
    new_sizes: tuple[int, ...],
    sharding: jax.sharding.Sharding,
):
  """Pushes a block spec through `reshape`.

  Only the lanes-to-sublanes pattern (splitting the trailing dimension) is
  supported. The input's lanes block size must be 128-aligned and divisible
  by the new trailing dimension.

  Raises:
    NotImplementedError: for any other reshape, a fused transpose
      (non-None `dimensions`), or an unsupported block configuration.
  """
  del sharding, new_sizes
  if dimensions is not None:
    # A non-None `dimensions` fuses a transpose into the reshape, which is
    # not handled here. (The previous message said "None dimensions", which
    # contradicted this condition.)
    raise NotImplementedError(
        'reshape with non-None dimensions not supported yet'
    )
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  aval_out = ctx.avals_out[0]
  assert isinstance(aval_out, core.ShapedArray)
  if _pattern_match_lanes_to_sublanes_reshape(aval_in, aval_out):
    block_shape = tuple(block_spec.block_shape)
    if not isinstance(block_shape[-1], (int, pallas_core.Blocked)):
      raise NotImplementedError(
          f'reshape must use Blocked block size on lanes: {block_shape}'
      )
    last_dim = aval_out.shape[-1]
    last_block_dim = _block_size(block_shape[-1])
    if last_block_dim % 128 != 0:
      raise NotImplementedError(
          'reshape with non-128 aligned block size on lanes not supported yet'
      )
    if last_block_dim % last_dim != 0:
      raise NotImplementedError(
          'reshape with non-divisible block size on lanes not supported yet'
      )
    # The input lanes block splits into `num_last_dim_blocks` blocks of the
    # new (smaller) trailing dimension.
    num_last_dim_blocks = last_block_dim // last_dim
    # NOTE(review): `block_shape[:1]` keeps only the leading dim, implying a
    # rank-2 input block; presumably the pattern match enforces this — confirm.
    new_block_shape = block_shape[:1] + (num_last_dim_blocks, last_dim)
    def new_index_map(*args):
      *idx, last = block_spec.index_map(*args)
      # The new trailing dimension is fully covered by each block, so its
      # block index is always 0.
      return *idx, last, 0
    return pallas_core.BlockSpec(new_block_shape, new_index_map)
  raise NotImplementedError(f'reshape not supported yet: {aval_in}, {aval_out}')
@register_push_block_spec_rule(lax.reduce_sum_p)
def _reduce_sum_push_rule(
    ctx: PushRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    axes: tuple[int, ...],
    out_sharding,
):
  """Pushes a block spec through reduce_sum by dropping the reduced axes.

  Each reduced axis must be covered entirely by a single block; a partial
  block would require accumulating across grid steps.
  """
  del out_sharding
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  for axis in axes:
    block_dim = pallas_core.get_block_size(block_spec.block_shape[axis])
    if aval_in.shape[axis] != block_dim:
      raise NotImplementedError(
          f'reduce_sum over partial blocks not supported yet: {aval_in.shape=},'
          f' {block_spec.block_shape=}, {axes=}'
      )
  new_block_shape = tuple(
      dim for i, dim in enumerate(block_spec.block_shape) if i not in axes
  )
  def new_index_map(*args):
    # Drop the block indices of the reduced axes.
    full_idx = block_spec.index_map(*args)
    return tuple(x for i, x in enumerate(full_idx) if i not in axes)
  return block_spec.replace(
      block_shape=new_block_shape, index_map=new_index_map
  )
@register_push_block_spec_rule(lax.broadcast_in_dim_p)
def _broadcast_in_dim_push_rule(
    ctx: PushRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    shape: tuple[int, ...],
    broadcast_dimensions: tuple[int, ...],
    sharding: jax.sharding.Sharding,
):
  """Pushes a block spec through broadcast_in_dim.

  Output dimensions that map back to an input dimension keep the input's
  block size — unless the input dimension is actually being broadcast (its
  size differs from the output's), in which case the input block must be a
  single element and the output block spans the whole dimension. Newly
  introduced dimensions are fully materialized in each block and always use
  block index 0.
  """
  del sharding
  in_aval = ctx.avals_in[0]
  assert isinstance(in_aval, core.ShapedArray)
  in_shape = in_aval.shape
  # Maps each output dimension to the input dimension it originates from.
  dim_map = {
      out_dim: in_dim
      for in_dim, out_dim in enumerate(broadcast_dimensions)
  }
  new_block_shape = []
  for i, s in enumerate(shape):
    if i in dim_map:
      in_dim = dim_map[i]
      if in_shape[in_dim] != s:
        # Broadcast along this dim: only a size-1 input block is supported,
        # and the output block covers the full output extent.
        assert pallas_core.get_block_size(block_spec.block_shape[in_dim]) == 1
        new_block_shape.append(s)
      else:
        # Pass-through dim: reuse the input's block size as-is.
        new_block_shape.append(block_spec.block_shape[in_dim])
    else:
      # Brand-new dimension: materialize it entirely within the block.
      new_block_shape.append(s)
  def new_index_map(*args):
    idx = block_spec.index_map(*args)
    # Dimensions without an input counterpart always use block index 0.
    return tuple(
        idx[dim_map[i]] if i in dim_map else 0 for i in range(len(shape))
    )
  return pallas_core.BlockSpec(tuple(new_block_shape), new_index_map)
@register_push_block_spec_rule(lax.concatenate_p)
def _concatenate_push_rule(
    ctx: PushRuleContext,
    *block_specs: pallas_core.BlockSpec,
    dimension: int,
):
  """Pushes block specs through concatenate.

  Each input's block must cover its entire extent along `dimension`; the
  output block then materializes the full concatenation along that dimension
  (its size is the sum of the inputs' block sizes there).

  Raises:
    NotImplementedError: if any input block does not span the whole concat
      dimension.
    ValueError: (at index-map trace time) if the inputs' block indices cannot
      be statically proven identical.
  """
  avals_in = ctx.avals_in
  block_shapes = [
      pallas_core._canonicalize_block_shape(block_spec.block_shape)
      for block_spec in block_specs
  ]
  # We only support concatenation if the entirety of the concat dimension is blocked.
  assert all(hasattr(aval_in, 'shape') for aval_in in avals_in)
  if not all(
      block_shape[dimension] == pallas_core.Blocked(avals_in.shape[dimension])  # pytype: disable=attribute-error
      for block_shape, avals_in in zip(block_shapes, avals_in)
  ):
    raise NotImplementedError(
        f'concatenate not supported yet: {block_shapes=}, {avals_in=}'
    )
  def _new_index_map(*args):
    all_indices = [block_spec.index_map(*args) for block_spec in block_specs]
    # This is a very important check. We cannot actually construct a single BlockSpec
    # for the output of concatenate if the indices are not identical across all the
    # inputs. This is not something we can always enforce statically, but to be conservative
    # we apply a very aggressive check (object identity via `is`, not `==`). We can
    # consider relaxing this later.
    if not all(
        (all_indices[0][i] is all_indices[j][i])
        for i in range(len(all_indices[0]))
        for j in range(len(all_indices))
    ):
      raise ValueError(
          'Cannot statically prove that all input blocks to concatenate are the'
          ' same.'
      )
    # If all block indices are the same, we are materializing the full concatenation along
    # the concat dimension, so we use index 0.
    base_indices = list(all_indices[0])
    base_indices[dimension] = 0
    return tuple(base_indices)
  new_block_shape = list(block_specs[0].block_shape)
  # Since the entirety of the concat dimension is materialized in the blocks,
  # the new block size is the sum of the block sizes of the inputs along that
  # dimension.
  new_block_shape[dimension] = sum(
      pallas_core.get_block_size(block_shape[dimension])
      for block_shape in block_shapes
  )
  return pallas_core.BlockSpec(tuple(new_block_shape), _new_index_map)
| PushBlockSpecRuleFn |
python | ansible__ansible | lib/ansible/_internal/_json/_profiles/_legacy.py | {
"start": 577,
"end": 1048
} | class ____:
"""
Temporarily wraps strings which are not trusted for templating.
Used before serialization of strings not tagged TrustedAsTemplate when trust inversion is enabled and trust is allowed in the string's context.
Used during deserialization of `__ansible_unsafe` strings to indicate they should not be tagged TrustedAsTemplate.
"""
__slots__ = ('value',)
def __init__(self, value: str) -> None:
self.value = value
| _Untrusted |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 4846,
"end": 4942
} | class ____:
prop1: int
def __init__(self):
self.prop1 = 0
@dataclass
| Concrete15_3 |
python | Netflix__metaflow | metaflow/plugins/secrets/secrets_spec.py | {
"start": 127,
"end": 4251
} | class ____:
def __init__(self, secrets_backend_type, secret_id, options={}, role=None):
self._secrets_backend_type = secrets_backend_type
self._secret_id = secret_id
self._options = options
self._role = role
@property
def secrets_backend_type(self):
return self._secrets_backend_type
@property
def secret_id(self):
return self._secret_id
@property
def options(self):
return self._options
@property
def role(self):
return self._role
def to_json(self):
"""Mainly used for testing... not the same as the input dict in secret_spec_from_dict()!"""
return {
"secrets_backend_type": self.secrets_backend_type,
"secret_id": self.secret_id,
"options": self.options,
"role": self.role,
}
def __str__(self):
return "%s (%s)" % (self._secret_id, self._secrets_backend_type)
@staticmethod
def secret_spec_from_str(secret_spec_str, role):
# "." may be used in secret_id one day (provider specific). HOWEVER, it provides the best UX for
# non-conflicting cases (i.e. for secret ids that don't contain "."). This is true for all AWS
# Secrets Manager secrets.
#
# So we skew heavily optimize for best upfront UX for the present (1/2023).
#
# If/when a certain secret backend supports "." secret names, we can figure out a solution at that time.
# At a minimum, dictionary style secret spec may be used with no code changes (see secret_spec_from_dict()).
# Other options could be:
# - accept and document that "." secret_ids don't work in Metaflow (across all possible providers)
# - add a Metaflow config variable that specifies the separator (default ".")
# - smarter spec parsing, that errors on secrets that look ambiguous. "aws-secrets-manager.XYZ" could mean:
# + secret_id "XYZ" in aws-secrets-manager backend, OR
# + secret_id "aws-secrets-manager.XYZ" default backend (if it is defined).
# + in this case, user can simply set "azure-key-vault.aws-secrets-manager.XYZ" instead!
parts = secret_spec_str.split(".", maxsplit=1)
if len(parts) == 1:
secrets_backend_type = get_default_secrets_backend_type()
secret_id = parts[0]
else:
secrets_backend_type = parts[0]
secret_id = parts[1]
return SecretSpec(
secrets_backend_type, secret_id=secret_id, options={}, role=role
)
@staticmethod
def secret_spec_from_dict(secret_spec_dict, role):
if "type" not in secret_spec_dict:
secrets_backend_type = get_default_secrets_backend_type()
else:
secrets_backend_type = secret_spec_dict["type"]
if not isinstance(secrets_backend_type, str):
raise MetaflowException(
"Bad @secrets specification - 'type' must be a string - found %s"
% type(secrets_backend_type)
)
secret_id = secret_spec_dict.get("id")
if not isinstance(secret_id, str):
raise MetaflowException(
"Bad @secrets specification - 'id' must be a string - found %s"
% type(secret_id)
)
options = secret_spec_dict.get("options", {})
if not isinstance(options, dict):
raise MetaflowException(
"Bad @secrets specification - 'option' must be a dict - found %s"
% type(options)
)
role_for_source = secret_spec_dict.get("role", None)
if role_for_source is not None:
if not isinstance(role_for_source, str):
raise MetaflowException(
"Bad @secrets specification - 'role' must be a str - found %s"
% type(role_for_source)
)
role = role_for_source
return SecretSpec(
secrets_backend_type, secret_id=secret_id, options=options, role=role
)
| SecretSpec |
python | pdm-project__pdm | src/pdm/models/candidates.py | {
"start": 11067,
"end": 29166
} | class ____:
"""A candidate that has been prepared for installation.
The metadata and built wheel are available.
"""
_build_dir_cache: ClassVar[dict[Link, str]] = {}
candidate: Candidate
environment: BaseEnvironment
reporter: CandidateReporter = dataclasses.field(default_factory=CandidateReporter)
def __post_init__(self) -> None:
self.req = self.candidate.req
self.link = self._replace_url_vars(self.candidate.link)
self._cached: Path | None = None
self._source_dir: Path | None = None
self._unpacked_dir: Path | None = None
self._metadata_dir: str | None = None
self._metadata: im.Distribution | None = None
if self.link is not None and self.link.is_file and self.link.file_path.is_dir():
self._source_dir = self.link.file_path
self._unpacked_dir = self._source_dir / (self.link.subdirectory or "")
def _replace_url_vars(self, link: Link | None) -> Link | None:
if link is None:
return None
url = self.environment.project.backend.expand_line(link.normalized)
return dataclasses.replace(link, url=url)
@cached_property
def revision(self) -> str:
from unearth import vcs_support
if not (self._source_dir and os.path.exists(self._source_dir)):
# It happens because the cached wheel is hit and the source code isn't
# pulled to local. In this case the link url must contain the full commit
# hash which can be taken as the revision safely.
# See more info at https://github.com/pdm-project/pdm/issues/349
rev = get_rev_from_url(self.candidate.link.url) # type: ignore[union-attr]
if rev:
return rev
assert isinstance(self.req, VcsRequirement)
return vcs_support.get_backend(self.req.vcs, self.environment.project.core.ui.verbosity).get_revision(
cast(Path, self._source_dir)
)
def direct_url(self) -> dict[str, Any] | None:
"""PEP 610 direct_url.json data"""
req = self.req
if isinstance(req, VcsRequirement):
if req.editable:
assert self._source_dir
return _filter_none(
{
"url": self._source_dir.as_uri(),
"dir_info": {"editable": True},
"subdirectory": req.subdirectory,
}
)
return _filter_none(
{
"url": url_without_fragments(req.repo),
"vcs_info": _filter_none(
{
"vcs": req.vcs,
"requested_revision": req.ref,
"commit_id": self.revision,
}
),
"subdirectory": req.subdirectory,
}
)
elif isinstance(req, FileRequirement):
assert self.link is not None
if self.link.is_file and self.link.file_path.is_dir():
return _filter_none(
{
"url": self.link.url_without_fragment,
"dir_info": _filter_none({"editable": req.editable or None}),
"subdirectory": req.subdirectory,
}
)
hash_cache = self.environment.project.make_hash_cache()
return _filter_none(
{
"url": self.link.url_without_fragment,
"archive_info": {
"hash": hash_cache.get_hash(self.link, self.environment.session).replace(":", "=")
},
"subdirectory": req.subdirectory,
}
)
else:
return None
def build(self) -> Path:
"""Call PEP 517 build hook to build the candidate into a wheel"""
self._obtain(allow_all=False)
if self._cached:
return self._cached
if not self.req.editable:
cached = self._get_build_cache()
if cached:
return cached
assert self._source_dir, "Source directory isn't ready yet"
builder_cls = EditableBuilder if self.req.editable else WheelBuilder
builder = builder_cls(str(self._unpacked_dir), self.environment)
build_dir = self._get_wheel_dir()
os.makedirs(build_dir, exist_ok=True)
termui.logger.info("Running PEP 517 backend to build a wheel for %s", self.link)
self.reporter.report_build_start(self.link.filename) # type: ignore[union-attr]
self._cached = Path(builder.build(build_dir, metadata_directory=self._metadata_dir))
self.reporter.report_build_end(self.link.filename) # type: ignore[union-attr]
return self._cached
def _obtain(self, allow_all: bool = False, unpack: bool = True) -> None:
"""Fetch the link of the candidate and unpack to local if necessary.
:param allow_all: If true, don't validate the wheel tag nor hashes
:param unpack: Whether to download and unpack the link if it's not local
"""
if self._cached and self._wheel_compatible(self._cached.name, allow_all):
return
if self._source_dir and self._source_dir.exists():
return
sources = filtered_sources(self.environment.project.sources, self.req.key)
env_spec = self.environment.allow_all_spec if allow_all else self.environment.spec
with self.environment.get_finder(sources, env_spec=env_spec) as finder:
if not self.link or (self.link.is_wheel and not self._wheel_compatible(self.link.filename, allow_all)):
if self.req.is_file_or_url:
raise CandidateNotFound(f"The URL requirement {self.req.as_line()} is a wheel but incompatible")
self.link = self._cached = None # reset the incompatible wheel
self.link = _find_best_match_link(
finder, self.req.as_pinned_version(self.candidate.version), self.candidate.hashes
)
if not self.link:
raise CandidateNotFound(
f"No candidate is found for `{self.req.project_name}` that matches the environment or hashes"
)
if not self.candidate.link:
self.candidate.link = self.link
# find if there is any build cache for the candidate
if not self.req.editable:
cached = self._get_build_cache()
if cached and self._wheel_compatible(cached.name, allow_all):
self._cached = cached
return
# If not, download and unpack the link
if unpack:
self._unpack(validate_hashes=not allow_all)
def _unpack(self, validate_hashes: bool = False) -> None:
hash_options = None
if validate_hashes and self.candidate.hashes:
hash_options = convert_hashes(self.candidate.hashes)
assert self.link is not None
with self.environment.get_finder() as finder:
with TemporaryDirectory(prefix="pdm-download-") as tmpdir:
build_dir = self._get_build_dir()
if self.link.is_wheel:
download_dir = build_dir
else:
download_dir = tmpdir
result = finder.download_and_unpack(
self.link,
build_dir,
download_dir,
hash_options,
download_reporter=self.reporter.report_download,
unpack_reporter=self.reporter.report_unpack,
)
if self.link.is_wheel:
self._cached = result
else:
self._source_dir = Path(build_dir)
self._unpacked_dir = result
def prepare_metadata(self, force_build: bool = False) -> im.Distribution:
if self.candidate.installed is not None:
return self.candidate.installed
self._obtain(allow_all=True, unpack=False)
if self._metadata_dir:
return im.PathDistribution(Path(self._metadata_dir))
if self._cached:
return self._get_metadata_from_wheel(self._cached)
assert self.link is not None
if self.link.dist_info_metadata:
assert self.link.dist_info_link
dist = self._get_metadata_from_metadata_link(self.link.dist_info_link, self.link.dist_info_metadata)
if dist is not None:
return dist
self._unpack(validate_hashes=False)
if self._cached: # check again if the wheel is downloaded to local
return self._get_metadata_from_wheel(self._cached)
assert self._unpacked_dir, "Source directory isn't ready yet"
pyproject_toml = self._unpacked_dir / "pyproject.toml"
if not force_build and pyproject_toml.exists():
dist = self._get_metadata_from_project(pyproject_toml)
if dist is not None:
return dist
# If all fail, try building the source to get the metadata
metadata_parent = self.environment.project.core.create_temp_dir(prefix="pdm-meta-")
return self._get_metadata_from_build(self._unpacked_dir, metadata_parent)
def _get_metadata_from_metadata_link(
self, link: Link, medata_hash: bool | dict[str, str] | None
) -> im.Distribution | None:
resp = self.environment.session.get(link.normalized)
if isinstance(medata_hash, dict):
hash_name, hash_value = next(iter(medata_hash.items()))
if hashlib.new(hash_name, resp.content).hexdigest() != hash_value:
termui.logger.warning("Metadata hash mismatch for %s, ignoring the metadata", link)
return None
return MetadataDistribution(resp.text)
def _get_metadata_from_wheel(self, wheel: Path) -> im.Distribution:
# Get metadata from METADATA inside the wheel
metadata_parent = self.environment.project.core.create_temp_dir(prefix="pdm-meta-")
dist_info = self._metadata_dir = _get_wheel_metadata_from_wheel(wheel, metadata_parent)
return im.PathDistribution(Path(dist_info))
def _get_metadata_from_project(self, pyproject_toml: Path) -> im.Distribution | None:
# Try getting from PEP 621 metadata
from pdm.formats import MetaConvertError
from pdm.project.project_file import PyProject
try:
pyproject = PyProject(pyproject_toml, ui=self.environment.project.core.ui)
except MetaConvertError as e:
termui.logger.warning("Failed to parse pyproject.toml: %s", e)
return None
metadata = pyproject.metadata
if not metadata:
termui.logger.warning("Failed to parse pyproject.toml")
return None
dynamic_fields = metadata.get("dynamic", [])
# Use the parse result only when all are static
if not set(dynamic_fields).isdisjoint(
{
"name",
"version",
"dependencies",
"optional-dependencies",
"requires-python",
}
):
return None
try:
backend_cls = get_backend_by_spec(pyproject.build_system)
except Exception:
# no variable expansion
backend_cls = get_backend("setuptools")
backend = backend_cls(pyproject_toml.parent)
if "name" not in metadata:
termui.logger.warning("Failed to parse pyproject.toml, name is required")
return None
setup = Setup(
name=metadata.get("name"),
summary=metadata.get("description"),
version=metadata.get("version", "0.0.0"),
install_requires=list(
map(
backend.expand_line,
metadata.get("dependencies", []),
)
),
extras_require={
k: list(map(backend.expand_line, v)) for k, v in metadata.get("optional-dependencies", {}).items()
},
python_requires=metadata.get("requires-python"),
)
return setup.as_dist()
def _get_metadata_from_build(self, source_dir: Path, metadata_parent: str) -> im.Distribution:
builder = EditableBuilder if self.req.editable else WheelBuilder
try:
termui.logger.info("Running PEP 517 backend to get metadata for %s", self.link)
self.reporter.report_build_start(self.link.filename) # type: ignore[union-attr]
self._metadata_dir = builder(source_dir, self.environment).prepare_metadata(metadata_parent)
self.reporter.report_build_end(self.link.filename) # type: ignore[union-attr]
except BuildError:
termui.logger.warning("Failed to build package, try parsing project files.")
try:
setup = Setup.from_directory(source_dir)
except Exception:
message = "Failed to parse the project files, dependencies may be missing"
termui.logger.warning(message)
warnings.warn(message, PDMWarning, stacklevel=1)
setup = Setup()
return setup.as_dist()
else:
return im.PathDistribution(Path(cast(str, self._metadata_dir)))
@property
def metadata(self) -> im.Distribution:
if self._metadata is None:
result = self.prepare_metadata()
if not self.candidate.name:
self.req.name = self.candidate.name = cast(str, result.metadata.get("Name"))
if not self.candidate.version and result.metadata.get("Version"):
self.candidate.version = result.version
if not self.candidate.requires_python:
self.candidate.requires_python = result.metadata.get("Requires-Python", "")
self._metadata = result
return self._metadata
def get_dependencies_from_metadata(self) -> list[Requirement]:
"""Get the dependencies of a candidate from metadata."""
extras = self.req.extras or ()
return filter_requirements_with_extras(self.metadata.requires or [], extras)
def should_cache(self) -> bool:
"""Determine whether to cache the dependencies and built wheel."""
from unearth import vcs_support
if not self.environment.project.core.state.enable_cache:
return False
link, source_dir = self.candidate.link, self._source_dir
if self.req.editable:
return False
if self.req.is_named:
return True
if self.req.is_vcs:
if not source_dir:
# If the candidate isn't prepared, we can't cache it
return False
assert link
vcs_backend = vcs_support.get_backend(link.vcs, self.environment.project.core.ui.verbosity)
return vcs_backend.is_immutable_revision(source_dir, link)
if link and not (link.is_file and link.file_path.is_dir()):
# Cache if the link contains egg-info like 'foo-1.0'
return _egg_info_re.search(link.filename) is not None
return False
def _get_build_cache(self) -> Path | None:
if not self.environment.project.core.state.enable_cache:
return None
wheel_cache = self.environment.project.make_wheel_cache()
assert self.candidate.link
cache_entry = wheel_cache.get(self.candidate.link, self.candidate.name, self.environment.spec)
if cache_entry is not None:
termui.logger.info("Using cached wheel: %s", cache_entry)
return cache_entry
def _get_build_dir(self) -> str:
assert self.link is not None
if self.link.is_file and self.link.file_path.is_dir():
# Local directories are built in tree
return str(self.link.file_path)
if self.req.editable:
# In this branch the requirement must be an editable VCS requirement.
# The repository will be unpacked into a *persistent* src directory.
prefix: Path | None = None
if self.environment.is_local:
prefix = self.environment.packages_path # type: ignore[attr-defined]
else:
venv = self.environment.interpreter.get_venv()
if venv is not None:
prefix = venv.root
if prefix is not None:
src_dir = prefix / "src"
else:
src_dir = Path("src")
src_dir.mkdir(exist_ok=True, parents=True)
dirname = self.candidate.name or self.req.name
if not dirname:
dirname, _ = os.path.splitext(self.link.filename)
return str(src_dir / str(dirname))
# Otherwise, for source dists, they will be unpacked into a *temp* directory.
if (build_dir := self._build_dir_cache.get(self.link)) is None:
build_dir = self._build_dir_cache[self.link] = self.environment.project.core.create_temp_dir(
prefix="pdm-build-"
)
return build_dir
def _wheel_compatible(self, wheel_file: str, allow_all: bool = False) -> bool:
env_spec = self.environment.allow_all_spec if allow_all else self.environment.spec
return env_spec.wheel_compatibility(wheel_file) is not None
def _get_wheel_dir(self) -> str:
assert self.candidate.link
wheel_cache = self.environment.project.make_wheel_cache()
if self.should_cache():
termui.logger.info("Saving wheel to cache: %s", self.candidate.link)
return wheel_cache.get_path_for_link(self.candidate.link, self.environment.spec).as_posix()
else:
return wheel_cache.get_ephemeral_path_for_link(self.candidate.link, self.environment.spec).as_posix()
| PreparedCandidate |
python | getsentry__sentry | src/sentry/discover/arithmetic.py | {
"start": 645,
"end": 737
} | class ____(ArithmeticError):
"""Exceeded the maximum allowed operators"""
| MaxOperatorError |
python | neetcode-gh__leetcode | python/0435-non-overlapping-intervals.py | {
"start": 0,
"end": 373
} | class ____:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort()
res = 0
prevEnd = intervals[0][1]
for start, end in intervals[1:]:
if start >= prevEnd:
prevEnd = end
else:
res += 1
prevEnd = min(end, prevEnd)
return res
| Solution |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 23327,
"end": 25875
} | class ____(BaseModel):
"""
Task Instance body for get batch.
"""
model_config = ConfigDict(
extra="forbid",
)
dag_ids: Annotated[list[str] | None, Field(title="Dag Ids")] = None
dag_run_ids: Annotated[list[str] | None, Field(title="Dag Run Ids")] = None
task_ids: Annotated[list[str] | None, Field(title="Task Ids")] = None
state: Annotated[list[TaskInstanceState | None] | None, Field(title="State")] = None
run_after_gte: Annotated[datetime | None, Field(title="Run After Gte")] = None
run_after_gt: Annotated[datetime | None, Field(title="Run After Gt")] = None
run_after_lte: Annotated[datetime | None, Field(title="Run After Lte")] = None
run_after_lt: Annotated[datetime | None, Field(title="Run After Lt")] = None
logical_date_gte: Annotated[datetime | None, Field(title="Logical Date Gte")] = None
logical_date_gt: Annotated[datetime | None, Field(title="Logical Date Gt")] = None
logical_date_lte: Annotated[datetime | None, Field(title="Logical Date Lte")] = None
logical_date_lt: Annotated[datetime | None, Field(title="Logical Date Lt")] = None
start_date_gte: Annotated[datetime | None, Field(title="Start Date Gte")] = None
start_date_gt: Annotated[datetime | None, Field(title="Start Date Gt")] = None
start_date_lte: Annotated[datetime | None, Field(title="Start Date Lte")] = None
start_date_lt: Annotated[datetime | None, Field(title="Start Date Lt")] = None
end_date_gte: Annotated[datetime | None, Field(title="End Date Gte")] = None
end_date_gt: Annotated[datetime | None, Field(title="End Date Gt")] = None
end_date_lte: Annotated[datetime | None, Field(title="End Date Lte")] = None
end_date_lt: Annotated[datetime | None, Field(title="End Date Lt")] = None
duration_gte: Annotated[float | None, Field(title="Duration Gte")] = None
duration_gt: Annotated[float | None, Field(title="Duration Gt")] = None
duration_lte: Annotated[float | None, Field(title="Duration Lte")] = None
duration_lt: Annotated[float | None, Field(title="Duration Lt")] = None
pool: Annotated[list[str] | None, Field(title="Pool")] = None
queue: Annotated[list[str] | None, Field(title="Queue")] = None
executor: Annotated[list[str] | None, Field(title="Executor")] = None
page_offset: Annotated[int | None, Field(ge=0, title="Page Offset")] = 0
page_limit: Annotated[int | None, Field(ge=0, title="Page Limit")] = 100
order_by: Annotated[str | None, Field(title="Order By")] = None
| TaskInstancesBatchBody |
python | pytorch__pytorch | torch/_inductor/runtime/hints.py | {
"start": 3394,
"end": 3720
} | class ____(Enum):
ONE_ELEMENT_PER_THREAD = 0
# Triton codegen tries to codegen set of AutotuneHints.
# Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>""
# which isn't valid python.
# Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32".
__repr__ = Enum.__str__
| AutotuneHint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.