language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/distributed/checkpoint/test_compatibility.py | {
"start": 464,
"end": 3461
} | class ____(TestCase):
def test_metadata(self) -> None:
# Ensure that all the new fields of all the metadata have the default
# values so that we can always deserialize from a legacy metadata.
try:
tensor = torch.zeros(4, 4)
chunk_meta = ChunkStorageMetadata(
torch.Size((1, 1)),
torch.Size((1, 1)),
)
tensor_meta = TensorStorageMetadata(
properties=TensorProperties.create_from_tensor(tensor),
size=tensor.size(),
chunks=[chunk_meta],
)
b_meta = BytesStorageMetadata()
_ = Metadata(state_dict_metadata={"a": tensor_meta, "b": b_meta})
_ = MetadataIndex(fqn="a.b.c")
except Exception as e:
raise RuntimeError(
"The change may break the BC of distributed checkpoint."
) from e
def test_sharded_tensor_dependency(self) -> None:
# Ensure that we can load the existing DCP checkpoints back even if the
# metadata contain # _shard.sharded_tensor.metadata.
from torch.distributed._shard.sharded_tensor.metadata import (
TensorProperties as stp,
)
with patch("torch.distributed.checkpoint.metadata.TensorProperties", stp):
dcp.save(
{"a": torch.zeros(4, 4)},
dcp.FileSystemWriter("/tmp/dcp_testing"),
)
dcp.load(
{"a": torch.zeros(4, 4)},
dcp.FileSystemReader("/tmp/dcp_testing"),
)
@with_temp_dir
def test_storage_meta(self) -> None:
writer = dcp.FileSystemWriter(self.temp_dir)
dcp.save({"a": torch.zeros(4, 4)}, storage_writer=writer)
reader = dcp.FileSystemReader(self.temp_dir)
storage_meta = reader.read_metadata().storage_meta
self.assertNotEqual(storage_meta, None)
self.assertEqual(str(storage_meta.checkpoint_id), self.temp_dir)
self.assertEqual(storage_meta.save_id, writer.save_id)
self.assertEqual(storage_meta.load_id, reader.load_id)
@with_temp_dir
def test_with_v_2_3(self) -> None:
sd = {
"a": torch.zeros(4, 4),
"dict": {
"dict_a": {"dict_a_1": 1, "dict_a_2": 2},
"dict_b": {"dict_b_1": 1, "dict_b_2": 2},
},
"list": [0, 1, 2, 3, 4, 5],
}
load_sd = {
"a": torch.ones(4, 4),
"dict": {
"dict_a": {"dict_a_1": 2, "dict_a_2": 4},
"dict_b": {"dict_b_1": 2, "dict_b_2": 4},
},
"list": [10, 11, 12, 13, 14, 15],
}
dcp._version._act_like_version = "2_3"
dcp.save(sd, checkpoint_id=self.temp_dir)
dcp._version._act_like_version = None
dcp.load(load_sd, checkpoint_id=self.temp_dir)
self.assertEqual(sd, load_sd)
if __name__ == "__main__":
run_tests()
| TestDCPCompatbility |
python | django__django | tests/admin_inlines/tests.py | {
"start": 36724,
"end": 55235
} | class ____(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
@classmethod
def setUpTestData(cls):
cls.user = User(username="admin", is_staff=True, is_active=True)
cls.user.set_password("secret")
cls.user.save()
cls.author_ct = ContentType.objects.get_for_model(Author)
cls.holder_ct = ContentType.objects.get_for_model(Holder2)
cls.book_ct = ContentType.objects.get_for_model(Book)
cls.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(
codename="add_author", content_type=cls.author_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_author", content_type=cls.author_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="add_holder2", content_type=cls.holder_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_holder2", content_type=cls.holder_ct
)
cls.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name="The Author")
cls.book = author.books.create(name="The inline Book")
cls.author_change_url = reverse(
"admin:admin_inlines_author_change", args=(author.id,)
)
# Get the ID of the automatically created intermediate model for the
# Author-Book m2m.
author_book_auto_m2m_intermediate = Author.books.through.objects.get(
author=author, book=cls.book
)
cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
cls.holder = Holder2.objects.create(dummy=13)
cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder)
def setUp(self):
self.holder_change_url = reverse(
"admin:admin_inlines_holder2_change", args=(self.holder.id,)
)
self.client.force_login(self.user)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# No change permission on books, so no inline
self.assertNotContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse("admin:admin_inlines_holder2_add"))
# No permissions on Inner2s, so no inline
self.assertNotContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertNotContains(response, "Add another Inner2")
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertNotContains(response, "Add another Inner2")
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_view_only_perm(self):
permission = Permission.objects.get(
codename="view_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# View-only inlines. (It could be nicer to hide the empty, non-editable
# inlines on the add page.)
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_add_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, False
)
self.assertContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertContains(
response,
'<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" '
'id="id_Author_books-TOTAL_FORMS">',
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(
codename="add_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# No change permission on Books, so no inline
self.assertNotContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_holder2_add"))
# Add permission on inner2s, so we get the inline
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertContains(response, "Add another Inner2")
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS">',
html=True,
)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(
codename="add_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_view_only_perm(self):
permission = Permission.objects.get(
codename="view_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# View-only inlines.
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_add_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, False
)
self.assertContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertContains(
response,
'<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
'id="id_Author_books-TOTAL_FORMS">',
html=True,
)
# The field in the inline is read-only.
self.assertContains(response, "<p>%s</p>" % self.book)
self.assertNotContains(
response,
'<input type="checkbox" name="Author_books-0-DELETE" '
'id="id_Author_books-0-DELETE">',
html=True,
)
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(
codename="change_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(response.context["inline_admin_formset"].has_add_permission, True)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, True
)
self.assertContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
self.assertContains(response, "Add another Author-book relationship")
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id,
html=True,
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertContains(response, "Add another Inner2")
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertNotContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add
# new
self.assertContains(
response,
'<h2 id="inner2_set-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
# Just the one form for existing instances
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
# max-num 0 means we can't add new ones
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" '
'name="inner2_set-MAX_NUM_FORMS">',
html=True,
)
# TabularInline
self.assertContains(
response, '<th class="column-dummy required">Dummy</th>', html=True
)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
# One form for existing instance and three extra for new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="delete_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
# One form for existing instance only, no new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="delete_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(
response,
'<h2 id="inner2_set-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
self.assertContains(
response,
'<h2 id="inner2_set-2-heading" class="inline-heading">Inner2s</h2>',
html=True,
)
# One form for existing instance only, three for new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
# TabularInline
self.assertContains(
response, '<th class="column-dummy required">Dummy</th>', html=True
)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
| TestInlinePermissions |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 16946,
"end": 17223
} | class ____(logging.Formatter):
"""Don't include full path in exc_info messages"""
def formatException(self, exc_info):
formatted = super().formatException(exc_info)
return formatted.replace(__file__, ".../" + os.path.basename(__file__))
| ShortExcFormatter |
python | keras-team__keras | keras/src/optimizers/rmsprop.py | {
"start": 161,
"end": 5775
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the RMSprop algorithm.
The gist of RMSprop is to:
- Maintain a moving (discounted) average of the square of gradients
- Divide the gradient by the root of this average
This implementation of RMSprop uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving average of the
gradients, and uses that average to estimate the variance.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
rho: float, defaults to 0.9. Discounting factor for the old gradients.
momentum: float, defaults to 0.0. If not 0.0., the optimizer tracks the
momentum value, with a decay rate equals to `1 - momentum`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to 1e-7.
centered: Boolean. If `True`, gradients are normalized by the estimated
variance of the gradient; if False, by the uncentered second moment.
Setting this to `True` may help with training, but is slightly more
expensive in terms of computation and memory. Defaults to `False`.
{{base_optimizer_keyword_args}}
Example:
>>> opt = keras.optimizers.RMSprop(learning_rate=0.1)
>>> var1 = keras.backend.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1
>>> opt.minimize(loss, [var1])
>>> var1
9.683772
Reference:
- [Hinton, 2012](
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.9,
momentum=0.0,
epsilon=1e-7,
centered=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="rmsprop",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._velocities = self.add_optimizer_variables(var_list, "velocity")
self._momentums = []
if self.momentum > 0:
self._momentums = self.add_optimizer_variables(var_list, "momentum")
self._average_gradients = []
if self.centered:
self._average_gradients = self.add_optimizer_variables(
var_list, "average_gradient"
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
velocity = self._velocities[self._get_variable_index(variable)]
momentum = None
if self.momentum > 0:
momentum = self._momentums[self._get_variable_index(variable)]
average_grad = None
if self.centered:
average_grad = self._average_gradients[
self._get_variable_index(variable)
]
rho = self.rho
self.assign(
velocity,
ops.add(
ops.multiply(rho, velocity),
ops.multiply(1 - rho, ops.square(gradient)),
),
)
if self.centered:
self.assign(
average_grad,
ops.add(
ops.multiply(rho, average_grad),
ops.multiply(1 - rho, gradient),
),
)
denominator = velocity - ops.square(average_grad) + self.epsilon
else:
denominator = ops.add(velocity, self.epsilon)
increment = ops.divide(
ops.multiply(lr, gradient), ops.sqrt(denominator)
)
if self.momentum > 0:
self.assign(
momentum,
ops.add(ops.multiply(self.momentum, momentum), increment),
)
self.assign_sub(variable, momentum)
else:
self.assign_sub(variable, increment)
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"momentum": self.momentum,
"epsilon": self.epsilon,
"centered": self.centered,
}
)
return config
RMSprop.__doc__ = RMSprop.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| RMSprop |
python | jazzband__django-formtools | tests/tests.py | {
"start": 6838,
"end": 8646
} | class ____(unittest.TestCase):
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Speaking español.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Speaking español. '})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True, use_required_attribute=False)
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
def test_hash_with_file(self):
with InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') as some_file:
f1 = HashTestFormWithFile({'name': 'joe'})
f2 = HashTestFormWithFile({'name': 'joe'}, files={'attachment': some_file})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertNotEqual(hash1, hash2)
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as some_file:
some_file.write(b'1')
some_file.seek(0)
f1 = HashTestFormWithFile({'name': 'joe'})
f2 = HashTestFormWithFile({'name': 'joe'}, files={'attachment': some_file})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertNotEqual(hash1, hash2)
| FormHmacTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 160274,
"end": 164049
} | class ____(DialectKWArgs, HasConditionalDDL, SchemaItem):
"""A table-level SQL constraint.
:class:`_schema.Constraint` serves as the base class for the series of
constraint objects that can be associated with :class:`_schema.Table`
objects, including :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`
:class:`_schema.UniqueConstraint`, and
:class:`_schema.CheckConstraint`.
"""
__visit_name__ = "constraint"
_creation_order: int
_column_flag: bool
def __init__(
self,
name: _ConstraintNameArgument = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_create_rule: Optional[Any] = None,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
:param _create_rule:
used internally by some datatypes that also create constraints.
:param _type_bound:
used internally to indicate that this constraint is associated with
a specific datatype.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
self.comment = comment
def _should_create_for_compiler(
self, compiler: DDLCompiler, **kw: Any
) -> bool:
if self._create_rule is not None and not self._create_rule(compiler):
return False
elif self._ddl_if is not None:
return self._ddl_if._should_execute(
ddl.CreateConstraint(self), self, None, compiler=compiler, **kw
)
else:
return True
@property
def table(self) -> Table:
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?"
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, (Table, Column))
self.parent = parent
parent.constraints.add(self)
@util.deprecated(
"1.4",
"The :meth:`_schema.Constraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw: Any) -> Self:
return self._copy(**kw)
def _copy(self, **kw: Any) -> Self:
raise NotImplementedError()
| Constraint |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 6343,
"end": 6415
} | class ____(Exception):
"Specified columns do not exist"
| InvalidColumns |
python | numba__numba | numba/tests/test_record_dtype.py | {
"start": 26299,
"end": 26693
} | class ____(TestRecordDtype):
'''
Same as TestRecordDtype, but stressing the Dispatcher's type dispatch
mechanism (issue #384). Note that this does not stress caching of ndarray
typecodes as the path that uses the cache is not taken with recarrays.
'''
def get_cfunc(self, pyfunc, argspec):
return _get_cfunc_nopython(pyfunc, argspec)
| TestRecordDtypeWithDispatcher |
python | kamyu104__LeetCode-Solutions | Python/maximum-area-rectangle-with-point-constraints-i.py | {
"start": 66,
"end": 1588
} | class ____(object):
def maxRectangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
class BIT(object): # 0-indexed.
def __init__(self, n):
self.__bit = [0]*(n+1) # Extra one for dummy node.
def add(self, i, val):
i += 1 # Extra one for dummy node.
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1 # Extra one for dummy node.
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
points.sort()
y_to_idx = {y:idx for idx, y in enumerate(sorted(set(y for _, y in points)))}
bit = BIT(len(y_to_idx))
lookup = {}
result = -1
for i, (x, y) in enumerate(points):
y_idx = y_to_idx[y]
bit.add(y_idx, +1)
if not (i-1 >= 0 and points[i-1][0] == x):
continue
prev_y_idx = y_to_idx[points[i-1][1]]
curr = bit.query(y_idx)-bit.query(prev_y_idx-1)
if (prev_y_idx, y_idx) in lookup and lookup[prev_y_idx, y_idx][0] == curr-2:
result = max(result, (x-lookup[prev_y_idx, y_idx][1])*(y-points[i-1][1]))
lookup[prev_y_idx, y_idx] = (curr, x)
return result
# Time: O(n^2)
# Space: O(1)
# sort, brute force
| Solution |
python | joke2k__faker | faker/providers/lorem/az_AZ/__init__.py | {
"start": 68,
"end": 1860
} | class ____(LoremProvider):
"""Implement lorem provider for ``az_AZ`` locale.
Word list is based on the source(s) below with some filtering.
Sources:
- https://1000mostcommonwords.com/1000-most-common-azerbaijani-words/
"""
word_list = (
"kimi",
"mən",
"olmaq",
"at",
"bir",
"var",
"bu",
"dən",
"tərəfindən",
"isti",
"bilərsiniz",
"həyata",
"digər",
"etmək",
"onların",
"vaxt",
"əgər",
"olacaq",
"necə",
"bildirib",
"bir",
"hər",
"demək",
"yoxdur",
"dəst",
"üç",
"istəyirəm",
"hava",
"quyu",
"oynamaq",
"kiçik",
"son",
"qoymaq",
"ev",
"oxumaq",
"əl",
"port",
"böyük",
"sehr",
"əlavə",
"etmək",
"hətta",
"torpaq",
"burada",
"lazımdır",
"böyük",
"yüksək",
"belə",
"izləmək",
"akt",
"niyə",
"soruşmaq",
"oxumaq",
"dəyişiklik",
"getdi",
"yüngül",
"cür",
"müstəqil",
"ehtiyac",
"ev",
"şəkil",
"çalışmaq",
"azad",
"yenidən",
"heyvan",
"nöqtə",
"ana",
"dünya",
"yaxın",
"qurmaq",
"özü",
"torpaq",
"ata",
"hər",
"hansı",
"bir",
"yeni",
"iş",
"hissə",
"almaq",
"yer",
"etdi",
"yaşamaq",
"harada",
"sonra",
"cümlə",
"böyük",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | huggingface__transformers | tests/quantization/torchao_integration/test_torchao.py | {
"start": 34769,
"end": 35509
} | class ____(TorchAoSerializationTest):
device = f"{torch_device}:0"
# called only once for all test in this class
@classmethod
def setUpClass(cls):
super().setUpClass()
# fmt: off
cls.quant_scheme = Int4WeightOnlyConfig(**{"group_size": 32, "version": 1})
cls.quant_scheme_kwargs = {}
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): "What are we having for dinner?\n\nJessica: (smiling)",
("cuda", 7): "What are we having for dinner?\n- 1. What is the temperature outside",
}
)
# fmt: on
cls.EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
@require_torch_accelerator
| TorchAoSerializationAcceleratorTest |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 23975,
"end": 27127
} | class ____(PreTrainedModel):
config: ChineseCLIPConfig
base_model_prefix = "chinese_clip"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, ChineseCLIPVisionEmbeddings):
factor = self.config.initializer_factor
init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
elif isinstance(module, ChineseCLIPTextEmbeddings):
init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
if embedding.padding_idx is not None:
init.zeros_(embedding.weight[embedding.padding_idx])
elif isinstance(module, ChineseCLIPVisionAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
init.normal_(module.q_proj.weight, std=in_proj_std)
init.normal_(module.k_proj.weight, std=in_proj_std)
init.normal_(module.v_proj.weight, std=in_proj_std)
init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, ChineseCLIPVisionMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
init.normal_(module.fc1.weight, std=fc_std)
init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, ChineseCLIPModel):
init.normal_(
module.text_projection.weight,
std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
)
init.normal_(
module.visual_projection.weight,
std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
)
if isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
# Copied from transformers.models.align.modeling_align.AlignTextEncoder with Align->ChineseCLIP
| ChineseCLIPPreTrainedModel |
python | apache__airflow | providers/databricks/tests/unit/databricks/triggers/test_databricks.py | {
"start": 9733,
"end": 13628
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
self.end_time = time.time() + 60
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=LOGIN,
password=PASSWORD,
extra=None,
)
)
self.trigger = DatabricksSQLStatementExecutionTrigger(
statement_id=STATEMENT_ID,
databricks_conn_id=DEFAULT_CONN_ID,
polling_period_seconds=POLLING_INTERVAL_SECONDS,
end_time=self.end_time,
)
def test_serialize(self):
assert self.trigger.serialize() == (
"airflow.providers.databricks.triggers.databricks.DatabricksSQLStatementExecutionTrigger",
{
"statement_id": STATEMENT_ID,
"databricks_conn_id": DEFAULT_CONN_ID,
"end_time": self.end_time,
"polling_period_seconds": POLLING_INTERVAL_SECONDS,
"retry_delay": 10,
"retry_limit": 3,
"retry_args": None,
},
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks.DatabricksHook.a_get_sql_statement_state")
async def test_run_return_success(self, mock_a_get_sql_statement_state):
mock_a_get_sql_statement_state.return_value = SQLStatementState(state="SUCCEEDED")
trigger_event = self.trigger.run()
async for event in trigger_event:
assert event == TriggerEvent(
{
"statement_id": STATEMENT_ID,
"state": SQLStatementState(state="SUCCEEDED").to_json(),
"error": {},
}
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks.DatabricksHook.a_get_sql_statement_state")
async def test_run_return_failure(self, mock_a_get_sql_statement_state):
mock_a_get_sql_statement_state.return_value = SQLStatementState(
state="FAILED",
error_code="500",
error_message="Something went wrong",
)
trigger_event = self.trigger.run()
async for event in trigger_event:
assert event == TriggerEvent(
{
"statement_id": STATEMENT_ID,
"state": SQLStatementState(
state="FAILED",
error_code="500",
error_message="Something went wrong",
).to_json(),
"error": {
"error_code": "500",
"error_message": "Something went wrong",
},
}
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.triggers.databricks.asyncio.sleep")
@mock.patch("airflow.providers.databricks.hooks.databricks.DatabricksHook.a_get_sql_statement_state")
async def test_sleep_between_retries(self, mock_a_get_sql_statement_state, mock_sleep):
mock_a_get_sql_statement_state.side_effect = [
SQLStatementState(
state="PENDING",
),
SQLStatementState(
state="SUCCEEDED",
),
]
trigger_event = self.trigger.run()
async for event in trigger_event:
assert event == TriggerEvent(
{
"statement_id": STATEMENT_ID,
"state": SQLStatementState(state="SUCCEEDED").to_json(),
"error": {},
}
)
mock_sleep.assert_called_once()
mock_sleep.assert_called_with(POLLING_INTERVAL_SECONDS)
| TestDatabricksSQLStatementExecutionTrigger |
python | run-llama__llama_index | llama-index-integrations/graph_stores/llama-index-graph-stores-neptune/llama_index/graph_stores/neptune/database_property_graph.py | {
"start": 456,
"end": 6184
} | class ____(NeptuneBasePropertyGraph):
supports_vector_queries: bool = False
def __init__(
self,
host: str,
port: int = 8182,
client: Any = None,
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
sign: bool = True,
use_https: bool = True,
**kwargs: Any,
) -> None:
"""
Init.
Args:
host (str): The host endpoint
port (int, optional): The port. Defaults to 8182.
client (Any, optional): If provided, this is the client that will be used. Defaults to None.
credentials_profile_name (Optional[str], optional): If provided this is the credentials profile that will be used. Defaults to None.
region_name (Optional[str], optional): The region to use. Defaults to None.
sign (bool, optional): True will SigV4 sign all requests, False will not. Defaults to True.
use_https (bool, optional): True to use https, False to use http. Defaults to True.
"""
self._client = create_neptune_database_client(
host, port, client, credentials_profile_name, region_name, sign, use_https
)
def structured_query(self, query: str, param_map: Dict[str, Any] = None) -> Any:
"""
Run the structured query.
Args:
query (str): The query to run
param_map (Dict[str, Any] | None, optional): A dictionary of query parameters. Defaults to None.
Raises:
NeptuneQueryException: An exception from Neptune with details
Returns:
Any: The results of the query
"""
param_map = param_map or {}
try:
logger.debug(
f"structured_query() query: {query} parameters: {json.dumps(param_map)}"
)
return self.client.execute_open_cypher_query(
openCypherQuery=query, parameters=json.dumps(param_map)
)["results"]
except Exception as e:
raise NeptuneQueryException(
{
"message": "An error occurred while executing the query.",
"details": str(e),
"query": query,
"parameters": str(param_map),
}
)
def vector_query(self, query: VectorStoreQuery, **kwargs: Any) -> Tuple[List[Any]]:
"""
NOT SUPPORTED.
Args:
query (VectorStoreQuery): _description_
Raises:
NotImplementedError: _description_
Returns:
Tuple[List[LabelledNode] | List[float]]: _description_
"""
raise NotImplementedError
def upsert_nodes(self, nodes: List[LabelledNode]) -> None:
"""
Upsert the nodes in the graph.
Args:
nodes (List[LabelledNode]): The list of nodes to upsert
"""
# Lists to hold separated types
entity_dicts: List[dict] = []
chunk_dicts: List[dict] = []
# Sort by type
for item in nodes:
if isinstance(item, EntityNode):
entity_dicts.append({**item.dict(), "id": item.id})
elif isinstance(item, ChunkNode):
chunk_dicts.append({**item.dict(), "id": item.id})
else:
# Log that we do not support these types of nodes
# Or raise an error?
pass
if chunk_dicts:
for d in chunk_dicts:
self.structured_query(
"""
WITH $data AS row
MERGE (c:Chunk {id: row.id})
SET c.text = row.text
SET c += removeKeyFromMap(row.properties, '')
RETURN count(*)
""",
param_map={"data": d},
)
if entity_dicts:
for d in entity_dicts:
self.structured_query(
f"""
WITH $data AS row
MERGE (e:`{BASE_NODE_LABEL}` {{id: row.id}})
SET e += removeKeyFromMap(row.properties, '')
SET e.name = row.name, e:`{BASE_ENTITY_LABEL}`
SET e:`{d["label"]}`
WITH e, row
WHERE removeKeyFromMap(row.properties, '').triplet_source_id IS NOT NULL
MERGE (c:Chunk {{id: removeKeyFromMap(row.properties, '').triplet_source_id}})
MERGE (e)<-[:MENTIONS]-(c)
RETURN count(*) as count
""",
param_map={"data": d},
)
def _get_summary(self) -> Dict:
"""
Get the Summary of the graph schema.
Returns:
Dict: The graph summary
"""
try:
response = self.client.get_propertygraph_summary()
except Exception as e:
raise NeptuneQueryException(
{
"message": (
"Summary API is not available for this instance of Neptune,"
"ensure the engine version is >=1.2.1.0"
),
"details": str(e),
}
)
try:
summary = response["payload"]["graphSummary"]
except Exception:
raise NeptuneQueryException(
{
"message": "Summary API did not return a valid response.",
"details": response.content.decode(),
}
)
else:
return summary
| NeptuneDatabasePropertyGraphStore |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 13372,
"end": 14030
} | class ____(_GenerativeProvider):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.COHERE, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
kProperty: Optional[int]
model: Optional[str]
maxTokensProperty: Optional[int]
returnLikelihoodsProperty: Optional[str]
stopSequencesProperty: Optional[List[str]]
temperatureProperty: Optional[float]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
| _GenerativeCohereConfig |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-sbert-rerank/llama_index/postprocessor/sbert_rerank/base.py | {
"start": 656,
"end": 5725
} | class ____(BaseNodePostprocessor):
"""
HuggingFace class for cross encoding two sentences/texts.
Args:
model (str): A model name from Hugging Face Hub that can be loaded with AutoModel, or a path to a local model.
device (str, optional): Device (like “cuda”, “cpu”, “mps”, “npu”) that should be used for computation.
If None, checks if a GPU can be used.
cache_folder (str, Path, optional): Path to the folder where cached files are stored. Defaults to None.
top_n (int): Number of nodes to return sorted by score. Defaults to 2.
keep_retrieval_score (bool, optional): Whether to keep the retrieval score in metadata. Defaults to False.
cross_encoder_kwargs (dict, optional): Additional keyword arguments for CrossEncoder initialization. Defaults to None.
"""
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
cross_encoder_kwargs: dict = Field(
default_factory=dict,
description="Additional keyword arguments for CrossEncoder initialization. "
"device and model should not be included here.",
)
_model: Any = PrivateAttr()
_device: str = PrivateAttr()
def __init__(
self,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
cache_folder: Optional[Union[str, Path]] = None,
top_n: int = 2,
keep_retrieval_score: Optional[bool] = False,
cross_encoder_kwargs: Optional[dict] = None,
):
try:
from sentence_transformers import CrossEncoder
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
cross_encoder_kwargs=cross_encoder_kwargs or {},
)
init_kwargs = self.cross_encoder_kwargs.copy()
if "device" in init_kwargs or "model" in init_kwargs:
raise ValueError(
"'device' and 'model' should not be specified in 'cross_encoder_kwargs'. "
"Use the top-level 'device' and 'model' parameters instead."
)
# Set default max_length if not provided by the user in kwargs.
if "max_length" not in init_kwargs:
init_kwargs["max_length"] = DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH
# Explicit arguments from the constructor take precedence over kwargs
resolved_device = infer_torch_device() if device is None else device
init_kwargs["device"] = resolved_device
self._device = resolved_device
if cache_folder:
init_kwargs["cache_folder"] = cache_folder
self._model = CrossEncoder(
model_name_or_path=model,
**init_kwargs,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
| SentenceTransformerRerank |
python | realpython__materials | python-practice-problems/caesar.py | {
"start": 831,
"end": 2079
} | class ____(unittest.TestCase):
def test_a(self):
start = "aaa"
result = caesar(start, 1)
self.assertEqual(result, "bbb")
result = caesar(start, 5)
self.assertEqual(result, "fff")
def test_punctuation(self):
start = "aaa.bbb"
result = caesar(start, 1)
self.assertEqual(result, "bbb.ccc")
result = caesar(start, -1)
self.assertEqual(result, "zzz.aaa")
def test_whitespace(self):
start = "aaa bb b"
result = caesar(start, 1)
self.assertEqual(result, "bbb cc c")
result = caesar(start, 3)
self.assertEqual(result, "ddd ee e")
def test_wraparound(self):
start = "abc"
result = caesar(start, -1)
self.assertEqual(result, "zab")
result = caesar(start, -2)
self.assertEqual(result, "yza")
result = caesar(start, -3)
self.assertEqual(result, "xyz")
start = "xyz"
result = caesar(start, 1)
self.assertEqual(result, "yza")
result = caesar(start, 2)
self.assertEqual(result, "zab")
result = caesar(start, 3)
self.assertEqual(result, "abc")
if __name__ == "__main__":
unittest.main()
| CaesarTestCase |
python | nedbat__coveragepy | coverage/parser.py | {
"start": 24078,
"end": 46313
} | class ____:
"""Analyze source text with an AST to find executable code paths.
The .analyze() method does the work, and populates these attributes:
`arcs`: a set of (from, to) pairs of the the arcs possible in the code.
`missing_arc_fragments`: a dict mapping (from, to) arcs to lists of
message fragments explaining why the arc is missing from execution::
{ (start, end): [(missing_cause_msg, action_msg), ...], }
For an arc starting from line 17, they should be usable to form complete
sentences like: "Line 17 didn't {action_msg} because {missing_cause_msg}".
NOTE: Starting in July 2024, I've been whittling this down to only report
arc that are part of true branches. It's not clear how far this work will
go.
"""
def __init__(
self,
filename: str,
root_node: ast.AST,
statements: set[TLineNo],
multiline: dict[TLineNo, TLineNo],
) -> None:
self.filename = filename
self.root_node = root_node
self.statements = {multiline.get(l, l) for l in statements}
self.multiline = multiline
# Turn on AST dumps with an environment variable.
# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0")))
if dump_ast: # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print(f"Statements: {self.statements}")
print(f"Multiline map: {self.multiline}")
print(ast.dump(self.root_node, include_attributes=True, indent=4))
self.arcs: set[TArc] = set()
self.missing_arc_fragments: TArcFragments = collections.defaultdict(list)
self.block_stack: list[Block] = []
# If `with` clauses jump to their start on the way out, we need
# information to be able to skip over that jump. We record the arcs
# from `with` into the clause (with_entries), and the arcs from the
# clause to the `with` (with_exits).
self.current_with_starts: set[TLineNo] = set()
self.all_with_starts: set[TLineNo] = set()
self.with_entries: set[TArc] = set()
self.with_exits: set[TArc] = set()
# $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code.
self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0")))
def analyze(self) -> None:
"""Examine the AST tree from `self.root_node` to determine possible arcs."""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, f"_code_object__{node_name}", None)
if code_object_handler is not None:
code_object_handler(node)
def with_jump_fixers(self) -> dict[TArc, tuple[TArc, TArc]]:
"""Get a dict with data for fixing jumps out of with statements.
Returns a dict. The keys are arcs leaving a with-statement by jumping
back to its start. The values are pairs: first, the arc from the start
to the next statement, then the arc that exits the with without going
to the start.
"""
fixers = {}
with_nexts = {
arc
for arc in self.arcs
if arc[0] in self.all_with_starts and arc not in self.with_entries
}
for start in self.all_with_starts:
nexts = {arc[1] for arc in with_nexts if arc[0] == start}
if not nexts:
continue
assert len(nexts) == 1, f"Expected one arc, got {nexts} with {start = }"
nxt = nexts.pop()
ends = {arc[0] for arc in self.with_exits if arc[1] == start}
for end in ends:
fixers[(end, start)] = ((start, nxt), (end, nxt))
return fixers
# Code object dispatchers: _code_object__*
#
# These methods are used by analyze() as the start of the analysis.
# There is one for each construct with a code object.
def _code_object__Module(self, node: ast.Module) -> None:
start = self.line_for_node(node)
if node.body:
exits = self.process_body(node.body)
for xit in exits:
self.add_arc(xit.lineno, -start, xit.cause, "exit the module")
else:
# Empty module.
self.add_arc(start, -start)
def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None:
start = self.line_for_node(node)
self.block_stack.append(FunctionBlock(start=start, name=node.name))
exits = self.process_body(node.body)
self.process_return_exits(exits)
self.block_stack.pop()
_code_object__AsyncFunctionDef = _code_object__FunctionDef
def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
start = self.line_for_node(node)
exits = self.process_body(node.body)
for xit in exits:
self.add_arc(xit.lineno, -start, xit.cause, f"exit class {node.name!r}")
def add_arc(
self,
start: TLineNo,
end: TLineNo,
missing_cause_msg: str | None = None,
action_msg: str | None = None,
) -> None:
"""Add an arc, including message fragments to use if it is missing."""
if self.debug: # pragma: debugging
print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}")
print(short_stack(), end="\n\n")
self.arcs.add((start, end))
if start in self.current_with_starts:
self.with_entries.add((start, end))
if missing_cause_msg is not None or action_msg is not None:
self.missing_arc_fragments[(start, end)].append((missing_cause_msg, action_msg))
def nearest_blocks(self) -> Iterable[Block]:
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
def line_for_node(self, node: ast.AST) -> TLineNo:
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
handler = cast(
Optional[Callable[[ast.AST], TLineNo]],
getattr(self, f"_line__{node_name}", None),
)
if handler is not None:
line = handler(node)
else:
line = node.lineno # type: ignore[attr-defined]
return self.multiline.get(line, line)
# First lines: _line__*
#
# Dispatched by line_for_node, each method knows how to identify the first
# line number in the node, as Python will report it.
def _line_decorated(self, node: ast.FunctionDef) -> TLineNo:
"""Compute first line number for things that can be decorated (classes and functions)."""
if node.decorator_list:
lineno = node.decorator_list[0].lineno
else:
lineno = node.lineno
return lineno
def _line__Assign(self, node: ast.Assign) -> TLineNo:
return self.line_for_node(node.value)
_line__ClassDef = _line_decorated
def _line__Dict(self, node: ast.Dict) -> TLineNo:
if node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
# Unpacked dict literals `{**{"a":1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
return node.lineno
_line__FunctionDef = _line_decorated
_line__AsyncFunctionDef = _line_decorated
def _line__List(self, node: ast.List) -> TLineNo:
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
def _line__Module(self, node: ast.Module) -> TLineNo: # pylint: disable=unused-argument
return 1
# The node types that just flow to the next node with no complications.
OK_TO_DEFAULT = {
"AnnAssign",
"Assign",
"Assert",
"AugAssign",
"Delete",
"Expr",
"Global",
"Import",
"ImportFrom",
"Nonlocal",
"Pass",
}
def node_exits(self, node: ast.AST) -> set[ArcStart]:
"""Find the set of arc starts that exit this node.
Return a set of ArcStarts, exits from this node to the next. Because a
node represents an entire sub-tree (including its children), the exits
from a node can be arbitrarily complex::
if something(1):
if other(2):
doit(3)
else:
doit(5)
There are three exits from line 1: they start at lines 1, 3 and 5.
There are two exits from line 2: lines 3 and 5.
"""
node_name = node.__class__.__name__
handler = cast(
Optional[Callable[[ast.AST], set[ArcStart]]],
getattr(self, f"_handle__{node_name}", None),
)
if handler is not None:
arc_starts = handler(node)
else:
# No handler: either it's something that's ok to default (a simple
# statement), or it's something we overlooked.
if env.TESTING:
if node_name not in self.OK_TO_DEFAULT:
raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure
# Default for simple statements: one exit from this node.
arc_starts = {ArcStart(self.line_for_node(node))}
return arc_starts
def process_body(
self,
body: Sequence[ast.AST],
from_start: ArcStart | None = None,
prev_starts: set[ArcStart] | None = None,
) -> set[ArcStart]:
"""Process the body of a compound statement.
`body` is the body node to process.
`from_start` is a single `ArcStart` that starts an arc into this body.
`prev_starts` is a set of ArcStarts that can all be the start of arcs
into this body. Only one of `from_start` and `prev_starts` should be
given.
Records arcs within the body by calling `self.add_arc`.
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
if from_start is None:
prev_starts = set()
else:
prev_starts = {from_start}
else:
assert from_start is None
# Loop over the nodes in the body, making arcs from each one's exits to
# the next node.
for body_node in body:
lineno = self.line_for_node(body_node)
if lineno not in self.statements:
continue
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.node_exits(body_node)
return prev_starts
# Exit processing: process_*_exits
#
# These functions process the four kinds of jump exits: break, continue,
# raise, and return. To figure out where an exit goes, we have to look at
# the block stack context. For example, a break will jump to the nearest
# enclosing loop block, or the nearest enclosing finally block, whichever
# is nearer.
def process_break_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_break_exits(exits, self.add_arc):
break
def process_continue_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_continue_exits(exits, self.add_arc):
break
def process_raise_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if block.process_raise_exits(exits, self.add_arc):
break
def process_return_exits(self, exits: set[ArcStart]) -> None:
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks(): # pragma: always breaks
if block.process_return_exits(exits, self.add_arc):
break
# Node handlers: _handle__*
#
# Each handler deals with a specific AST node type, dispatched from
# node_exits. Handlers return the set of exits from that node, and can
# also call self.add_arc to record arcs they find. These functions mirror
# the Python semantics of each syntactic construct. See the docstring
# for node_exits to understand the concept of exits from a node.
#
# Every node type that represents a statement should have a handler, or it
# should be listed in OK_TO_DEFAULT.
def _handle__Break(self, node: ast.Break) -> set[ArcStart]:
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits({break_start})
return set()
def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]:
"""Add arcs for things that can be decorated (classes and functions)."""
main_line: TLineNo = node.lineno
last: TLineNo | None = node.lineno
decs = node.decorator_list
if decs:
last = None
for dec_node in decs:
dec_start = self.line_for_node(dec_node)
if last is not None and dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
assert last is not None
self.add_arc(last, main_line)
last = main_line
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
assert node.body, f"Oops: {node.body = } in {self.filename}@{node.lineno}"
# The body is handled in collect_arcs.
assert last is not None
return {ArcStart(last)}
_handle__ClassDef = _handle_decorated
def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]:
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits({continue_start})
return set()
def _handle__For(self, node: ast.For) -> set[ArcStart]:
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
exits = self.process_body(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
assert isinstance(my_block, LoopBlock)
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
else_exits = self.process_body(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No else clause: exit from the for line.
exits.add(from_start)
return exits
_handle__AsyncFor = _handle__For
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
def _handle__If(self, node: ast.If) -> set[ArcStart]:
start = self.line_for_node(node.test)
constant_test, val = is_constant_test_expr(node.test)
exits = set()
if not constant_test or val:
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits |= self.process_body(node.body, from_start=from_start)
if not constant_test or not val:
from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
exits |= self.process_body(node.orelse, from_start=from_start)
return exits
def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
start = self.line_for_node(node)
last_start = start
exits = set()
for case in node.cases:
case_start = self.line_for_node(case.pattern)
self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
from_start = ArcStart(
case_start,
cause="the pattern on line {lineno} never matched",
)
exits |= self.process_body(case.body, from_start=from_start)
last_start = case_start
# case is now the last case, check for wildcard match.
pattern = case.pattern # pylint: disable=undefined-loop-variable
while isinstance(pattern, ast.MatchOr):
pattern = pattern.patterns[-1]
while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
pattern = pattern.pattern
had_wildcard = (
isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None # pylint: disable=undefined-loop-variable
)
if not had_wildcard:
exits.add(
ArcStart(case_start, cause="the pattern on line {lineno} always matched"),
)
return exits
def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]:
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits({raise_start})
# `raise` statement jumps away, no exits from here.
return set()
def _handle__Return(self, node: ast.Return) -> set[ArcStart]:
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits({return_start})
# `return` statement jumps away, no exits from here.
return set()
def _handle__Try(self, node: ast.Try) -> set[ArcStart]:
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
handler_start = None
if node.finalbody:
final_start = self.line_for_node(node.finalbody[0])
else:
final_start = None
# This is true by virtue of Python syntax: have to have either except
# or finally, or both.
assert handler_start is not None or final_start is not None
try_block = TryBlock(handler_start, final_start)
self.block_stack.append(try_block)
start = self.line_for_node(node)
exits = self.process_body(node.body, from_start=ArcStart(start))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
else:
self.block_stack.pop()
handler_exits: set[ArcStart] = set()
if node.handlers:
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
handler_exits |= self.process_body(handler_node.body, from_start=from_start)
if node.orelse:
exits = self.process_body(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
final_from = exits
final_exits = self.process_body(node.finalbody, prev_starts=final_from)
if exits:
# The finally clause's exits are only exits for the try block
# as a whole if the try block had some exits to begin with.
exits = final_exits
return exits
_handle__TryStar = _handle__Try
def _handle__While(self, node: ast.While) -> set[ArcStart]:
start = to_top = self.line_for_node(node.test)
constant_test, _ = is_constant_test_expr(node.test)
self.block_stack.append(LoopBlock(start=to_top))
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.process_body(node.body, from_start=from_start)
for xit in exits:
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
assert isinstance(my_block, LoopBlock)
exits.update(my_block.break_exits)
from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
if node.orelse:
else_exits = self.process_body(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No `else` clause: you can exit from the start.
if not constant_test:
exits.add(from_start)
return exits
def _handle__With(self, node: ast.With) -> set[ArcStart]:
if env.PYBEHAVIOR.exit_with_through_ctxmgr:
starts = [self.line_for_node(item.context_expr) for item in node.items]
else:
starts = [self.line_for_node(node)]
for start in starts:
self.current_with_starts.add(start)
self.all_with_starts.add(start)
exits = self.process_body(node.body, from_start=ArcStart(starts[-1]))
start = starts[-1]
self.current_with_starts.remove(start)
with_exit = {ArcStart(start)}
if exits:
for xit in exits:
self.add_arc(xit.lineno, start)
self.with_exits.add((xit.lineno, start))
exits = with_exit
return exits
_handle__AsyncWith = _handle__With
| AstArcAnalyzer |
python | numba__numba | numba/cuda/tests/cudapy/test_userexc.py | {
"start": 109,
"end": 246
} | class ____(Exception):
pass
regex_pattern = (
r'In function [\'"]test_exc[\'"], file [\:\.\/\\\-a-zA-Z_0-9]+, line \d+'
)
| MyError |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 5586,
"end": 5725
} | class ____(ParentI):
def f(self):
super: "str"
builtins.super(ChildI4, self).f() # no __class__ in the local scope
| ChildI4 |
python | Lightning-AI__lightning | tests/tests_pytorch/test_cli.py | {
"start": 3714,
"end": 8649
} | class ____(LightningModule):
def __init__(self, model_param: int):
super().__init__()
self.model_param = model_param
def _model_builder(model_param: int) -> Model:
return Model(model_param)
def _trainer_builder(
limit_train_batches: int, fast_dev_run: bool = False, callbacks: Optional[Union[list[Callback], Callback]] = None
) -> Trainer:
return Trainer(limit_train_batches=limit_train_batches, fast_dev_run=fast_dev_run, callbacks=callbacks)
@pytest.mark.parametrize(("trainer_class", "model_class"), [(Trainer, Model), (_trainer_builder, _model_builder)])
def test_lightning_cli(trainer_class, model_class, monkeypatch):
"""Test that LightningCLI correctly instantiates model, trainer and calls fit."""
expected_model = {"model_param": 7}
expected_trainer = {"limit_train_batches": 100}
def fit(trainer, model):
for k, v in expected_model.items():
assert getattr(model, k) == v
for k, v in expected_trainer.items():
assert getattr(trainer, k) == v
save_callback = [x for x in trainer.callbacks if isinstance(x, SaveConfigCallback)]
assert len(save_callback) == 1
save_callback[0].on_train_start(trainer, model)
def on_train_start(callback, trainer, _):
config_dump = callback.parser.dump(callback.config, skip_none=False)
for k, v in expected_model.items():
assert f" {k}: {v}" in config_dump
for k, v in expected_trainer.items():
assert f" {k}: {v}" in config_dump
trainer.ran_asserts = True
monkeypatch.setattr(Trainer, "fit", fit)
monkeypatch.setattr(SaveConfigCallback, "on_train_start", on_train_start)
with mock.patch("sys.argv", ["any.py", "fit", "--model.model_param=7", "--trainer.limit_train_batches=100"]):
cli = LightningCLI(model_class, trainer_class=trainer_class, save_config_callback=SaveConfigCallback)
assert hasattr(cli.trainer, "ran_asserts")
assert cli.trainer.ran_asserts
def test_lightning_cli_args_callbacks(cleandir):
callbacks = [
{
"class_path": "lightning.pytorch.callbacks.LearningRateMonitor",
"init_args": {"logging_interval": "epoch", "log_momentum": True},
},
{"class_path": "lightning.pytorch.callbacks.ModelCheckpoint", "init_args": {"monitor": "NAME"}},
]
class TestModel(BoringModel):
def on_fit_start(self):
callback = [c for c in self.trainer.callbacks if isinstance(c, LearningRateMonitor)]
assert len(callback) == 1
assert callback[0].logging_interval == "epoch"
assert callback[0].log_momentum is True
callback = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]
assert len(callback) == 1
assert callback[0].monitor == "NAME"
self.trainer.ran_asserts = True
with mock.patch("sys.argv", ["any.py", "fit", f"--trainer.callbacks={json.dumps(callbacks)}"]):
cli = LightningCLI(
TestModel, trainer_defaults={"fast_dev_run": True, "logger": lazy_instance(CSVLogger, save_dir=".")}
)
assert cli.trainer.ran_asserts
def test_lightning_cli_single_arg_callback():
with mock.patch("sys.argv", ["any.py", "--trainer.callbacks=DeviceStatsMonitor"]):
cli = LightningCLI(BoringModel, run=False)
assert cli.config.trainer.callbacks.class_path == "lightning.pytorch.callbacks.DeviceStatsMonitor"
assert not isinstance(cli.config_init.trainer, list)
@pytest.mark.parametrize("run", [False, True])
def test_lightning_cli_configurable_callbacks(cleandir, run):
class MyLightningCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.add_lightning_class_args(LearningRateMonitor, "learning_rate_monitor")
def fit(self, **_):
pass
cli_args = ["fit"] if run else []
cli_args += ["--learning_rate_monitor.logging_interval=epoch"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = MyLightningCLI(BoringModel, run=run)
callback = [c for c in cli.trainer.callbacks if isinstance(c, LearningRateMonitor)]
assert len(callback) == 1
assert callback[0].logging_interval == "epoch"
def test_lightning_cli_args_cluster_environments(cleandir):
plugins = [{"class_path": "lightning.fabric.plugins.environments.SLURMEnvironment"}]
class TestModel(BoringModel):
def on_fit_start(self):
# Ensure SLURMEnvironment is set, instead of default LightningEnvironment
assert isinstance(self.trainer._accelerator_connector.cluster_environment, SLURMEnvironment)
self.trainer.ran_asserts = True
with mock.patch("sys.argv", ["any.py", "fit", f"--trainer.plugins={json.dumps(plugins)}"]):
cli = LightningCLI(TestModel, trainer_defaults={"fast_dev_run": True})
assert cli.trainer.ran_asserts
| Model |
python | ApeWorX__ape | src/ape_test/accounts.py | {
"start": 411,
"end": 3133
} | class ____(TestAccountContainerAPI):
generated_accounts: list["TestAccount"] = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __len__(self) -> int:
return self.number_of_accounts + len(self.generated_accounts)
@property
def mnemonic(self) -> str:
# Overridden so we can overload the setter.
return self.config_manager.test.mnemonic
@mnemonic.setter
def mnemonic(self, mnemonic: str) -> None:
# Overridden so we can also clear out generated accounts cache.
self.config_manager.test.mnemonic = mnemonic
self.generated_accounts = []
@mnemonic.setter
def mnemonic(self, mnemonic: str) -> None:
self.config_manager.test.mnemonic = mnemonic
self.generated_accounts = []
@property
def config(self):
return self.config_manager.get_config("test")
@property
def aliases(self) -> Iterator[str]:
for index in range(self.number_of_accounts):
yield f"TEST::{index}"
@property
def accounts(self) -> Iterator["TestAccount"]:
for index in range(self.number_of_accounts):
yield cast(TestAccount, self.get_test_account(index))
def get_test_account(self, index: int) -> TestAccountAPI:
if index >= self.number_of_accounts:
new_index = index - self.number_of_accounts
return self.generated_accounts[new_index]
try:
return self.provider.get_test_account(index)
except (NotImplementedError, ProviderNotConnectedError):
return self.generate_account(index=index)
def generate_account(self, index: Optional[int] = None) -> "TestAccountAPI":
new_index = (
self.number_of_accounts + len(self.generated_accounts) if index is None else index
)
generated_account = generate_dev_accounts(
self.mnemonic, 1, hd_path=self.hd_path, start_index=new_index
)[0]
account = self.init_test_account(
new_index, generated_account.address, generated_account.private_key
)
# Only cache if being created outside the expected number of accounts.
# Else, ends up cached twice and caused logic problems elsewhere.
if new_index >= self.number_of_accounts:
self.generated_accounts.append(account)
return account
@classmethod
def init_test_account(cls, index: int, address: AddressType, private_key: str) -> "TestAccount":
return TestAccount(
index=index,
address_str=address,
private_key=private_key,
)
def reset(self):
self.generated_accounts = []
| TestAccountContainer |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/cloud/test_jobs.py | {
"start": 24020,
"end": 24244
} | class ____:
def test_run(self):
assert get_run_id.fn({"id": 42}) == 42
def test_fail(self):
with pytest.raises(RuntimeError, match="Unable to determine run"):
get_run_id.fn({})
| TestGetRunId |
python | ray-project__ray | rllib/utils/filter.py | {
"start": 504,
"end": 1391
} | class ____:
"""Processes input, possibly statefully."""
def apply_changes(self, other: "Filter", *args, **kwargs) -> None:
"""Updates self with "new state" from other filter."""
raise NotImplementedError
def copy(self) -> "Filter":
"""Creates a new object with same state as self.
Returns:
A copy of self.
"""
raise NotImplementedError
def sync(self, other: "Filter") -> None:
"""Copies all state from other filter to self."""
raise NotImplementedError
def reset_buffer(self) -> None:
"""Creates copy of current state and resets accumulated state"""
raise NotImplementedError
def as_serializable(self) -> "Filter":
raise NotImplementedError
@Deprecated(new="Filter.reset_buffer()", error=True)
def clear_buffer(self):
pass
@OldAPIStack
| Filter |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/atomic_function_test.py | {
"start": 1364,
"end": 5935
} | class ____(test.TestCase):
def test_call_eager(self):
definition, func_type = get_function_def_and_type(
lambda x, y: x + y, (constant_op.constant(1), constant_op.constant(2))
)
atomic = atomic_function.from_function_def(definition, func_type)
self.assertRegex(
str(atomic),
r"<AtomicFunction> .*(x: TensorSpec.*, y: TensorSpec.*) ->"
r" TensorSpec.*",
)
self.assertRegex(
repr(atomic).replace("\n", " "),
r"AtomicFunction.*name.*bound_context.*function_type.*"
r"children.*call_options.*cached_graph.*",
)
self.assertEqual(
atomic.call_flat(constant_op.constant(3), constant_op.constant(4))[
0
].numpy(),
7,
)
def test_call_graph(self):
definition, func_type = get_function_def_and_type(
lambda x, y: x + y, (constant_op.constant(1), constant_op.constant(2))
)
atomic = atomic_function.from_function_def(definition, func_type)
@polymorphic_function.function
def foo(a, b):
return atomic.call_flat(a, b)[0]
self.assertEqual(
foo(constant_op.constant(3), constant_op.constant(4)).numpy(),
7,
)
def test_variable_input_eager(self):
definition, func_type = get_function_def_and_type(
lambda x, y: x + y,
(resource_variable_ops.ResourceVariable(1), constant_op.constant(2)),
)
atomic = atomic_function.from_function_def(definition, func_type)
self.assertEqual(
atomic.call_flat(
resource_variable_ops.ResourceVariable(3)._handle,
constant_op.constant(4),
)[0].numpy(),
7,
)
def test_variable_input_graph(self):
definition, func_type = get_function_def_and_type(
lambda x, y: x + y,
(resource_variable_ops.ResourceVariable(1), constant_op.constant(2)),
)
atomic = atomic_function.from_function_def(definition, func_type)
@polymorphic_function.function
def foo(a, b):
return atomic.call_flat(a, b)[0]
self.assertEqual(
foo(
resource_variable_ops.ResourceVariable(3)._handle,
constant_op.constant(4),
).numpy(),
7,
)
def test_call_with_captures(self):
my_capture = constant_op.constant(2)
@polymorphic_function.function
def foo(x):
my_dict = {}
my_dict["my_tensor"] = x["my_tensor"]
my_dict["my_resource"] = x["my_variable"].handle
my_dict["my_capture"] = my_capture
my_dict["my_ints"] = x["my_ints"]
return my_dict
structured_inputs = {
"my_tensor": constant_op.constant(1),
"my_variable": resource_variable_ops.ResourceVariable(1),
"my_ints": [1, 2, 3],
}
function_def, function_type = get_function_def_and_type(
foo, (structured_inputs,)
)
atomic = atomic_function.from_function_def(function_def, function_type)
with self.assertRaisesRegex(ValueError, "Use call_with_captures instead."):
atomic(structured_inputs)
result = atomic.call_with_captures((structured_inputs,), {}, [my_capture])
self.assertEqual(
result["my_tensor"].numpy(), structured_inputs["my_tensor"].numpy()
)
self.assertEqual(result["my_resource"].dtype, dtypes.resource)
self.assertEqual(result["my_capture"].numpy(), my_capture.numpy())
self.assertEqual(result["my_ints"][0].numpy(), 1)
self.assertEqual(result["my_ints"][1].numpy(), 2)
self.assertEqual(result["my_ints"][2].numpy(), 3)
def test_call(self):
@polymorphic_function.function
def foo(x):
my_dict = {}
my_dict["my_tensor"] = x["my_tensor"]
my_dict["my_resource"] = x["my_variable"].handle
my_dict["my_ints"] = x["my_ints"]
return my_dict
structured_inputs = {
"my_tensor": constant_op.constant(1),
"my_variable": resource_variable_ops.ResourceVariable(1),
"my_ints": [1, 2, 3],
}
function_def, function_type = get_function_def_and_type(
foo, (structured_inputs,)
)
atomic = atomic_function.from_function_def(function_def, function_type)
result = atomic(structured_inputs)
self.assertEqual(
result["my_tensor"].numpy(), structured_inputs["my_tensor"].numpy()
)
self.assertEqual(result["my_resource"].dtype, dtypes.resource)
self.assertEqual(result["my_ints"][0].numpy(), 1)
self.assertEqual(result["my_ints"][1].numpy(), 2)
self.assertEqual(result["my_ints"][2].numpy(), 3)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| AtomicFunctionTest |
python | getsentry__sentry | src/sentry/integrations/utils/codecov.py | {
"start": 4872,
"end": 6671
} | class ____(TypedDict):
lineCoverage: NotRequired[LineCoverage]
coverageUrl: NotRequired[str]
status: NotRequired[int]
attemptedUrl: NotRequired[str]
def fetch_codecov_data(config: CodecovConfig) -> CodecovData:
data: CodecovData = {}
try:
# Check if there's an error in the outcome or if sourcePath is missing
if "error" in config["outcome"] or "sourcePath" not in config["outcome"]:
data = {"status": status.HTTP_400_BAD_REQUEST}
return data
repo = config["repository"].name
service = config["config"]["provider"]["key"]
path = config["outcome"]["sourcePath"]
lineCoverage, codecovUrl = get_codecov_data(repo, service, path)
if lineCoverage and codecovUrl:
data = {
"lineCoverage": lineCoverage,
"coverageUrl": codecovUrl,
"status": status.HTTP_200_OK,
}
except requests.exceptions.HTTPError as error:
data = {
"attemptedUrl": error.response.url,
"status": error.response.status_code,
}
# Do not report an error when coverage is not found
if error.response.status_code != status.HTTP_404_NOT_FOUND:
logger.exception("Codecov HTTP error: %s", error.response.status_code)
except requests.Timeout:
scope = Scope.get_isolation_scope()
scope.set_tag("codecov.timeout", True)
scope.set_tag("codecov.timeout_secs", CODECOV_TIMEOUT)
scope.set_tag("codecov.http_code", status.HTTP_408_REQUEST_TIMEOUT)
data = {"status": status.HTTP_408_REQUEST_TIMEOUT}
except Exception as error:
data = {"status": status.HTTP_500_INTERNAL_SERVER_ERROR}
logger.exception(str(error))
return data
| CodecovData |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1557,
"end": 1594
} | class ____(Token):
id = '?'
| KeyToken |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_fiscal.py | {
"start": 5001,
"end": 11792
} | class ____:
def test_get_year_end(self):
assert makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SAT
).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 8, 31)
assert makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SUN
).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 9, 1)
assert makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.FRI
).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 8, 30)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12, variation="nearest")
assert offset_n.get_year_end(datetime(2012, 1, 1)) == datetime(2013, 1, 1)
assert offset_n.get_year_end(datetime(2012, 1, 10)) == datetime(2013, 1, 1)
assert offset_n.get_year_end(datetime(2013, 1, 1)) == datetime(2013, 12, 31)
assert offset_n.get_year_end(datetime(2013, 1, 2)) == datetime(2013, 12, 31)
assert offset_n.get_year_end(datetime(2013, 1, 3)) == datetime(2013, 12, 31)
assert offset_n.get_year_end(datetime(2013, 1, 10)) == datetime(2013, 12, 31)
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
assert JNJ.get_year_end(datetime(2006, 1, 1)) == datetime(2006, 12, 31)
offset_lom_aug_sat = makeFY5253NearestEndMonth(
1, startingMonth=8, weekday=WeekDay.SAT
)
offset_lom_aug_thu = makeFY5253NearestEndMonth(
1, startingMonth=8, weekday=WeekDay.THU
)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12, variation="nearest")
on_offset_cases = [
# From Wikipedia (see:
# https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
# #Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
# 2008-08-30 2008 August 30 (leap year)
# 2009-08-29 2009 August 29
# 2010-08-28 2010 August 28
# 2011-09-03 2011 September 3
# 2012-09-01 2012 September 1 (leap year)
# 2013-08-31 2013 August 31
# 2014-08-30 2014 August 30
# 2015-08-29 2015 August 29
# 2016-09-03 2016 September 3 (leap year)
# 2017-09-02 2017 September 2
# 2018-09-01 2018 September 1
# 2019-08-31 2019 August 31
(offset_lom_aug_sat, datetime(2006, 9, 2), True),
(offset_lom_aug_sat, datetime(2007, 9, 1), True),
(offset_lom_aug_sat, datetime(2008, 8, 30), True),
(offset_lom_aug_sat, datetime(2009, 8, 29), True),
(offset_lom_aug_sat, datetime(2010, 8, 28), True),
(offset_lom_aug_sat, datetime(2011, 9, 3), True),
(offset_lom_aug_sat, datetime(2016, 9, 3), True),
(offset_lom_aug_sat, datetime(2017, 9, 2), True),
(offset_lom_aug_sat, datetime(2018, 9, 1), True),
(offset_lom_aug_sat, datetime(2019, 8, 31), True),
(offset_lom_aug_sat, datetime(2006, 8, 27), False),
(offset_lom_aug_sat, datetime(2007, 8, 28), False),
(offset_lom_aug_sat, datetime(2008, 8, 31), False),
(offset_lom_aug_sat, datetime(2009, 8, 30), False),
(offset_lom_aug_sat, datetime(2010, 8, 29), False),
(offset_lom_aug_sat, datetime(2011, 8, 28), False),
(offset_lom_aug_sat, datetime(2006, 8, 25), False),
(offset_lom_aug_sat, datetime(2007, 8, 24), False),
(offset_lom_aug_sat, datetime(2008, 8, 29), False),
(offset_lom_aug_sat, datetime(2009, 8, 28), False),
(offset_lom_aug_sat, datetime(2010, 8, 27), False),
(offset_lom_aug_sat, datetime(2011, 8, 26), False),
(offset_lom_aug_sat, datetime(2019, 8, 30), False),
# From Micron, see:
# http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
def test_apply(self):
date_seq_nem_8_sat = [
datetime(2006, 9, 2),
datetime(2007, 9, 1),
datetime(2008, 8, 30),
datetime(2009, 8, 29),
datetime(2010, 8, 28),
datetime(2011, 9, 3),
]
JNJ = [
datetime(2005, 1, 2),
datetime(2006, 1, 1),
datetime(2006, 12, 31),
datetime(2007, 12, 30),
datetime(2008, 12, 28),
datetime(2010, 1, 3),
datetime(2011, 1, 2),
datetime(2012, 1, 1),
datetime(2012, 12, 30),
]
DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5, variation="nearest")
tests = [
(
makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
date_seq_nem_8_sat,
),
(
makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT),
date_seq_nem_8_sat,
),
(
makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
[datetime(2006, 9, 1)] + date_seq_nem_8_sat,
),
(
makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT),
[datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:],
),
(
makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT),
list(reversed(date_seq_nem_8_sat)),
),
(
makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),
JNJ,
),
(
makeFY5253NearestEndMonth(n=-1, startingMonth=12, weekday=WeekDay.SUN),
list(reversed(JNJ)),
),
(
makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),
[datetime(2005, 1, 2), datetime(2006, 1, 1)],
),
(
makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),
[datetime(2006, 1, 2), datetime(2006, 12, 31)],
),
(DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)]),
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
assert current == datum
| TestFY5253NearestEndMonth |
python | apache__airflow | providers/elasticsearch/src/airflow/providers/elasticsearch/log/es_response.py | {
"start": 1695,
"end": 2339
} | class ____:
"""Helper class to provide attribute like access to Dictionary objects."""
def __init__(self, d):
super().__setattr__("_d_", d)
def __getattr__(self, attr_name):
"""Retrieve an item as an attribute from the dictionary."""
try:
return self.__getitem__(attr_name)
except KeyError:
raise AttributeError(f"{self.__class__.__name__!r} object has no attribute {attr_name!r}")
def __getitem__(self, key):
"""Retrieve an item using a key from the dictionary."""
return _wrap(self._d_[key])
def to_dict(self):
return self._d_
| AttributeDict |
python | kamyu104__LeetCode-Solutions | Python/check-if-any-element-has-prime-frequency.py | {
"start": 514,
"end": 802
} | class ____(object):
def checkPrimeFrequency(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
cnt = collections.defaultdict(int)
for x in nums:
cnt[x] += 1
return any(SPF[v] == v for v in cnt.itervalues())
| Solution |
python | pytorch__pytorch | torch/utils/_sympy/functions.py | {
"start": 6729,
"end": 11682
} | class ____(sympy.Function):
"""
We maintain this so that:
1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
NB: This is Python-style floor division, round to -Inf
"""
nargs: tuple[int, ...] = (2,)
precedence: int = 35 # lower precedence than add
is_integer: bool = True
@property
def base(self) -> sympy.Basic:
# pyrefly: ignore [missing-attribute]
return self.args[0]
@property
def divisor(self) -> sympy.Basic:
# pyrefly: ignore [missing-attribute]
return self.args[1]
def _sympystr(self, printer: sympy.printing.StrPrinter) -> str:
base = printer.parenthesize(self.base, PRECEDENCE["Atom"] - 0.5)
divisor = printer.parenthesize(self.divisor, PRECEDENCE["Atom"] - 0.5)
return f"({base}//{divisor})"
# Automatic evaluation.
# https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval
@classmethod
def eval(cls, base: sympy.Integer, divisor: sympy.Integer) -> sympy.Basic | None:
# python test/test_dynamic_shapes.py -k TestDimConstraints.test_dim_constraints_solve_full
# Assert triggered by inequality solver
# assert base.is_integer, base
# assert divisor.is_integer, divisor
# We don't provide the same error message as in Python because SymPy
# makes it difficult to check the types.
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if base in (int_oo, -int_oo, sympy.oo, -sympy.oo) and divisor in (
int_oo,
-int_oo,
sympy.oo,
-sympy.oo,
):
return sympy.nan
if base is sympy.nan or divisor is sympy.nan:
return sympy.nan
if base.is_zero:
return sympy.S.Zero
if base.is_integer and equal_valued(divisor, 1):
return base
if base.is_integer and equal_valued(divisor, -1):
return sympy.Mul(base, -1)
if (
isinstance(base, sympy.Number)
and isinstance(divisor, sympy.Number)
and (
base in (int_oo, -int_oo, sympy.oo, -sympy.oo)
or divisor in (int_oo, -int_oo, sympy.oo, -sympy.oo)
)
):
r = float(base) / float(divisor)
if r == math.inf:
return int_oo
elif r == -math.inf:
return -int_oo
elif math.isnan(r):
return sympy.nan
else:
return sympy.Integer(math.floor(r))
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return sympy.Integer(int(base) // int(divisor))
if isinstance(base, FloorDiv):
return FloorDiv(base.args[0], base.args[1] * divisor)
# Expands (x + y) // b into x // b + y // b.
# This only works if floor is an identity, i.e. x / b is an integer.
if isinstance(divisor, sympy.Integer):
quotients = 0
terms = []
for term in sympy.Add.make_args(base):
quotient = term / divisor
# This is a sympy bug fixed in https://github.com/sympy/sympy/pull/28442
# sympy can generate a quotient with (1/22)*.... such that quotient.is_integer is True
# FloorDiv should not allow that as output. see
quotient_is_integer = None
if isinstance(quotient, sympy.Mul) and TorchVersion(
sympy.__version__
) < TorchVersion("1.15.0"):
rationals = quotient.atoms(sympy.Rational)
all_rationals_ints = all(r.q == 1 for r in rationals)
quotient_is_integer = quotient.is_integer and all_rationals_ints
else:
quotient_is_integer = quotient.is_integer
if quotient_is_integer:
terms.append(term)
quotients += quotient
if len(terms) != 0:
# Passing evaluate = False since expression will be optimized during the subtraction post its construction.
return (
FloorDiv(base - sympy.Add(*terms, evaluate=False), divisor)
+ quotients
)
try:
gcd = simple_floordiv_gcd(base, divisor)
if equal_valued(gcd, 1) and isinstance(divisor, sympy.Add):
gcd = sympy.gcd(base, divisor)
if not equal_valued(gcd, 1):
return FloorDiv(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
)
except sympy.PolynomialError:
pass # https://github.com/pytorch/pytorch/issues/108276
return None
| FloorDiv |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial004.py | {
"start": 242,
"end": 653
} | class ____(BaseModel):
username: str
full_name: Union[str, None] = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item,
user: User,
importance: int = Body(gt=0),
q: Union[str, None] = None,
):
results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
if q:
results.update({"q": q})
return results
| User |
python | falconry__falcon | tests/test_media_multipart.py | {
"start": 10672,
"end": 11865
} | class ____:
def on_post(self, req, resp):
values = []
for part in req.media:
values.append(
{
'content_type': part.content_type,
'data': part.data.decode(),
'filename': part.filename,
'name': part.name,
'secure_filename': part.secure_filename if part.filename else None,
'text': part.text,
}
)
resp.media = values
def on_post_media(self, req, resp):
deserialized = []
for part in req.media:
part_media = part.get_media()
assert part_media == part.media
deserialized.append(part_media)
resp.media = deserialized
def on_post_mirror(self, req, resp):
parts = []
for part in req.get_media():
parts.append(
{
'content': part.stream.read(),
'content_type': part.content_type,
'name': part.name,
}
)
resp.content_type = falcon.MEDIA_MSGPACK
resp.media = parts
| MultipartAnalyzer |
python | numpy__numpy | numpy/polynomial/tests/test_hermite_e.py | {
"start": 3464,
"end": 6188
} | class ____:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
# check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
# check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = f"At i={i}"
tgt = y[i]
res = herme.hermeval(x, [0] * i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
# test values
tgt = y1 * y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
# test values
tgt = y1 * y2 * y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3) * 2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3) * 3)
| TestEvaluation |
python | tensorflow__tensorflow | tensorflow/compiler/tests/dynamic_slice_ops_test.py | {
"start": 985,
"end": 3369
} | class ____(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
self.assertAllClose(result, expected, rtol=1e-3)
def testUpdateSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array([], dtype=dtype),
np.array([], dtype=dtype),
np.array([0], dtype=np.int32)
],
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([11, 12, 13], dtype=dtype),
np.array([6], dtype=np.int32)
],
expected=np.array([1, 2, 3, 4, 5, 6, 11, 12, 13, 10], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.array([[42, 43], [44, 45]], dtype=dtype),
np.array([1, 2], dtype=np.int32)
],
expected=np.array(
[[1, 2, 3, 4], [5, 6, 42, 43], [9, 10, 44, 45]], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.array([[], []], dtype=dtype),
np.array([1, 2], dtype=np.int32)
],
expected=np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype))
self._assertOpOutputMatchesExpected(
xla.dynamic_update_slice, [
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=dtype),
np.ones([3, 4], dtype=dtype),
np.array([0, 0], dtype=np.int32)
],
expected=np.ones([3, 4], dtype=dtype))
if __name__ == '__main__':
test.main()
| DynamicUpdateSliceOpsTest |
python | django__django | tests/admin_inlines/admin.py | {
"start": 1179,
"end": 1290
} | class ____(admin.TabularInline):
model = NonAutoPKBook
classes = ("collapse",)
| NonAutoPKBookTabularInline |
python | has2k1__plotnine | plotnine/scales/scale_size.py | {
"start": 2729,
"end": 3273
} | class ____(scale_datetime):
"""
Datetime area-size scale
"""
_aesthetics = ["size"]
range: InitVar[tuple[float, float]] = (1, 6)
"""
Range ([Minimum, Maximum]) of the size.
"""
_: KW_ONLY
guide: Literal["legend"] | None = "legend"
def __post_init__(
self, range, date_breaks, date_labels, date_minor_breaks
):
from mizani.palettes import area_pal
super().__post_init__(date_breaks, date_labels, date_minor_breaks)
self.palette = area_pal(range)
| scale_size_datetime |
python | realpython__materials | django-vue-graphql/source_code_final/back_end/blog/models.py | {
"start": 363,
"end": 495
} | class ____(models.Model):
name = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.name
| Tag |
python | kamyu104__LeetCode-Solutions | Python/sort-array-by-parity.py | {
"start": 29,
"end": 324
} | class ____(object):
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
i = 0
for j in xrange(len(A)):
if A[j] % 2 == 0:
A[i], A[j] = A[j], A[i]
i += 1
return A
| Solution |
python | numpy__numpy | tools/swig/test/testFortran.py | {
"start": 3270,
"end": 3537
} | class ____(FortranTestCase):
def __init__(self, methodName="runTest"):
FortranTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
######################################################################
| ulongTestCase |
python | mlflow__mlflow | dev/clint/src/clint/rules/forbidden_set_active_model_usage.py | {
"start": 84,
"end": 667
} | class ____(Rule):
def _message(self) -> str:
return (
"Usage of `set_active_model` is not allowed in mlflow, use `_set_active_model` instead."
)
@staticmethod
def check(node: ast.Call, resolver: Resolver) -> bool:
"""Check if this is a call to set_active_model function."""
if names := resolver.resolve(node):
match names:
case ["mlflow", *_, "set_active_model"]:
return True
case _:
return False
return False
| ForbiddenSetActiveModelUsage |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/events.py | {
"start": 2280,
"end": 6065
} | class ____(event.Events[InstrumentationFactory]):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = InstrumentationFactory
@classmethod
def _accept_with(
cls,
target: Union[
InstrumentationFactory,
Type[InstrumentationFactory],
],
identifier: str,
) -> Optional[
Union[
InstrumentationFactory,
Type[InstrumentationFactory],
]
]:
if isinstance(target, type):
return _InstrumentationEventsHold(target) # type: ignore [return-value] # noqa: E501
else:
return None
@classmethod
def _listen(
cls, event_key: _EventKey[_T], propagate: bool = True, **kw: Any
) -> None:
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
def listen(target_cls: type, *arg: Any) -> Optional[Any]:
listen_cls = target()
# if weakref were collected, however this is not something
# that normally happens. it was occurring during test teardown
# between mapper/registry/instrumentation_manager, however this
# interaction was changed to not rely upon the event system.
if listen_cls is None:
return None
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
else:
return None
def remove(ref: ReferenceType[_T]) -> None:
key = event.registry._EventKey( # type: ignore [type-var]
None,
identifier,
listen,
instrumentation._instrumentation_factory,
)
getattr(
instrumentation._instrumentation_factory.dispatch, identifier
).remove(key)
target = weakref.ref(target.class_, remove)
event_key.with_dispatch_target(
instrumentation._instrumentation_factory
).with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls) -> None:
super()._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls: ClassManager[_O]) -> None:
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls: ClassManager[_O]) -> None:
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(
self, cls: ClassManager[_O], key: _KT, inst: _O
) -> None:
"""Called when an attribute is instrumented."""
| InstrumentationEvents |
python | readthedocs__readthedocs.org | readthedocs/core/history.py | {
"start": 3883,
"end": 4391
} | class ____(forms.ModelForm):
"""Set the change_reason on the model changed through this form."""
change_reason = None
def get_change_reason(self):
if self.change_reason:
return self.change_reason
klass = self.__class__.__name__
return f"origin=form class={klass}"
def save(self, commit=True):
if self.instance:
set_change_reason(self.instance, self.get_change_reason())
return super().save(commit=commit)
| SimpleHistoryModelForm |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 50650,
"end": 51191
} | class ____(backend_tools.ToolCopyToClipboardBase):
def trigger(self, *args, **kwargs):
if not self.canvas._isDrawn:
self.canvas.draw()
if not self.canvas.bitmap.IsOk() or not wx.TheClipboard.Open():
return
try:
wx.TheClipboard.SetData(wx.BitmapDataObject(self.canvas.bitmap))
finally:
wx.TheClipboard.Close()
FigureManagerWx._toolbar2_class = NavigationToolbar2Wx
FigureManagerWx._toolmanager_toolbar_class = ToolbarWx
@_Backend.export
| ToolCopyToClipboardWx |
python | numpy__numpy | numpy/distutils/command/config.py | {
"start": 20334,
"end": 20670
} | class ____:
def __init__(self):
self.sys_stdout = sys.stdout
self.data = ''
sys.stdout = self
def write (self, data):
self.sys_stdout.write(data)
self.data += data
def flush (self):
self.sys_stdout.flush()
def restore(self):
sys.stdout = self.sys_stdout
| GrabStdout |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-azure-table/source_azure_table/azure_table.py | {
"start": 235,
"end": 4662
} | class ____:
"""
This reader reads data from given table
Attributes
----------
logger : AirbyteLogger
Airbyte's Logger instance
account_name : str
The name of your storage account.
access_key : str
The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys
endpoint_suffix : str
The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix
connection_string: str
storage account connection string created using above params. Read more about connection string here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account
Methods
-------
get_table_service_client()
Returns azure table service client from connection string.
get_table_client(table_name: str)
Returns azure table client from connection string.
get_tables()
Fetches all tables from storage account
read_table()
Reads data from an Azure table
"""
def __init__(self, logger: logging.Logger, config: dict):
"""
Parameters
----------
config : dict
Airbyte's configuration obect
"""
self.logger = logger
self.account_name = config[constants.azure_storage_account_name_key_name]
self.access_key = config[constants.azure_storage_access_key_key_name]
self.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]
self.connection_string = "DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}".format(
self.account_name, self.access_key, self.endpoint_suffix
)
def get_table_service_client(self) -> TableServiceClient:
"""
Returns azure table service client from connection string.
Table service client facilitate interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables
"""
try:
return TableServiceClient.from_connection_string(conn_str=self.connection_string)
except Exception as e:
raise Exception(f"An exception occurred: {str(e)}")
def get_table_client(self, table_name: str) -> TableClient:
"""
Returns azure table client from connection string.
Table client facilitate interaction with table entities/rows. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities
Parameters
----------
table_name : str
table name for which you would like create table client for.
"""
try:
if not table_name:
raise Exception("An exception occurred: table name is not valid.")
return TableClient.from_connection_string(self.connection_string, table_name=table_name)
except Exception as e:
raise Exception(f"An exception occurred: {str(e)}")
def get_tables(self) -> ItemPaged:
"""
Fetches all tables from storage account and returns them in Airbyte stream.
"""
try:
table_service_client = self.get_table_service_client()
tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)
return tables_iterator
except Exception as e:
raise Exception(f"An exception occurred: {str(e)}")
def read_table(self, table_client: TableClient, filter_query: str = None) -> Iterable:
"""
Reads data from an Azure table.
Parameters
----------
table_client : TableClient
table client object to be able to access querying methods.
filter_query : str
either None or a query to pull data from table storage (based on the PartitionKey)
"""
if filter_query is None:
return table_client.list_entities()
else:
return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)
| AzureTableReader |
python | scikit-learn__scikit-learn | sklearn/compose/tests/test_column_transformer.py | {
"start": 59861,
"end": 77372
} | class ____(Trans):
def __init__(self, feature_names_out=None):
self.feature_names_out = feature_names_out
def get_feature_names_out(self, input_features=None):
if self.feature_names_out is not None:
return np.asarray(self.feature_names_out, dtype=object)
return input_features
@pytest.mark.parametrize(
"transformers, remainder, expected_names",
[
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", ["d"]),
],
"passthrough",
["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", ["d"]),
],
"drop",
["bycol1__d", "bycol1__c", "bycol2__d"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", ["d"]),
],
"passthrough",
["bycol1__b", "remainder__a", "remainder__c"],
),
(
[
("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
],
"passthrough",
["bycol1__pca1", "bycol1__pca2", "remainder__c"],
),
(
[
("bycol1", TransWithNames(["a", "b"]), ["d"]),
("bycol2", "passthrough", ["b"]),
],
"drop",
["bycol1__a", "bycol1__b", "bycol2__b"],
),
(
[
("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
],
"passthrough",
[
"bycol1__pca0",
"bycol1__pca1",
"bycol2__pca0",
"bycol2__pca1",
"remainder__a",
"remainder__c",
"remainder__d",
],
),
(
[
("bycol1", "drop", ["d"]),
],
"drop",
[],
),
(
[
("bycol1", TransWithNames(), slice(1, 3)),
],
"drop",
["bycol1__b", "bycol1__c"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", slice(3, 4)),
],
"passthrough",
["bycol1__b", "remainder__a", "remainder__c"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice(3, 4)),
],
"passthrough",
["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
),
(
[
("bycol1", TransWithNames(), slice("b", "c")),
],
"drop",
["bycol1__b", "bycol1__c"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", slice("c", "d")),
],
"passthrough",
["bycol1__b", "remainder__a"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice("c", "d")),
],
"passthrough",
[
"bycol1__d",
"bycol1__c",
"bycol2__c",
"bycol2__d",
"remainder__a",
"remainder__b",
],
),
],
)
def test_verbose_feature_names_out_true(transformers, remainder, expected_names):
"""Check feature_names_out for verbose_feature_names_out=True (default)"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
ct = ColumnTransformer(
transformers,
remainder=remainder,
)
ct.fit(df)
names = ct.get_feature_names_out()
assert isinstance(names, np.ndarray)
assert names.dtype == object
assert_array_equal(names, expected_names)
def _feature_names_out_callable_name_clash(trans_name: str, feat_name: str):
return f"{trans_name[:2]}++{feat_name}"
def _feature_names_out_callable_upper(trans_name: str, feat_name: str):
return f"{trans_name.upper()}={feat_name.upper()}"
@pytest.mark.parametrize(
"transformers, remainder, verbose_feature_names_out, expected_names",
[
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", ["d"]),
],
"passthrough",
_feature_names_out_callable_name_clash,
["by++d", "by++c", "by++d", "re++a", "re++b"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", ["d"]),
],
"drop",
"{feature_name}-{transformer_name}",
["d-bycol1", "c-bycol1", "d-bycol2"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice("c", "d")),
],
"passthrough",
_feature_names_out_callable_upper,
[
"BYCOL1=D",
"BYCOL1=C",
"BYCOL2=C",
"BYCOL2=D",
"REMAINDER=A",
"REMAINDER=B",
],
),
],
)
def test_verbose_feature_names_out_callable_or_str(
transformers, remainder, verbose_feature_names_out, expected_names
):
"""Check feature_names_out for verbose_feature_names_out=True (default)"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
ct = ColumnTransformer(
transformers,
remainder=remainder,
verbose_feature_names_out=verbose_feature_names_out,
)
ct.fit(df)
names = ct.get_feature_names_out()
assert isinstance(names, np.ndarray)
assert names.dtype == object
assert_array_equal(names, expected_names)
@pytest.mark.parametrize(
"transformers, remainder, expected_names",
[
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", ["a"]),
],
"passthrough",
["d", "c", "a", "b"],
),
(
[
("bycol1", TransWithNames(["a"]), ["d", "c"]),
("bycol2", "passthrough", ["d"]),
],
"drop",
["a", "d"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", ["d"]),
],
"passthrough",
["b", "a", "c"],
),
(
[
("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
],
"passthrough",
["pca1", "pca2", "c"],
),
(
[
("bycol1", TransWithNames(["a", "c"]), ["d"]),
("bycol2", "passthrough", ["d"]),
],
"drop",
["a", "c", "d"],
),
(
[
("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]),
],
"passthrough",
["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"],
),
(
[
("bycol1", "drop", ["d"]),
],
"drop",
[],
),
(
[
("bycol1", TransWithNames(), slice(1, 2)),
("bycol2", "drop", ["d"]),
],
"passthrough",
["b", "a", "c"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", slice(3, 4)),
],
"passthrough",
["b", "a", "c"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice(0, 2)),
],
"drop",
["d", "c", "a", "b"],
),
(
[
("bycol1", TransWithNames(), slice("a", "b")),
("bycol2", "drop", ["d"]),
],
"passthrough",
["a", "b", "c"],
),
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "drop", slice("c", "d")),
],
"passthrough",
["b", "a"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice("a", "b")),
],
"drop",
["d", "c", "a", "b"],
),
(
[
("bycol1", TransWithNames(), ["d", "c"]),
("bycol2", "passthrough", slice("b", "b")),
],
"drop",
["d", "c", "b"],
),
],
)
def test_verbose_feature_names_out_false(transformers, remainder, expected_names):
"""Check feature_names_out for verbose_feature_names_out=False"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
ct = ColumnTransformer(
transformers,
remainder=remainder,
verbose_feature_names_out=False,
)
ct.fit(df)
names = ct.get_feature_names_out()
assert isinstance(names, np.ndarray)
assert names.dtype == object
assert_array_equal(names, expected_names)
@pytest.mark.parametrize(
"transformers, remainder, colliding_columns",
[
(
[
("bycol1", TransWithNames(), ["b"]),
("bycol2", "passthrough", ["b"]),
],
"drop",
"['b']",
),
(
[
("bycol1", TransWithNames(["c", "d"]), ["c"]),
("bycol2", "passthrough", ["c"]),
],
"drop",
"['c']",
),
(
[
("bycol1", TransWithNames(["a"]), ["b"]),
("bycol2", "passthrough", ["b"]),
],
"passthrough",
"['a']",
),
(
[
("bycol1", TransWithNames(["a"]), ["b"]),
("bycol2", "drop", ["b"]),
],
"passthrough",
"['a']",
),
(
[
("bycol1", TransWithNames(["c", "b"]), ["b"]),
("bycol2", "passthrough", ["c", "b"]),
],
"drop",
"['b', 'c']",
),
(
[
("bycol1", TransWithNames(["a"]), ["b"]),
("bycol2", "passthrough", ["a"]),
("bycol3", TransWithNames(["a"]), ["b"]),
],
"passthrough",
"['a']",
),
(
[
("bycol1", TransWithNames(["a", "b"]), ["b"]),
("bycol2", "passthrough", ["a"]),
("bycol3", TransWithNames(["b"]), ["c"]),
],
"passthrough",
"['a', 'b']",
),
(
[
("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
],
"passthrough",
"['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]",
),
(
[
("bycol1", TransWithNames(["a", "b"]), slice(1, 2)),
("bycol2", "passthrough", ["a"]),
("bycol3", TransWithNames(["b"]), ["c"]),
],
"passthrough",
"['a', 'b']",
),
(
[
("bycol1", TransWithNames(["a", "b"]), ["b"]),
("bycol2", "passthrough", slice(0, 1)),
("bycol3", TransWithNames(["b"]), ["c"]),
],
"passthrough",
"['a', 'b']",
),
(
[
("bycol1", TransWithNames(["a", "b"]), slice("b", "c")),
("bycol2", "passthrough", ["a"]),
("bycol3", TransWithNames(["b"]), ["c"]),
],
"passthrough",
"['a', 'b']",
),
(
[
("bycol1", TransWithNames(["a", "b"]), ["b"]),
("bycol2", "passthrough", slice("a", "a")),
("bycol3", TransWithNames(["b"]), ["c"]),
],
"passthrough",
"['a', 'b']",
),
],
)
def test_verbose_feature_names_out_false_errors(
transformers, remainder, colliding_columns
):
"""Check feature_names_out for verbose_feature_names_out=False"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
ct = ColumnTransformer(
transformers,
remainder=remainder,
verbose_feature_names_out=False,
)
ct.fit(df)
msg = re.escape(
f"Output feature names: {colliding_columns} are not unique. Please set "
"verbose_feature_names_out=True to add prefixes to feature names"
)
with pytest.raises(ValueError, match=msg):
ct.get_feature_names_out()
@pytest.mark.parametrize("verbose_feature_names_out", [True, False])
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transformer_set_output(verbose_feature_names_out, remainder):
"""Check column transformer behavior with set_output."""
pd = pytest.importorskip("pandas")
df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10])
ct = ColumnTransformer(
[("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])],
remainder=remainder,
verbose_feature_names_out=verbose_feature_names_out,
)
X_trans = ct.fit_transform(df)
assert isinstance(X_trans, np.ndarray)
ct.set_output(transform="pandas")
df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20])
X_trans = ct.transform(df_test)
assert isinstance(X_trans, pd.DataFrame)
feature_names_out = ct.get_feature_names_out()
assert_array_equal(X_trans.columns, feature_names_out)
assert_array_equal(X_trans.index, df_test.index)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
@pytest.mark.parametrize("fit_transform", [True, False])
def test_column_transform_set_output_mixed(remainder, fit_transform):
"""Check ColumnTransformer outputs mixed types correctly."""
pd = pytest.importorskip("pandas")
df = pd.DataFrame(
{
"pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
"color": pd.Series(["green", "blue", "red"], dtype="object"),
"age": [1.4, 2.1, 4.4],
"height": [20, 40, 10],
"distance": pd.Series([20, pd.NA, 100], dtype="Int32"),
}
)
ct = ColumnTransformer(
[
(
"color_encode",
OneHotEncoder(sparse_output=False, dtype="int8"),
["color"],
),
("age", StandardScaler(), ["age"]),
],
remainder=remainder,
verbose_feature_names_out=False,
).set_output(transform="pandas")
if fit_transform:
X_trans = ct.fit_transform(df)
else:
X_trans = ct.fit(df).transform(df)
assert isinstance(X_trans, pd.DataFrame)
assert_array_equal(X_trans.columns, ct.get_feature_names_out())
expected_dtypes = {
"color_blue": "int8",
"color_green": "int8",
"color_red": "int8",
"age": "float64",
"pet": "category",
"height": "int64",
"distance": "Int32",
}
for col, dtype in X_trans.dtypes.items():
assert dtype == expected_dtypes[col]
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transform_set_output_after_fitting(remainder):
pd = pytest.importorskip("pandas")
df = pd.DataFrame(
{
"pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
"age": [1.4, 2.1, 4.4],
"height": [20, 40, 10],
}
)
ct = ColumnTransformer(
[
(
"color_encode",
OneHotEncoder(sparse_output=False, dtype="int16"),
["pet"],
),
("age", StandardScaler(), ["age"]),
],
remainder=remainder,
verbose_feature_names_out=False,
)
# fit without calling set_output
X_trans = ct.fit_transform(df)
assert isinstance(X_trans, np.ndarray)
assert X_trans.dtype == "float64"
ct.set_output(transform="pandas")
X_trans_df = ct.transform(df)
expected_dtypes = {
"pet_cat": "int16",
"pet_dog": "int16",
"pet_snake": "int16",
"height": "int64",
"age": "float64",
}
for col, dtype in X_trans_df.dtypes.items():
assert dtype == expected_dtypes[col]
# PandasOutTransformer that does not define get_feature_names_out and always expects
# the input to be a DataFrame.
| TransWithNames |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 11372,
"end": 11489
} | class ____(ApeException):
"""
Raised when a problem occurs when using blockchain networks.
"""
| NetworkError |
python | euske__pdfminer | pdfminer/pdffont.py | {
"start": 4888,
"end": 13966
} | class ____:
STANDARD_STRINGS = (
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright',
'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
'multiply', 'threesuperior', 'copyright', 'Aacute',
'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
'seveninferior', 'eightinferior', 'nineinferior',
'centinferior', 'dollarinferior', 'periodinferior',
'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
'001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
)
class INDEX:
def __init__(self, fp):
self.fp = fp
self.offsets = []
(count, offsize) = struct.unpack('>HB', self.fp.read(3))
for i in range(count+1):
self.offsets.append(nunpack(self.fp.read(offsize)))
self.base = self.fp.tell()-1
self.fp.seek(self.base+self.offsets[-1])
return
def __repr__(self):
return '<INDEX: size=%d>' % len(self)
def __len__(self):
return len(self.offsets)-1
def __getitem__(self, i):
self.fp.seek(self.base+self.offsets[i])
return self.fp.read(self.offsets[i+1]-self.offsets[i])
def __iter__(self):
return iter(self[i] for i in range(len(self)))
def __init__(self, name, fp):
self.name = name
self.fp = fp
# Header
(_major, _minor, hdrsize, offsize) = struct.unpack('BBBB', self.fp.read(4))
self.fp.read(hdrsize-4)
# Name INDEX
self.name_index = self.INDEX(self.fp)
# Top DICT INDEX
self.dict_index = self.INDEX(self.fp)
# String INDEX
self.string_index = self.INDEX(self.fp)
# Global Subr INDEX
self.subr_index = self.INDEX(self.fp)
# Top DICT DATA
self.top_dict = getdict(self.dict_index[0])
(charset_pos,) = self.top_dict.get(15, [0])
(encoding_pos,) = self.top_dict.get(16, [0])
(charstring_pos,) = self.top_dict.get(17, [0])
# CharStrings
self.fp.seek(charstring_pos)
self.charstring = self.INDEX(self.fp)
self.nglyphs = len(self.charstring)
# Encodings
self.code2gid = {}
self.gid2code = {}
self.fp.seek(encoding_pos)
format = self.fp.read(1)
if format == b'\x00':
# Format 0
(n,) = struct.unpack('B', self.fp.read(1))
for (code, gid) in enumerate(struct.unpack('B'*n, self.fp.read(n))):
self.code2gid[code] = gid
self.gid2code[gid] = code
elif format == b'\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
code = 0
for i in range(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in range(first, first+nleft+1):
self.code2gid[code] = gid
self.gid2code[gid] = code
code += 1
else:
raise ValueError('unsupported encoding format: %r' % format)
# Charsets
self.name2gid = {}
self.gid2name = {}
self.fp.seek(charset_pos)
format = self.fp.read(1)
if format == b'\x00':
# Format 0
n = self.nglyphs-1
for (gid, sid) in enumerate(struct.unpack('>'+'H'*n, self.fp.read(2*n))):
gid += 1
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
elif format == b'\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
sid = 0
for i in range(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in range(first, first+nleft+1):
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
sid += 1
elif format == b'\x02':
# Format 2
assert 0
else:
raise ValueError('unsupported charset format: %r' % format)
#print(self.code2gid)
#print(self.name2gid)
#assert 0
return
def getstr(self, sid):
if sid < len(self.STANDARD_STRINGS):
return self.STANDARD_STRINGS[sid]
return self.string_index[sid-len(self.STANDARD_STRINGS)]
## TrueTypeFont
##
| CFFFont |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 62676,
"end": 64594
} | class ____(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
| Const1D |
python | allegroai__clearml | clearml/utilities/resource_monitor.py | {
"start": 477,
"end": 26475
} | class ____(BackgroundMonitor):
_title_machine = ":monitor:machine"
_title_gpu = ":monitor:gpu"
_first_report_sec_default = 30.0
_wait_for_first_iteration_to_start_sec_default = 180.0
_max_wait_for_first_iteration_to_start_sec_default = 1800.0
_resource_monitor_instances = []
_multi_node_single_task = None
def __init__(
self,
task: Session,
sample_frequency_per_sec: float = 2.0,
report_frequency_sec: float = 30.0,
first_report_sec: float = None,
wait_for_first_iteration_to_start_sec: float = None,
max_wait_for_first_iteration_to_start_sec: float = None,
report_mem_used_per_process: bool = True,
) -> None:
super(ResourceMonitor, self).__init__(task=task, wait_period=sample_frequency_per_sec)
# noinspection PyProtectedMember
ResourceMonitor._resource_monitor_instances.append(self)
ResourceMonitor._multi_node_single_task = ENV_MULTI_NODE_SINGLE_TASK.get()
self._task = task
self._sample_frequency = sample_frequency_per_sec
self._report_frequency = report_frequency_sec
# noinspection PyProtectedMember
self._first_report_sec = next(
value
# noinspection PyProtectedMember
for value in (
first_report_sec,
ResourceMonitor._first_report_sec_default,
report_frequency_sec,
)
if value is not None
)
self.wait_for_first_iteration = next(
value
for value in (
wait_for_first_iteration_to_start_sec,
# noinspection PyProtectedMember
ResourceMonitor._wait_for_first_iteration_to_start_sec_default,
0.0,
)
if value is not None
)
self.max_check_first_iteration = next(
value
for value in (
max_wait_for_first_iteration_to_start_sec,
# noinspection PyProtectedMember
ResourceMonitor._max_wait_for_first_iteration_to_start_sec_default,
0.0,
)
if value is not None
)
self._num_readouts = 0
self._readouts = {}
self._previous_readouts = {}
self._previous_readouts_ts = time()
self._gpustat_fail = 0
self._gpustat = gpustat
self._active_gpus = None
self._process_info = psutil.Process() if report_mem_used_per_process else None
self._last_process_pool = {}
self._last_process_id_list = []
self._gpu_memory_per_process = True
self._default_gpu_utilization = config.get("resource_monitoring.default_gpu_utilization", 100)
# allow default_gpu_utilization as null in the config, in which case we don't log anything
if self._default_gpu_utilization is not None:
self._default_gpu_utilization = int(self._default_gpu_utilization)
self._gpu_utilization_warning_sent = False
# noinspection PyBroadException
try:
self._debug_mode = bool(os.getenv("CLEARML_RESMON_DEBUG", ""))
except Exception:
self._debug_mode = False
if not self._gpustat:
self._task.get_logger().report_text("ClearML Monitor: GPU monitoring is not available")
else: # if running_remotely():
# noinspection PyBroadException
try:
active_gpus = os.environ.get("NVIDIA_VISIBLE_DEVICES", "") or os.environ.get("CUDA_VISIBLE_DEVICES", "")
if active_gpus and active_gpus != "all":
if os.path.isdir(active_gpus):
try:
self._active_gpus = os.listdir(active_gpus)
except OSError as e:
logging.getLogger("clearml.resource_monitor").warning(
"Failed listing {}: {}".format(active_gpus, e)
)
else:
self._active_gpus = [g.strip() for g in active_gpus.split(",")]
except Exception:
pass
def daemon(self) -> None:
    """Main monitoring loop: periodically sample machine/GPU stats and report them to the Task.

    Runs until ``self._event`` is set. Stats are sampled at
    ``self._sample_frequency`` Hz and reported every ``self._report_frequency``
    seconds (the very first report happens after ``self._first_report_sec``).
    The report x-axis is the Task's last reported iteration when iteration
    reporting is detected, otherwise seconds-from-start.
    """
    # In thread mode only the main process runs the monitor; sub-processes bail out.
    if self._is_thread_mode_and_not_main_process():
        return

    multi_node_single_task_reporting = False
    report_node_as_series = False
    rank = 0
    world_size_digits = 0
    # check if we are in multi-node reporting to the same Task
    # noinspection PyBroadException
    try:
        if self._multi_node_single_task:
            # if resource monitoring is disabled, do nothing
            if self._multi_node_single_task < 0:
                return
            # we are reporting machines stats on a different machine over the same Task
            multi_node_single_task_reporting = True
            if self._multi_node_single_task == 1:
                # report per machine graph (unique title)
                report_node_as_series = False
            elif self._multi_node_single_task == 2:
                # report per machine series (i.e. merge title+series resource and have "node X" as different series)
                report_node_as_series = True
            # noinspection PyBroadException
            try:
                rank = int(os.environ.get("RANK", os.environ.get("SLURM_PROCID")) or 0)
                # NOTE(review): ceil(log10(N)) gives 2 for N=100 (needs 3 digits) and
                # raises for WORLD_SIZE unset/0 (swallowed below) — verify padding intent.
                world_size_digits = ceil(log10(int(os.environ.get("WORLD_SIZE") or 0)))
            except Exception:
                pass
    except Exception:
        pass

    seconds_since_started = 0
    reported = 0
    last_iteration = 0
    # None = undecided; True = use seconds-from-start as x-axis; False = use iterations
    fallback_to_sec_as_iterations = None

    # get max GPU ID, and make sure our active list is within range
    if self._active_gpus:
        # noinspection PyBroadException
        try:
            gpu_stat = self._gpustat.new_query()
            # NOTE(review): _active_gpus holds strings, so comparing max(...) with an int
            # raises TypeError and is swallowed by the except below — confirm intent.
            if max(self._active_gpus) > len(gpu_stat.gpus) - 1:
                self._active_gpus = None
        except Exception:
            pass

    # add Task runtime_properties with the machine spec
    if Session.check_min_api_version("2.13"):
        try:
            machine_spec = self._get_machine_specs()
            if machine_spec:
                # noinspection PyProtectedMember
                self._task._set_runtime_properties(runtime_properties=machine_spec)
        except Exception as ex:
            logging.getLogger("clearml.resource_monitor").debug(
                "Failed logging machine specification: {}".format(ex)
            )

    # last_iteration_interval = None
    # last_iteration_ts = 0
    # repeated_iterations = 0
    while True:
        last_report = time()
        current_report_frequency = self._report_frequency if reported != 0 else self._first_report_sec
        while (time() - last_report) < current_report_frequency:
            # wait for self._sample_frequency seconds, if event set quit
            if self._event.wait(1.0 / self._sample_frequency):
                return
            # noinspection PyBroadException
            try:
                self._update_readouts()
            except Exception:
                pass

        seconds_since_started += int(round(time() - last_report))
        # check if we do not report any metric (so it means the last iteration will not be changed)
        if fallback_to_sec_as_iterations is None:
            if IsTensorboardInit.tensorboard_used():
                fallback_to_sec_as_iterations = False
            elif seconds_since_started >= self.wait_for_first_iteration:
                self._task.get_logger().report_text(
                    "ClearML Monitor: Could not detect iteration reporting, "
                    "falling back to iterations as seconds-from-start"
                )
                fallback_to_sec_as_iterations = True
        elif fallback_to_sec_as_iterations is True and seconds_since_started <= self.max_check_first_iteration:
            # within the grace period: if real reporting shows up, switch back to iterations
            if self._check_logger_reported():
                fallback_to_sec_as_iterations = False
                self._task.get_logger().report_text(
                    "ClearML Monitor: Reporting detected, reverting back to iteration based reporting"
                )

        clear_readouts = True
        # if we do not have last_iteration, we just use seconds as iteration
        if fallback_to_sec_as_iterations:
            iteration = seconds_since_started
        else:
            iteration = self._task.get_last_iteration()
            if iteration < last_iteration:
                # we started a new session?!
                # wait out
                clear_readouts = False
                iteration = last_iteration
            elif iteration == last_iteration:
                # iteration did not advance: keep accumulating readouts (averaged later)
                # repeated_iterations += 1
                # if last_iteration_interval:
                #     # to be on the safe side, we don't want to pass the actual next iteration
                #     iteration += int(0.95*last_iteration_interval[0] * (seconds_since_started - last_iteration_ts)
                #                      / last_iteration_interval[1])
                # else:
                #     iteration += 1
                clear_readouts = False
                iteration = last_iteration
            else:
                # last_iteration_interval = (iteration - last_iteration, seconds_since_started - last_iteration_ts)
                # repeated_iterations = 0
                # last_iteration_ts = seconds_since_started
                last_iteration = iteration
                fallback_to_sec_as_iterations = False
                clear_readouts = True

        # start reporting only when we figured out, if this is seconds based, or iterations based
        average_readouts = self._get_average_readouts()
        if fallback_to_sec_as_iterations is not None:
            for k, v in average_readouts.items():
                # noinspection PyBroadException
                try:
                    # 3 digits after the dot
                    value = round(v * 1000) / 1000.0
                    title = self._title_gpu if k.startswith("gpu_") else self._title_machine
                    series = k
                    if multi_node_single_task_reporting:
                        if report_node_as_series:
                            # for rank 0 we keep the same original report so that external services
                            # can always check the default cpu/gpu utilization
                            if rank == 0:
                                self._task.get_logger().report_scalar(
                                    title=title,
                                    series=series,
                                    iteration=iteration,
                                    value=value,
                                )
                            # now let's create an additional report
                            title = "{}:{}".format(":".join(title.split(":")[:-1]), series)
                            series = "rank {:0{world_size_digits}d}".format(
                                rank, world_size_digits=world_size_digits
                            )
                        elif rank > 0:
                            title = "{}:rank{:0{world_size_digits}d}".format(
                                title, rank, world_size_digits=world_size_digits
                            )
                        else:
                            # for rank 0 we keep the same original report so that external services
                            # can always check the default cpu/gpu utilization
                            pass

                    self._task.get_logger().report_scalar(
                        title=title, series=series, iteration=iteration, value=value
                    )
                except Exception:
                    pass

        # clear the readouts unless this update should keep accumulating (averaged next round)
        if clear_readouts:
            self._clear_readouts()

        # count reported iterations
        reported += 1
def _update_readouts(self) -> None:
readouts = self._machine_stats()
elapsed = time() - self._previous_readouts_ts
self._previous_readouts_ts = time()
for k, v in readouts.items():
# cumulative measurements
if k.endswith("_mbs"):
v = (v - self._previous_readouts.get(k, v)) / elapsed
self._readouts[k] = self._readouts.get(k, 0.0) + v
self._num_readouts += 1
self._previous_readouts = readouts
def _get_num_readouts(self) -> int:
return self._num_readouts
def _get_average_readouts(self) -> dict:
average_readouts = dict((k, v / float(self._num_readouts)) for k, v in self._readouts.items())
return average_readouts
def _clear_readouts(self) -> None:
self._readouts = {}
self._num_readouts = 0
def _machine_stats(self) -> Dict[str, float]:
    """
    Collect one sample of host statistics.

    :return: dict with ``cpu_usage`` (percent, averaged over cores), memory and
        disk figures (``*_gb`` in GB, ``disk_free_percent`` in percent),
        optional ``cpu_temperature``, cumulative network/disk counters in
        megabytes (keys ending in ``_mbs`` — converted to rates by the caller),
        and GPU stats when gpustat is available.
    """
    cpu_usage = [self._safe_cast(v, float) for v in psutil.cpu_percent(percpu=True)]
    stats = {
        "cpu_usage": sum(cpu_usage) / float(len(cpu_usage)),
    }

    bytes_per_megabyte = 1024**2

    def bytes_to_megabytes(x: float) -> float:
        return x / bytes_per_megabyte

    virtual_memory = psutil.virtual_memory()
    # stats["memory_used_gb"] = bytes_to_megabytes(virtual_memory.used) / 1024
    # when per-process accounting is enabled, report this process tree's usage instead of global
    stats["memory_used_gb"] = (
        bytes_to_megabytes(self._get_process_used_memory() if self._process_info else virtual_memory.used) / 1024
    )
    stats["memory_free_gb"] = bytes_to_megabytes(virtual_memory.available) / 1024
    disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
    stats["disk_free_percent"] = 100.0 - disk_use_percentage
    with warnings.catch_warnings():
        if logging.root.level > logging.DEBUG:  # If the logging level is bigger than debug, ignore
            # psutil.sensors_temperatures warnings
            warnings.simplefilter("ignore", category=RuntimeWarning)
        sensor_stat = psutil.sensors_temperatures() if hasattr(psutil, "sensors_temperatures") else {}
    if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
        stats["cpu_temperature"] = max([self._safe_cast(t.current, float) for t in sensor_stat["coretemp"]])

    # protect against permission issues
    # update cached measurements
    # noinspection PyBroadException
    try:
        net_stats = psutil.net_io_counters()
        stats["network_tx_mbs"] = bytes_to_megabytes(net_stats.bytes_sent)
        stats["network_rx_mbs"] = bytes_to_megabytes(net_stats.bytes_recv)
    except Exception:
        pass

    # protect against permission issues
    # noinspection PyBroadException
    try:
        io_stats = psutil.disk_io_counters()
        stats["io_read_mbs"] = bytes_to_megabytes(io_stats.read_bytes)
        stats["io_write_mbs"] = bytes_to_megabytes(io_stats.write_bytes)
    except Exception:
        pass

    # check if we can access the gpu statistics
    if self._gpustat:
        # noinspection PyBroadException
        try:
            stats.update(self._get_gpu_stats())
        except Exception:
            # something happened and we can't use gpu stats,
            # after 3 consecutive failures, give up on GPU monitoring entirely
            self._gpustat_fail += 1
            if self._gpustat_fail >= 3:
                msg = "ClearML Monitor: GPU monitoring failed getting GPU reading, switching off GPU monitoring"
                if self._debug_mode:
                    import traceback

                    msg += "\n" + traceback.format_exc()
                self._task.get_logger().report_text(msg)
                self._gpustat = None
    return stats
def _check_logger_reported(self) -> bool:
titles = self.get_logger_reported_titles(self._task)
return len(titles) > 0
@classmethod
def get_logger_reported_titles(cls, task: Session) -> List[str]:
    """Return the scalar titles reported on *task*, excluding the monitor's own titles.

    In multi-node mode the machine/GPU titles carry a suffix after the last
    ``:`` (e.g. a rank marker), so exclusion is done by base-title prefix;
    otherwise the exact machine/GPU titles are removed from the list.
    """
    # noinspection PyProtectedMember
    titles = list(task.get_logger()._get_used_title_series().keys())
    # noinspection PyBroadException
    try:
        multi_node = cls._multi_node_single_task is not None
    except Exception:
        multi_node = False
    if multi_node:
        # strip everything after the last ":" to get the base titles
        title_machine = ":".join(cls._title_machine.split(":")[:-1])
        title_gpu = ":".join(cls._title_gpu.split(":")[:-1])
        # no ":" in the title -> fall back to the full title
        if not title_machine:
            title_machine = cls._title_machine
        if not title_gpu:
            title_gpu = cls._title_gpu
        # NOTE(review): a list comprehension cannot raise ValueError — this guard
        # looks vestigial (copied from the remove() branch below); kept as-is.
        try:
            titles = [t for t in titles if not t.startswith(title_machine) and not t.startswith(title_gpu)]
        except ValueError:
            pass
    else:
        # remove() raises ValueError when the title was never reported — that's fine
        try:
            titles.remove(cls._title_machine)
        except ValueError:
            pass
        try:
            titles.remove(cls._title_gpu)
        except ValueError:
            pass
    return titles
def _get_process_used_memory(self) -> int:
    """Return the RSS memory (bytes) used by this process and all of its descendants.

    The result is cached in ``self._last_process_pool["cpu"]`` and refreshed at
    most once per report period, because walking the process tree is slow.
    As a side effect, ``self._last_process_id_list`` is rebuilt with the pid of
    every process visited (later used for per-process GPU memory accounting).
    """
    def mem_usage_children(a_mem_size: int, pr: psutil.Process, parent_mem: psutil.Process = None) -> int:
        # record every visited pid for later per-process GPU memory matching
        self._last_process_id_list.append(pr.pid)
        # add out memory usage
        our_mem = pr.memory_info()
        # count only the RSS this process adds on top of its parent —
        # presumably a fork/copy-on-write sharing heuristic; TODO confirm
        mem_diff = our_mem.rss - parent_mem.rss if parent_mem else our_mem.rss
        a_mem_size += mem_diff if mem_diff > 0 else 0
        # now we are the parent
        for child in pr.children():
            # get the current memory
            # NOTE(review): this re-queries the parent (pr), not the child; the child's
            # own usage is added by the recursive call below — verify this is intentional.
            m = pr.memory_info()
            mem_diff = m.rss - our_mem.rss
            a_mem_size += mem_diff if mem_diff > 0 else 0
            a_mem_size = mem_usage_children(a_mem_size, child, parent_mem=m)
        return a_mem_size

    # only run the memory usage query once per reporting period
    # because this memory query is relatively slow, and changes very little.
    if self._last_process_pool.get("cpu") and (time() - self._last_process_pool["cpu"][0]) < self._report_frequency:
        return self._last_process_pool["cpu"][1]

    # if we have no parent process, return 0 (it's an error)
    if not self._process_info:
        return 0

    self._last_process_id_list = []
    mem_size = mem_usage_children(0, self._process_info)
    self._last_process_pool["cpu"] = time(), mem_size
    return mem_size
def _skip_nonactive_gpu(self, gpu: Any) -> bool:
if not self._active_gpus:
return False
# noinspection PyBroadException
try:
uuid = getattr(gpu, "uuid", None)
mig_uuid = getattr(gpu, "mig_uuid", None)
return (
str(gpu.index) not in self._active_gpus
and (not uuid or uuid not in self._active_gpus)
and (not mig_uuid or mig_uuid not in self._active_gpus)
)
except Exception:
pass
return False
def _get_gpu_stats(self) -> dict:
    """Collect per-GPU statistics (temperature, utilization, memory, power).

    Keys have the form ``gpu_<idx>_<metric>``. When per-process GPU memory
    accounting is enabled, memory usage is restricted to the pids collected by
    ``_get_process_used_memory`` and cached once per report period in
    ``self._last_process_pool["gpu"]``.
    """
    if not self._gpustat:
        return {}

    # per process memory query is slow, so we only call it once per reporting period,
    # On the rest of the samples we return the previous memory measurement

    # update mem used by our process and sub processes
    if (
        self._gpu_memory_per_process
        and self._process_info
        and (
            not self._last_process_pool.get("gpu")
            or (time() - self._last_process_pool["gpu"][0]) >= self._report_frequency
        )
    ):
        gpu_mem = {}
        # noinspection PyBroadException
        try:
            gpu_stat = self._gpustat.new_query(per_process_stats=True)
        except Exception:
            # per-process query unsupported/failed -> fall back to the global query
            gpu_stat = self._gpustat.new_query(per_process_stats=False)

        for i, g in enumerate(gpu_stat.gpus):
            # if processes is None, that means we can't query GPU memory usage per process, so we can stop
            if g.processes is None:
                self._gpu_memory_per_process = False
                break
            # only monitor the active gpu's, if none were selected, monitor everything
            if self._skip_nonactive_gpu(g):
                continue

            # sum GPU memory used by our process tree on this GPU
            gpu_mem[i] = 0
            for p in g.processes:
                if p is not None and p["pid"] in self._last_process_id_list:
                    gpu_mem[i] += p.get("gpu_memory_usage", 0)

        self._last_process_pool["gpu"] = time(), gpu_mem
    else:
        # if we do no need to update the memory usage, run global query
        # if we have no parent process (backward compatibility), return global stats
        gpu_stat = self._gpustat.new_query(per_process_stats=False)
        gpu_mem = self._last_process_pool["gpu"][1] if self._last_process_pool.get("gpu") else None

    # generate the statistics dict for actual report
    stats = {}
    for i, g in enumerate(gpu_stat.gpus):
        # only monitor the active gpu's, if none were selected, monitor everything
        if self._skip_nonactive_gpu(g):
            continue
        if g.get("temperature.gpu") is not None:
            stats["gpu_%d_temperature" % i] = self._safe_cast(g["temperature.gpu"], float)
        if g.get("utilization.gpu") is not None:
            stats["gpu_%d_utilization" % i] = self._safe_cast(g["utilization.gpu"], float)
        else:
            # utilization unavailable (e.g. MIG) -> push the configured fixed value, warn once
            stats["gpu_%d_utilization" % i] = self._default_gpu_utilization
            if not self._gpu_utilization_warning_sent:
                if g.mig_index is not None:
                    self._task.get_logger().report_text(
                        "Running inside MIG, Nvidia driver cannot export utilization, pushing fixed value {}".format(
                            # noqa
                            self._default_gpu_utilization
                        )
                    )
                else:
                    self._task.get_logger().report_text(
                        "Nvidia driver cannot export utilization, pushing fixed value {}".format(
                            self._default_gpu_utilization
                        )
                    )
                self._gpu_utilization_warning_sent = True
        if g.get("memory.used") is not None:
            # use previously sampled process gpu memory, or global if it does not exist
            stats["gpu_%d_mem_used_gb" % i] = self._safe_cast(gpu_mem[i] if gpu_mem and i in gpu_mem else g["memory.used"], float) / 1024
        if g.get("memory.total") is not None:
            if self._safe_cast(g["memory.total"], float) != 0.0:
                stats["gpu_%d_mem_usage" % i] = 100.0 * self._safe_cast(g["memory.used"], float) / self._safe_cast(g["memory.total"], float, default=1.0)
            # already in MBs
            stats["gpu_%d_mem_free_gb" % i] = (self._safe_cast(g["memory.total"], float) - self._safe_cast(g["memory.used"], float)) / 1024
        if g.get("power.draw") is not None:
            # power draw in Watts
            stats["gpu_%d_power_draw" % i] = self._safe_cast(g["power.draw"], int, default=0)
    return stats
def _get_machine_specs(self) -> dict:
    """Collect a one-off description of the host (OS, CPU, memory, GPUs).

    Returns an empty dict when anything goes wrong while querying the system.
    """
    machine_info = {}
    # noinspection PyBroadException
    try:
        machine_info = {
            "platform": str(sys.platform),
            "python_version": str(platform.python_version()),
            "python_exec": str(sys.executable),
            "OS": str(platform.platform(aliased=True)),
            "processor": str(platform.machine()),
            "cpu_cores": int(psutil.cpu_count()),
            "memory_gb": round(psutil.virtual_memory().total / 1024**3, 1),
            "hostname": str(platform.node()),
            "gpu_count": 0,
        }
        if self._gpustat:
            query = self._gpustat.new_query(shutdown=True, get_driver_info=True)
            if query.gpus:
                # only describe the GPUs we actually monitor
                monitored = [g for g in query.gpus if not self._skip_nonactive_gpu(g)]
                machine_info.update(
                    gpu_count=int(len(monitored)),
                    gpu_type=", ".join(g.name for g in monitored),
                    gpu_memory=", ".join("{}GB".format(round(g.memory_total / 1024.0)) for g in monitored),
                    gpu_driver_version=query.driver_version or "",
                    gpu_driver_cuda_version=query.driver_cuda_version or "",
                )
    except Exception:
        pass
    return machine_info
def _safe_cast(self, value, type_=float, default=0.0):
try:
return type_(value)
except (ValueError, TypeError) as e:
if self._debug_mode:
print("Failed casting {} to {}: {}".format(value, type_, e))
return default
@property
def resource_monitor_instances(self):
    """The registered resource-monitor instances (``self._resource_monitor_instances``).

    Fix: the original annotated this property ``-> None`` although it returns a
    value; the incorrect annotation was removed (runtime behavior unchanged).
    """
    return self._resource_monitor_instances
| ResourceMonitor |
python | PrefectHQ__prefect | src/prefect/server/events/clients.py | {
"start": 6378,
"end": 7327
} | class ____(EventsClient):
_publisher: messaging.EventPublisher
async def __aenter__(self) -> Self:
publisher = messaging.create_event_publisher()
self._publisher = await publisher.__aenter__()
return self
async def __aexit__(
self,
exc_type: Optional[Type[Exception]],
exc_val: Optional[Exception],
exc_tb: Optional[TracebackType],
) -> None:
await self._publisher.__aexit__(exc_type, exc_val, exc_tb)
del self._publisher
return None
async def emit(self, event: Event) -> ReceivedEvent:
if not hasattr(self, "_publisher"):
raise TypeError(
"Events may only be emitted while this client is being used as a "
"context manager"
)
received_event = event.receive()
await self._publisher.publish_event(received_event)
return received_event
| PrefectServerEventsClient |
python | getsentry__sentry | tests/sentry/seer/explorer/test_index_data.py | {
"start": 6433,
"end": 31814
} | class ____(APITransactionTestCase, SnubaTestCase, SpanTestCase):
def setUp(self) -> None:
super().setUp()
self.ten_mins_ago = before_now(minutes=10)
def test_get_profiles_for_trace(self) -> None:
"""Test the full end-to-end happy path for get_profiles_for_trace."""
trace_id = "a" * 32 # Valid 32-char hex trace ID
profile1_id = uuid.uuid4().hex # Transaction profile
profile2_id = uuid.uuid4().hex # Transaction profile
profiler_id = uuid.uuid4().hex # Continuous profile
thread_id = "12345"
# Create span with transaction profile (profile_id)
span1 = self.create_span(
{
"trace_id": trace_id,
"description": "GET /api/users/profile",
"sentry_tags": {"transaction": "api/users/profile", "op": "http.server"},
"is_segment": True,
},
start_ts=self.ten_mins_ago,
)
span1.update({"profile_id": profile1_id})
# Create span with transaction profile (profile_id)
span2 = self.create_span(
{
"trace_id": trace_id,
"parent_span_id": span1["span_id"],
"description": "SELECT * FROM users",
"sentry_tags": {"transaction": "api/users/profile", "op": "db.query"},
"is_segment": False,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=10),
)
span2.update({"profile_id": profile2_id})
# Create span with no profile data (should be ignored by query constraint)
span3 = self.create_span(
{
"trace_id": trace_id,
"parent_span_id": span1["span_id"],
"description": "No profile span",
"sentry_tags": {"transaction": "api/users/profile", "op": "other"},
"is_segment": False,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=20),
)
# Remove any default profile data from span3
if "profile_id" in span3:
del span3["profile_id"]
if "profiler_id" in span3:
del span3["profiler_id"]
if "thread_id" in span3:
del span3["thread_id"]
# Create span with continuous profile (profiler_id + thread_id)
span4 = self.create_span(
{
"trace_id": trace_id,
"parent_span_id": span1["span_id"],
"description": "Continuous profile span",
"sentry_tags": {
"transaction": "api/users/profile",
"op": "continuous",
},
"is_segment": False,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=30),
)
# Remove any default profile_id and set continuous profile fields
if "profile_id" in span4:
del span4["profile_id"]
span4.update(
{
"profiler_id": profiler_id,
"thread_id": thread_id,
# Set in sentry_tags as well for proper field mapping
"sentry_tags": {
**span4.get("sentry_tags", {}),
"profiler_id": profiler_id,
"thread.id": thread_id,
},
}
)
self.store_spans([span1, span2, span3, span4], is_eap=True)
with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service:
# Mock profile service responses for both transaction and continuous profiles
def mock_service_response(method, path, *args, **kwargs):
if f"profiles/{profile1_id}" in path:
response = mock.Mock()
response.status = 200
response.data = orjson.dumps(
{
"profile": {
"frames": [
{
"function": "main",
"module": "app",
"filename": "main.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 1000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
)
return response
elif f"profiles/{profile2_id}" in path:
response = mock.Mock()
response.status = 200
response.data = orjson.dumps(
{
"profile": {
"frames": [
{
"function": "query",
"module": "db",
"filename": "db.py",
"lineno": 20,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 2000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
)
return response
elif "/chunks" in path:
response = mock.Mock()
response.status = 200
response.data = orjson.dumps(
{
"chunk": {
"profile": {
"frames": [
{
"function": "continuous_func",
"module": "profiler",
"filename": "profiler.py",
"lineno": 30,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 3000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
}
)
return response
else:
# Return 404 for unexpected calls
response = mock.Mock()
response.status = 404
return response
mock_service.side_effect = mock_service_response
# Call the function
result = get_profiles_for_trace(trace_id, self.project.id)
# Verify the result structure
assert result is not None
assert result.trace_id == trace_id
assert result.project_id == self.project.id
# Should find 3 spans with profile data (span3 filtered out by query constraint)
assert len(result.profiles) == 3
# Verify profiles are properly processed
profile_ids_found = [p.profile_id for p in result.profiles]
assert profile1_id in profile_ids_found
assert profile2_id in profile_ids_found
assert profiler_id in profile_ids_found
# Verify correct service calls were made
assert mock_service.call_count == 3
# Check transaction profile calls use /profiles/ endpoint
mock_service.assert_any_call(
"GET",
f"/organizations/{self.organization.id}/projects/{self.project.id}/profiles/{profile1_id}",
params={"format": "sample"},
)
mock_service.assert_any_call(
"GET",
f"/organizations/{self.organization.id}/projects/{self.project.id}/profiles/{profile2_id}",
params={"format": "sample"},
)
# Check continuous profile call uses /chunks/ endpoint
mock_service.assert_any_call(
method="POST",
path=f"/organizations/{self.organization.id}/projects/{self.project.id}/chunks",
json_data=mock.ANY,
)
def test_get_profiles_for_trace_aggregates_duplicate_profiles(self) -> None:
"""Test that aggregation query returns one row per unique profile_id."""
trace_id = "b" * 32 # Valid 32-char hex trace ID
profile_id = uuid.uuid4().hex # Same profile ID for multiple spans
transaction_name1 = "api/duplicate/test"
transaction_name2 = "api/different/transaction"
# Create multiple spans with the same profile_id but different transactions
# The aggregation query should group these and return just one row
span1 = self.create_span(
{
"trace_id": trace_id,
"description": "First span with profile",
"sentry_tags": {"transaction": transaction_name1, "op": "http.server"},
"is_segment": True,
},
start_ts=self.ten_mins_ago,
)
span1.update({"profile_id": profile_id})
span2 = self.create_span(
{
"trace_id": trace_id,
"description": "Second span with same profile",
"sentry_tags": {"transaction": transaction_name1, "op": "db.query"},
"is_segment": False,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=100),
)
span2.update({"profile_id": profile_id})
span3 = self.create_span(
{
"trace_id": trace_id,
"description": "Third span with same profile",
"sentry_tags": {"transaction": transaction_name2, "op": "cache.get"},
"is_segment": False,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=200),
)
span3.update({"profile_id": profile_id})
# Create a span with different profile_id (should be separate row in aggregation)
different_profile_id = uuid.uuid4().hex
span4 = self.create_span(
{
"trace_id": trace_id,
"description": "Different profile span",
"sentry_tags": {"transaction": transaction_name1, "op": "http.server"},
"is_segment": True,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=300),
)
span4.update({"profile_id": different_profile_id})
self.store_spans([span1, span2, span3, span4], is_eap=True)
# Mock the external profiling service calls
with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service:
# Mock profile service response
def mock_service_response(method, path, *args, **kwargs):
response = mock.Mock()
response.status = 200
response.data = orjson.dumps(
{
"profile": {
"frames": [
{
"function": "aggregated_function",
"module": "app",
"filename": "aggregated.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 1000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
)
return response
mock_service.side_effect = mock_service_response
# Call the function
result = get_profiles_for_trace(trace_id, self.project.id)
# Verify the result structure
assert result is not None
assert result.trace_id == trace_id
assert result.project_id == self.project.id
# Aggregation query should return 2 rows: one per unique profile_id
assert len(result.profiles) == 2
# Verify each unique profile_id appears exactly once
profile_ids_found = [p.profile_id for p in result.profiles]
assert profile_id in profile_ids_found
assert different_profile_id in profile_ids_found
assert profile_ids_found.count(profile_id) == 1
assert profile_ids_found.count(different_profile_id) == 1
# Verify that the profile service was called only twice (once per unique profile_id)
assert mock_service.call_count == 2
# Verify all profiles are transaction profiles (not continuous)
for profile in result.profiles:
assert profile.is_continuous is False
def test_get_profiles_for_trace_aggregates_continuous_profiles(self) -> None:
"""Test that aggregation query returns one row per unique profiler_id for continuous profiles."""
trace_id = "c" * 32 # Valid 32-char hex trace ID
profiler_id = uuid.uuid4().hex # Same profiler ID for multiple spans
thread_id = "67890"
transaction_name1 = "api/continuous/test"
transaction_name2 = "api/different/continuous"
# Create multiple spans with the same profiler_id but different transactions
# The aggregation query should group these and return just one row
spans = []
for i in range(3):
# Alternate between transaction names to test aggregation across transactions
transaction_name = transaction_name1 if i % 2 == 0 else transaction_name2
span = self.create_span(
{
"trace_id": trace_id,
"description": f"Continuous span {i + 1}",
"sentry_tags": {
"transaction": transaction_name,
"op": f"continuous.{i + 1}",
"profiler_id": profiler_id,
"thread.id": thread_id,
},
"is_segment": i == 0, # First span is transaction
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=i * 100),
)
# Remove any default profile_id and set continuous profile fields
if "profile_id" in span:
del span["profile_id"]
span.update(
{
"profiler_id": profiler_id,
"thread_id": thread_id,
}
)
spans.append(span)
# Create a continuous profile span with different profiler_id (should be separate row)
different_profiler_id = uuid.uuid4().hex
span_different = self.create_span(
{
"trace_id": trace_id,
"description": "Different profiler continuous span",
"sentry_tags": {
"transaction": transaction_name1,
"op": "continuous.different",
"profiler_id": different_profiler_id,
"thread.id": thread_id,
},
"is_segment": True,
},
start_ts=self.ten_mins_ago + timedelta(milliseconds=400),
)
if "profile_id" in span_different:
del span_different["profile_id"]
span_different.update(
{
"profiler_id": different_profiler_id,
"thread_id": thread_id,
}
)
spans.append(span_different)
self.store_spans(spans, is_eap=True)
# Mock the external profiling service calls
with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service:
# Mock profile service response for continuous profiles (/chunks endpoint)
def mock_service_response(method, path, *args, **kwargs):
response = mock.Mock()
response.status = 200
response.data = orjson.dumps(
{
"chunk": {
"profile": {
"frames": [
{
"function": "continuous_aggregated_function",
"module": "profiler",
"filename": "continuous.py",
"lineno": 15,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 1000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
}
)
return response
mock_service.side_effect = mock_service_response
# Call the function
result = get_profiles_for_trace(trace_id, self.project.id)
# Verify the result structure
assert result is not None
assert result.trace_id == trace_id
assert result.project_id == self.project.id
# Aggregation query should return 2 rows: one per unique profiler_id
assert len(result.profiles) == 2
# Verify each unique profiler_id appears exactly once
profile_ids_found = [p.profile_id for p in result.profiles]
assert profiler_id in profile_ids_found
assert different_profiler_id in profile_ids_found
assert profile_ids_found.count(profiler_id) == 1
assert profile_ids_found.count(different_profiler_id) == 1
# Verify that the profile service was called only twice (once per unique profiler_id)
# Both should use the /chunks endpoint for continuous profiles
assert mock_service.call_count == 2
# Check that all calls used the /chunks endpoint (continuous profiles)
for call in mock_service.call_args_list:
assert call[1]["method"] == "POST"
assert "/chunks" in call[1]["path"]
# Verify all profiles are continuous profiles
for profile in result.profiles:
assert profile.is_continuous is True
def test_get_profiles_for_trace_uses_aggregated_timestamps(self) -> None:
"""Test that aggregation query correctly computes min/max timestamps for each profile."""
trace_id = "d" * 32 # Valid 32-char hex trace ID
profile_id = uuid.uuid4().hex
# Create spans with the same profile_id at different times
# The aggregation should use min(start) and max(end) timestamps
span1_start = self.ten_mins_ago
span1_duration_ms = 50.0 # span1 ends at ten_mins_ago + 50ms
span2_start = self.ten_mins_ago + timedelta(milliseconds=100)
span2_duration_ms = 100.0 # span2 ends at ten_mins_ago + 200ms (latest end)
span3_start = self.ten_mins_ago + timedelta(milliseconds=25)
span3_duration_ms = 50.0 # span3 ends at ten_mins_ago + 75ms (middle)
span1 = self.create_span(
{
"trace_id": trace_id,
"description": "First span (earliest start)",
"sentry_tags": {"transaction": "api/test", "op": "http.server"},
"is_segment": True,
},
start_ts=span1_start,
duration=int(span1_duration_ms),
)
span1.update({"profile_id": profile_id})
span2 = self.create_span(
{
"trace_id": trace_id,
"description": "Second span (latest end)",
"sentry_tags": {"transaction": "api/test", "op": "db.query"},
"is_segment": False,
},
start_ts=span2_start,
duration=int(span2_duration_ms),
)
span2.update({"profile_id": profile_id})
span3 = self.create_span(
{
"trace_id": trace_id,
"description": "Third span (middle)",
"sentry_tags": {"transaction": "api/test", "op": "cache.get"},
"is_segment": False,
},
start_ts=span3_start,
duration=int(span3_duration_ms),
)
span3.update({"profile_id": profile_id})
self.store_spans([span1, span2, span3], is_eap=True)
captured_timestamps = {}
with mock.patch("sentry.seer.explorer.index_data.fetch_profile_data") as mock_fetch:
# Mock to capture the timestamps passed to fetch_profile_data
def capture_and_return(
profile_id, organization_id, project_id, start_ts, end_ts, is_continuous
):
captured_timestamps["start_ts"] = start_ts
captured_timestamps["end_ts"] = end_ts
captured_timestamps["profile_id"] = profile_id
captured_timestamps["is_continuous"] = is_continuous
return {
"profile": {
"frames": [
{
"function": "test_function",
"module": "app",
"filename": "test.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{
"elapsed_since_start_ns": 1000000,
"thread_id": "1",
"stack_id": 0,
}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
mock_fetch.side_effect = capture_and_return
# Call the function
result = get_profiles_for_trace(trace_id, self.project.id)
# Verify result
assert result is not None
assert len(result.profiles) == 1
# Verify fetch_profile_data was called with aggregated timestamps
assert mock_fetch.call_count == 1
# Calculate expected end times based on start + duration
_ = span1_start + timedelta(milliseconds=span1_duration_ms)
span2_end = span2_start + timedelta(milliseconds=span2_duration_ms)
_ = span3_start + timedelta(milliseconds=span3_duration_ms)
# The aggregation should use:
# - min(start_ts) = span1_start (earliest start)
# - max(finish_ts) = span2_end (latest end: ten_mins_ago + 200ms)
assert captured_timestamps["start_ts"] == pytest.approx(span1_start.timestamp())
assert captured_timestamps["end_ts"] == pytest.approx(span2_end.timestamp())
assert captured_timestamps["profile_id"] == profile_id
assert captured_timestamps["is_continuous"] is False
| TestGetProfilesForTrace |
python | dask__distributed | distributed/utils_test.py | {
"start": 67520,
"end": 69179
} | class ____(Worker):
"""A Worker that sets event `in_execute` the first time it enters the execute
method and then does not proceed, thus leaving the task in executing state
indefinitely, until the test sets `block_execute`.
Finally, the worker sets `in_execute_exit` when execute() terminates, but before the
worker state has processed its exit callback. The worker will block one last time
until the test sets `block_execute_exit`.
Note
----
In the vast majority of the test cases, it is simpler and more readable to just
submit to a regular Worker a task that blocks on a distributed.Event:
.. code-block:: python
def f(in_task, block_task):
in_task.set()
block_task.wait()
in_task = distributed.Event()
block_task = distributed.Event()
fut = c.submit(f, in_task, block_task)
await in_task.wait()
await block_task.set()
See also
--------
BlockedGatherDep
BlockedGetData
BarrierGetData
"""
def __init__(self, *args, **kwargs):
self.in_execute = asyncio.Event()
self.block_execute = asyncio.Event()
self.in_execute_exit = asyncio.Event()
self.block_execute_exit = asyncio.Event()
super().__init__(*args, **kwargs)
async def execute(self, key: Key, *, stimulus_id: str) -> StateMachineEvent:
self.in_execute.set()
await self.block_execute.wait()
try:
return await super().execute(key, stimulus_id=stimulus_id)
finally:
self.in_execute_exit.set()
await self.block_execute_exit.wait()
| BlockedExecute |
python | eth-brownie__brownie | brownie/exceptions.py | {
"start": 6678,
"end": 6753
} | class ____(BrownieEnvironmentWarning):
pass
@final
| InvalidArgumentWarning |
python | pyca__cryptography | tests/hazmat/primitives/test_block.py | {
"start": 5819,
"end": 6699
} | class ____:
def test_cbc(self):
with pytest.raises(TypeError):
modes.CBC([1] * 16) # type:ignore[arg-type]
def test_cfb(self):
with pytest.raises(TypeError):
CFB([1] * 16) # type:ignore[arg-type]
def test_cfb8(self):
with pytest.raises(TypeError):
CFB8([1] * 16) # type:ignore[arg-type]
def test_ofb(self):
with pytest.raises(TypeError):
OFB([1] * 16) # type:ignore[arg-type]
def test_ctr(self):
with pytest.raises(TypeError):
modes.CTR([1] * 16) # type:ignore[arg-type]
def test_gcm_iv(self):
with pytest.raises(TypeError):
modes.GCM([1] * 16) # type:ignore[arg-type]
def test_gcm_tag(self):
with pytest.raises(TypeError):
modes.GCM(b"\x00" * 16, [1] * 16) # type:ignore[arg-type]
| TestModesRequireBytes |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 156097,
"end": 169144
} | class ____(rv_continuous):
r"""A Generalized Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `geninvgauss` is:
.. math::
f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
where ``x > 0``, `p` is a real number and ``b > 0``\([1]_).
:math:`K_p` is the modified Bessel function of second kind of order `p`
(`scipy.special.kv`).
%(after_notes)s
The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
`geninvgauss` with ``p = -1/2``, ``b = 1 / mu`` and ``scale = mu``.
Generating random variates is challenging for this distribution. The
implementation is based on [2]_.
References
----------
.. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
models for the generalized inverse gaussian distribution",
Stochastic Processes and their Applications 7, pp. 49--54, 1978.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
%(example)s
"""
def _argcheck(self, p, b):
return (p == p) & (b > 0)
def _shape_info(self):
ip = _ShapeInfo("p", False, (-np.inf, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ip, ib]
def _logpdf(self, x, p, b):
# kve instead of kv works better for large values of b
# warn if kve produces infinite values and replace by nan
# otherwise c = -inf and the results are often incorrect
def logpdf_single(x, p, b):
return _stats.geninvgauss_logpdf(x, p, b)
logpdf_single = np.vectorize(logpdf_single, otypes=[np.float64])
z = logpdf_single(x, p, b)
if np.isnan(z).any():
msg = ("Infinite values encountered in scipy.special.kve(p, b). "
"Values replaced by NaN to avoid incorrect results.")
warnings.warn(msg, RuntimeWarning, stacklevel=3)
return z
def _pdf(self, x, p, b):
# relying on logpdf avoids overflow of x**(p-1) for large x and p
return np.exp(self._logpdf(x, p, b))
def _cdf(self, x, p, b):
_a, _b = self._get_support(p, b)
def _cdf_single(x, p, b):
user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf',
user_data)
return integrate.quad(llc, _a, x)[0]
_cdf_single = np.vectorize(_cdf_single, otypes=[np.float64])
return _cdf_single(x, p, b)
def _logquasipdf(self, x, p, b):
# log of the quasi-density (w/o normalizing constant) used in _rvs
return xpx.apply_where(x > 0, (x, p, b),
lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
fill_value=-np.inf)
def _rvs(self, p, b, size=None, random_state=None):
# if p and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(p) and np.isscalar(b):
out = self._rvs_scalar(p, b, size, random_state)
elif p.size == 1 and b.size == 1:
out = self._rvs_scalar(p.item(), b.item(), size, random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
p, b = np.broadcast_arrays(p, b)
# p and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting p and b.
# bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(p.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in the
# loop below.
out = np.empty(size)
it = np.nditer([p, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
random_state).reshape(shp)
it.iternext()
if size == ():
out = out.item()
return out
def _rvs_scalar(self, p, b, numsamples, random_state):
# following [2], the quasi-pdf is used instead of the pdf for the
# generation of rvs
invert_res = False
if not numsamples:
numsamples = 1
if p < 0:
# note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
p = -p
invert_res = True
m = self._mode(p, b)
# determine method to be used following [2]
ratio_unif = True
if p >= 1 or b > 1:
# ratio of uniforms with mode shift below
mode_shift = True
elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
# ratio of uniforms without mode shift below
mode_shift = False
else:
# new algorithm in [2]
ratio_unif = False
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
x = np.zeros(N)
simulated = 0
if ratio_unif:
# use ratio of uniforms method
if mode_shift:
a2 = -2 * (p + 1) / b - m
a1 = 2 * m * (p - 1) / b - 1
# find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
p1 = a1 - a2**2 / 3
q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
s1 = -np.sqrt(-4 * p1 / 3)
root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
root2 = -s1 * np.cos(phi / 3) - a2 / 3
# root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
# if g is the quasipdf, rescale: g(x) / g(m) which we can write
# as exp(log(g(x)) - log(g(m))). This is important
# since for large values of p and b, g cannot be evaluated.
# denote the rescaled quasipdf by h
lm = self._logquasipdf(m, p, b)
d1 = self._logquasipdf(root1, p, b) - lm
d2 = self._logquasipdf(root2, p, b) - lm
# compute the bounding rectangle w.r.t. h. Note that
# np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
vmin = (root1 - m) * np.exp(0.5 * d1)
vmax = (root2 - m) * np.exp(0.5 * d2)
umax = 1 # umax = sqrt(h(m)) = 1
def logqpdf(x):
return self._logquasipdf(x, p, b) - lm
c = m
else:
# ratio of uniforms without mode shift
# compute np.sqrt(quasipdf(m))
umax = np.exp(0.5*self._logquasipdf(m, p, b))
xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
vmin = 0
# compute xplus * np.sqrt(quasipdf(xplus))
vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
c = 0
def logqpdf(x):
return self._logquasipdf(x, p, b)
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
i = 1
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u = umax * random_state.uniform(size=k)
v = random_state.uniform(size=k)
v = vmin + (vmax - vmin) * v
rvs = v / u + c
# rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
accept = (2*np.log(u) <= logqpdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated "
f"in {i*N} attempts. Sampling does not appear to "
"work for the provided parameters.")
raise RuntimeError(msg)
i += 1
else:
# use new algorithm in [2]
x0 = b / (1 - p)
xs = np.max((x0, 2 / b))
k1 = np.exp(self._logquasipdf(m, p, b))
A1 = k1 * x0
if x0 < 2 / b:
k2 = np.exp(-b)
if p > 0:
A2 = k2 * ((2 / b)**p - x0**p) / p
else:
A2 = k2 * np.log(2 / b**2)
else:
k2, A2 = 0, 0
k3 = xs**(p - 1)
A3 = 2 * k3 * np.exp(-xs * b / 2) / b
A = A1 + A2 + A3
# [2]: rejection constant is < 2.73; so expected runtime is finite
while simulated < N:
k = N - simulated
h, rvs = np.zeros(k), np.zeros(k)
# simulate uniform rvs on [x1, x2] and [0, y2]
u = random_state.uniform(size=k)
v = A * random_state.uniform(size=k)
cond1 = v <= A1
cond2 = np.logical_not(cond1) & (v <= A1 + A2)
cond3 = np.logical_not(cond1 | cond2)
# subdomain (0, x0)
rvs[cond1] = x0 * v[cond1] / A1
h[cond1] = k1
# subdomain (x0, 2 / b)
if p > 0:
rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
else:
rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
h[cond2] = k2 * rvs[cond2]**(p - 1)
# subdomain (xs, infinity)
z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
rvs[cond3] = -2 / b * np.log(z)
h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
# apply rejection method
accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
num_accept = sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
rvs = np.reshape(x, size1d)
if invert_res:
rvs = 1 / rvs
return rvs
def _mode(self, p, b):
# distinguish cases to avoid catastrophic cancellation (see [2])
if p < 1:
return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
else:
return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
def _munp(self, n, p, b):
num = sc.kve(p + n, b)
denom = sc.kve(p, b)
inf_vals = np.isinf(num) | np.isinf(denom)
if inf_vals.any():
msg = ("Infinite values encountered in the moment calculation "
"involving scipy.special.kve. Values replaced by NaN to "
"avoid incorrect results.")
warnings.warn(msg, RuntimeWarning, stacklevel=3)
m = np.full_like(num, np.nan, dtype=np.float64)
m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
else:
m = num / denom
return m
geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
| geninvgauss_gen |
python | great-expectations__great_expectations | great_expectations/expectations/validation_handlers.py | {
"start": 91,
"end": 227
} | class ____:
def column_map_expectation(self) -> None:
logger.debug("MetaPandasDataset.column_map_expectation")
| MetaPandasDataset |
python | streamlit__streamlit | lib/tests/streamlit/elements/color_picker_test.py | {
"start": 1041,
"end": 9393
} | class ____(DeltaGeneratorTestCase):
def test_just_label(self):
"""Test that it can be called with no value."""
st.color_picker("the label")
c = self.get_delta_from_queue().new_element.color_picker
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert c.default == "#000000"
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.color_picker("the label", disabled=True)
c = self.get_delta_from_queue().new_element.color_picker
assert c.disabled
@parameterized.expand([("#333333", "#333333"), ("#333", "#333"), (None, "#000000")])
def test_value_types(self, arg_value, proto_value):
"""Test that it supports different types of values."""
st.color_picker("the label", arg_value)
c = self.get_delta_from_queue().new_element.color_picker
assert c.label == "the label"
assert c.default == proto_value
def test_invalid_value_type_error(self):
"""Tests that when the value type is invalid, an exception is generated"""
with pytest.raises(StreamlitAPIException):
st.color_picker("the label", 1234567)
def test_invalid_string(self):
"""Tests that when the string doesn't match regex, an exception is generated"""
with pytest.raises(StreamlitAPIException):
st.color_picker("the label", "#invalid-string")
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
st.color_picker("foo")
proto = self.get_delta_from_queue().new_element.color_picker
assert proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("form"):
st.color_picker("foo")
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
color_picker_proto = self.get_delta_from_queue(1).new_element.color_picker
assert color_picker_proto.form_id == form_proto.form.form_id
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.color_picker("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.color_picker
assert c.label == "the label"
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.color_picker("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.color_picker("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_color_picker_with_width(self):
"""Test st.color_picker with different width types."""
test_cases = [
(500, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 500),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for index, (
width_value,
expected_width_spec,
field_name,
field_value,
) in enumerate(test_cases):
with self.subTest(width_value=width_value):
st.color_picker(f"test label {index}", width=width_value)
el = self.get_delta_from_queue().new_element
assert el.color_picker.label == f"test label {index}"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
def test_color_picker_with_invalid_width(self):
"""Test st.color_picker with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
100.5,
"Invalid width value: 100.5. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.color_picker("test label", width=width_value)
assert str(exc.value) == expected_error_message
def test_color_picker_default_width(self):
"""Test that st.color_picker defaults to content width."""
st.color_picker("test label")
el = self.get_delta_from_queue().new_element
assert el.color_picker.label == "test label"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_color_picker_enforces_minimum_width(self):
"""Test that st.color_picker enforces minimum width of 40px."""
test_cases = [
(10, 40), # Below minimum -> enforced to 40
(40, 40), # Exactly minimum -> stays 40
(100, 100), # Above minimum -> stays as specified
]
for specified_width, expected_width in test_cases:
with self.subTest(specified_width=specified_width):
st.color_picker(f"test label {specified_width}", width=specified_width)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == expected_width
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params
st.color_picker(
label="Label 1",
key="color_picker_key",
value="#112233",
help="Help 1",
disabled=False,
width="content",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
)
c1 = self.get_delta_from_queue().new_element.color_picker
id1 = c1.id
# Second render with different params but same key
st.color_picker(
label="Label 2",
key="color_picker_key",
value="#abcdef",
help="Help 2",
disabled=True,
width="stretch",
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
)
c2 = self.get_delta_from_queue().new_element.color_picker
id2 = c2.id
assert id1 == id2
| ColorPickerTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 30628,
"end": 32131
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook")
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogRenameTagTemplateFieldOperator(
task_id="task_id",
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.rename_tag_template_field.assert_called_once_with(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudDataCatalogRenameTagTemplateFieldOperator |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 17002,
"end": 17280
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
destination: Optional[str] = Field(
None, description="DBFS destination. Example: `dbfs:/my/path`"
)
| DbfsStorageInfo |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/pipes/message_readers.py | {
"start": 1609,
"end": 3014
} | class ____(PipesChunkedLogReader):
def __init__(
self,
*,
bucket: str,
key: str,
client: Optional["S3Client"] = None,
interval: float = 10,
target_stream: Optional[IO[str]] = None,
# TODO: maybe move this parameter to a different scope
decode_fn: Optional[Callable[[bytes], str]] = None,
debug_info: Optional[str] = None,
):
self.bucket = bucket
self.key = key
self.client: S3Client = client or boto3.client("s3")
self.decode_fn = decode_fn or default_log_decode_fn
self.log_position = 0
super().__init__(
interval=interval, target_stream=target_stream or sys.stdout, debug_info=debug_info
)
@property
def name(self) -> str:
return f"PipesS3LogReader(s3://{os.path.join(self.bucket, self.key)})"
def target_is_readable(self, params: PipesParams) -> bool:
return _can_read_from_s3(
client=self.client,
bucket=self.bucket,
key=self.key,
)
def download_log_chunk(self, params: PipesParams) -> Optional[str]:
text = self.decode_fn(
self.client.get_object(Bucket=self.bucket, Key=self.key)["Body"].read()
)
current_position = self.log_position
self.log_position += len(text)
return text[current_position:]
| PipesS3LogReader |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 49247,
"end": 53811
} | class ____(AggregateFnV2):
def _require_datasketches(self):
try:
from datasketches import kll_floats_sketch # type: ignore[import]
except ImportError as exc:
raise ImportError(
"ApproximateQuantile requires the `datasketches` package. "
"Install it with `pip install datasketches`."
) from exc
return kll_floats_sketch
def __init__(
self,
on: str,
quantiles: List[float],
quantile_precision: int = 800,
alias_name: Optional[str] = None,
):
"""
Computes the approximate quantiles of a column by using a datasketches kll_floats_sketch.
https://datasketches.apache.org/docs/KLL/KLLSketch.html
The accuracy of the KLL quantile sketch is a function of the configured quantile precision, which also affects
the overall size of the sketch.
The KLL Sketch has absolute error. For example, a specified rank accuracy of 1% at the
median (rank = 0.50) means that the true quantile (if you could extract it from the set)
should be between getQuantile(0.49) and getQuantile(0.51). This same 1% error applied at a
rank of 0.95 means that the true quantile should be between getQuantile(0.94) and getQuantile(0.96).
In other words, the error is a fixed +/- epsilon for the entire range of ranks.
Typical single-sided rank error by quantile_precision (use for getQuantile/getRank):
- quantile_precision=100 → ~2.61%
- quantile_precision=200 → ~1.33%
- quantile_precision=400 → ~0.68%
- quantile_precision=800 → ~0.35%
See https://datasketches.apache.org/docs/KLL/KLLAccuracyAndSize.html for details on accuracy and size.
Null values in the target column are ignored when constructing the sketch.
Example:
.. testcode::
import ray
from ray.data.aggregate import ApproximateQuantile
# Create a dataset with some values
ds = ray.data.from_items(
[{"value": 20.0}, {"value": 40.0}, {"value": 60.0},
{"value": 80.0}, {"value": 100.0}]
)
result = ds.aggregate(ApproximateQuantile(on="value", quantiles=[0.1, 0.5, 0.9]))
# Result: {'approx_quantile(value)': [20.0, 60.0, 100.0]}
Args:
on: The name of the column to calculate the quantile on. Must be a numeric column.
quantiles: The list of quantiles to compute. Must be between 0 and 1 inclusive. For example, quantiles=[0.5] computes the median. Null entries in the source column are skipped.
quantile_precision: Controls the accuracy and memory footprint of the sketch (K in KLL); higher values yield lower error but use more memory. Defaults to 800. See https://datasketches.apache.org/docs/KLL/KLLAccuracyAndSize.html for details on accuracy and size.
alias_name: Optional name for the resulting column. If not provided, defaults to "approx_quantile({column_name})".
"""
self._sketch_cls = self._require_datasketches()
self._quantiles = quantiles
self._quantile_precision = quantile_precision
super().__init__(
alias_name if alias_name else f"approx_quantile({str(on)})",
on=on,
ignore_nulls=True,
zero_factory=lambda: self.zero(quantile_precision).serialize(),
)
def zero(self, quantile_precision: int):
return self._sketch_cls(k=quantile_precision)
def aggregate_block(self, block: Block) -> bytes:
block_acc = BlockAccessor.for_block(block)
table = block_acc.to_arrow()
column = table.column(self.get_target_column())
sketch = self.zero(self._quantile_precision)
for value in column:
# we ignore nulls here
if value.as_py() is not None:
sketch.update(float(value.as_py()))
return sketch.serialize()
def combine(self, current_accumulator: bytes, new: bytes) -> bytes:
combined = self.zero(self._quantile_precision)
combined.merge(self._sketch_cls.deserialize(current_accumulator))
combined.merge(self._sketch_cls.deserialize(new))
return combined.serialize()
def finalize(self, accumulator: bytes) -> List[float]:
return self._sketch_cls.deserialize(accumulator).get_quantiles(self._quantiles)
@PublicAPI(stability="alpha")
| ApproximateQuantile |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/codeact_agent.py | {
"start": 2771,
"end": 15021
} | class ____(BaseWorkflowAgent):
"""
A workflow agent that can execute code.
"""
scratchpad_key: str = "scratchpad"
code_execute_fn: Union[Callable, Awaitable] = Field(
description=(
"The function to execute code. Required in order to execute code generated by the agent.\n"
"The function protocol is as follows: async def code_execute_fn(code: str) -> Dict[str, Any]"
),
)
code_act_system_prompt: Union[str, BasePromptTemplate] = Field(
default=DEFAULT_CODE_ACT_PROMPT,
description="The system prompt for the code act agent.",
validate_default=True,
)
def __init__(
self,
code_execute_fn: Union[Callable, Awaitable],
name: str = "code_act_agent",
description: str = "A workflow agent that can execute code.",
system_prompt: Optional[str] = None,
tools: Optional[List[Union[BaseTool, Callable]]] = None,
tool_retriever: Optional[ObjectRetriever] = None,
can_handoff_to: Optional[List[str]] = None,
llm: Optional[LLM] = None,
code_act_system_prompt: Union[
str, BasePromptTemplate
] = DEFAULT_CODE_ACT_PROMPT,
streaming: bool = True,
):
tools = tools or []
tools.append( # type: ignore
FunctionTool.from_defaults(code_execute_fn, name=EXECUTE_TOOL_NAME) # type: ignore
)
if isinstance(code_act_system_prompt, str):
if system_prompt:
code_act_system_prompt += "\n" + system_prompt
code_act_system_prompt = PromptTemplate(code_act_system_prompt)
elif isinstance(code_act_system_prompt, BasePromptTemplate):
if system_prompt:
code_act_system_str = code_act_system_prompt.get_template()
code_act_system_str += "\n" + system_prompt
code_act_system_prompt = PromptTemplate(code_act_system_str)
super().__init__(
name=name,
description=description,
system_prompt=system_prompt,
tools=tools,
tool_retriever=tool_retriever,
can_handoff_to=can_handoff_to,
llm=llm,
code_act_system_prompt=code_act_system_prompt,
code_execute_fn=code_execute_fn,
streaming=streaming,
)
def _get_tool_fns(self, tools: Sequence[BaseTool]) -> List[Callable]:
"""Get the tool functions while validating that they are valid tools for the CodeActAgent."""
callables = []
for tool in tools:
if (
tool.metadata.name == "handoff"
or tool.metadata.name == EXECUTE_TOOL_NAME
):
continue
if isinstance(tool, FunctionTool):
if tool.requires_context:
raise ValueError(
f"Tool {tool.metadata.name} requires context. "
"CodeActAgent only supports tools that do not require context."
)
callables.append(tool.real_fn)
else:
raise ValueError(
f"Tool {tool.metadata.name} is not a FunctionTool. "
"CodeActAgent only supports Functions and FunctionTools."
)
return callables
def _extract_code_from_response(self, response_text: str) -> Optional[str]:
"""
Extract code from the LLM response using XML-style <execute> tags.
Args:
response_text: The LLM response text
Returns:
Extracted code or None if no code found
"""
# Match content between <execute> and </execute> tags
execute_pattern = r"<execute>(.*?)</execute>"
execute_matches = re.findall(execute_pattern, response_text, re.DOTALL)
if execute_matches:
return "\n\n".join([x.strip() for x in execute_matches])
return None
def _get_tool_descriptions(self, tools: Sequence[BaseTool]) -> str:
"""
Generate tool descriptions for the system prompt using tool metadata.
Args:
tools: List of available tools
Returns:
Tool descriptions as a string
"""
tool_descriptions = []
tool_fns = self._get_tool_fns(tools)
for fn in tool_fns:
signature = inspect.signature(fn)
fn_name: str = fn.__name__
docstring: Optional[str] = inspect.getdoc(fn)
tool_description = f"def {fn_name}{signature!s}:"
if docstring:
tool_description += f'\n """\n{docstring}\n """\n'
tool_description += "\n ...\n"
tool_descriptions.append(tool_description)
return "\n\n".join(tool_descriptions)
async def _get_response(
self, current_llm_input: List[ChatMessage], tools: Sequence[BaseTool]
) -> ChatResponse:
if any(tool.metadata.name == "handoff" for tool in tools):
if not isinstance(self.llm, FunctionCallingLLM):
raise ValueError("llm must be a function calling LLM to use handoff")
tools = [tool for tool in tools if tool.metadata.name == "handoff"]
return await self.llm.achat_with_tools(
tools=tools, chat_history=current_llm_input
)
else:
return await self.llm.achat(current_llm_input)
async def _get_streaming_response(
self,
ctx: Context,
current_llm_input: List[ChatMessage],
tools: Sequence[BaseTool],
) -> Tuple[ChatResponse, str]:
if any(tool.metadata.name == "handoff" for tool in tools):
if not isinstance(self.llm, FunctionCallingLLM):
raise ValueError("llm must be a function calling LLM to use handoff")
tools = [tool for tool in tools if tool.metadata.name == "handoff"]
response = await self.llm.astream_chat_with_tools(
tools=tools, chat_history=current_llm_input
)
else:
response = await self.llm.astream_chat(current_llm_input)
last_chat_response = ChatResponse(message=ChatMessage())
full_response_text = ""
# Process streaming response
async for last_chat_response in response:
delta = last_chat_response.delta or ""
full_response_text += delta
# Create a raw object for the event stream
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
# Write delta to the event stream
ctx.write_event_to_stream(
AgentStream(
delta=delta,
response=full_response_text,
# We'll add the tool call after processing the full response
tool_calls=[],
raw=raw,
current_agent_name=self.name,
thinking_delta=last_chat_response.additional_kwargs.get(
"thinking_delta", None
),
)
)
return last_chat_response, full_response_text
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[BaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the code act agent."""
if not self.code_execute_fn:
raise ValueError("code_execute_fn must be provided for CodeActAgent")
# Get current scratchpad
scratchpad: List[ChatMessage] = await ctx.store.get(
self.scratchpad_key, default=[]
)
current_llm_input = [*llm_input, *scratchpad]
# Create a system message with tool descriptions
tool_descriptions = self._get_tool_descriptions(tools)
system_prompt = self.code_act_system_prompt.format(
tool_descriptions=tool_descriptions
)
# Add or overwrite system message
has_system = False
for i, msg in enumerate(current_llm_input):
if msg.role.value == "system":
current_llm_input[i] = ChatMessage(role="system", content=system_prompt)
has_system = True
break
if not has_system:
current_llm_input.insert(
0, ChatMessage(role="system", content=system_prompt)
)
# Write the input to the event stream
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
if self.streaming:
chat_response, full_response_text = await self._get_streaming_response(
ctx, current_llm_input, tools
)
else:
chat_response = await self._get_response(current_llm_input, tools)
full_response_text = chat_response.message.content or ""
# Extract code from the response
code = self._extract_code_from_response(full_response_text)
# Create a tool call for executing the code if code was found
tool_calls = []
if code:
tool_id = str(uuid.uuid4())
tool_calls = [
ToolSelection(
tool_id=tool_id,
tool_name=EXECUTE_TOOL_NAME,
tool_kwargs={"code": code},
)
]
if isinstance(self.llm, FunctionCallingLLM):
extra_tool_calls = self.llm.get_tool_calls_from_response(
chat_response, error_on_no_tool_call=False
)
tool_calls.extend(extra_tool_calls)
# Add the response to the scratchpad
message = ChatMessage(role="assistant", content=full_response_text)
scratchpad.append(message)
await ctx.store.set(self.scratchpad_key, scratchpad)
# Create the raw object for the output
raw = (
chat_response.raw.model_dump()
if isinstance(chat_response.raw, BaseModel)
else chat_response.raw
)
return AgentOutput(
response=message,
tool_calls=tool_calls,
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for code act agent."""
scratchpad: List[ChatMessage] = await ctx.store.get(
self.scratchpad_key, default=[]
)
# handle code execution and handoff
for tool_call_result in results:
# Format the output as a tool response message
if tool_call_result.tool_name == EXECUTE_TOOL_NAME:
code_result = f"Result of executing the code given:\n\n{tool_call_result.tool_output.content}"
scratchpad.append(
ChatMessage(
role="user",
content=code_result,
)
)
elif tool_call_result.tool_name == "handoff":
scratchpad.append(
ChatMessage(
role="tool",
blocks=tool_call_result.tool_output.blocks,
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
else:
raise ValueError(f"Unknown tool name: {tool_call_result.tool_name}")
await ctx.store.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""
Finalize the code act agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.store.get(
self.scratchpad_key, default=[]
)
await memory.aput_messages(scratchpad)
# reset scratchpad
await ctx.store.set(self.scratchpad_key, [])
return output
| CodeActAgent |
python | dagster-io__dagster | docs/sphinx/_ext/dagster-sphinx/dagster_sphinx/docstring_flags.py | {
"start": 4565,
"end": 4970
} | class ____(SphinxDirective):
# Takes two arguments-- the first word is the flag type and the remaining words are the message.
required_arguments = 1
final_argument_whitespace = True
has_content = True
def run(self):
flag_node = flag()
flag_node["flag_type"] = self.arguments[0]
flag_node["message"] = " ".join(self.content)
return [flag_node]
| FlagDirective |
python | kamyu104__LeetCode-Solutions | Python/insert-greatest-common-divisors-in-linked-list.py | {
"start": 43,
"end": 484
} | class ____(object):
def insertGreatestCommonDivisors(self, head):
"""
:type head: Optional[ListNode]
:rtype: Optional[ListNode]
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
curr = head
while curr.next:
curr.next = ListNode(gcd(curr.val, curr.next.val), curr.next)
curr = curr.next.next
return head
| Solution |
python | astropy__astropy | astropy/visualization/tests/test_interval.py | {
"start": 3698,
"end": 3919
} | class ____(TestInterval):
# Make sure intervals work with MaskedArray
data = np.concatenate((np.linspace(-20.0, 60.0, 100), np.full(100, 1e6)))
data = np.ma.MaskedArray(data, data > 1000)
| TestIntervalMaskedArray |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function_test.py | {
"start": 151812,
"end": 167643
} | class ____(test.TestCase, parameterized.TestCase):
def testNestedCallWatchedVariables(self):
v = variables.Variable(4.)
@polymorphic_function.function
def f():
return v**2.
with backprop.GradientTape() as tape:
f()
self.assertEqual((v,), tape.watched_variables())
@polymorphic_function.function
def g():
return f()
with backprop.GradientTape() as tape:
g()
self.assertEqual((v,), tape.watched_variables())
# f() can rely on the variable being read during its trace. g() checks that
# variables from a function which knows about them are recorded on the
# tape. h() tests that functions forward knowledge of variables to callers.
@polymorphic_function.function
def h():
return g()
with backprop.GradientTape() as tape:
h()
self.assertEqual((v,), tape.watched_variables())
def testReplaceCaptureWithDeferred(self):
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = constant_op.constant(3.0)
@polymorphic_function.function
def fn():
a = x + y
b = a + z
return b
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), 6.0)
value = constant_op.constant(4.0)
def closure():
return value
concrete_fn.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[1],
closure,
spec=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32),
placeholder=concrete_fn.inputs[1])
self.assertAllEqual(concrete_fn(), 8.0)
value = constant_op.constant(5.0)
self.assertAllEqual(concrete_fn(), 9.0)
def testRaiseReplaceCaptureWithDeferredTypeSpecMismatch(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@polymorphic_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_lib.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def bool_closure():
return new_bool_captured_tensor
# Test raise if replacing a bool capture with a closure of output type
# float32
new_float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
def float_closure():
return new_float_captured_tensor
with self.assertRaisesRegex(ValueError,
'Attempting to substitute closure with spec*'):
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
float_closure,
spec=tensor_lib.TensorSpec(shape=(1,), dtype=dtypes.float32))
# Test replace without a placeholder
concrete_fn.replace_capture_with_deferred_capture(
bool_captured_tensor,
bool_closure,
spec=tensor_lib.TensorSpec(shape=(), dtype=dtypes.bool))
self.assertAllEqual(concrete_fn(), [5.])
def testConcreteFunctionSetExternalCapture(self):
captured_tensor = constant_op.constant([1.])
value = constant_op.constant([2.])
@polymorphic_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_lib.TensorSpec(shape=(1,), dtype=dtypes.float32))
return deferred_tensor + captured_tensor
cf = fn.get_concrete_function()
self.assertLen(cf._captured_inputs, 2)
self.assertEqual(list(map(callable, cf._captured_inputs)), [False, True])
self.assertAllEqual(cf(), [3.])
# Reset capture to a deferred one, reset deferred capture to a capture.
cf.set_external_captures([cf._captured_inputs[1], cf._captured_inputs[0]])
value = constant_op.constant([3.])
self.assertAllEqual(cf(), [4.])
def testGraphReplaceCaptureAndSetExternalCapture(self):
bool_captured_tensor = constant_op.constant(True)
float_captured_tensor = constant_op.constant([3.], dtype=dtypes.float32)
value = constant_op.constant([2.], dtype=dtypes.float32)
@polymorphic_function.function
def fn():
deferred_tensor = ops.get_default_graph().capture_call_time_value(
lambda: value,
tensor_lib.TensorSpec(shape=(1,), dtype=dtypes.float32))
if bool_captured_tensor:
return deferred_tensor
else:
return deferred_tensor + float_captured_tensor
concrete_fn = fn.get_concrete_function()
self.assertAllEqual(concrete_fn(), [2.])
new_bool_captured_tensor = constant_op.constant(False)
def closure():
return new_bool_captured_tensor
concrete_fn.graph.replace_capture_with_deferred_capture(
concrete_fn.captured_inputs[0],
closure,
spec=tensor_lib.TensorSpec(shape=(), dtype=dtypes.bool),
placeholder=concrete_fn.inputs[1])
concrete_fn.set_external_captures([
closure, concrete_fn._captured_inputs[1],
concrete_fn._captured_inputs[2]
])
self.assertAllEqual(concrete_fn(), [5.])
def testDeferredCapture(self):
value = 1.0
@polymorphic_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_lib.TensorSpec(None))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(lazy_capture(2.0), 4.0)
def testNestedDeferredCapture(self):
value = 1.0
@polymorphic_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_lib.TensorSpec(None))
return x + y
@polymorphic_function.function
def outer(x):
return inner(x)
self.assertAllEqual(outer(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(outer(2.0), 4.0)
def testNestedDeferredCaptureInTFWhileLoop(self):
value = 1.
@polymorphic_function.function
def inner(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_lib.TensorSpec(None))
return x + y
@polymorphic_function.function
def outer():
dummy = constant_op.constant(True)
sums = constant_op.constant(0.)
while dummy:
directives.set_loop_options(
shape_invariants=[(sums, tensor_shape.TensorShape(None))])
sums += inner(2.)
dummy = constant_op.constant(False)
return sums
self.assertAllEqual(outer(), 3.)
value = constant_op.constant(2.)
self.assertAllEqual(outer(), 4.)
value = constant_op.constant(3.)
self.assertAllEqual(outer(), 5.)
def testDeferredCaptureWithKey(self):
value0 = 1.0
value1 = 2.0
@polymorphic_function.function
def lazy_capture(x):
w = ops.get_default_graph().capture_call_time_value(
lambda: value0, tensor_lib.TensorSpec(None), key=0)
y = ops.get_default_graph().capture_call_time_value(
lambda: value1, tensor_lib.TensorSpec(None), key=1)
def bad_closure():
raise ValueError('Should not run')
z = ops.get_default_graph().capture_call_time_value(
bad_closure, tensor_lib.TensorSpec(None), key=1)
return x + y + w + z
self.assertAllEqual(lazy_capture(2.0), 7.0)
value0 = 2.0
value1 = 3.0
self.assertAllEqual(lazy_capture(2.0), 10.0)
def testDeferredCaptureTypeError(self):
value = constant_op.constant(1.0)
@polymorphic_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_lib.TensorSpec(()))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# dtype mismatch
value = constant_op.constant(1)
with self.assertRaisesRegex(TypeError, 'Can not cast Tensor'):
lazy_capture(2.0)
# shape mismatch
value = constant_op.constant([1.0])
with self.assertRaisesRegex(TypeError, 'Can not cast'):
lazy_capture(2.0)
def testDeferredCaptureReturnNestWithCompositeTensor(self):
i_s = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
r_t = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])
s_t = sparse_tensor.SparseTensor(
values=[1, 2, 3], indices=[[0], [8], [10]], dense_shape=[20])
@polymorphic_function.function
def lazy_capture():
y = ops.get_default_graph().capture_call_time_value(
lambda: {'i': i_s, 't': (r_t, s_t)},
{'i': indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int32),
't': (ragged_tensor.RaggedTensorSpec([2, None, None], dtypes.int32),
sparse_tensor.SparseTensorSpec([None], dtypes.int32))})
return y['i'], y['t']
i, (r, s) = lazy_capture()
self.assertAllEqual(i_s.values, i.values)
self.assertAllEqual(i_s.indices, i.indices)
self.assertAllEqual(i_s.dense_shape, i.dense_shape)
self.assertAllEqual(r_t, r)
self.assertAllEqual(s_t.indices, s.indices)
self.assertAllEqual(s_t.values, s.values)
self.assertAllEqual(s_t.dense_shape, s.dense_shape)
def testDeferredCaptureCompositeTensorSpecTypeMismatch(self):
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64))
@polymorphic_function.function
def lazy_capture():
return ops.get_default_graph().capture_call_time_value(
lambda: value, indexed_slices.IndexedSlicesSpec(dtype=dtypes.int32))
# Type matches spec.
lazy_capture()
# Extra dense shape component.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
with self.assertRaises(ValueError):
lazy_capture()
# Index dtype mismatch int32 vs. int64.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]))
with self.assertRaises(TypeError):
lazy_capture()
@parameterized.parameters(
(1, int, 2, int, 1),
(1, constant_op.constant, 2, constant_op.constant, 1))
def testRetraceLogicWithSideInputs(self, val_before, type_before, val_after,
type_after, expected_len):
@polymorphic_function.function
def f():
func = lambda: x
return ops.get_default_graph()._experimental_capture_side_input_by_ref( # pylint: disable=protected-access
'lambda: x', func)
x = type_before(val_before)
_ = f()
x = type_after(val_after)
_ = f()
self.assertLen(total_function_cache(f), expected_len)
def testByRefCaptureWithInputSignature(self):
@polymorphic_function.function(input_signature=[])
def f():
func = lambda: x
return ops.get_default_graph()._experimental_capture_side_input_by_ref( # pylint: disable=protected-access
'lambda: x', func)
x = 1
_ = f()
x = 2
_ = f()
self.assertLen(total_function_cache(f), 1)
def testFunctoolsLruCache(self):
self.skipTest(
"b/194845243: inspect.getfullargspec doesn't unwrap Python decorators.")
@polymorphic_function.function
@functools.lru_cache(maxsize=2)
def f(a):
return 2 * a
self.assertAllEqual(f(1), array_ops.constant(2))
def testGraphRemoveFunction(self):
@polymorphic_function.function
def g(x):
return x + 1
@polymorphic_function.function
def f(x):
return g(x)
graph = f.get_concrete_function(constant_op.constant(1)).graph
graph_def = graph.as_graph_def()
func_name = graph_def.library.function[0].signature.name
self.assertLen(graph_def.library.function, 1)
self.assertTrue(graph._is_function(func_name))
graph._remove_function(func_name)
updated_graph_def = graph.as_graph_def()
self.assertEmpty(updated_graph_def.library.function)
self.assertFalse(graph._is_function(func_name))
with self.assertRaisesRegex(ValueError, 'not found'):
graph._remove_function(func_name)
def testInputAndOutputDataclass(self):
@polymorphic_function.function
def f(x):
return x
mt = MaskedTensor(mask=True, value=constant_op.constant([1.0]))
result = f(mt)
self.assertEqual(result.mask, mt.mask)
self.assertAllEqual(result.value, mt.value)
def testInputAndOutputNestedDataclass(self):
@polymorphic_function.function
def f(x):
return x
mt = MaskedTensor(mask=True, value=constant_op.constant([1.0]))
mt2 = MaskedTensor(mask=False, value=constant_op.constant([2.0]))
mtp = MaskedTensorPair(masks=[True, False], value1=mt, value2=mt2)
result = f(mtp)
self.assertEqual(result.masks, mtp.masks)
self.assertEqual(result.value1.mask, mt.mask)
self.assertAllEqual(result.value1.value, mt.value)
self.assertEqual(result.value2.mask, mt2.mask)
self.assertAllEqual(result.value2.value, mt2.value)
def testInputAndCreatNewDataclass(self):
@polymorphic_function.function
def f(x, y):
return MaskedTensor(mask=x.mask, value=y.value)
mt = MaskedTensor(mask=False, value=constant_op.constant([1.0]))
mt2 = MaskedTensor(mask=True, value=constant_op.constant([2.0]))
result = f(mt, mt2)
self.assertEqual(result.mask, mt.mask)
self.assertAllEqual(result.value, mt2.value)
def testDataclassWithUnhashableMetadata(self):
@polymorphic_function.function
def f(x, y):
return MaskedTensorPair(
masks=x.masks + y.masks, value1=x.value1, value2=y.value2
)
mt = MaskedTensor(mask=False, value=constant_op.constant([1.0]))
mt2 = MaskedTensor(mask=True, value=constant_op.constant([2.0]))
mtp = MaskedTensorPair(masks=[True, True], value1=mt, value2=mt2)
mt3 = MaskedTensor(mask=False, value=constant_op.constant([3.0]))
mt4 = MaskedTensor(mask=True, value=constant_op.constant([4.0]))
mtp2 = MaskedTensorPair(masks=[False, False], value1=mt3, value2=mt4)
result = f(mtp, mtp2)
self.assertEqual(result.masks, mtp.masks + mtp2.masks)
self.assertEqual(result.value1.mask, mt.mask)
self.assertAllEqual(result.value1.value, mt.value)
self.assertEqual(result.value2.mask, mt4.mask)
self.assertAllEqual(result.value2.value, mt4.value)
def testDataClassWithSubTraceType(self):
@polymorphic_function.function
def f(x):
return x
mt = MaskedTensor(mask=True, value=constant_op.constant([1.0]))
mt2 = MaskedTensor(mask=True, value=constant_op.constant([2.0]))
f1 = f.get_concrete_function(mt)
f2 = f.get_concrete_function(mt2)
# mt2's TraceType is the same as mt1, so it doesn't need retrace
self.assertIs(f1, f2)
mt3 = MaskedTensor(
mask=False,
value=tensor_lib.TensorSpec(shape=[None, None], dtype=dtypes.int32),
)
f3 = f.get_concrete_function(mt3)
self.assertIsNot(f1, f3)
mt4 = MaskedTensor(
mask=False,
value=constant_op.constant(
[[1], [2]], shape=[2, 1], dtype=dtypes.int32
),
)
f4 = f.get_concrete_function(mt4)
# mt4's TraceType can be matched by mt3's spec, so it doesn't need retrace
self.assertIs(f3, f4)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| MultiDeviceTest |
python | tensorflow__tensorflow | tensorflow/python/saved_model/save_context.py | {
"start": 766,
"end": 1865
} | class ____(threading.local):
"""A context for building a graph of SavedModel."""
def __init__(self):
super(SaveContext, self).__init__()
self._in_save_context = False
self._options = None
def options(self):
if not self.in_save_context():
raise ValueError("Not in a SaveContext.")
return self._options
def enter_save_context(self, options):
self._in_save_context = True
self._options = options
def exit_save_context(self):
self._in_save_context = False
self._options = None
def in_save_context(self):
return self._in_save_context
_save_context = SaveContext()
@contextlib.contextmanager
def save_context(options):
if in_save_context():
raise ValueError("Already in a SaveContext.")
_save_context.enter_save_context(options)
try:
yield
finally:
_save_context.exit_save_context()
def in_save_context():
"""Returns whether under a save context."""
return _save_context.in_save_context()
def get_save_options():
"""Returns the save options if under a save context."""
return _save_context.options()
| SaveContext |
python | pytorch__pytorch | torch/_inductor/codegen/rocm/rocm_template.py | {
"start": 765,
"end": 6637
} | class ____(KernelTemplate):
index_counter = itertools.count()
gfx9_threads_per_warp = 64
def __init__(
self,
name: str,
input_nodes: list[Buffer],
layout: Layout,
input_reorder: Optional[list[int]] = None,
) -> None:
"""
Baseclass for ROCm C++ Templates, derived from KernelTemplate. Not to be instantiated directly.
Args:
name (str): The name of the ROCmTemplate object.
input_nodes (List[IRNode]): A list of input IRNodes.
layout (Layout): The layout of the output buffer / tensor.
input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.
"""
super().__init__(name)
self.input_nodes = input_nodes
self.output_node: Buffer = Buffer(name="buf_out", layout=layout)
self.input_reorder = input_reorder
self.layout = layout
def generate( # type: ignore[override]
self,
**kwargs,
) -> ROCmTemplateCaller:
"""
Generates the ROCm template caller object for the given GEMM template and operation. This ROCmTemplateCaller
may be used to call and benchmark the generated ROCm kernel in a standalone manner to enable Autotuning.
Args:
kwargs: Additional keyword arguments.
Returns:
A ROCmTemplateCaller object representing the generated ROCm template caller.
"""
kernel_name = f"rocm_{self.name}"
kernel_hash_name = f"rocm_{self.name}_{next(self.index_counter)}"
with (
patch.object(V.graph, "get_dtype", self._fake_get_dtype(self.output_node)),
ROCmTemplateKernel(
kernel_name=kernel_name,
runtime_arg_info=self.get_runtime_arg_info(),
runtime_arg_values=self.get_runtime_arg_values(**kwargs),
) as kernel,
):
code = self.render(kernel=kernel, **kwargs)
_, call_args, _, _ = kernel.args.python_argdefs()
log.debug("Autotune key: %s, Generated Code:\n%s", kernel_hash_name, code)
log.debug(
"Args: cpp_argdefs: %s, python_argdefs: %s",
kernel.args.cpp_argdefs(DTYPE_TO_ROCM_TYPE),
kernel.args.python_argdefs(),
)
input_reorder = (
self.input_reorder
if self.input_reorder is not None
else list(range(len(self.input_nodes)))
)
expected_args = list(
unique(self.input_nodes[idx].get_name() for idx in input_reorder)
)
expected_args.extend([self.output_node.get_name()])
assert list(call_args)[: len(expected_args)] == expected_args, (
call_args,
expected_args,
)
size_args = (
self.size_args() if hasattr(self, "size_args") else ()
) # subclass should define def size_args()
size_args_ints = [
V.graph.sizevars.size_hint(arg) for arg in size_args
] # resolve to ints for benchmarking
# The runtime args come right after the size args
runtime_args = self.get_runtime_arg_values(**kwargs)
extra_args = size_args_ints + runtime_args
bmreq = ROCmBenchmarkRequest(
kernel_name=kernel_name,
input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
extra_args=extra_args,
source_code=code,
)
def make_kernel_render(
template_node: ROCmTemplateBuffer,
epilogue_nodes: Optional[Sequence[IRNode]] = None,
):
kernel = ROCmTemplateKernel(
kernel_name="KERNEL_NAME",
runtime_arg_info=self.get_runtime_arg_info(),
runtime_arg_values=self.get_runtime_arg_values(**kwargs),
)
render = functools.partial(
self.render,
kernel=kernel,
template_buffer_node=template_node,
epilogue_nodes=epilogue_nodes,
**kwargs, # includes "op" argument in case of CUTLASSGemmTemplate
)
return kernel, render
return ROCmTemplateCaller(
kernel_hash_name,
self.name,
self.input_nodes,
self.output_node.get_layout(),
make_kernel_render,
bmreq,
self,
kwargs,
)
def header(self) -> IndentedBuffer:
res = IndentedBuffer()
res.splice(
"""
#include <exception>
#include <iostream>
#include <memory>
#include <random>
#include <vector>
"""
)
return res
def globals(self) -> IndentedBuffer:
res = IndentedBuffer()
res.splice(
"""
// We compile all models with -fvisibility=hidden. Any symbols that need to be
// exposed in the final shared library must be declared with PT_EXPORT to make
// them visible.
#ifdef __GNUC__ // Applies to any compiler with GNU extensions (clang and g++)
#define PT_EXPORT __attribute__((__visibility__("default")))
#else
#ifdef _WIN32
#define PT_EXPORT __declspec(dllexport)
#else
#define PT_EXPORT
#endif
#endif
"""
)
return res
def render(self, **kwargs) -> str:
raise NotImplementedError
def get_runtime_arg_info(self) -> list[ArgInfo]:
return []
def get_runtime_arg_values(self, **kwargs) -> list[Any]:
return []
| ROCmTemplate |
python | pandas-dev__pandas | pandas/plotting/_core.py | {
"start": 25422,
"end": 78431
} | class ____(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
Attributes
----------
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot (DataFrame only)
- 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool or sequence of iterables, default False
Whether to group columns into subplots:
- ``False`` : No subplots will be used
- ``True`` : Make separate subplots for each column.
- sequence of iterables of column labels: Create a subplot for each
group of columns. For example `[('a', 'c'), ('b', 'd')]` will
create 2 subplots: one with columns 'a' and 'c', and one
with columns 'b' and 'd'. Remaining columns that aren't specified
will be plotted in additional subplots (one per column).
sharex : bool, default True if ax is None else False
In case ``subplots=True``, share x axis and set some x axis labels
to invisible; defaults to True if ax is None otherwise False if
an ax is passed in; Be aware, that passing in both an ax and
``sharex=True`` will alter all x axis labels for all axis in a figure.
sharey : bool, default False
In case ``subplots=True``, share y axis and set some y axis labels to invisible.
layout : tuple, optional
(rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
logy : bool or 'sym' default False
Use log scaling or symlog scaling on y axis.
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
Set the x limits of the current axes.
ylim : 2-tuple/list
Set the y limits of the current axes.
xlabel : label, optional
Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the
x-column name for planar plots.
.. versionchanged:: 2.0.0
Now applicable to histograms.
ylabel : label, optional
Name to use for the ylabel on y-axis. Default will show no ylabel, or the
y-column name for planar plots.
.. versionchanged:: 2.0.0
Now applicable to histograms.
rot : float, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : float, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
stacked : bool, default False in line and bar plots, and True in area plot
If True, create stacked plot.
secondary_y : bool or sequence, default False
Whether to plot on the secondary y-axis if a list/tuple, which
columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
DataFrame.hist : Make a histogram.
DataFrame.boxplot : Make a box plot.
DataFrame.plot.scatter : Make a scatter plot with varying marker
point size and color.
DataFrame.plot.hexbin : Make a hexagonal binning plot of
two variables.
DataFrame.plot.kde : Make Kernel Density Estimate plot using
Gaussian kernels.
DataFrame.plot.area : Make a stacked area plot.
DataFrame.plot.bar : Make a bar plot.
DataFrame.plot.barh : Make a horizontal bar plot.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
Examples
--------
For Series:
.. plot::
:context: close-figs
>>> ser = pd.Series([1, 2, 3, 3])
>>> plot = ser.plot(kind="hist", title="My plot")
For DataFrame:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... {
... "length": [1.5, 0.5, 1.2, 0.9, 3],
... "width": [0.7, 0.2, 0.15, 0.2, 1.1],
... },
... index=["pig", "rabbit", "duck", "chicken", "horse"],
... )
>>> plot = df.plot(title="DataFrame Plot")
For SeriesGroupBy:
.. plot::
:context: close-figs
>>> lst = [-1, -2, -3, 1, 2, 3]
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> plot = ser.groupby(lambda x: x > 0).plot(title="SeriesGroupBy Plot")
For DataFrameGroupBy:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({"col1": [1, 2, 3, 4], "col2": ["A", "B", "A", "B"]})
>>> plot = df.groupby("col2").plot(kind="bar", title="DataFrameGroupBy Plot")
"""
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
_dataframe_kinds = ("scatter", "hexbin")
_kind_aliases = {"density": "kde"}
_all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
def __init__(self, data: Series | DataFrame) -> None:
self._parent = data
@staticmethod
def _get_call_args(backend_name: str, data: Series | DataFrame, args, kwargs):
"""
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
"""
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
[
f"{name}={value!r}"
for (name, _), value in zip(arg_def, args, strict=False)
]
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {
name: value for (name, _), value in zip(arg_def, args, strict=False)
}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
def __call__(self, *args, **kwargs):
plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
)
kind = self._kind_aliases.get(kind, kind)
# when using another backend, get out of the way
if plot_backend.__name__ != "pandas.plotting._matplotlib":
return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
if kind not in self._all_kinds:
raise ValueError(
f"{kind} is not a valid plot kind Valid plot kinds: {self._all_kinds}"
)
data = self._parent
if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
if isinstance(data, ABCDataFrame):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(f"plot kind {kind} can only be used for data frames")
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
if y is None and kwargs.get("subplots") is False:
raise ValueError(
f"{kind} requires either y column or 'subplots=True'"
)
if y is not None:
if is_integer(y) and not holds_integer(data.columns):
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy(deep=False)
data.index.name = y
elif isinstance(data, ABCDataFrame):
data_cols = data.columns
if x is not None:
if is_integer(x) and not holds_integer(data.columns):
x = data_cols[x]
elif not isinstance(data[x], ABCSeries):
raise ValueError("x must be a label or position")
data = data.set_index(x)
if y is not None:
# check if we have y as int or list of ints
int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
int_y_arg = is_integer(y) or int_ylist
if int_y_arg and not holds_integer(data.columns):
y = data_cols[y]
label_kw = kwargs["label"] if "label" in kwargs else False
for kw in ["xerr", "yerr"]:
if kw in kwargs and (
isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
):
try:
kwargs[kw] = data[kwargs[kw]]
except (IndexError, KeyError, TypeError):
pass
data = data[y]
if isinstance(data, ABCSeries):
label_name = label_kw or y
data.name = label_name
else:
# error: Argument 1 to "len" has incompatible type "Any | bool";
# expected "Sized" [arg-type]
match = is_list_like(label_kw) and len(label_kw) == len(y) # type: ignore[arg-type]
if label_kw and not match:
raise ValueError(
"label should be list-like and same length as y"
)
label_name = label_kw or data.columns
data.columns = label_name
return plot_backend.plot(data, kind=kind, **kwargs)
__call__.__doc__ = __doc__
def line(
self,
x: Hashable | None = None,
y: Hashable | None = None,
color: str | Sequence[str] | dict | None = None,
**kwargs,
) -> PlotAccessor:
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array-like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column recursively. For
instance ['green','yellow'] each column's line will be filled in
green or yellow, alternatively. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color lines for
column `a` in green and lines for column `b` in red.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line() # doctest: +SKIP
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame(
... {
... "pig": [20, 18, 489, 675, 1776],
... "horse": [4, 25, 281, 600, 1900],
... },
... index=[1990, 1997, 2003, 2009, 2014],
... )
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
Let's repeat the same example, but specifying colors for
each column (in this case, for each animal).
>>> axes = df.plot.line(
... subplots=True, color={"pig": "pink", "horse": "#742802"}
... )
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x="pig", y="horse")
"""
if color is not None:
kwargs["color"] = color
return self(kind="line", x=x, y=y, **kwargs)
def bar(
self,
x: Hashable | None = None,
y: Hashable | None = None,
color: str | Sequence[str] | dict | None = None,
**kwargs,
) -> PlotAccessor:
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array-like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column recursively. For
instance ['green','yellow'] each column's bar will be filled in
green or yellow, alternatively. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color bars for
column `a` in green and bars for column `b` in red.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
>>> ax = df.plot.bar(x="lab", y="val", rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = [
... "snail",
... "pig",
... "elephant",
... "rabbit",
... "giraffe",
... "coyote",
... "horse",
... ]
>>> df = pd.DataFrame({"speed": speed, "lifespan": lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Plot stacked bar charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.bar(stacked=True)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
If you don't like the default colours, you can specify how you'd
like each column to be colored.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(
... rot=0,
... subplots=True,
... color={"speed": "red", "lifespan": "green"},
... )
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y="speed", rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x="lifespan", rot=0)
"""
if color is not None:
kwargs["color"] = color
return self(kind="bar", x=x, y=y, **kwargs)
def barh(
self,
x: Hashable | None = None,
y: Hashable | None = None,
color: str | Sequence[str] | dict | None = None,
**kwargs,
) -> PlotAccessor:
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array-like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column recursively. For
instance ['green','yellow'] each column's bar will be filled in
green or yellow, alternatively. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color bars for
column `a` in green and bars for column `b` in red.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.bar : Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
>>> ax = df.plot.barh(x="lab", y="val")
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = [
... "snail",
... "pig",
... "elephant",
... "rabbit",
... "giraffe",
... "coyote",
... "horse",
... ]
>>> df = pd.DataFrame({"speed": speed, "lifespan": lifespan}, index=index)
>>> ax = df.plot.barh()
Plot stacked barh charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.barh(stacked=True)
We can specify colors for each column
.. plot::
:context: close-figs
>>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = [
... "snail",
... "pig",
... "elephant",
... "rabbit",
... "giraffe",
... "coyote",
... "horse",
... ]
>>> df = pd.DataFrame({"speed": speed, "lifespan": lifespan}, index=index)
>>> ax = df.plot.barh(y="speed")
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = [
... "snail",
... "pig",
... "elephant",
... "rabbit",
... "giraffe",
... "coyote",
... "horse",
... ]
>>> df = pd.DataFrame({"speed": speed, "lifespan": lifespan}, index=index)
>>> ax = df.plot.barh(x="lifespan")
"""
if color is not None:
kwargs["color"] = color
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
The matplotlib axes containing the box plot.
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list("ABCD"))
>>> ax = df.plot.box()
You can also generate groupings if you specify the `by` parameter (which
can take a column name, or a list or tuple of column names):
.. plot::
:context: close-figs
>>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
>>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
>>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8))
"""
return self(kind="box", by=by, **kwargs)
def hist(
self, by: IndexLabel | None = None, bins: int = 10, **kwargs
) -> PlotAccessor:
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we roll a die 6000 times, we expect to get each value around 1000
times. But when we roll two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=["one"])
>>> df["two"] = df["one"] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
A grouped histogram can be generated by providing the parameter `by` (which
can be a column name, or a list of column names):
.. plot::
:context: close-figs
>>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
>>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
>>> ax = df.plot.hist(column=["age"], by="gender", figsize=(10, 8))
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(
self,
bw_method: Literal["scott", "silverman"] | float | Callable | None = None,
ind: np.ndarray | int | None = None,
weights: np.ndarray | None = None,
**kwargs,
) -> PlotAccessor:
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
weights : NumPy array, optional
Weights of datapoints. This must be the same shape as datapoints.
If None (default), the samples are assumed to be equally weighted.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
The matplotlib axes containing the KDE plot.
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... {
... "x": [1, 2, 2.5, 3, 3.5, 4, 5],
... "y": [4, 4, 4.5, 5, 5.5, 6, 6],
... }
... )
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, weights=weights, **kwargs)
density = kde
def area(
self,
x: Hashable | None = None,
y: Hashable | None = None,
stacked: bool = True,
**kwargs,
) -> PlotAccessor:
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... {
... "sales": [3, 2, 3, 9, 10, 6],
... "signups": [5, 5, 6, 12, 14, 13],
... "visits": [20, 42, 28, 62, 81, 50],
... },
... index=pd.date_range(
... start="2018/01/01", end="2018/07/01", freq="ME"
... ),
... )
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y="sales")
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... {
... "sales": [3, 2, 3],
... "visits": [20, 42, 28],
... "day": [1, 2, 3],
... }
... )
>>> ax = df.plot.area(x="day")
"""
return self(kind="area", x=x, y=y, stacked=stacked, **kwargs)
def pie(self, y: IndexLabel | None = None, **kwargs) -> PlotAccessor:
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with the information about
planet's mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... {"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
... index=["Mercury", "Venus", "Earth"],
... )
>>> plot = df.plot.pie(y="mass", figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(11, 6))
"""
if y is not None:
kwargs["y"] = y
if (
isinstance(self._parent, ABCDataFrame)
and kwargs.get("y", None) is None
and not kwargs.get("subplots", False)
):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", **kwargs)
def scatter(
self,
x: Hashable,
y: Hashable,
s: Hashable | Sequence[Hashable] | None = None,
c: Hashable | Sequence[Hashable] | None = None,
**kwargs,
) -> PlotAccessor:
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : str, scalar or array-like, optional
The size of each point. Possible values are:
- A string with the name of the column to be used for marker's size.
- A single scalar so all points have the same size.
- A sequence of scalars, which will be used for each point's size
recursively. For instance, when passing [2,14] all points size
will be either 2 or 14, alternatively.
c : str, int or array-like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each point's color recursively. For
instance ['green','yellow'] all points will be filled in green or
yellow, alternatively.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
The matplotlib axes containing the scatter plot.
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... [
... [5.1, 3.5, 0],
... [4.9, 3.0, 0],
... [7.0, 3.2, 1],
... [6.4, 3.2, 1],
... [5.9, 3.0, 2],
... ],
... columns=["length", "width", "species"],
... )
>>> ax1 = df.plot.scatter(x="length", y="width", c="DarkBlue")
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(
... x="length", y="width", c="species", colormap="viridis"
... )
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(
self,
x: Hashable,
y: Hashable,
C: Hashable | None = None,
reduce_C_function: Callable | None = None,
gridsize: int | tuple[int, int] | None = None,
**kwargs,
) -> PlotAccessor:
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, specifies values at given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`,
having as default the NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.Axes
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({"x": np.random.randn(n), "y": np.random.randn(n)})
>>> ax = df.plot.hexbin(x="x", y="y", gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values ranges from 1 to 5 but the result
plot shows values up to more than 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame(
... {
... "coord_x": np.random.uniform(-3, 3, size=n),
... "coord_y": np.random.uniform(30, 50, size=n),
... "observations": np.random.randint(1, 5, size=n),
... }
... )
>>> ax = df.plot.hexbin(
... x="coord_x",
... y="coord_y",
... C="observations",
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis",
... )
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
if gridsize is not None:
kwargs["gridsize"] = gridsize
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
_backends: dict[str, types.ModuleType] = {}
def _load_backend(backend: str) -> types.ModuleType:
    """
    Import and return a pandas plotting backend.

    Parameters
    ----------
    backend : str
        The identifier for the backend. Either an entrypoint item registered
        with importlib.metadata, "matplotlib", or a module name.

    Returns
    -------
    types.ModuleType
        The imported backend.

    Raises
    ------
    ImportError
        If "matplotlib" is requested but matplotlib is not installed.
    ValueError
        If no importable module exposing a top-level ``plot`` attribute
        matches ``backend``.
    """
    from importlib.metadata import entry_points

    if backend == "matplotlib":
        # Because matplotlib is an optional dependency and first-party backend,
        # we need to attempt an import here to raise an ImportError if needed.
        try:
            return importlib.import_module("pandas.plotting._matplotlib")
        except ImportError:
            raise ImportError(
                "matplotlib is required for plotting when the "
                'default backend "matplotlib" is selected.'
            ) from None

    # entry_points lost dict API ~ PY 3.10
    # https://github.com/python/importlib_metadata/issues/298
    eps = entry_points()
    group = "pandas_plotting_backends"
    if hasattr(eps, "select"):
        candidates = eps.select(group=group)
    else:
        # Argument 2 to "get" of "dict" has incompatible type "Tuple[]";
        # expected "EntryPoints" [arg-type]
        candidates = eps.get(group, ())  # type: ignore[arg-type]

    # ``None`` doubles as the "not found yet" sentinel.
    module = None
    for entry_point in candidates:
        if entry_point.name == backend:
            module = entry_point.load()
            break

    if module is None:
        # Fall back to unregistered, module name approach.
        try:
            module = importlib.import_module(backend)
        except ImportError:
            # Converted to the ValueError below.
            module = None

    if module is not None and hasattr(module, "plot"):
        # Validate that the interface is implemented when the option is set,
        # rather than at plot time.
        return module

    raise ValueError(
        f"Could not find plotting backend '{backend}'. Ensure that you've "
        f"installed the package providing the '{backend}' entrypoint, or that "
        "the package has a top-level `.plot` method."
    )
def _get_plot_backend(backend: str | None = None):
    """
    Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).

    The plotting system of pandas uses matplotlib by default, but the idea here
    is that it can also work with other third-party backends. This function
    returns the module which provides a top-level `.plot` method that will
    actually do the plotting. The backend is specified from a string, which
    either comes from the keyword argument `backend`, or, if not specified, from
    the option `pandas.options.plotting.backend`.

    The backend is imported lazily, as matplotlib is a soft dependency, and
    pandas can be used without it being installed.

    Notes
    -----
    Modifies `_backends` with imported backend as a side effect.
    """
    backend_str: str = backend or get_option("plotting.backend")

    # Serve from the cache when this backend was already resolved.
    try:
        return _backends[backend_str]
    except KeyError:
        pass

    module = _load_backend(backend_str)
    _backends[backend_str] = module
    return module
| PlotAccessor |
python | google__jax | jax/experimental/slab/slab.py | {
"start": 1042,
"end": 1145
} | class ____(NamedTuple):
data: jax.Array
cursor: Address
@jax.tree_util.register_pytree_node_class
| Slab |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 51705,
"end": 55646
class ____(DebertaV2PreTrainedModel):
    """DeBERTa-v2 model with a multiple-choice head: each candidate choice is
    encoded separately, pooled, scored with a single-logit linear layer, and
    the scores are compared across choices via cross-entropy."""

    def __init__(self, config):
        super().__init__(config)

        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        # One score per choice; scores are reshaped to (batch, num_choices)
        # in forward() before the loss is computed.
        self.classifier = nn.Linear(output_dim, 1)
        # cls_dropout overrides hidden_dropout_prob when set on the config.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = nn.Dropout(drop_out)

        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Inputs arrive as (batch, num_choices, seq_len); flatten the first two
        # dims so the encoder sees one sequence per choice.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.deberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Back to (batch, num_choices) so each row is a softmax over choices.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# Public API of this module.
__all__ = [
    "DebertaV2ForMaskedLM",
    "DebertaV2ForMultipleChoice",
    "DebertaV2ForQuestionAnswering",
    "DebertaV2ForSequenceClassification",
    "DebertaV2ForTokenClassification",
    "DebertaV2Model",
    "DebertaV2PreTrainedModel",
]
| DebertaV2ForMultipleChoice |
python | nryoung__algorithms | tests/test_data_structures.py | {
"start": 286,
"end": 12543
class ____(unittest.TestCase):
    """
    Test Binary Search Tree Implementation.

    Each test builds fresh trees from ``key_val`` (an ordered list of
    (key, value) pairs) and, where ordering matters, repeats the check
    with a shuffled insertion order.
    """

    key_val = [
        ("a", 1), ("b", 2), ("c", 3),
        ("d", 4), ("e", 5), ("f", 6),
        ("g", 7), ("h", 8), ("i", 9)
    ]

    def shuffle_list(self, ls):
        # In-place shuffle; callers pass a copy (key_val[:]) to avoid
        # mutating the shared class attribute.
        shuffle(ls)
        return ls

    def test_size(self):
        # Size starts at 0
        self.bst = binary_search_tree.BinarySearchTree()
        self.assertEqual(self.bst.size(), 0)
        # Doing a put increases the size to 1
        self.bst.put("one", 1)
        self.assertEqual(self.bst.size(), 1)
        # Putting a key that is already in doesn't change size
        self.bst.put("one", 1)
        self.assertEqual(self.bst.size(), 1)
        self.bst.put("one", 2)
        self.assertEqual(self.bst.size(), 1)

        self.bst = binary_search_tree.BinarySearchTree()
        size = 0
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
            size += 1
            self.assertEqual(self.bst.size(), size)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        size = 0
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
            size += 1
            self.assertEqual(self.bst.size(), size)

    def test_is_empty(self):
        self.bst = binary_search_tree.BinarySearchTree()
        self.assertTrue(self.bst.is_empty())
        self.bst.put("a", 1)
        self.assertFalse(self.bst.is_empty())

    def test_get(self):
        self.bst = binary_search_tree.BinarySearchTree()
        # Getting a key not in BST returns None
        self.assertEqual(self.bst.get("one"), None)
        # Get with a present key returns proper value
        self.bst.put("one", 1)
        self.assertEqual(self.bst.get("one"), 1)

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
            self.assertEqual(self.bst.get(k), v)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
            self.assertEqual(self.bst.get(k), v)

    def test_contains(self):
        self.bst = binary_search_tree.BinarySearchTree()
        self.assertFalse(self.bst.contains("a"))
        self.bst.put("a", 1)
        self.assertTrue(self.bst.contains("a"))

    def test_put(self):
        self.bst = binary_search_tree.BinarySearchTree()
        # When BST is empty first put becomes root
        self.bst.put("bbb", 1)
        self.assertEqual(self.bst.root.key, "bbb")
        self.assertEqual(self.bst.root.left, None)
        # Adding a key greater than root doesn't update the left tree
        # but does update the right
        self.bst.put("ccc", 2)
        self.assertEqual(self.bst.root.key, "bbb")
        self.assertEqual(self.bst.root.left, None)
        self.assertEqual(self.bst.root.right.key, "ccc")

        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("bbb", 1)
        # Adding a key less than root doesn't update the right tree
        # but does update the left
        self.bst.put("aaa", 2)
        self.assertEqual(self.bst.root.key, "bbb")
        self.assertEqual(self.bst.root.right, None)
        self.assertEqual(self.bst.root.left.key, "aaa")

        self.bst = binary_search_tree.BinarySearchTree()
        size = 0
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
            size += 1
            self.assertEqual(self.bst.get(k), v)
            self.assertEqual(self.bst.size(), size)

        self.bst = binary_search_tree.BinarySearchTree()
        shuffled = self.shuffle_list(self.key_val[:])
        size = 0
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
            size += 1
            self.assertEqual(self.bst.get(k), v)
            self.assertEqual(self.bst.size(), size)

    def test_min_key(self):
        self.bst = binary_search_tree.BinarySearchTree()
        # Inserting in descending order: each new key is the minimum.
        for pair in self.key_val[::-1]:
            k, v = pair
            self.bst.put(k, v)
            self.assertEqual(self.bst.min_key(), k)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        self.assertEqual(self.bst.min_key(), "a")

    def test_max_key(self):
        self.bst = binary_search_tree.BinarySearchTree()
        # Inserting in ascending order: each new key is the maximum.
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
            self.assertEqual(self.bst.max_key(), k)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        self.assertEqual(self.bst.max_key(), "i")

    def test_floor_key(self):
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.bst.put("c", 3)
        self.bst.put("e", 5)
        self.bst.put("g", 7)
        self.assertEqual(self.bst.floor_key("a"), "a")
        self.assertEqual(self.bst.floor_key("b"), "a")
        self.assertEqual(self.bst.floor_key("g"), "g")
        self.assertEqual(self.bst.floor_key("h"), "g")

        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("c", 3)
        self.bst.put("e", 5)
        self.bst.put("a", 1)
        self.bst.put("g", 7)
        self.assertEqual(self.bst.floor_key("a"), "a")
        self.assertEqual(self.bst.floor_key("b"), "a")
        self.assertEqual(self.bst.floor_key("g"), "g")
        self.assertEqual(self.bst.floor_key("h"), "g")

    def test_ceiling_key(self):
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.bst.put("c", 3)
        self.bst.put("e", 5)
        self.bst.put("g", 7)
        self.assertEqual(self.bst.ceiling_key("a"), "a")
        self.assertEqual(self.bst.ceiling_key("b"), "c")
        self.assertEqual(self.bst.ceiling_key("g"), "g")
        self.assertEqual(self.bst.ceiling_key("f"), "g")

        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("c", 3)
        self.bst.put("e", 5)
        self.bst.put("a", 1)
        self.bst.put("g", 7)
        self.assertEqual(self.bst.ceiling_key("a"), "a")
        self.assertEqual(self.bst.ceiling_key("b"), "c")
        self.assertEqual(self.bst.ceiling_key("g"), "g")
        self.assertEqual(self.bst.ceiling_key("f"), "g")

    def test_select_key(self):
        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        self.assertEqual(self.bst.select_key(0), "a")
        self.assertEqual(self.bst.select_key(1), "b")
        self.assertEqual(self.bst.select_key(2), "c")

    def test_rank(self):
        self.bst = binary_search_tree.BinarySearchTree()
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
        self.assertEqual(self.bst.rank("a"), 0)
        self.assertEqual(self.bst.rank("b"), 1)
        self.assertEqual(self.bst.rank("c"), 2)
        self.assertEqual(self.bst.rank("d"), 3)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        self.assertEqual(self.bst.rank("a"), 0)
        self.assertEqual(self.bst.rank("b"), 1)
        self.assertEqual(self.bst.rank("c"), 2)
        self.assertEqual(self.bst.rank("d"), 3)

    def test_delete_min(self):
        self.bst = binary_search_tree.BinarySearchTree()
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
        for i in range(self.bst.size() - 1):
            self.bst.delete_min()
            self.assertEqual(self.bst.min_key(), self.key_val[i+1][0])
        self.bst.delete_min()
        self.assertEqual(self.bst.min_key(), None)

        shuffled = self.shuffle_list(self.key_val[:])

        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        for i in range(self.bst.size() - 1):
            self.bst.delete_min()
            self.assertEqual(self.bst.min_key(), self.key_val[i+1][0])
        self.bst.delete_min()
        self.assertEqual(self.bst.min_key(), None)

    def test_delete_max(self):
        self.bst = binary_search_tree.BinarySearchTree()
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)
        for i in range(self.bst.size() - 1, 0, -1):
            self.bst.delete_max()
            self.assertEqual(self.bst.max_key(), self.key_val[i-1][0])
        self.bst.delete_max()
        self.assertEqual(self.bst.max_key(), None)

        shuffled = self.shuffle_list(self.key_val[:])

        # Fix: start from a fresh tree for the shuffled phase, mirroring
        # test_delete_min; previously this reused the (emptied) tree above.
        self.bst = binary_search_tree.BinarySearchTree()
        for pair in shuffled:
            k, v = pair
            self.bst.put(k, v)
        for i in range(self.bst.size() - 1, 0, -1):
            self.bst.delete_max()
            self.assertEqual(self.bst.max_key(), self.key_val[i-1][0])
        self.bst.delete_max()
        self.assertEqual(self.bst.max_key(), None)

    def test_delete(self):
        # delete key from an empty bst
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.delete("a")
        self.assertEqual(self.bst.root, None)
        self.assertEqual(self.bst.size(), 0)
        # delete key not present in bst
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.bst.delete("b")
        self.assertEqual(self.bst.root.key, "a")
        self.assertEqual(self.bst.size(), 1)
        # delete key when bst only contains one key
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.assertEqual(self.bst.root.key, "a")
        self.bst.delete("a")
        self.assertEqual(self.bst.root, None)
        self.assertEqual(self.bst.size(), 0)
        # delete parent key when it only has a left child
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("b", 2)
        self.bst.put("a", 1)
        self.assertEqual(self.bst.root.left.key, "a")
        self.bst.delete("b")
        self.assertEqual(self.bst.root.key, "a")
        self.assertEqual(self.bst.size(), 1)
        # delete parent key when it only has a right child
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.bst.put("b", 2)
        self.assertEqual(self.bst.root.right.key, "b")
        self.bst.delete("a")
        self.assertEqual(self.bst.root.key, "b")
        self.assertEqual(self.bst.size(), 1)
        # delete left child key
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("b", 2)
        self.bst.put("a", 1)
        self.assertEqual(self.bst.root.left.key, "a")
        self.bst.delete("a")
        self.assertEqual(self.bst.root.key, "b")
        self.assertEqual(self.bst.size(), 1)
        # delete right child key
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("a", 1)
        self.bst.put("b", 2)
        self.assertEqual(self.bst.root.right.key, "b")
        self.bst.delete("b")
        self.assertEqual(self.bst.root.key, "a")
        self.assertEqual(self.bst.size(), 1)
        # delete parent key when it has a left and right child
        self.bst = binary_search_tree.BinarySearchTree()
        self.bst.put("b", 2)
        self.bst.put("a", 1)
        self.bst.put("c", 3)
        self.bst.delete("b")
        self.assertEqual(self.bst.root.key, "c")
        self.assertEqual(self.bst.size(), 2)

    def test_keys(self):
        self.bst = binary_search_tree.BinarySearchTree()
        for pair in self.key_val:
            k, v = pair
            self.bst.put(k, v)

        self.assertEqual(
            self.bst.keys(),
            ["a", "b", "c", "d", "e", "f", "g", "h", "i"]
        )
| TestBinarySearchTree |
python | celery__celery | celery/app/defaults.py | {
"start": 1016,
"end": 16093
class ____:
    """Describes a Celery configuration option."""

    # Per-option metadata; instances override these via **kwargs in __init__.
    alt = None
    deprecate_by = None
    remove_by = None
    old = set()
    typemap = {'string': str, 'int': int, 'float': float, 'any': lambda v: v,
               'bool': strtobool, 'dict': dict, 'tuple': tuple}

    def __init__(self, default=None, *args, **kwargs):
        self.default = default
        # Any keyword (type, old, alt, deprecate_by, ...) becomes an
        # instance attribute, shadowing the class-level default.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
        self.type = kwargs.get('type') or 'string'

    def to_python(self, value):
        """Coerce *value* to this option's declared type."""
        converter = self.typemap[self.type]
        return converter(value)

    def __repr__(self):
        return f'<Option: type->{self.type} default->{self.default!r}>'
# The canonical tree of every Celery setting: each top-level entry is either a
# single Option or a nested Namespace (whose keys join with '_' to form the
# flat setting name, e.g. broker.url -> broker_url).  __old__ maps a namespace
# to its pre-4.0 uppercase setting names for backwards compatibility.
NAMESPACES = Namespace(
    accept_content=Option(DEFAULT_ACCEPT_CONTENT, type='list', old=OLD_NS),
    result_accept_content=Option(None, type='list'),
    enable_utc=Option(True, type='bool'),
    imports=Option((), type='tuple', old=OLD_NS),
    include=Option((), type='tuple', old=OLD_NS),
    timezone=Option(type='string', old=OLD_NS),
    beat=Namespace(
        __old__=OLD_NS_BEAT,

        max_loop_interval=Option(0, type='float'),
        schedule=Option({}, type='dict'),
        scheduler=Option('celery.beat:PersistentScheduler'),
        schedule_filename=Option('celerybeat-schedule'),
        sync_every=Option(0, type='int'),
        # NOTE(review): `type=int` (the class, not the string 'int') looks
        # inconsistent with every other Option here — confirm upstream intent.
        cron_starting_deadline=Option(None, type=int)
    ),
    broker=Namespace(
        url=Option(None, type='string'),
        read_url=Option(None, type='string'),
        write_url=Option(None, type='string'),
        transport=Option(type='string'),
        transport_options=Option({}, type='dict'),
        connection_timeout=Option(4, type='float'),
        connection_retry=Option(True, type='bool'),
        connection_retry_on_startup=Option(None, type='bool'),
        connection_max_retries=Option(100, type='int'),
        channel_error_retry=Option(False, type='bool'),
        failover_strategy=Option(None, type='string'),
        heartbeat=Option(120, type='int'),
        heartbeat_checkrate=Option(3.0, type='int'),
        login_method=Option(None, type='string'),
        native_delayed_delivery_queue_type=Option(default='quorum', type='string'),
        pool_limit=Option(10, type='int'),
        use_ssl=Option(False, type='bool'),

        host=Option(type='string'),
        port=Option(type='int'),
        user=Option(type='string'),
        password=Option(type='string'),
        vhost=Option(type='string'),
    ),
    cache=Namespace(
        __old__=old_ns('celery_cache'),

        backend=Option(),
        backend_options=Option({}, type='dict'),
    ),
    cassandra=Namespace(
        entry_ttl=Option(type='float'),
        keyspace=Option(type='string'),
        port=Option(type='string'),
        read_consistency=Option(type='string'),
        servers=Option(type='list'),
        bundle_path=Option(type='string'),
        table=Option(type='string'),
        write_consistency=Option(type='string'),
        auth_provider=Option(type='string'),
        auth_kwargs=Option(type='string'),
        options=Option({}, type='dict'),
    ),
    s3=Namespace(
        access_key_id=Option(type='string'),
        secret_access_key=Option(type='string'),
        bucket=Option(type='string'),
        base_path=Option(type='string'),
        endpoint_url=Option(type='string'),
        region=Option(type='string'),
    ),
    azureblockblob=Namespace(
        container_name=Option('celery', type='string'),
        retry_initial_backoff_sec=Option(2, type='int'),
        retry_increment_base=Option(2, type='int'),
        retry_max_attempts=Option(3, type='int'),
        base_path=Option('', type='string'),
        connection_timeout=Option(20, type='int'),
        read_timeout=Option(120, type='int'),
    ),
    gcs=Namespace(
        bucket=Option(type='string'),
        project=Option(type='string'),
        base_path=Option('', type='string'),
        ttl=Option(0, type='float'),
    ),
    control=Namespace(
        queue_ttl=Option(300.0, type='float'),
        queue_expires=Option(10.0, type='float'),
        queue_exclusive=Option(False, type='bool'),
        queue_durable=Option(False, type='bool'),
        exchange=Option('celery', type='string'),
    ),
    couchbase=Namespace(
        __old__=old_ns('celery_couchbase'),

        backend_settings=Option(None, type='dict'),
    ),
    arangodb=Namespace(
        __old__=old_ns('celery_arangodb'),

        backend_settings=Option(None, type='dict')
    ),
    mongodb=Namespace(
        __old__=old_ns('celery_mongodb'),

        backend_settings=Option(type='dict'),
    ),
    cosmosdbsql=Namespace(
        database_name=Option('celerydb', type='string'),
        collection_name=Option('celerycol', type='string'),
        consistency_level=Option('Session', type='string'),
        max_retry_attempts=Option(9, type='int'),
        max_retry_wait_time=Option(30, type='int'),
    ),
    event=Namespace(
        __old__=old_ns('celery_event'),

        queue_expires=Option(60.0, type='float'),
        queue_ttl=Option(5.0, type='float'),
        queue_prefix=Option('celeryev'),
        queue_exclusive=Option(False, type='bool'),
        queue_durable=Option(False, type='bool'),
        serializer=Option('json'),
        exchange=Option('celeryev', type='string'),
    ),
    redis=Namespace(
        __old__=old_ns('celery_redis'),

        backend_use_ssl=Option(type='dict'),
        db=Option(type='int'),
        host=Option(type='string'),
        max_connections=Option(type='int'),
        username=Option(type='string'),
        password=Option(type='string'),
        port=Option(type='int'),
        socket_timeout=Option(120.0, type='float'),
        socket_connect_timeout=Option(None, type='float'),
        retry_on_timeout=Option(False, type='bool'),
        socket_keepalive=Option(False, type='bool'),
    ),
    result=Namespace(
        __old__=old_ns('celery_result'),

        backend=Option(type='string'),
        cache_max=Option(
            -1,
            type='int', old={'celery_max_cached_results'},
        ),
        compression=Option(type='str'),
        exchange=Option('celeryresults'),
        exchange_type=Option('direct'),
        expires=Option(
            timedelta(days=1),
            type='float', old={'celery_task_result_expires'},
        ),
        persistent=Option(None, type='bool'),
        extended=Option(False, type='bool'),
        serializer=Option('json'),
        backend_transport_options=Option({}, type='dict'),
        chord_retry_interval=Option(1.0, type='float'),
        chord_join_timeout=Option(3.0, type='float'),
        backend_max_sleep_between_retries_ms=Option(10000, type='int'),
        backend_max_retries=Option(float("inf"), type='float'),
        backend_base_sleep_between_retries_ms=Option(10, type='int'),
        backend_always_retry=Option(False, type='bool'),
    ),
    elasticsearch=Namespace(
        __old__=old_ns('celery_elasticsearch'),

        retry_on_timeout=Option(type='bool'),
        max_retries=Option(type='int'),
        timeout=Option(type='float'),
        save_meta_as_text=Option(True, type='bool'),
    ),
    security=Namespace(
        __old__=old_ns('celery_security'),

        certificate=Option(type='string'),
        cert_store=Option(type='string'),
        key=Option(type='string'),
        key_password=Option(type='bytes'),
        digest=Option(DEFAULT_SECURITY_DIGEST, type='string'),
    ),
    database=Namespace(
        url=Option(old={'celery_result_dburi'}),
        engine_options=Option(
            type='dict', old={'celery_result_engine_options'},
        ),
        short_lived_sessions=Option(
            False, type='bool', old={'celery_result_db_short_lived_sessions'},
        ),
        table_schemas=Option(type='dict'),
        table_names=Option(type='dict', old={'celery_result_db_tablenames'}),
        create_tables_at_setup=Option(True, type='bool'),
    ),
    task=Namespace(
        __old__=OLD_NS,
        acks_late=Option(False, type='bool'),
        acks_on_failure_or_timeout=Option(True, type='bool'),
        always_eager=Option(False, type='bool'),
        annotations=Option(type='any'),
        compression=Option(type='string', old={'celery_message_compression'}),
        create_missing_queues=Option(True, type='bool'),
        create_missing_queue_type=Option('classic', type='string'),
        create_missing_queue_exchange_type=Option(None, type='string'),
        inherit_parent_priority=Option(False, type='bool'),
        default_delivery_mode=Option(2, type='string'),
        default_queue=Option('celery'),
        default_queue_type=Option('classic', type='string'),
        default_exchange=Option(None, type='string'),  # taken from queue
        default_exchange_type=Option('direct'),
        default_routing_key=Option(None, type='string'),  # taken from queue
        default_rate_limit=Option(type='string'),
        default_priority=Option(None, type='string'),
        eager_propagates=Option(
            False, type='bool', old={'celery_eager_propagates_exceptions'},
        ),
        ignore_result=Option(False, type='bool'),
        store_eager_result=Option(False, type='bool'),
        protocol=Option(2, type='int', old={'celery_task_protocol'}),
        publish_retry=Option(
            True, type='bool', old={'celery_task_publish_retry'},
        ),
        publish_retry_policy=Option(
            {'max_retries': 3,
             'interval_start': 0,
             'interval_max': 1,
             'interval_step': 0.2},
            type='dict', old={'celery_task_publish_retry_policy'},
        ),
        queues=Option(type='dict'),
        queue_max_priority=Option(None, type='int'),
        reject_on_worker_lost=Option(type='bool'),
        remote_tracebacks=Option(False, type='bool'),
        routes=Option(type='any'),
        send_sent_event=Option(
            False, type='bool', old={'celery_send_task_sent_event'},
        ),
        serializer=Option('json', old={'celery_task_serializer'}),
        soft_time_limit=Option(
            type='float', old={'celeryd_task_soft_time_limit'},
        ),
        time_limit=Option(
            type='float', old={'celeryd_task_time_limit'},
        ),
        store_errors_even_if_ignored=Option(False, type='bool'),
        track_started=Option(False, type='bool'),
        allow_error_cb_on_chord_header=Option(False, type='bool'),
    ),
    worker=Namespace(
        __old__=OLD_NS_WORKER,

        agent=Option(None, type='string'),
        autoscaler=Option('celery.worker.autoscale:Autoscaler'),
        cancel_long_running_tasks_on_connection_loss=Option(
            False, type='bool'
        ),
        soft_shutdown_timeout=Option(0.0, type='float'),
        enable_soft_shutdown_on_idle=Option(False, type='bool'),
        concurrency=Option(None, type='int'),
        consumer=Option('celery.worker.consumer:Consumer', type='string'),
        direct=Option(False, type='bool', old={'celery_worker_direct'}),
        disable_rate_limits=Option(
            False, type='bool', old={'celery_disable_rate_limits'},
        ),
        deduplicate_successful_tasks=Option(
            False, type='bool'
        ),
        enable_remote_control=Option(
            True, type='bool', old={'celery_enable_remote_control'},
        ),
        hijack_root_logger=Option(True, type='bool'),
        log_color=Option(type='bool'),
        log_format=Option(DEFAULT_PROCESS_LOG_FMT),
        lost_wait=Option(10.0, type='float', old={'celeryd_worker_lost_wait'}),
        max_memory_per_child=Option(type='int'),
        max_tasks_per_child=Option(type='int'),
        pool=Option(DEFAULT_POOL),
        pool_putlocks=Option(True, type='bool'),
        pool_restarts=Option(False, type='bool'),
        proc_alive_timeout=Option(4.0, type='float'),
        prefetch_multiplier=Option(4, type='int'),
        eta_task_limit=Option(None, type='int'),
        enable_prefetch_count_reduction=Option(True, type='bool'),
        disable_prefetch=Option(False, type='bool'),
        redirect_stdouts=Option(
            True, type='bool', old={'celery_redirect_stdouts'},
        ),
        redirect_stdouts_level=Option(
            'WARNING', old={'celery_redirect_stdouts_level'},
        ),
        send_task_events=Option(
            False, type='bool', old={'celery_send_events'},
        ),
        state_db=Option(),
        task_log_format=Option(DEFAULT_TASK_LOG_FMT),
        timer=Option(type='string'),
        timer_precision=Option(1.0, type='float'),
        detect_quorum_queues=Option(True, type='bool'),
    ),
)
def _flatten_keys(ns, key, opt):
return [(ns + key, opt)]
def _to_compat(ns, key, opt):
if opt.old:
return [
(oldkey.format(key).upper(), ns + key, opt)
for oldkey in opt.old
]
return [((ns + key).upper(), ns + key, opt)]
def flatten(d, root='', keyfilter=_flatten_keys):
    """Flatten settings breadth-first, joining nested namespaces with '_'."""
    pending = deque([(root, d)])
    while pending:
        prefix, mapping = pending.popleft()
        for name, opt in mapping.items():
            if isinstance(opt, dict):
                # Nested namespace: queue it for a later pass (BFS order).
                pending.append((prefix + name + '_', opt))
            else:
                yield from keyfilter(prefix, name, opt)
# Flat mapping of every new-style setting name to its default value.
DEFAULTS = {
    key: opt.default for key, opt in flatten(NAMESPACES)
}
# Temporary (OLD_KEY, new_key, Option) triples used to build the
# old<->new translation tables below, then released.
__compat = list(flatten(NAMESPACES, keyfilter=_to_compat))
_OLD_DEFAULTS = {old_key: opt.default for old_key, _, opt in __compat}
_TO_OLD_KEY = {new_key: old_key for old_key, new_key, _ in __compat}
_TO_NEW_KEY = {old_key: new_key for old_key, new_key, _ in __compat}
__compat = None

# Sets of all valid new-style and old-style setting names.
SETTING_KEYS = set(DEFAULTS.keys())
_OLD_SETTING_KEYS = set(_TO_NEW_KEY.keys())
def find_deprecated_settings(source):  # pragma: no cover
    """Emit a deprecation warning for every deprecated setting set on *source*.

    Returns *source* unchanged so it can be used in a pipeline.
    """
    from celery.utils import deprecated
    for name, opt in flatten(NAMESPACES):
        # Only warn when the option is deprecated AND actually configured.
        if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
            deprecated.warn(description=f'The {name!r} setting',
                            deprecation=opt.deprecate_by,
                            removal=opt.remove_by,
                            alternative=f'Use the {opt.alt} instead')
    return source
@memoize(maxsize=None)
def find(name, namespace='celery'):
    """Find setting by name."""
    lname = name.lower()
    namespace = namespace.lower()
    # Try the requested namespace first.
    try:
        return searchresult(namespace, lname, NAMESPACES[namespace][lname])
    except KeyError:
        # Fall back to scanning every namespace: the name may itself be a
        # namespace, or a key inside some other namespace.
        for ns, opts in NAMESPACES.items():
            if ns.lower() == lname:
                return searchresult(None, ns, opts)
            if isinstance(opts, dict):
                try:
                    return searchresult(ns, lname, opts[lname])
                except KeyError:
                    pass
    # Finally, treat the name as a fully-qualified flat key.
    return searchresult(None, lname, DEFAULTS[lname])
| Option |
python | eventlet__eventlet | eventlet/green/http/cookies.py | {
"start": 11129,
"end": 18623
class ____(dict):
    """A class to hold ONE (key, value) pair.

    In a cookie, each such pair may have several attributes, so this class is
    used to keep the attributes associated with the appropriate key,value pair.
    This class also includes a coded_value attribute, which is used to hold
    the network representation of the value.  This is most useful when Python
    objects are pickled for network transit.
    """
    # RFC 2109 lists these attributes as reserved:
    #   path       comment       domain
    #   max-age    secure        version
    #
    # For historical reasons, these attributes are also reserved:
    #   expires
    #
    # This is an extension from Microsoft:
    #   httponly
    #
    # This dictionary provides a mapping from the lowercase
    # variant on the left to the appropriate traditional
    # formatting on the right.
    _reserved = {
        "expires"  : "expires",
        "path"     : "Path",
        "comment"  : "Comment",
        "domain"   : "Domain",
        "max-age"  : "Max-Age",
        "secure"   : "Secure",
        "httponly" : "HttpOnly",
        "version"  : "Version",
    }

    # Attributes serialized as bare flags (no "=value" part).
    _flags = {'secure', 'httponly'}

    def __init__(self):
        # Set defaults
        self._key = self._value = self._coded_value = None

        # Set default attributes
        for key in self._reserved:
            dict.__setitem__(self, key, "")

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, key):
        # Direct assignment is deprecated; use set() instead.
        _warn_deprecated_setter('key')
        self._key = key

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        _warn_deprecated_setter('value')
        self._value = value

    @property
    def coded_value(self):
        return self._coded_value

    @coded_value.setter
    def coded_value(self, coded_value):
        _warn_deprecated_setter('coded_value')
        self._coded_value = coded_value

    def __setitem__(self, K, V):
        """Set a cookie *attribute*; only reserved attribute names are legal."""
        K = K.lower()
        if not K in self._reserved:
            raise CookieError("Invalid attribute %r" % (K,))
        dict.__setitem__(self, K, V)

    def setdefault(self, key, val=None):
        key = key.lower()
        if key not in self._reserved:
            raise CookieError("Invalid attribute %r" % (key,))
        return dict.setdefault(self, key, val)

    def __eq__(self, morsel):
        # Equal only to another Morsel with the same attributes AND the same
        # key/value/coded_value triple.
        if not isinstance(morsel, Morsel):
            return NotImplemented
        return (dict.__eq__(self, morsel) and
                self._value == morsel._value and
                self._key == morsel._key and
                self._coded_value == morsel._coded_value)

    __ne__ = object.__ne__

    def copy(self):
        morsel = Morsel()
        dict.update(morsel, self)
        morsel.__dict__.update(self.__dict__)
        return morsel

    def update(self, values):
        """Bulk-set attributes, validating every key before mutating self."""
        data = {}
        for key, val in dict(values).items():
            key = key.lower()
            if key not in self._reserved:
                raise CookieError("Invalid attribute %r" % (key,))
            data[key] = val
        dict.update(self, data)

    def isReservedKey(self, K):
        return K.lower() in self._reserved

    def set(self, key, val, coded_val, LegalChars=_LegalChars):
        """Set the key/value/coded_value triple, validating the key."""
        if LegalChars != _LegalChars:
            import warnings
            warnings.warn(
                'LegalChars parameter is deprecated, ignored and will '
                'be removed in future versions.', DeprecationWarning,
                stacklevel=2)

        if key.lower() in self._reserved:
            raise CookieError('Attempt to set a reserved key %r' % (key,))
        if not _is_legal_key(key):
            raise CookieError('Illegal key %r' % (key,))

        # It's a good key, so save it.
        self._key = key
        self._value = val
        self._coded_value = coded_val

    def __getstate__(self):
        return {
            'key': self._key,
            'value': self._value,
            'coded_value': self._coded_value,
        }

    def __setstate__(self, state):
        self._key = state['key']
        self._value = state['value']
        self._coded_value = state['coded_value']

    def output(self, attrs=None, header="Set-Cookie:"):
        """Render this morsel as an HTTP header line."""
        return "%s %s" % (header, self.OutputString(attrs))

    __str__ = output

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.OutputString())

    def js_output(self, attrs=None):
        # Print javascript
        return """
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = \"%s\";
        // end hiding -->
        </script>
        """ % (self.OutputString(attrs).replace('"', r'\"'))

    def OutputString(self, attrs=None):
        # Build up our result
        #
        result = []
        append = result.append

        # First, the key=value pair
        append("%s=%s" % (self.key, self.coded_value))

        # Now add any defined attributes
        if attrs is None:
            attrs = self._reserved
        items = sorted(self.items())
        for key, value in items:
            if value == "":
                continue
            if key not in attrs:
                continue
            if key == "expires" and isinstance(value, int):
                # An int expires is a max-age-style offset; render as a date.
                append("%s=%s" % (self._reserved[key], _getdate(value)))
            elif key == "max-age" and isinstance(value, int):
                append("%s=%d" % (self._reserved[key], value))
            elif key in self._flags:
                if value:
                    append(str(self._reserved[key]))
            else:
                append("%s=%s" % (self._reserved[key], value))

        # Return the result
        return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
[""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
[""" + _LegalValueChars + r"""]* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
| Morsel |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 59170,
"end": 59732
class ____(PrefectFilterBaseModel):
    """Filter by `BlockSchema.version`."""
    # NOTE: the original docstring said "capabilities", but the filter below
    # matches against BlockSchema.version — corrected here.

    any_: Optional[list[str]] = Field(
        default=None,
        examples=[["2.0.0", "2.1.0"]],
        description="A list of block schema versions.",
    )

    def _get_filter_list(
        self, db: "PrefectDBInterface"
    ) -> Iterable[sa.ColumnExpressionArgument[bool]]:
        """Build SQLAlchemy predicates; empty when no criteria are set."""
        filters: list[sa.ColumnElement[bool]] = []
        if self.any_ is not None:
            filters.append(db.BlockSchema.version.in_(self.any_))
        return filters
| BlockSchemaFilterVersion |
python | getsentry__sentry | src/sentry/models/options/organization_option.py | {
"start": 3845,
"end": 4620
} | class ____(Model):
"""
Organization options apply only to an instance of a organization.
Options which are specific to a plugin should namespace
their key. e.g. key='myplugin:optname'
key: onboarding:complete
value: { updated: datetime }
"""
__relocation_scope__ = RelocationScope.Organization
organization = FlexibleForeignKey("sentry.Organization")
key = models.CharField(max_length=64)
value = models.JSONField(null=True)
objects: ClassVar[OrganizationOptionManager] = OrganizationOptionManager()
class Meta:
app_label = "sentry"
db_table = "sentry_organizationoptions"
unique_together = (("organization", "key"),)
__repr__ = sane_repr("organization_id", "key", "value")
| OrganizationOption |
python | run-llama__llama_index | llama-index-core/tests/program/test_function_program.py | {
"start": 610,
"end": 1522
} | class ____(BaseModel):
title: str
artist: str
songs: List[MockSong]
MOCK_ALBUM = MockAlbum(
title="hello",
artist="world",
songs=[MockSong(title="song1"), MockSong(title="song2")],
)
MOCK_ALBUM_2 = MockAlbum(
title="hello2",
artist="world2",
songs=[MockSong(title="song3"), MockSong(title="song4")],
)
def _get_mock_album_response(
allow_parallel_tool_calls: bool = False,
) -> AgentChatResponse:
"""Get mock album."""
if allow_parallel_tool_calls:
albums = [MOCK_ALBUM, MOCK_ALBUM_2]
else:
albums = [MOCK_ALBUM]
tool_outputs = [
ToolOutput(
content=str(a),
tool_name="tool_output",
raw_input={},
raw_output=a,
)
for a in albums
]
# return tool outputs
return AgentChatResponse(
response="output",
sources=tool_outputs,
)
| MockAlbum |
python | pytest-dev__pytest | src/_pytest/legacypath.py | {
"start": 9355,
"end": 10366
} | class ____:
"""Backward compatibility wrapper that implements ``py.path.local``
for :class:`TempPathFactory`.
.. note::
These days, it is preferred to use ``tmp_path_factory``.
:ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
"""
_tmppath_factory: TempPathFactory
def __init__(
self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
) -> None:
check_ispytest(_ispytest)
self._tmppath_factory = tmppath_factory
def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
"""Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
def getbasetemp(self) -> LEGACY_PATH:
"""Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object."""
return legacy_path(self._tmppath_factory.getbasetemp().resolve())
| TempdirFactory |
python | PrefectHQ__prefect | src/prefect/server/schemas/responses.py | {
"start": 20845,
"end": 21502
} | class ____(ORMBaseModel):
"""
A response object for global concurrency limits.
"""
active: bool = Field(
default=True, description="Whether the global concurrency limit is active."
)
name: str = Field(
default=..., description="The name of the global concurrency limit."
)
limit: int = Field(default=..., description="The concurrency limit.")
active_slots: int = Field(default=..., description="The number of active slots.")
slot_decay_per_second: float = Field(
default=2.0,
description="The decay rate for active slots when used as a rate limit.",
)
| GlobalConcurrencyLimitResponse |
python | ethereum__web3.py | tests/conftest.py | {
"start": 812,
"end": 1569
} | class ____:
LogAnonymous = 0
LogNoArguments = 1
LogSingleArg = 2
LogDoubleArg = 3
LogTripleArg = 4
LogQuadrupleArg = 5
LogSingleAnonymous = 6
LogSingleWithIndex = 7
LogDoubleAnonymous = 8
LogDoubleWithIndex = 9
LogTripleWithIndex = 10
LogQuadrupleWithIndex = 11
LogBytes = 12
LogString = 13
LogDynamicArgs = 14
LogListArgs = 15
LogAddressIndexed = 16
LogAddressNotIndexed = 17
@pytest.fixture(scope="session")
def emitter_contract_event_ids():
return LogFunctions
# This class defines topics for the EmitterContract and is used to construct
# a fixture for contract event log topics. Parameterized tests that utilize
# an `emitter` contract fixture will use this data.
| LogFunctions |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 13735,
"end": 14369
} | class ____(graphene.ObjectType, AssetEventMixin):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "HealthChangedEvent"
def __init__(self, event: EventLogEntry):
dagster_event = check.not_none(event.dagster_event)
self.asset_health_changed = dagster_event.asset_health_changed_data
super().__init__(**_construct_asset_event_metadata_params(event, self.asset_health_changed))
AssetEventMixin.__init__(
self,
event=event,
metadata=self.asset_health_changed,
)
| GrapheneHealthChangedEvent |
python | mlflow__mlflow | mlflow/entities/span_status.py | {
"start": 1882,
"end": 5306
} | class ____:
"""
Status of the span or the trace.
Args:
status_code: The status code of the span or the trace. This must be one of the
values of the :py:class:`mlflow.entities.SpanStatusCode` enum or a string
representation of it like "OK", "ERROR".
description: Description of the status. This should be only set when the status
is ERROR, otherwise it will be ignored.
"""
status_code: SpanStatusCode
description: str = ""
def __post_init__(self):
"""
If user provides a string status code, validate it and convert to
the corresponding enum value.
"""
if isinstance(self.status_code, str):
try:
self.status_code = SpanStatusCode(self.status_code)
except ValueError:
raise MlflowException(
f"{self.status_code} is not a valid SpanStatusCode value. "
f"Please use one of {[status_code.value for status_code in SpanStatusCode]}",
error_code=INVALID_PARAMETER_VALUE,
)
def to_otel_status(self) -> trace_api.Status:
"""
Convert :py:class:`mlflow.entities.SpanStatus` object to OpenTelemetry status object.
:meta private:
"""
try:
status_code = getattr(trace_api.StatusCode, self.status_code.name)
except AttributeError:
raise MlflowException(
f"Invalid status code: {self.status_code}", error_code=INVALID_PARAMETER_VALUE
)
return trace_api.Status(status_code, self.description)
@classmethod
def from_otel_status(cls, otel_status: trace_api.Status) -> SpanStatus:
"""
Convert OpenTelemetry status object to our status object.
:meta private:
"""
try:
status_code = SpanStatusCode(otel_status.status_code.name)
except ValueError:
raise MlflowException(
f"Got invalid status code from OpenTelemetry: {otel_status.status_code}",
error_code=INVALID_PARAMETER_VALUE,
)
return cls(status_code, otel_status.description or "")
def to_otel_proto_status(self):
"""
Convert to OpenTelemetry protobuf Status for OTLP export.
:meta private:
"""
status = OtelStatus()
if self.status_code == SpanStatusCode.OK:
status.code = OtelStatus.StatusCode.STATUS_CODE_OK
elif self.status_code == SpanStatusCode.ERROR:
status.code = OtelStatus.StatusCode.STATUS_CODE_ERROR
else:
status.code = OtelStatus.StatusCode.STATUS_CODE_UNSET
if self.description:
status.message = self.description
return status
@classmethod
def from_otel_proto_status(cls, otel_proto_status) -> SpanStatus:
"""
Create a SpanStatus from an OpenTelemetry protobuf Status.
:meta private:
"""
# Map protobuf status codes to SpanStatusCode
if otel_proto_status.code == OtelStatus.STATUS_CODE_OK:
status_code = SpanStatusCode.OK
elif otel_proto_status.code == OtelStatus.STATUS_CODE_ERROR:
status_code = SpanStatusCode.ERROR
else:
status_code = SpanStatusCode.UNSET
return cls(status_code, otel_proto_status.message or "")
| SpanStatus |
python | conda__conda | conda/common/configuration.py | {
"start": 3302,
"end": 3699
} | class ____(ValidationError):
def __init__(self, source, keys, preferred_key):
self.source = source
self.keys = keys
msg = (
f"Multiple aliased keys in file {source}:\n"
f"{pretty_list(keys)}\n"
f"Must declare only one. Prefer '{preferred_key}'"
)
super().__init__(preferred_key, None, source, msg=msg)
| MultipleKeysError |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedules/__init__.py | {
"start": 1338,
"end": 1470
} | class ____(graphene.ObjectType):
scheduler_class = graphene.String()
class Meta:
name = "Scheduler"
| GrapheneScheduler |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-google-genai/llama_index/llms/google_genai/utils.py | {
"start": 15992,
"end": 21346
} | class ____(typing.TypedDict):
model: str
history: list[types.Content]
config: types.GenerateContentConfig
async def prepare_chat_params(
model: str,
messages: Sequence[ChatMessage],
use_file_api: bool = False,
client: Optional[Client] = None,
**kwargs: Any,
) -> tuple[Union[types.Content, types.File], ChatParams]:
"""
Prepare common parameters for chat creation.
Args:
messages: Sequence of chat messages
use_file_api: Whether to use File API or not for large files.
client: Google Genai client used for uploading large files.
**kwargs: Additional keyword arguments
Returns:
tuple containing:
- next_msg: the next message to send
- chat_kwargs: processed keyword arguments for chat creation
"""
# Extract system message if present
system_message: str | None = None
if messages and messages[0].role == MessageRole.SYSTEM:
sys_msg = messages.pop(0)
system_message = sys_msg.content
# Now messages contains the rest of the chat history
# Merge messages with the same role
merged_messages = merge_neighboring_same_role_messages(messages)
initial_history = await asyncio.gather(
*[
chat_message_to_gemini(message, use_file_api, client)
for message in merged_messages
]
)
# merge tool messages into a single tool message
# while maintaining the tool names
history = []
for idx, msg in enumerate(initial_history):
if idx < 1:
history.append(msg)
continue
# Skip if the role is different or not a tool message
if msg.parts and not any(
part.function_response is not None for part in msg.parts
):
history.append(msg)
continue
last_msg = history[-1]
# Skip if the last message is not a tool message
if last_msg.parts and not any(
part.function_response is not None for part in last_msg.parts
):
history.append(msg)
continue
# Skip if the role is different
if last_msg.role != msg.role:
history.append(msg)
continue
# Merge the tool messages
last_msg.parts.extend(msg.parts or [])
# Separate the next message from the history
next_msg = history.pop()
tools: types.Tool | list[types.Tool] | None = kwargs.pop("tools", None)
if tools and not isinstance(tools, list):
tools = [tools]
config: Union[types.GenerateContentConfig, dict] = kwargs.pop(
"generation_config", {}
)
if not isinstance(config, dict):
config = config.model_dump()
# Add system message as system_instruction if present
if system_message:
config["system_instruction"] = system_message
chat_kwargs: ChatParams = {"model": model, "history": history}
if tools:
if not config.get("automatic_function_calling"):
config["automatic_function_calling"] = types.AutomaticFunctionCallingConfig(
disable=True, maximum_remote_calls=None
)
if not config.get("tool_config"):
config["tool_config"] = kwargs.pop("tool_config", None)
if not config.get("tools"):
config["tools"] = tools
chat_kwargs["config"] = types.GenerateContentConfig(**config)
return next_msg, chat_kwargs
def handle_streaming_flexible_model(
current_json: str,
candidate: types.Candidate,
output_cls: Type[BaseModel],
flexible_model: Type[BaseModel],
) -> Tuple[Optional[BaseModel], str]:
parts = candidate.content.parts or []
data = parts[0].text if parts else None
if data:
current_json += data
try:
return output_cls.model_validate_json(current_json), current_json
except ValidationError:
try:
return flexible_model.model_validate_json(
_repair_incomplete_json(current_json)
), current_json
except ValidationError:
return None, current_json
return None, current_json
def _should_retry(exception: BaseException):
if isinstance(exception, errors.ClientError):
if exception.status in (429, 408):
return True
return False
def create_retry_decorator(
max_retries: int,
random_exponential: bool = False,
stop_after_delay_seconds: Optional[float] = None,
min_seconds: float = 4,
max_seconds: float = 60,
) -> typing.Callable[[Any], Any]:
wait_strategy = (
wait_random_exponential(min=min_seconds, max=max_seconds)
if random_exponential
else wait_exponential(multiplier=1, min=min_seconds, max=max_seconds)
)
stop_strategy: stop_base = stop_after_attempt(max_retries)
if stop_after_delay_seconds is not None:
stop_strategy = stop_strategy | stop_after_delay(stop_after_delay_seconds)
return retry(
reraise=True,
stop=stop_strategy,
wait=wait_strategy,
retry=(
retry_if_exception_type(
(errors.ServerError, httpx.ConnectError, httpx.ConnectTimeout)
)
| retry_if_exception(_should_retry)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
| ChatParams |
python | python-markdown__markdown | tests/test_syntax/extensions/test_smarty.py | {
"start": 805,
"end": 5717
} | class ____(TestCase):
default_kwargs = {'extensions': ['smarty']}
def test_basic(self):
self.assertMarkdownRenders(
"It's fun. What's fun?",
'<p>It’s fun. What’s fun?</p>'
)
self.assertMarkdownRenders(
'"Isn\'t this fun"? --- she said...',
'<p>“Isn’t this fun”? — she said…</p>'
)
self.assertMarkdownRenders(
'"\'Quoted\' words in a larger quote."',
'<p>“‘Quoted’ words in a larger quote.”</p>'
)
self.assertMarkdownRenders(
'\'Quoted "words" in a larger quote.\'',
'<p>‘Quoted “words” in a larger quote.’</p>'
)
self.assertMarkdownRenders(
'"Quoted words at the \'end.\'"',
'<p>“Quoted words at the ‘end.’”</p>'
)
self.assertMarkdownRenders(
'\'Quoted words at the "end."\'',
'<p>‘Quoted words at the “end.”’</p>'
)
self.assertMarkdownRenders(
'(He replied, "She said \'Hello.\'")',
'<p>(He replied, “She said ‘Hello.’”)</p>'
)
self.assertMarkdownRenders(
'<span>He replied, "She said \'Hello.\'"</span>',
'<p><span>He replied, “She said ‘Hello.’”</span></p>'
)
self.assertMarkdownRenders(
'"quoted" text and **bold "quoted" text**',
'<p>“quoted” text and <strong>bold “quoted” text</strong></p>'
)
self.assertMarkdownRenders(
"'quoted' text and **bold 'quoted' text**",
'<p>‘quoted’ text and <strong>bold ‘quoted’ text</strong></p>'
)
self.assertMarkdownRenders(
'em-dashes (---) and ellipes (...)',
'<p>em-dashes (—) and ellipes (…)</p>'
)
self.assertMarkdownRenders(
'"[Link](http://example.com)" --- she said.',
'<p>“<a href="http://example.com">Link</a>” — she said.</p>'
)
self.assertMarkdownRenders(
'"Ellipsis within quotes..."',
'<p>“Ellipsis within quotes…”</p>'
)
self.assertMarkdownRenders(
"*Custer*'s Last Stand",
"<p><em>Custer</em>’s Last Stand</p>"
)
def test_years(self):
self.assertMarkdownRenders("1440--80's", '<p>1440–80’s</p>')
self.assertMarkdownRenders("1440--'80s", '<p>1440–’80s</p>')
self.assertMarkdownRenders("1440---'80s", '<p>1440—’80s</p>')
self.assertMarkdownRenders("1960's", '<p>1960’s</p>')
self.assertMarkdownRenders("one two '60s", '<p>one two ’60s</p>')
self.assertMarkdownRenders("'60s", '<p>’60s</p>')
def test_wrapping_line(self):
text = (
"A line that 'wraps' with\n"
"*emphasis* at the beginning of the next line."
)
html = (
'<p>A line that ‘wraps’ with\n'
'<em>emphasis</em> at the beginning of the next line.</p>'
)
self.assertMarkdownRenders(text, html)
def test_escaped(self):
self.assertMarkdownRenders(
'Escaped \\-- ndash',
'<p>Escaped -- ndash</p>'
)
self.assertMarkdownRenders(
'\\\'Escaped\\\' \\"quotes\\"',
'<p>\'Escaped\' "quotes"</p>'
)
self.assertMarkdownRenders(
'Escaped ellipsis\\...',
'<p>Escaped ellipsis...</p>'
)
self.assertMarkdownRenders(
'\'Escaped \\"quotes\\" in real ones\'',
'<p>‘Escaped "quotes" in real ones’</p>'
)
self.assertMarkdownRenders(
'\\\'"Real" quotes in escaped ones\\\'',
"<p>'“Real” quotes in escaped ones'</p>"
)
def test_escaped_attr(self):
self.assertMarkdownRenders(
'',
'<p><img alt="x"x" src="x" /></p>'
)
def test_code_spans(self):
self.assertMarkdownRenders(
'Skip `"code" -- --- \'spans\' ...`.',
'<p>Skip <code>"code" -- --- \'spans\' ...</code>.</p>'
)
def test_code_blocks(self):
text = (
' Also skip "code" \'blocks\'\n'
' foo -- bar --- baz ...'
)
html = (
'<pre><code>Also skip "code" \'blocks\'\n'
'foo -- bar --- baz ...\n'
'</code></pre>'
)
self.assertMarkdownRenders(text, html)
def test_horizontal_rule(self):
self.assertMarkdownRenders('--- -- ---', '<hr />')
| TestSmarty |
python | facebook__pyre-check | client/commands/expression_level_coverage.py | {
"start": 952,
"end": 1061
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
line: int
column: int
@dataclass(frozen=True)
| Pair |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 19348,
"end": 19654
} | class ____(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ("items",)
items: list[Expr]
def as_const(self, eval_ctx: EvalContext | None = None) -> list[t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
| List |
python | ray-project__ray | python/ray/tune/tests/test_tune_restore_warm_start.py | {
"start": 7513,
"end": 8246
} | class ____(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
dim_dict = {
"height": (ValueType.CONTINUOUS, [-100, 100], 1e-2),
"width": (ValueType.DISCRETE, [0, 20], False),
}
def cost(param):
tune.report(
dict(loss=(param["height"] - 14) ** 2 - abs(param["width"] - 3))
)
search_alg = ZOOptSearch(
algo="Asracos", # only support ASRacos currently
budget=200,
dim_dict=dim_dict,
metric="loss",
mode="min",
)
return search_alg, cost
@pytest.mark.skipif(sys.version_info >= (3, 12), reason="HEBO doesn't support py312")
| ZOOptWarmStartTest |
python | imageio__imageio | imageio/config/extensions.py | {
"start": 188,
"end": 47023
} | class ____:
"""File Extension Metadata
This class holds information about a image file format associated with a
given extension. This information is used to track plugins that are known to
be able to handle a particular format. It also contains additional
information about a format, which is used when creating the supported format
docs.
Plugins known to be able to handle this format are ordered by a ``priority``
list. This list is used to determine the ideal plugin to use when choosing a
plugin based on file extension.
Parameters
----------
extension : str
The name of the extension including the initial dot, e.g. ".png".
priority : List
A list of plugin names (entries in config.known_plugins) that can handle
this format. The position of a plugin expresses a preference, e.g.
["plugin1", "plugin2"] indicates that, if available, plugin1 should be
preferred over plugin2 when handling a request related to this format.
name : str
The full name of the format.
description : str
A description of the format.
external_link : str
A link to further information about the format. Typically, the format's
specification.
volume_support : str
If True, the format/extension supports volumetric image data.
Examples
--------
>>> FileExtension(
name="Bitmap",
extension=".bmp",
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"],
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
)
"""
def __init__(
self,
*,
extension,
priority,
name=None,
description=None,
external_link=None,
volume_support=False,
):
self.extension = extension
self.priority = priority
self.name = name
self.description = description
self.external_link = external_link
self.default_priority = priority.copy()
self.volume_support = volume_support
def reset(self):
self.priority = self.default_priority.copy()
extension_list = [
FileExtension(
name="Hasselblad raw",
extension=".3fr",
priority=["RAW-FI"],
),
FileExtension(
name="Sony alpha",
extension=".arw",
priority=["RAW-FI"],
),
FileExtension(
name="Animated Portable Network Graphics",
external_link="https://en.wikipedia.org/wiki/APNG",
extension=".apng",
priority=["pillow", "pyav"],
),
FileExtension(
name="Audio Video Interleave",
extension=".avi",
priority=["FFMPEG"],
),
FileExtension(
name="Casio raw format",
extension=".bay",
priority=["RAW-FI"],
),
FileExtension(
extension=".blp",
priority=["pillow"],
),
FileExtension(
name="Bitmap",
extension=".bmp",
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"],
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
),
FileExtension(
name="Device-Independent Bitmap",
extension=".dip",
priority=["opencv"],
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
),
FileExtension(
name="Re-Volt mipmap",
extension=".bmq",
priority=["RAW-FI"],
),
FileExtension(
name="Binary Structured Data Format",
extension=".bsdf",
priority=["BSDF"],
external_link="http://bsdf.io/",
),
FileExtension(
name="Binary Universal Form for the Representation of meteorological data",
extension=".bufr",
priority=["pillow", "BUFR-PIL"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".bw",
priority=["pillow", "SGI-PIL", "SGI-FI"],
),
FileExtension(
name="Scirra Construct",
extension=".cap",
priority=["RAW-FI"],
),
FileExtension(
name="AMETEK High Speed Camera Format",
extension=".cine",
priority=["RAW-FI"],
external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution",
),
FileExtension(extension=".cr2", priority=["RAW-FI"]),
FileExtension(
extension=".crw",
priority=["RAW-FI"],
),
FileExtension(
extension=".cs1",
priority=["RAW-FI"],
),
FileExtension(
name="Computerized Tomography",
extension=".ct",
priority=["DICOM"],
),
FileExtension(
name="Windows Cursor Icons",
extension=".cur",
priority=["pillow", "CUR-PIL"],
),
FileExtension(
name="Dr. Halo",
extension=".cut",
priority=["CUT-FI"],
),
FileExtension(
extension=".dc2",
priority=["RAW-FI"],
),
FileExtension(
name="DICOM file format",
extension=".dcm",
priority=["DICOM", "ITK"],
),
FileExtension(
extension=".dcr",
priority=["RAW-FI"],
),
FileExtension(
name="Intel DCX",
extension=".dcx",
priority=["pillow", "DCX-PIL"],
),
FileExtension(
name="DirectX Texture Container",
extension=".dds",
priority=["pillow", "DDS-FI", "DDS-PIL"],
),
FileExtension(
name="Windows Bitmap",
extension=".dib",
priority=["pillow", "DIB-PIL"],
),
FileExtension(
name="DICOM file format",
extension=".dicom",
priority=["ITK"],
),
FileExtension(
extension=".dng",
priority=["RAW-FI"],
),
FileExtension(
extension=".drf",
priority=["RAW-FI"],
),
FileExtension(
extension=".dsc",
priority=["RAW-FI"],
),
FileExtension(
name="Enhanced Compression Wavelet",
extension=".ecw",
priority=["GDAL"],
),
FileExtension(
name="Windows Metafile",
extension=".emf",
priority=["pillow", "WMF-PIL"],
),
FileExtension(
name="Encapsulated Postscript",
extension=".eps",
priority=["pillow", "EPS-PIL"],
),
FileExtension(
extension=".erf",
priority=["RAW-FI"],
),
FileExtension(
name="OpenEXR",
extension=".exr",
external_link="https://openexr.readthedocs.io/en/latest/",
priority=["EXR-FI", "pyav", "opencv"],
),
FileExtension(
extension=".fff",
priority=["RAW-FI"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fit",
priority=["pillow", "FITS-PIL", "FITS"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fits",
priority=["pillow", "FITS-PIL", "FITS", "pyav"],
),
FileExtension(
name="Autodesk FLC Animation",
extension=".flc",
priority=["pillow", "FLI-PIL"],
),
FileExtension(
name="Autodesk FLI Animation",
extension=".fli",
priority=["pillow", "FLI-PIL"],
),
FileExtension(
name="Kodak FlashPix",
extension=".fpx",
priority=["pillow", "FPX-PIL"],
),
FileExtension(
name="Independence War 2: Edge Of Chaos Texture Format",
extension=".ftc",
priority=["pillow", "FTEX-PIL"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fts",
priority=["FITS"],
),
FileExtension(
name="Independence War 2: Edge Of Chaos Texture Format",
extension=".ftu",
priority=["pillow", "FTEX-PIL"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fz",
priority=["FITS"],
),
FileExtension(
name="Raw fax format CCITT G.3",
extension=".g3",
priority=["G3-FI"],
),
FileExtension(
name="GIMP brush file",
extension=".gbr",
priority=["pillow", "GBR-PIL"],
),
FileExtension(
name="Grassroots DICOM",
extension=".gdcm",
priority=["ITK"],
),
FileExtension(
name="Graphics Interchange Format",
extension=".gif",
priority=["pillow", "GIF-PIL", "pyav"],
),
FileExtension(
name="UMDS GIPL",
extension=".gipl",
priority=["ITK"],
),
FileExtension(
name="gridded meteorological data",
extension=".grib",
priority=["pillow", "GRIB-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".h5",
priority=["pillow", "HDF5-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".hdf",
priority=["pillow", "HDF5-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".hdf5",
priority=["ITK"],
),
FileExtension(
name="JPEG Extended Range",
extension=".hdp",
priority=["JPEG-XR-FI"],
),
FileExtension(
name="High Dynamic Range Image",
extension=".hdr",
priority=["HDR-FI", "ITK", "opencv"],
),
FileExtension(
extension=".ia",
priority=["RAW-FI"],
),
FileExtension(
extension=".icb",
priority=["pillow"],
),
FileExtension(
name="Mac OS Icon File",
extension=".icns",
priority=["pillow", "ICNS-PIL"],
),
FileExtension(
name="Windows Icon File",
extension=".ico",
priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"],
),
FileExtension(
name="ILBM Interleaved Bitmap",
extension=".iff",
priority=["IFF-FI"],
),
FileExtension(
name="IPTC/NAA",
extension=".iim",
priority=["pillow", "IPTC-PIL"],
),
FileExtension(
extension=".iiq",
priority=["RAW-FI"],
),
FileExtension(
name="IFUNC Image Memory",
extension=".im",
priority=["pillow", "IM-PIL"],
),
FileExtension(
extension=".img",
priority=["ITK", "GDAL"],
),
FileExtension(
extension=".img.gz",
priority=["ITK"],
),
FileExtension(
name="IM Tools",
extension=".IMT",
priority=["pillow", "IMT-PIL"],
),
FileExtension(
name="Image Processing Lab",
extension=".ipl",
priority=["ITK"],
),
FileExtension(
name="JPEG 2000",
extension=".j2c",
priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
),
FileExtension(
name="JPEG 2000",
extension=".j2k",
priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
),
FileExtension(
name="JPEG",
extension=".jfif",
priority=["pillow", "JPEG-PIL"],
),
FileExtension(
name="JPEG",
extension=".jif",
priority=["JPEG-FI"],
),
FileExtension(
name="JPEG Network Graphics",
extension=".jng",
priority=["JNG-FI"],
),
FileExtension(
name="JPEG 2000",
extension=".jp2",
priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"],
),
FileExtension(
name="JPEG 2000",
extension=".jpc",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG",
extension=".jpe",
priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"],
),
FileExtension(
name="Joint Photographic Experts Group",
extension=".jpeg",
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
),
FileExtension(
name="JPEG 2000",
extension=".jpf",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="Joint Photographic Experts Group",
extension=".jpg",
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
),
FileExtension(
name="JPEG 2000",
extension=".jpx",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG Extended Range",
extension=".jxr",
priority=["JPEG-XR-FI"],
),
FileExtension(
extension=".k25",
priority=["RAW-FI"],
),
FileExtension(
extension=".kc2",
priority=["RAW-FI"],
),
FileExtension(
extension=".kdc",
priority=["RAW-FI"],
),
FileExtension(
name="C64 Koala Graphics",
extension=".koa",
priority=["KOALA-FI"],
),
FileExtension(
name="ILBM Interleaved Bitmap",
extension=".lbm",
priority=["IFF-FI"],
),
FileExtension(
name="Lytro F01",
extension=".lfp",
priority=["LYTRO-LFP"],
),
FileExtension(
name="Lytro Illum",
extension=".lfr",
priority=["LYTRO-LFR"],
),
FileExtension(
name="ZEISS LSM",
extension=".lsm",
priority=["tifffile", "ITK", "TIFF"],
),
FileExtension(
name="McIdas area file",
extension=".MCIDAS",
priority=["pillow", "MCIDAS-PIL"],
external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html",
),
FileExtension(
extension=".mdc",
priority=["RAW-FI"],
),
FileExtension(
extension=".mef",
priority=["RAW-FI"],
),
FileExtension(
name="FreeSurfer File Format",
extension=".mgh",
priority=["ITK"],
),
FileExtension(
name="ITK MetaImage",
extension=".mha",
priority=["ITK"],
),
FileExtension(
name="ITK MetaImage Header",
extension=".mhd",
priority=["ITK"],
),
FileExtension(
name="Microsoft Image Composer",
extension=".mic",
priority=["pillow", "MIC-PIL"],
),
FileExtension(
name="Matroska Multimedia Container",
extension=".mkv",
priority=["FFMPEG", "pyav"],
),
FileExtension(
name="Medical Imaging NetCDF",
extension=".mnc",
priority=["ITK"],
),
FileExtension(
name="Medical Imaging NetCDF 2",
extension=".mnc2",
priority=["ITK"],
),
FileExtension(
name="Leaf Raw Image Format",
extension=".mos",
priority=["RAW-FI"],
),
FileExtension(
name="QuickTime File Format",
extension=".mov",
priority=["FFMPEG", "pyav"],
),
FileExtension(
name="MPEG-4 Part 14",
extension=".mp4",
priority=["FFMPEG", "pyav"],
),
FileExtension(
name="MPEG-1 Moving Picture Experts Group",
extension=".mpeg",
priority=["FFMPEG", "pyav"],
),
FileExtension(
name="Moving Picture Experts Group",
extension=".mpg",
priority=["pillow", "FFMPEG", "pyav"],
),
FileExtension(
name="JPEG Multi-Picture Format",
extension=".mpo",
priority=["pillow", "MPO-PIL"],
),
FileExtension(
name="Magnetic resonance imaging",
extension=".mri",
priority=["DICOM"],
),
FileExtension(
extension=".mrw",
priority=["RAW-FI"],
),
FileExtension(
name="Windows Paint",
extension=".msp",
priority=["pillow", "MSP-PIL"],
),
FileExtension(
extension=".nef",
priority=["RAW-FI", "rawpy"],
),
FileExtension(
extension=".nhdr",
priority=["ITK"],
),
FileExtension(
extension=".nia",
priority=["ITK"],
),
FileExtension(
extension=".nii",
priority=["ITK"],
),
FileExtension(
name="nii.gz",
extension=".nii.gz",
priority=["ITK"],
),
FileExtension(
name="Numpy Array",
extension=".npz",
priority=["NPZ"],
volume_support=True,
),
FileExtension(
extension=".nrrd",
priority=["ITK"],
),
FileExtension(
extension=".nrw",
priority=["RAW-FI"],
),
FileExtension(
extension=".orf",
priority=["RAW-FI"],
),
FileExtension(
extension=".palm",
priority=["pillow"],
),
FileExtension(
name="Portable Bitmap",
extension=".pbm",
priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
),
FileExtension(
name="Kodak PhotoCD",
extension=".pcd",
priority=["pillow", "PCD-FI", "PCD-PIL"],
),
FileExtension(
name="Macintosh PICT",
extension=".pct",
priority=["PICT-FI"],
),
FileExtension(
name="Zsoft Paintbrush",
extension=".PCX",
priority=["pillow", "PCX-FI", "PCX-PIL"],
),
FileExtension(
extension=".pdf",
priority=["pillow"],
),
FileExtension(
extension=".pef",
priority=["RAW-FI"],
),
FileExtension(
extension=".pfm",
priority=["PFM-FI", "pyav", "opencv"],
),
FileExtension(
name="Portable Greymap",
extension=".pgm",
priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
),
FileExtension(
name="Macintosh PICT",
extension=".pic",
priority=["PICT-FI", "ITK", "opencv"],
),
FileExtension(
name="Macintosh PICT",
extension=".pict",
priority=["PICT-FI"],
),
FileExtension(
name="Portable Network Graphics",
extension=".png",
priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"],
),
FileExtension(
name="Portable Image Format",
extension=".pnm",
priority=["pillow", "opencv"],
),
FileExtension(
name="Pbmplus image",
extension=".ppm",
priority=["pillow", "PPM-PIL", "pyav"],
),
FileExtension(
name="Pbmplus image",
extension=".pbm",
priority=["pillow", "PPM-PIL", "PPM-FI"],
),
FileExtension(
name="Portable image format",
extension=".pxm",
priority=["opencv"],
),
FileExtension(
name="Portable Pixelmap (ASCII)",
extension=".ppm",
priority=["PPM-FI", "opencv"],
),
FileExtension(
name="Portable Pixelmap (Raw)",
extension=".ppm",
priority=["PPMRAW-FI"],
),
FileExtension(
name="Ghostscript",
extension=".ps",
priority=["pillow", "EPS-PIL"],
),
FileExtension(
name="Adope Photoshop 2.5 and 3.0",
extension=".psd",
priority=["pillow", "PSD-PIL", "PSD-FI"],
),
FileExtension(
extension=".ptx",
priority=["RAW-FI"],
),
FileExtension(
extension=".pxn",
priority=["RAW-FI"],
),
FileExtension(
name="PIXAR raster image",
extension=".pxr",
priority=["pillow", "PIXAR-PIL"],
),
FileExtension(
extension=".qtk",
priority=["RAW-FI"],
),
FileExtension(
extension=".raf",
priority=["RAW-FI"],
),
FileExtension(
name="Sun Raster File",
extension=".ras",
priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"],
),
FileExtension(
name="Sun Raster File",
extension=".sr",
priority=["opencv"],
),
FileExtension(
extension=".raw",
priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"],
),
FileExtension(
extension=".rdc",
priority=["RAW-FI"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".rgb",
priority=["pillow", "SGI-PIL"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".rgba",
priority=["pillow", "SGI-PIL"],
),
FileExtension(
extension=".rw2",
priority=["RAW-FI"],
),
FileExtension(
extension=".rwl",
priority=["RAW-FI"],
),
FileExtension(
extension=".rwz",
priority=["RAW-FI"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".sgi",
priority=["pillow", "SGI-PIL", "pyav"],
),
FileExtension(
name="SPE File Format",
extension=".spe",
priority=["SPE"],
),
FileExtension(
extension=".SPIDER",
priority=["pillow", "SPIDER-PIL"],
),
FileExtension(
extension=".sr2",
priority=["RAW-FI"],
),
FileExtension(
extension=".srf",
priority=["RAW-FI"],
),
FileExtension(
extension=".srw",
priority=["RAW-FI"],
),
FileExtension(
extension=".sti",
priority=["RAW-FI"],
),
FileExtension(
extension=".stk",
priority=["tifffile", "TIFF"],
),
FileExtension(
name="ShockWave Flash",
extension=".swf",
priority=["SWF", "pyav"],
),
FileExtension(
name="Truevision TGA",
extension=".targa",
priority=["pillow", "TARGA-FI"],
),
FileExtension(
name="Truevision TGA",
extension=".tga",
priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"],
),
FileExtension(
name="Tagged Image File",
extension=".tif",
priority=[
"tifffile",
"TIFF",
"pillow",
"TIFF-PIL",
"TIFF-FI",
"FEI",
"ITK",
"GDAL",
"pyav",
"opencv",
],
volume_support=True,
),
FileExtension(
name="Tagged Image File Format",
extension=".tiff",
priority=[
"tifffile",
"TIFF",
"pillow",
"TIFF-PIL",
"TIFF-FI",
"FEI",
"ITK",
"GDAL",
"pyav",
"opencv",
],
volume_support=True,
),
FileExtension(
extension=".vda",
priority=["pillow"],
),
FileExtension(
extension=".vst",
priority=["pillow"],
),
FileExtension(
extension=".vtk",
priority=["ITK"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wap",
priority=["WBMP-FI"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wbm",
priority=["WBMP-FI"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wbmp",
priority=["WBMP-FI"],
),
FileExtension(
name="JPEG Extended Range",
extension=".wdp",
priority=["JPEG-XR-FI"],
),
FileExtension(
name="Matroska",
extension=".webm",
priority=["FFMPEG", "pyav"],
),
FileExtension(
name="Google WebP",
extension=".webp",
priority=["pillow", "WEBP-FI", "pyav", "opencv"],
),
FileExtension(
name="Windows Meta File",
extension=".wmf",
priority=["pillow", "WMF-PIL"],
),
FileExtension(
name="Windows Media Video",
extension=".wmv",
priority=["FFMPEG"],
),
FileExtension(
name="X11 Bitmap",
extension=".xbm",
priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"],
),
FileExtension(
name="X11 Pixel Map",
extension=".xpm",
priority=["pillow", "XPM-PIL", "XPM-FI"],
),
FileExtension(
name="Thumbnail Image",
extension=".XVTHUMB",
priority=["pillow", "XVTHUMB-PIL"],
),
FileExtension(
extension=".dpx",
priority=["pyav"],
),
FileExtension(
extension=".im1",
priority=["pyav"],
),
FileExtension(
extension=".im24",
priority=["pyav"],
),
FileExtension(
extension=".im8",
priority=["pyav"],
),
FileExtension(
extension=".jls",
priority=["pyav"],
),
FileExtension(
extension=".ljpg",
priority=["pyav"],
),
FileExtension(
extension=".pam",
priority=["pyav"],
),
FileExtension(
extension=".pcx",
priority=["pyav"],
),
FileExtension(
extension=".pgmyuv",
priority=["pyav"],
),
FileExtension(
extension=".pix",
priority=["pyav"],
),
FileExtension(
extension=".ppm",
priority=["pyav"],
),
FileExtension(
extension=".rs",
priority=["pyav"],
),
FileExtension(
extension=".sun",
priority=["pyav"],
),
FileExtension(
extension=".sunras",
priority=["pyav"],
),
FileExtension(
extension=".xface",
priority=["pyav"],
),
FileExtension(
extension=".xwd",
priority=["pyav"],
),
FileExtension(
extension=".y",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".3g2",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".3gp",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".f4v",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".ism",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".isma",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".ismv",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".m4a",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".m4b",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".mj2",
priority=["pyav"],
),
FileExtension(
name="3GP (3GPP file format)",
extension=".psp",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".3g2",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".3gp",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".f4v",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".ism",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".isma",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".ismv",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".m4a",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".m4b",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".mj2",
priority=["pyav"],
),
FileExtension(
name="3GP2 (3GPP2 file format)",
extension=".psp",
priority=["pyav"],
),
FileExtension(
name="3GPP AMR",
extension=".amr",
priority=["pyav"],
),
FileExtension(
name="a64 - video for Commodore 64",
extension=".A64",
priority=["pyav"],
),
FileExtension(
name="a64 - video for Commodore 64",
extension=".a64",
priority=["pyav"],
),
FileExtension(
name="Adobe Filmstrip",
extension=".flm",
priority=["pyav"],
),
FileExtension(
name="AMV",
extension=".amv",
priority=["pyav"],
),
FileExtension(
name="ASF (Advanced / Active Streaming Format)",
extension=".asf",
priority=["pyav"],
),
FileExtension(
name="ASF (Advanced / Active Streaming Format)",
extension=".asf",
priority=["pyav"],
),
FileExtension(
name="ASF (Advanced / Active Streaming Format)",
extension=".wmv",
priority=["pyav"],
),
FileExtension(
name="ASF (Advanced / Active Streaming Format)",
extension=".wmv",
priority=["pyav"],
),
FileExtension(
name="AV1 Annex B",
extension=".obu",
priority=["pyav"],
),
FileExtension(
name="AV1 low overhead OBU",
extension=".obu",
priority=["pyav"],
),
FileExtension(
name="AVI (Audio Video Interleaved)",
extension=".avi",
priority=["pyav"],
),
FileExtension(
name="AVR (Audio Visual Research)",
extension=".avr",
priority=["pyav"],
),
FileExtension(
name="Beam Software SIFF",
extension=".vb",
priority=["pyav"],
),
FileExtension(
name="CD Graphics",
extension=".cdg",
priority=["pyav"],
),
FileExtension(
name="Commodore CDXL video",
extension=".cdxl",
priority=["pyav"],
),
FileExtension(
name="Commodore CDXL video",
extension=".xl",
priority=["pyav"],
),
FileExtension(
name="DASH Muxer",
extension=".mpd",
priority=["pyav"],
),
FileExtension(
name="Digital Pictures SGA",
extension=".sga",
priority=["pyav"],
),
FileExtension(
name="Discworld II BMV",
extension=".bmv",
priority=["pyav"],
),
FileExtension(
name="DV (Digital Video)",
extension=".dif",
priority=["pyav"],
),
FileExtension(
name="DV (Digital Video)",
extension=".dv",
priority=["pyav"],
),
FileExtension(
name="F4V Adobe Flash Video",
extension=".f4v",
priority=["pyav"],
),
FileExtension(
name="FLV (Flash Video)",
extension=".flv",
priority=["pyav"],
),
FileExtension(
name="GXF (General eXchange Format)",
extension=".gxf",
priority=["pyav"],
),
FileExtension(
name="iCE Draw File",
extension=".idf",
priority=["pyav"],
),
FileExtension(
name="IFV CCTV DVR",
extension=".ifv",
priority=["pyav"],
),
FileExtension(
name="iPod H.264 MP4 (MPEG-4 Part 14)",
extension=".m4a",
priority=["pyav"],
),
FileExtension(
name="iPod H.264 MP4 (MPEG-4 Part 14)",
extension=".m4b",
priority=["pyav"],
),
FileExtension(
name="iPod H.264 MP4 (MPEG-4 Part 14)",
extension=".m4v",
priority=["pyav"],
),
FileExtension(
name="IVR (Internet Video Recording)",
extension=".ivr",
priority=["pyav"],
),
FileExtension(
name="Konami PS2 SVAG",
extension=".svag",
priority=["pyav"],
),
FileExtension(
name="KUX (YouKu)",
extension=".kux",
priority=["pyav"],
),
FileExtension(
name="live RTMP FLV (Flash Video)",
extension=".flv",
priority=["pyav"],
),
FileExtension(
name="Loki SDL MJPEG",
extension=".mjpg",
priority=["pyav"],
),
FileExtension(
name="LVF",
extension=".lvf",
priority=["pyav"],
),
FileExtension(
name="Matroska / WebM",
extension=".mk3d",
priority=["pyav"],
),
FileExtension(
name="Matroska / WebM",
extension=".mka",
priority=["pyav"],
),
FileExtension(
name="Matroska / WebM",
extension=".mks",
priority=["pyav"],
),
FileExtension(
name="Microsoft XMV",
extension=".xmv",
priority=["pyav"],
),
FileExtension(
name="MIME multipart JPEG",
extension=".mjpg",
priority=["pyav"],
),
FileExtension(
name="MobiClip MODS",
extension=".mods",
priority=["pyav"],
),
FileExtension(
name="MobiClip MOFLEX",
extension=".moflex",
priority=["pyav"],
),
FileExtension(
name="Motion Pixels MVI",
extension=".mvi",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".3g2",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".3gp",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".f4v",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".ism",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".isma",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".ismv",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".m4a",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".m4b",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".mj2",
priority=["pyav"],
),
FileExtension(
name="MP4 (MPEG-4 Part 14)",
extension=".psp",
priority=["pyav"],
),
FileExtension(
name="MPEG-2 PS (DVD VOB)",
extension=".dvd",
priority=["pyav"],
),
FileExtension(
name="MPEG-2 PS (SVCD)",
extension=".vob",
priority=["pyav"],
),
FileExtension(
name="MPEG-2 PS (VOB)",
extension=".vob",
priority=["pyav"],
),
FileExtension(
name="MPEG-TS (MPEG-2 Transport Stream)",
extension=".m2t",
priority=["pyav"],
),
FileExtension(
name="MPEG-TS (MPEG-2 Transport Stream)",
extension=".m2ts",
priority=["pyav"],
),
FileExtension(
name="MPEG-TS (MPEG-2 Transport Stream)",
extension=".mts",
priority=["pyav"],
),
FileExtension(
name="MPEG-TS (MPEG-2 Transport Stream)",
extension=".ts",
priority=["pyav"],
),
FileExtension(
name="Musepack",
extension=".mpc",
priority=["pyav"],
),
FileExtension(
name="MXF (Material eXchange Format) Operational Pattern Atom",
extension=".mxf",
priority=["pyav"],
),
FileExtension(
name="MXF (Material eXchange Format)",
extension=".mxf",
priority=["pyav"],
),
FileExtension(
name="MxPEG clip",
extension=".mxg",
priority=["pyav"],
),
FileExtension(
name="NC camera feed",
extension=".v",
priority=["pyav"],
),
FileExtension(
name="NUT",
extension=".nut",
priority=["pyav"],
),
FileExtension(
name="Ogg Video",
extension=".ogv",
priority=["pyav"],
),
FileExtension(
name="Ogg",
extension=".ogg",
priority=["pyav"],
),
FileExtension(
name="On2 IVF",
extension=".ivf",
priority=["pyav"],
),
FileExtension(
name="PSP MP4 (MPEG-4 Part 14)",
extension=".psp",
priority=["pyav"],
),
FileExtension(
name="Psygnosis YOP",
extension=".yop",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".3g2",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".3gp",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".f4v",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".ism",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".isma",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".ismv",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".m4a",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".m4b",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".mj2",
priority=["pyav"],
),
FileExtension(
name="QuickTime / MOV",
extension=".psp",
priority=["pyav"],
),
FileExtension(
name="raw AVS2-P2/IEEE1857.4 video",
extension=".avs",
priority=["pyav"],
),
FileExtension(
name="raw AVS2-P2/IEEE1857.4 video",
extension=".avs2",
priority=["pyav"],
),
FileExtension(
name="raw AVS3-P2/IEEE1857.10",
extension=".avs3",
priority=["pyav"],
),
FileExtension(
name="raw Chinese AVS (Audio Video Standard) video",
extension=".cavs",
priority=["pyav"],
),
FileExtension(
name="raw Dirac",
extension=".drc",
priority=["pyav"],
),
FileExtension(
name="raw Dirac",
extension=".vc2",
priority=["pyav"],
),
FileExtension(
name="raw DNxHD (SMPTE VC-3)",
extension=".dnxhd",
priority=["pyav"],
),
FileExtension(
name="raw DNxHD (SMPTE VC-3)",
extension=".dnxhr",
priority=["pyav"],
),
FileExtension(
name="raw GSM",
extension=".gsm",
priority=["pyav"],
),
FileExtension(
name="raw H.261",
extension=".h261",
priority=["pyav"],
),
FileExtension(
name="raw H.263",
extension=".h263",
priority=["pyav"],
),
FileExtension(
name="raw H.264 video",
extension=".264",
priority=["pyav"],
),
FileExtension(
name="raw H.264 video",
extension=".avc",
priority=["pyav"],
),
FileExtension(
name="raw H.264 video",
extension=".h264",
priority=["pyav", "FFMPEG"],
),
FileExtension(
name="raw H.264 video",
extension=".h26l",
priority=["pyav"],
),
FileExtension(
name="raw HEVC video",
extension=".265",
priority=["pyav"],
),
FileExtension(
name="raw HEVC video",
extension=".h265",
priority=["pyav"],
),
FileExtension(
name="raw HEVC video",
extension=".hevc",
priority=["pyav"],
),
FileExtension(
name="raw id RoQ",
extension=".roq",
priority=["pyav"],
),
FileExtension(
name="raw Ingenient MJPEG",
extension=".cgi",
priority=["pyav"],
),
FileExtension(
name="raw IPU Video",
extension=".ipu",
priority=["pyav"],
),
FileExtension(
name="raw MJPEG 2000 video",
extension=".j2k",
priority=["pyav"],
),
FileExtension(
name="raw MJPEG video",
extension=".mjpeg",
priority=["pyav"],
),
FileExtension(
name="raw MJPEG video",
extension=".mjpg",
priority=["pyav"],
),
FileExtension(
name="raw MJPEG video",
extension=".mpo",
priority=["pyav"],
),
FileExtension(
name="raw MPEG-1 video",
extension=".m1v",
priority=["pyav"],
),
FileExtension(
name="raw MPEG-1 video",
extension=".mpeg",
priority=["pyav"],
),
FileExtension(
name="raw MPEG-1 video",
extension=".mpg",
priority=["pyav"],
),
FileExtension(
name="raw MPEG-2 video",
extension=".m2v",
priority=["pyav"],
),
FileExtension(
name="raw MPEG-4 video",
extension=".m4v",
priority=["pyav"],
),
FileExtension(
name="raw VC-1 video",
extension=".vc1",
priority=["pyav"],
),
FileExtension(
name="raw video",
extension=".cif",
priority=["pyav"],
),
FileExtension(
name="raw video",
extension=".qcif",
priority=["pyav"],
),
FileExtension(
name="raw video",
extension=".rgb",
priority=["pyav"],
),
FileExtension(
name="raw video",
extension=".yuv",
priority=["pyav"],
),
FileExtension(
name="RealMedia",
extension=".rm",
priority=["pyav"],
),
FileExtension(
name="SDR2",
extension=".sdr2",
priority=["pyav"],
),
FileExtension(
name="Sega FILM / CPK",
extension=".cpk",
priority=["pyav"],
),
FileExtension(
name="SER (Simple uncompressed video format for astronomical capturing)",
extension=".ser",
priority=["pyav"],
),
FileExtension(
name="Simbiosis Interactive IMX",
extension=".imx",
priority=["pyav"],
),
FileExtension(
name="Square SVS",
extension=".svs",
priority=["tifffile", "pyav"],
),
FileExtension(
name="TiVo TY Stream",
extension=".ty",
priority=["pyav"],
),
FileExtension(
name="TiVo TY Stream",
extension=".ty+",
priority=["pyav"],
),
FileExtension(
name="Uncompressed 4:2:2 10-bit",
extension=".v210",
priority=["pyav"],
),
FileExtension(
name="Uncompressed 4:2:2 10-bit",
extension=".yuv10",
priority=["pyav"],
),
FileExtension(
name="VC-1 test bitstream",
extension=".rcv",
priority=["pyav"],
),
FileExtension(
name="Video CCTV DAT",
extension=".dat",
priority=["pyav"],
),
FileExtension(
name="Video DAV",
extension=".dav",
priority=["pyav"],
),
FileExtension(
name="Vivo",
extension=".viv",
priority=["pyav"],
),
FileExtension(
name="WebM Chunk Muxer",
extension=".chk",
priority=["pyav"],
),
FileExtension(
name="WebM",
extension=".mk3d",
priority=["pyav"],
),
FileExtension(
name="WebM",
extension=".mka",
priority=["pyav"],
),
FileExtension(
name="WebM",
extension=".mks",
priority=["pyav"],
),
FileExtension(
name="Windows Television (WTV)",
extension=".wtv",
priority=["pyav"],
),
FileExtension(
name="Xilam DERF",
extension=".adp",
priority=["pyav"],
),
FileExtension(
name="YUV4MPEG pipe",
extension=".y4m",
priority=["pyav"],
),
FileExtension(
extension=".qpi",
priority=["tifffile"],
),
FileExtension(
name="PCO Camera",
extension=".pcoraw",
priority=["tifffile"],
),
FileExtension(
name="PCO Camera",
extension=".rec",
priority=["tifffile"],
),
FileExtension(
name="Perkin Elmer Vectra",
extension=".qptiff",
priority=["tifffile"],
),
FileExtension(
name="Pyramid Encoded TIFF",
extension=".ptiff",
priority=["tifffile"],
),
FileExtension(
name="Pyramid Encoded TIFF",
extension=".ptif",
priority=["tifffile"],
),
FileExtension(
name="Opticks Gel",
extension=".gel",
priority=["tifffile"],
),
FileExtension(
name="Zoomify Image Format",
extension=".zif",
priority=["tifffile"],
),
FileExtension(
name="Hamamatsu Slide Scanner",
extension=".ndpi",
priority=["tifffile"],
),
FileExtension(
name="Roche Digital Pathology",
extension=".bif",
priority=["tifffile"],
),
FileExtension(
extension=".tf8",
priority=["tifffile"],
),
FileExtension(
extension=".btf",
priority=["tifffile"],
),
FileExtension(
name="High Efficiency Image File Format",
extension=".heic",
priority=["pillow"],
),
FileExtension(
name="AV1 Image File Format",
extension=".avif",
priority=["pillow"],
),
]
# Order the registered formats alphabetically by extension, then build a
# lookup table mapping each extension string to every FileExtension entry
# that claims it. Entries for the same extension keep their sorted order,
# so the first element of each group is the preferred one.
extension_list.sort(key=lambda fmt: fmt.extension)
known_extensions = {}
for fmt in extension_list:
    known_extensions.setdefault(fmt.extension, []).append(fmt)
# Flatten the grouped entries back into a single list; dict insertion order
# preserves the alphabetical sort above, so the result matches it.
extension_list = [fmt for group in known_extensions.values() for fmt in group]
# Extension strings considered video formats. Used below to pick one
# representative FileExtension entry per extension for ``video_extensions``.
# NOTE: order matters for reproducibility and mirrors the original listing
# (".a64" deliberately precedes ".A64"); ``str.split()`` on the multiline
# literal yields exactly these 116 strings in this order.
_video_extension_strings = """
    .264 .265 .3g2 .3gp .a64 .A64 .adp .amr .amv .asf .avc .avi .avr
    .avs .avs2 .avs3 .bmv .cavs .cdg .cdxl .cgi .chk .cif .cpk .dat
    .dav .dif .dnxhd .dnxhr .drc .dv .dvd .f4v .flm .flv .gsm .gxf
    .h261 .h263 .h264 .h265 .h26l .hevc .idf .ifv .imx .ipu .ism
    .isma .ismv .ivf .ivr .j2k .kux .lvf .m1v .m2t .m2ts .m2v .m4a
    .m4b .m4v .mj2 .mjpeg .mjpg .mk3d .mka .mks .mkv .mods .moflex
    .mov .mp4 .mpc .mpd .mpeg .mpg .mpo .mts .mvi .mxf .mxg .nut
    .obu .ogg .ogv .psp .qcif .rcv .rgb .rm .roq .sdr2 .ser .sga
    .svag .svs .ts .ty .ty+ .v .v210 .vb .vc1 .vc2 .viv .vob .webm
    .wmv .wtv .xl .xmv .y4m .yop .yuv .yuv10
""".split()
# For every known video extension string select the preferred (first
# registered) FileExtension entry, then order the result alphabetically.
# A KeyError here would indicate a video extension string with no matching
# FileExtension record in the registry above.
video_extensions = [known_extensions[text][0] for text in _video_extension_strings]
video_extensions.sort(key=lambda fmt: fmt.extension)
| FileExtension |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 34470,
"end": 35192
} | class ____(PipesStdioLogWriterChannel):
"""A log writer channel that writes stdout or stderr via the message writer channel."""
def __init__(
self,
message_channel: PipesMessageWriterChannel,
stream: Literal["stdout", "stderr"],
name: str,
interval: float,
):
self.message_channel = message_channel
super().__init__(interval=interval, stream=stream, name=name)
def write_chunk(self, chunk: str) -> None:
self.message_channel.write_message(
_make_message(
method="log_external_stream",
params={"stream": self.stream, "text": chunk, "extras": {}},
)
)
| PipesDefaultLogWriterChannel |
python | coleifer__peewee | peewee.py | {
"start": 55786,
"end": 56129
} | class ____(ColumnBase):
def __init__(self, namespace, attribute):
self._namespace = namespace
self._attribute = attribute
def __sql__(self, ctx):
return (ctx
.literal(self._namespace._name + '.')
.sql(Entity(self._attribute)))
EXCLUDED = _Namespace('EXCLUDED')
| NamespaceAttribute |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/dml.py | {
"start": 2023,
"end": 6727
} | class ____(StandardInsert):
"""SQLite-specific implementation of INSERT.
Adds methods for SQLite-specific syntaxes such as ON CONFLICT.
The :class:`_sqlite.Insert` object is created using the
:func:`sqlalchemy.dialects.sqlite.insert` function.
.. versionadded:: 1.4
.. seealso::
:ref:`sqlite_on_conflict_insert`
"""
stringify_dialect = "sqlite"
inherit_cache = True
@util.memoized_property
def excluded(
self,
) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
"""Provide the ``excluded`` namespace for an ON CONFLICT statement
SQLite's ON CONFLICT clause allows reference to the row that would
be inserted, known as ``excluded``. This attribute provides
all columns in this row to be referenceable.
.. tip:: The :attr:`_sqlite.Insert.excluded` attribute is an instance
of :class:`_expression.ColumnCollection`, which provides an
interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.excluded.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.excluded["column name"]`` or
``stmt.excluded["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
"""
return alias(self.table, name="excluded").columns
_on_conflict_exclusive = _exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already has "
"an ON CONFLICT clause established"
},
)
@_on_conflict_exclusive
def on_conflict_do_update(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
) -> Self:
r"""
Specifies a DO UPDATE SET action for ON CONFLICT clause.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index or unique constraint.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
:param set\_:
A dictionary or other mapping object
where the keys are either names of columns in the target table,
or :class:`_schema.Column` objects or other ORM-mapped columns
matching that of the target table, and expressions or literals
as values, specifying the ``SET`` actions to take.
.. versionadded:: 1.4 The
:paramref:`_sqlite.Insert.on_conflict_do_update.set_`
parameter supports :class:`_schema.Column` objects from the target
:class:`_schema.Table` as keys.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of
UPDATE, unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. An expression object representing a ``WHERE``
clause that restricts the rows affected by ``DO UPDATE SET``. Rows not
meeting the ``WHERE`` condition will not be updated (effectively a
``DO NOTHING`` for those rows).
"""
return self.ext(
OnConflictDoUpdate(index_elements, index_where, set_, where)
)
@_on_conflict_exclusive
def on_conflict_do_nothing(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
) -> Self:
"""
Specifies a DO NOTHING action for ON CONFLICT clause.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index or unique constraint.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
"""
return self.ext(OnConflictDoNothing(index_elements, index_where))
| Insert |
python | dask__distributed | distributed/tests/test_nanny.py | {
"start": 28752,
"end": 29783
} | class ____(Nanny):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.in_instantiate = asyncio.Event()
self.wait_instantiate = asyncio.Event()
async def instantiate(self):
self.in_instantiate.set()
await self.wait_instantiate.wait()
raise RuntimeError("Nope")
@pytest.mark.parametrize("restart", [True, False])
@gen_cluster(client=True, nthreads=[])
async def test_nanny_plugin_register_during_start_failure(c, s, restart):
plugin = DummyNannyPlugin("foo", restart=restart)
n = SlowBrokenNanny(s.address)
assert not hasattr(n, "_plugin_registered")
start = asyncio.create_task(n.start())
await n.in_instantiate.wait()
register = asyncio.create_task(c.register_plugin(plugin))
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(register), timeout=0.1)
n.wait_instantiate.set()
with pytest.raises(RuntimeError):
await start
assert not await register
| SlowBrokenNanny |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 46822,
"end": 47090
} | class ____(BaseTool):
name: str = "foo"
description: str = "foo."
@override
def _run(self, x: int, y: Annotated[str, InjectedToolArg]) -> Any:
"""Foo.
Args:
x: abc
y: 123
"""
return y
| InjectedTool |
python | pennersr__django-allauth | allauth/mfa/base/views.py | {
"start": 4930,
"end": 5818
} | class ____(TemplateView):
template_name = "mfa/index." + account_settings.TEMPLATE_EXTENSION
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
authenticators = {}
for auth in Authenticator.objects.filter(user=self.request.user):
if auth.type == Authenticator.Type.WEBAUTHN:
auths = authenticators.setdefault(auth.type, [])
auths.append(auth.wrap())
else:
authenticators[auth.type] = auth.wrap()
ret["authenticators"] = authenticators
ret["MFA_SUPPORTED_TYPES"] = app_settings.SUPPORTED_TYPES
ret["is_mfa_enabled"] = is_mfa_enabled(self.request.user)
return ret
index = IndexView.as_view()
@method_decorator(
login_stage_required(stage=TrustStage.key, redirect_urlname="account_login"),
name="dispatch",
)
| IndexView |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_test.py | {
"start": 1605,
"end": 11586
} | class ____(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with self.cached_session(use_gpu=False):
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
np_ans = np.array(x)
with self.cached_session():
tf_ans = ops.convert_to_tensor(x).eval()
dtype = dtypes_lib.as_dtype(np_ans.dtype)
if dtype.is_floating or dtype.is_complex:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testInvalidDType(self):
# Test case for GitHub issue 18474
with self.assertRaises(TypeError):
constant_op.constant(dtypes_lib.string, "[,]")
@test_util.run_deprecated_v1
def testBFloat16(self):
bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16))
self._testAll(np.empty((2, 0, 5)).astype(bfloat16))
@test_util.run_deprecated_v1
def testHalf(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16))
self._testAll(np.empty((2, 0, 5)).astype(np.float16))
@test_util.run_deprecated_v1
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
@test_util.run_deprecated_v1
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
@test_util.run_deprecated_v1
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
@test_util.run_deprecated_v1
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
@test_util.run_deprecated_v1
@test_util.disable_xla("b/183567451: XLA doesn't yet support int4")
def testInt4(self):
for dtype in [dtypes_lib.int4, dtypes_lib.uint4]:
np_dtype = dtype.as_numpy_dtype
self._testAll(
np.arange(dtype.min, dtype.max + 1)
.reshape([2, 4, 2])
.astype(np_dtype)
)
self._testAll(
(7 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np_dtype)
)
self._testAll(np.empty((2, 0, 5)).astype(np_dtype))
@test_util.run_deprecated_v1
def testComplex64(self):
self._testAll(
(1 + 2j) * np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
(1 + 2j) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
@test_util.run_deprecated_v1
def testComplex128(self):
self._testAll(
(1 + 2j) * np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
(1 + 2j) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.run_deprecated_v1
def testString(self):
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
  @test_util.run_deprecated_v1
  def testVariant(self):
    """A DT_VARIANT TensorProto can be wrapped in a constant and read back.

    Builds a scalar variant tensor proto by hand (payload: one int32),
    stores it via constant_op.constant, and verifies the op attribute holds
    the identical proto. Ends with a Print-op smoke test because, per the
    notes below, variant values cannot currently be fetched via session.run.
    """
    # TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
    # copying between CPU and GPU is supported.
    with self.session(use_gpu=False):
      variant_tensor = tensor_pb2.TensorProto(
          dtype=dtypes_lib.variant.as_datatype_enum,
          tensor_shape=tensor_shape.TensorShape([]).as_proto(),
          variant_val=[
              tensor_pb2.VariantTensorDataProto(
                  # Match registration in variant_op_registry.cc
                  type_name=b"int",
                  metadata=np.array(1, dtype=np.int32).tobytes())
          ])
      const = constant_op.constant(variant_tensor)
      # The serialized proto lives in the op's "value" attribute.
      const_value = const.op.get_attr("value")
      # Ensure we stored the tensor proto properly.
      self.assertProtoEquals(variant_tensor, const_value)
      # Smoke test -- ensure this executes without trouble.
      # Right now, non-numpy-compatible objects cannot be returned from a
      # session.run call; similarly, objects that can't be converted to
      # native numpy types cannot be passed to ops.convert_to_tensor.
      # TODO(ebrevdo): Add registration mechanism for
      # ops.convert_to_tensor and for session.run output.
      logging_const_op = logging_ops.Print(
          const, [const],
          message="Variant storing an int, decoded const value:").op
      logging_const_op.run()
@test_util.run_deprecated_v1
def testStringWithNulls(self):
with self.cached_session():
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
with self.cached_session():
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
with self.cached_session():
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testEagerMemory(self):
"""Tests PyObject refs are managed correctly when executing eagerly."""
constant_op.constant([[1.]])
def testImplicitShapeNumPy(self):
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testImplicitShapeList(self):
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeInconsistent(self):
with ops.Graph().as_default():
c = constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testPromotionShapes(self):
with ops.Graph().as_default():
c = constant_op.constant([7], shape=[10])
self.assertEqual(c.get_shape(), [10])
with ops.Graph().as_default():
c = constant_op.constant(3, shape=[10])
self.assertEqual(c.get_shape(), [10])
# pylint: disable=g-long-lambda
def testShapeWrong(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "Too many elements provided."):
constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[5])
with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
# pylint: enable=g-long-lambda
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeConstant(self):
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegex(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = constant_op.constant(large_array)
# TODO(b/35396543): Temporarily disable: suspicion that
# this is causing test timeouts.
def _testTooLargeGraph(self):
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegex(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()
@test_util.run_deprecated_v1
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegex(ValueError,
"setting an array element with a sequence"):
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegex(
ValueError, "(Expected.*to be a dense tensor|inhomogeneous shape)"):
c = constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegex(
ValueError, "(Expected.*to be a dense tensor|inhomogeneous shape)"):
c = constant_op.constant([[1, 2], [3], [4, 5]])
| ConstantTest |
python | pandas-dev__pandas | asv_bench/benchmarks/inference.py | {
"start": 1816,
"end": 2280
} | class ____:
# maybe_convert_numeric depends _exclusively_ on _libs, could
# go in benchmarks/libs.py
def setup_cache(self):
N = 10**6
arr = np.repeat([2**63], N) + np.arange(N).astype("uint64")
data = arr.astype(object)
data[1::2] = arr[1::2].astype(str)
data[-1] = -1
return data
    def time_convert(self, data):
        # Benchmark body: run lib.maybe_convert_numeric over the cached
        # mixed object array from setup_cache, with an empty NA-values set
        # and coerce_numeric=False.
        lib.maybe_convert_numeric(data, set(), coerce_numeric=False)
| MaybeConvertNumeric |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.