language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytest-dev__pytest | src/_pytest/unittest.py | {
"start": 2217,
"end": 7365
} | class ____(Class):
# Marker for fixturemanger.getfixtureinfo()
# to declare that our children do not support funcargs.
nofuncargs = True
def newinstance(self):
# TestCase __init__ takes the method (test) name. The TestCase
# constructor treats the name "runTest" as a special no-op, so it can be
# used when a dummy instance is needed. While unittest.TestCase has a
# default, some subclasses omit the default (#9610), so always supply
# it.
return self.obj("runTest")
def collect(self) -> Iterable[Item | Collector]:
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
skipped = _is_skipped(cls)
if not skipped:
self._register_unittest_setup_method_fixture(cls)
self._register_unittest_setup_class_fixture(cls)
self._register_setup_class_fixture()
self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
loader = TestLoader()
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
if not getattr(x, "__test__", True):
continue
yield TestCaseFunction.from_parent(self, name=name)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, "runTest", None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction.from_parent(self, name="runTest")
def _register_unittest_setup_class_fixture(self, cls: type) -> None:
"""Register an auto-use fixture to invoke setUpClass and
tearDownClass (#517)."""
setup = getattr(cls, "setUpClass", None)
teardown = getattr(cls, "tearDownClass", None)
if setup is None and teardown is None:
return None
cleanup = getattr(cls, "doClassCleanups", lambda: None)
def process_teardown_exceptions() -> None:
# tearDown_exceptions is a list set in the class containing exc_infos for errors during
# teardown for the class.
exc_infos = getattr(cls, "tearDown_exceptions", None)
if not exc_infos:
return
exceptions = [exc for (_, exc, _) in exc_infos]
# If a single exception, raise it directly as this provides a more readable
# error (hopefully this will improve in #12255).
if len(exceptions) == 1:
raise exceptions[0]
else:
raise ExceptionGroup("Unittest class cleanup errors", exceptions)
def unittest_setup_class_fixture(
request: FixtureRequest,
) -> Generator[None]:
cls = request.cls
if _is_skipped(cls):
reason = cls.__unittest_skip_why__
raise skip.Exception(reason, _use_item_location=True)
if setup is not None:
try:
setup()
# unittest does not call the cleanup function for every BaseException, so we
# follow this here.
except Exception:
cleanup()
process_teardown_exceptions()
raise
yield
try:
if teardown is not None:
teardown()
finally:
cleanup()
process_teardown_exceptions()
self.session._fixturemanager._register_fixture(
# Use a unique name to speed up lookup.
name=f"_unittest_setUpClass_fixture_{cls.__qualname__}",
func=unittest_setup_class_fixture,
nodeid=self.nodeid,
scope="class",
autouse=True,
)
def _register_unittest_setup_method_fixture(self, cls: type) -> None:
"""Register an auto-use fixture to invoke setup_method and
teardown_method (#517)."""
setup = getattr(cls, "setup_method", None)
teardown = getattr(cls, "teardown_method", None)
if setup is None and teardown is None:
return None
def unittest_setup_method_fixture(
request: FixtureRequest,
) -> Generator[None]:
self = request.instance
if _is_skipped(self):
reason = self.__unittest_skip_why__
raise skip.Exception(reason, _use_item_location=True)
if setup is not None:
setup(self, request.function)
yield
if teardown is not None:
teardown(self, request.function)
self.session._fixturemanager._register_fixture(
# Use a unique name to speed up lookup.
name=f"_unittest_setup_method_fixture_{cls.__qualname__}",
func=unittest_setup_method_fixture,
nodeid=self.nodeid,
scope="function",
autouse=True,
)
| UnitTestCase |
python | sqlalchemy__sqlalchemy | test/orm/test_mapper.py | {
"start": 81171,
"end": 82658
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("someprop", Integer),
)
def _test(self, value, instancelevel=None):
class Foo:
someprop = value
m = self.mapper(Foo, self.tables.foo)
eq_(Foo.someprop, value)
f1 = Foo()
if instancelevel is not None:
eq_(f1.someprop, instancelevel)
else:
eq_(f1.someprop, value)
assert self.tables.foo.c.someprop not in m._columntoproperty
def _test_not(self, value):
class Foo:
someprop = value
m = self.mapper(Foo, self.tables.foo)
is_(Foo.someprop.property.columns[0], self.tables.foo.c.someprop)
assert self.tables.foo.c.someprop in m._columntoproperty
def test_string(self):
self._test("someprop")
def test_unicode(self):
self._test("someprop")
def test_int(self):
self._test(5)
def test_dict(self):
self._test({"bar": "bat"})
def test_set(self):
self._test({6})
def test_column(self):
self._test_not(self.tables.foo.c.someprop)
def test_relationship(self):
self._test_not(relationship("bar"))
def test_descriptor(self):
def somefunc(self):
return "hi"
self._test(property(somefunc), "hi")
| IsUserlandTest |
python | google__jax | jax/experimental/colocated_python/func.py | {
"start": 1424,
"end": 1643
} | class ____:
"""User function wrapped by colocated_python."""
fun: Callable[..., Any]
fun_sourceinfo: str | None
fun_signature: inspect.Signature | None
@dataclasses.dataclass(frozen=True, slots=True)
| FunctionInfo |
python | FactoryBoy__factory_boy | tests/test_alchemy.py | {
"start": 1633,
"end": 2055
} | class ____(SQLAlchemyModelFactory):
class Meta:
model = models.MultifieldUniqueModel
sqlalchemy_get_or_create = ("slug", "text",)
sqlalchemy_session = models.session
sqlalchemy_session_persistence = 'commit'
id = factory.Sequence(lambda n: n)
slug = factory.Sequence(lambda n: "slug%s" % n)
text = factory.Sequence(lambda n: "text%s" % n)
| WithMultipleGetOrCreateFieldsFactory |
python | fsspec__filesystem_spec | fsspec/core.py | {
"start": 23238,
"end": 23829
} | class ____(io.TextIOWrapper):
"""TextIOWrapper cannot be pickled. This solves it.
Requires that ``buffer`` be pickleable, which all instances of
AbstractBufferedFile are.
"""
def __init__(
self,
buffer,
encoding=None,
errors=None,
newline=None,
line_buffering=False,
write_through=False,
):
self.args = buffer, encoding, errors, newline, line_buffering, write_through
super().__init__(*self.args)
def __reduce__(self):
return PickleableTextIOWrapper, self.args
| PickleableTextIOWrapper |
python | jazzband__django-pipeline | pipeline/forms.py | {
"start": 3010,
"end": 7847
} | class ____(type):
"""Metaclass for the PipelineFormMedia class.
This is responsible for converting CSS/JavaScript packages defined in
Pipeline into lists of files to include on a page. It handles access to the
:py:attr:`css` and :py:attr:`js` attributes on the class, generating a
list of files to return based on the Pipelined packages and individual
files listed in the :py:attr:`css`/:py:attr:`css_packages` or
:py:attr:`js`/:py:attr:`js_packages` attributes.
"""
def __new__(cls, name, bases, attrs):
"""Construct the class.
Args:
name (bytes):
The name of the class.
bases (tuple):
The base classes for the class.
attrs (dict):
The attributes going into the class.
Returns:
type:
The new class.
"""
new_class = super().__new__(cls, name, bases, attrs)
# If we define any packages, we'll need to use our special
# PipelineFormMediaProperty class. We use this instead of intercepting
# in __getattribute__ because Django does not access them through
# normal property access. Instead, grabs the Media class's __dict__
# and accesses them from there. By using these special properties, we
# can handle direct access (Media.css) and dictionary-based access
# (Media.__dict__['css']).
if "css_packages" in attrs:
new_class.css = PipelineFormMediaProperty(
cls._get_css_files, new_class, attrs.get("css") or {}
)
if "js_packages" in attrs:
new_class.js = PipelineFormMediaProperty(
cls._get_js_files, new_class, attrs.get("js") or []
)
return new_class
def _get_css_files(cls, extra_files):
"""Return all CSS files from the Media class.
Args:
extra_files (dict):
The contents of the Media class's original :py:attr:`css`
attribute, if one was provided.
Returns:
dict:
The CSS media types and files to return for the :py:attr:`css`
attribute.
"""
packager = Packager()
css_packages = getattr(cls, "css_packages", {})
return {
media_target: cls._get_media_files(
packager=packager,
media_packages=media_packages,
media_type="css",
extra_files=extra_files.get(media_target, []),
)
for media_target, media_packages in css_packages.items()
}
def _get_js_files(cls, extra_files):
"""Return all JavaScript files from the Media class.
Args:
extra_files (list):
The contents of the Media class's original :py:attr:`js`
attribute, if one was provided.
Returns:
list:
The JavaScript files to return for the :py:attr:`js` attribute.
"""
return cls._get_media_files(
packager=Packager(),
media_packages=getattr(cls, "js_packages", {}),
media_type="js",
extra_files=extra_files,
)
def _get_media_files(cls, packager, media_packages, media_type, extra_files):
"""Return source or output media files for a list of packages.
This will go through the media files belonging to the provided list
of packages referenced in a Media class and return the output files
(if Pipeline is enabled) or the source files (if not enabled).
Args:
packager (pipeline.packager.Packager):
The packager responsible for media compilation for this type
of package.
media_packages (list of unicode):
The list of media packages referenced in Media to compile or
return.
extra_files (list of unicode):
The list of extra files to include in the result. This would
be the list stored in the Media class's original :py:attr:`css`
or :py:attr:`js` attributes.
Returns:
list:
The list of media files for the given packages.
"""
source_files = list(extra_files)
if not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED:
default_collector.collect()
for media_package in media_packages:
package = packager.package_for(media_type, media_package)
if settings.PIPELINE_ENABLED:
source_files.append(staticfiles_storage.url(package.output_filename))
else:
source_files += packager.compile(package.paths)
return source_files
| PipelineFormMediaMetaClass |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 541014,
"end": 541355
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PullRequestReviewThread", graphql_name="node")
| PullRequestReviewThreadEdge |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 39963,
"end": 42032
} | class ____(TestCase):
@njit
def foo(fromty, toty):
l = listobject.new_list(toty)
l.append(fromty(0))
def check_good(self, fromty, toty):
TestItemCasting.foo(fromty, toty)
def check_bad(self, fromty, toty):
with self.assertRaises(TypingError) as raises:
TestItemCasting.foo(fromty, toty)
self.assertIn(
'cannot safely cast {fromty} to {toty}'.format(**locals()),
str(raises.exception),
)
def test_cast_int_to(self):
self.check_good(types.int32, types.float32)
self.check_good(types.int32, types.float64)
self.check_good(types.int32, types.complex128)
self.check_good(types.int64, types.complex128)
self.check_bad(types.int32, types.complex64)
self.check_good(types.int8, types.complex64)
def test_cast_float_to(self):
self.check_good(types.float32, types.float64)
self.check_good(types.float32, types.complex64)
self.check_good(types.float64, types.complex128)
def test_cast_bool_to(self):
self.check_good(types.boolean, types.int32)
self.check_good(types.boolean, types.float64)
self.check_good(types.boolean, types.complex128)
def test_cast_fail_unicode_int(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append("xyz")
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
'cannot safely cast unicode_type to int32',
str(raises.exception),
)
def test_cast_fail_int_unicode(self):
@njit
def foo():
l = listobject.new_list(types.unicode_type)
l.append(int32(0))
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
'Cannot cast int32 to unicode_type',
str(raises.exception),
)
@register_jitable
def make_test_list():
l = listobject.new_list(int32)
l.append(int32(1))
return l
| TestItemCasting |
python | mkdocs__mkdocs | mkdocs/utils/__init__.py | {
"start": 11543,
"end": 12280
} | class ____:
"""Same as a read-only property, but allows overwriting the field for good."""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, instance, owner=None):
if instance is None:
return self
return self.func(instance)
def __getattr__(name: str):
if name == 'warning_filter':
warnings.warn(
"warning_filter doesn't do anything since MkDocs 1.2 and will be removed soon. "
"All messages on the `mkdocs` logger get counted automatically.",
DeprecationWarning,
)
return logging.Filter()
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| weak_property |
python | django__django | tests/invalid_models_tests/test_relative_fields.py | {
"start": 74711,
"end": 84133
} | class ____(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(
ValueError, "Cannot specify through_fields without a through model"
):
models.ManyToManyField(Fan, through_fields=("f1", "f2"))
def test_invalid_order(self):
"""
Mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan, through="Invitation", through_fields=("invitee", "event")
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name="+")
field = Event._meta.get_field("invitees")
self.assertEqual(
field.check(from_model=Event),
[
Error(
"'Invitation.invitee' is not a foreign key to 'Event'.",
hint=(
"Did you mean one of the following foreign keys to 'Event': "
"event?"
),
obj=field,
id="fields.E339",
),
Error(
"'Invitation.event' is not a foreign key to 'Fan'.",
hint=(
"Did you mean one of the following foreign keys to 'Fan': "
"invitee, inviter?"
),
obj=field,
id="fields.E339",
),
],
)
def test_invalid_field(self):
"""
Providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through="Invitation",
through_fields=("invalid_field_1", "invalid_field_2"),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name="+")
field = Event._meta.get_field("invitees")
self.assertEqual(
field.check(from_model=Event),
[
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no "
"field 'invalid_field_1'.",
hint=(
"Did you mean one of the following foreign keys to 'Event': "
"event?"
),
obj=field,
id="fields.E338",
),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no "
"field 'invalid_field_2'.",
hint=(
"Did you mean one of the following foreign keys to 'Fan': "
"invitee, inviter?"
),
obj=field,
id="fields.E338",
),
],
)
def test_explicit_field_names(self):
"""
If ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan, through="Invitation", through_fields=(None, "invitee")
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name="+")
field = Event._meta.get_field("invitees")
self.assertEqual(
field.check(from_model=Event),
[
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint=(
"Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"
),
obj=field,
id="fields.E337",
),
],
)
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (("a", "b", "c"),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=("a", "b"),
to_fields=("a", "b"),
related_name="children",
)
field = Child._meta.get_field("parent")
self.assertEqual(
field.check(from_model=Child),
[
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Mark a single field as unique=True or add a set of "
"fields to a unique constraint (via unique_together or a "
"UniqueConstraint (without condition) in the model "
"Meta.constraints)."
),
obj=field,
id="fields.E310",
),
],
)
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (("a", "b", "c"),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=("a", "b", "d"),
to_fields=("a", "b", "d"),
related_name="children",
)
field = Child._meta.get_field("parent")
self.assertEqual(
field.check(from_model=Child),
[
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is "
"unique.",
hint=(
"Mark a single field as unique=True or add a set of "
"fields to a unique constraint (via unique_together or a "
"UniqueConstraint (without condition) in the model "
"Meta.constraints)."
),
obj=field,
id="fields.E310",
),
],
)
def test_invalid_to_argument_with_through(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foos = models.ManyToManyField(
to="Fo",
through="FooBar",
through_fields=("bar", "foo"),
)
class FooBar(models.Model):
foo = models.ForeignKey("Foo", on_delete=models.CASCADE)
bar = models.ForeignKey("Bar", on_delete=models.CASCADE)
field = Bar._meta.get_field("foos")
self.assertEqual(
field.check(from_model=Bar),
[
Error(
"Field defines a relation with model 'Fo', "
"which is either not installed, or is abstract.",
obj=field,
id="fields.E300",
),
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Bar.foos', "
"but it does not have a foreign key to 'Bar' "
"or 'invalid_models_tests.Fo'.",
obj=FooBar,
id="fields.E336",
),
Error(
"'FooBar.foo' is not a foreign key to 'Fo'.",
obj=field,
id="fields.E339",
),
],
)
@isolate_apps("invalid_models_tests")
| M2mThroughFieldsTests |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_hive_to_dynamodb.py | {
"start": 1258,
"end": 5887
} | class ____:
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
dag = DAG("test_dag_id", schedule=None, default_args=args)
self.dag = dag
self.sql = "SELECT 1"
self.hook = DynamoDBHook(aws_conn_id="aws_default", region_name="us-east-1")
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient="records"))
@mock_aws
def test_get_conn_returns_a_boto3_connection(self):
hook = DynamoDBHook(aws_conn_id="aws_default")
assert hook.get_conn() is not None
@mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_df",
return_value=pd.DataFrame(data=[("1", "sid")], columns=["id", "name"]),
)
@mock_aws
def test_get_records_with_schema(self, mock_get_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
sql=self.sql,
table_name="test_airflow",
task_id="hive_to_dynamodb_check",
table_keys=["id"],
dag=self.dag,
)
operator.execute(None)
table = self.hook.get_conn().Table("test_airflow")
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 1
@mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_df",
return_value=pd.DataFrame(data=[("1", "sid"), ("1", "gupta")], columns=["id", "name"]),
)
@mock_aws
def test_pre_process_records_with_schema(self, mock_get_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
sql=self.sql,
table_name="test_airflow",
task_id="hive_to_dynamodb_check",
table_keys=["id"],
pre_process=self.process_data,
dag=self.dag,
)
operator.execute(None)
table = self.hook.get_conn().Table("test_airflow")
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 1
@pytest.mark.parametrize("df_type", ["pandas", "polars"])
@mock_aws
def test_df_type_parameter(self, df_type):
if df_type == "polars" and pl is None:
pytest.skip("Polars not installed")
if df_type == "pandas":
test_df = pd.DataFrame(data=[("1", "sid")], columns=["id", "name"])
else:
test_df = pl.DataFrame({"id": ["1"], "name": ["sid"]})
with mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_df",
return_value=test_df,
) as mock_get_df:
self.hook.get_conn().create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
sql=self.sql,
table_name="test_airflow",
task_id="hive_to_dynamodb_check",
table_keys=["id"],
df_type=df_type,
dag=self.dag,
)
operator.execute(None)
mock_get_df.assert_called_once_with(self.sql, schema="default", df_type=df_type)
table = self.hook.get_conn().Table("test_airflow")
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 1
| TestHiveToDynamoDBOperator |
python | kamyu104__LeetCode-Solutions | Python/validate-binary-search-tree.py | {
"start": 1049,
"end": 1511
} | class ____(object):
# @param root, a tree node
# @return a boolean
def isValidBST(self, root):
return self.isValidBSTRecu(root, float("-inf"), float("inf"))
def isValidBSTRecu(self, root, low, high):
if root is None:
return True
return low < root.val and root.val < high \
and self.isValidBSTRecu(root.left, low, root.val) \
and self.isValidBSTRecu(root.right, root.val, high)
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/longest-non-decreasing-subarray-after-replacing-at-most-one-element.py | {
"start": 42,
"end": 702
} | class ____(object):
def longestSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
right = [1]*len(nums)
for i in reversed(xrange(len(nums)-1)):
if nums[i] <= nums[i+1]:
right[i] = right[i+1]+1
result = min(max(right)+1, len(nums))
left = 1
for i in xrange(1, len(nums)-1):
if nums[i-1] <= nums[i+1]:
result = max(result, left+1+right[i+1])
if nums[i-1] <= nums[i]:
left += 1
else:
left = 1
return result
# Time: O(n)
# Space: O(n)
# prefix sum
| Solution |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 10070,
"end": 11055
} | class ____:
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
@pytest.fixture
def idct_lock(self):
return threading.Lock()
def test_definition(self, idct_lock):
for i in FFTWDATA_SIZES:
with idct_lock:
xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
x = idct(yr, type=self.type)
if self.type == 1:
x /= 2 * (i-1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg=f"Size {i} failed")
| _TestIDCTBase |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_pathconverter.py | {
"start": 4176,
"end": 8194
} | class ____(util.MdCase):
"""Test absolute paths."""
extension = ["pymdownx.pathconverter"]
extension_configs = {
"pymdownx.pathconverter": {
"base_path": "/Some/fake/path",
"absolute": True
}
}
def test_in_script(self):
"""Test that we do not parse image in script."""
self.check_markdown(
r'''
<script>
var str = '<img alt="picture" src="../test_extensions/_assets/bg.png" />'
</script>
''',
r'''
<script>
var str = '<img alt="picture" src="../test_extensions/_assets/bg.png" />'
</script>
''',
True
)
def test_comment(self):
"""Test comment."""
self.check_markdown(
r'<!--  -->',
r'<!--  -->'
)
def test_relative_path(self):
"""Test relative path."""
self.check_markdown(
r'',
r'<p><img alt="picture" src="/Some/fake/path/test_extensions/_assets/bg.png" /></p>'
)
def test_file_win_file_path_root(self):
"""Test windows file:// path with root slash."""
self.check_markdown(
r'[file link windows abs](file:///c:/path/file.html)',
r'<p><a href="file:///c:/path/file.html">file link windows abs</a></p>'
)
def test_win_file_path(self):
"""Test windows file:// path."""
self.check_markdown(
r'[file link windows abs2](file://c:/path/file.html)',
r'<p><a href="file://c:/path/file.html">file link windows abs2</a></p>'
)
def test_file_root(self):
"""Test Linux/Unix style root file:// path."""
self.check_markdown(
r'[file link abs](file:///path/file.html)',
r'<p><a href="file:///path/file.html">file link abs</a></p>'
)
def test_root(self):
"""Test /root path."""
self.check_markdown(
r'[absolute](/absolute)',
r'<p><a href="/absolute">absolute</a></p>'
)
def test_url(self):
"""Test normal URL."""
self.check_markdown(
r'[link](http://www.google.com)',
r'<p><a href="http://www.google.com">link</a></p>'
)
def test_fragment(self):
"""Test HTML fragment."""
self.check_markdown(
r'[fragment](#fragment)',
r'<p><a href="#fragment">fragment</a></p>'
)
def test_windows(self):
"""Test Windows file path."""
self.check_markdown(
r'[windows path abs](c:/path/file.html)',
r'<p><a href="c:/path/file.html">windows path abs</a></p>'
)
def test_network_path(self):
"""Test network path."""
self.check_markdown(
r'[windows network path](//network/path/file.html)',
r'<p><a href="//network/path/file.html">windows network path</a></p>'
)
def test_strange_url(self):
"""Test strange URL."""
self.check_markdown(
r'[strange link](strange://odd/link/file.html)',
r'<p><a href="strange://odd/link/file.html">strange link</a></p>'
)
def test_strange_url2(self):
"""Test additional strange URL."""
self.check_markdown(
r'[strange link 2](strange://www.odd.com/link/file.html)',
r'<p><a href="strange://www.odd.com/link/file.html">strange link 2</a></p>'
)
def test_mail(self):
"""Test mail link."""
self.check_markdown(
r'<mail@mail.com>',
r'<p><a href="mailto:mail@mail'
r'.com">mail@mail.com</a></p>'
)
| TestAbsolute |
python | keras-team__keras | keras/src/metrics/f_score_metrics_test.py | {
"start": 14009,
"end": 15160
} | class ____(testing.TestCase):
def test_config(self):
f1_obj = f_score_metrics.F1Score(dtype="float32")
config = f1_obj.get_config()
self.assertNotIn("beta", config)
# Check save and restore config
f1_obj = f_score_metrics.F1Score.from_config(config)
self.assertEqual(f1_obj.average, None)
self.assertEqual(f1_obj.dtype, "float32")
def test_correctness(self):
f1 = f_score_metrics.F1Score()
fbeta = f_score_metrics.FBetaScore(beta=1.0)
y_true = np.array(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
]
)
y_pred = np.array(
[
[0.9, 0.1, 0],
[0.2, 0.6, 0.2],
[0, 0, 1],
[0.4, 0.3, 0.3],
[0, 0.9, 0.1],
[0, 0, 1],
]
)
fbeta.update_state(y_true, y_pred)
f1.update_state(y_true, y_pred)
self.assertAllClose(fbeta.result(), f1.result(), atol=1e-6)
| F1ScoreTest |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 10581,
"end": 10715
} | class ____(PrefectException):
"""Raised when an incorrect URL is provided to a GitHub filesystem block."""
| InvalidRepositoryURLError |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 1795,
"end": 1918
} | class ____(ColumnArgumentRole):
__slots__ = ()
_role_name = "Column expression or string key"
| ColumnArgumentOrKeyRole |
python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {
"start": 58904,
"end": 61736
} | class ____(nn.Module):
"""Split Residual Vector Quantizer."""
def __init__(self, config: MimiConfig):
super().__init__()
self.codebook_size = config.codebook_size
self.frame_rate = config.frame_rate
self.max_num_quantizers = config.num_quantizers
self.num_semantic_quantizers = config.num_semantic_quantizers
self.num_acoustic_quantizers = config.num_quantizers - config.num_semantic_quantizers
self.semantic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_semantic_quantizers)
self.acoustic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_acoustic_quantizers)
def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[float] = None) -> torch.Tensor:
"""
Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The RVQ encode method sets
the appropriate number of quantizers to use and returns indices for each quantizer.
"""
num_quantizers = self.max_num_quantizers if num_quantizers is None else num_quantizers
if num_quantizers > self.max_num_quantizers:
raise ValueError(
f"The number of quantizers (i.e codebooks) asked should be lower than the total number of quantizers {self.max_num_quantizers}, but is currently {num_quantizers}."
)
if num_quantizers < self.num_semantic_quantizers:
raise ValueError(
f"The number of quantizers (i.e codebooks) asked should be higher than the number of semantic quantizers {self.num_semantic_quantizers}, but is currently {num_quantizers}."
)
# codes is [K, B, T], with T frames, K nb of codebooks.
codes = self.semantic_residual_vector_quantizer.encode(embeddings)
if num_quantizers > self.num_semantic_quantizers:
acoustic_codes = self.acoustic_residual_vector_quantizer.encode(
embeddings, num_quantizers=num_quantizers - self.num_semantic_quantizers
)
codes = torch.cat([codes, acoustic_codes], dim=0)
return codes
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
# The first num_semantic_quantizers codebooks are decoded using the semantic RVQ
quantized_out = self.semantic_residual_vector_quantizer.decode(codes[:, : self.num_semantic_quantizers])
# The rest of the codebooks are decoded using the acoustic RVQ
if codes.shape[1] > self.num_semantic_quantizers:
quantized_out += self.acoustic_residual_vector_quantizer.decode(codes[:, self.num_semantic_quantizers :])
return quantized_out
@auto_docstring
| MimiSplitResidualVectorQuantizer |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 79412,
"end": 84346
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and
multi-scale deformable attention heads. attention softmax, used to compute the weighted average in the
bi-attention heads.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
| MMGroundingDinoModelOutput |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 4777,
"end": 5008
} | class ____:
def m[S](self: S) -> S:
type S = int
print(S) # not a reference to the type variable, so not touched by the autofix
return 42
MetaType = TypeVar("MetaType")
| NamesShadowingTypeVarAreNotTouched |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 28056,
"end": 29058
} | class ____(SimpleTestCase):
def test_pillow_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
pillow_installed = False
else:
pillow_installed = True
class Model(models.Model):
field = models.ImageField(upload_to="somewhere")
field = Model._meta.get_field("field")
errors = field.check()
expected = (
[]
if pillow_installed
else [
Error(
"Cannot use ImageField because Pillow is not installed.",
hint=(
"Get Pillow at https://pypi.org/project/Pillow/ "
'or run command "python -m pip install Pillow".'
),
obj=field,
id="fields.E210",
),
]
)
self.assertEqual(errors, expected)
@isolate_apps("invalid_models_tests")
| ImageFieldTests |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/stackdriver.py | {
"start": 13582,
"end": 17073
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new alert or updates an existing policy identified the name field in the alerts parameter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverUpsertAlertOperator`
:param alerts: A JSON string or file that specifies all the alerts that needs
to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#AlertPolicy.
(templated)
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which alert needs to be created/updated.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"alerts",
"impersonation_chain",
)
template_ext: Sequence[str] = (".json",)
operator_extra_links = (StackdriverPoliciesLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
alerts: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str = PROVIDE_PROJECT_ID,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.alerts = alerts
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Upsert Alert Policies: Alerts: %s Project id: %s", self.alerts, self.project_id)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.upsert_alert(
alerts=self.alerts,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverPoliciesLink.persist(
context=context,
project_id=self.project_id or self.hook.project_id,
)
| StackdriverUpsertAlertOperator |
python | ray-project__ray | python/ray/train/v2/jax/config.py | {
"start": 515,
"end": 3055
} | class ____(BackendConfig):
use_tpu: bool = False
use_gpu: bool = False
@property
def backend_cls(self):
return _JaxBackend
def _setup_jax_distributed_environment(
master_addr_with_port: str,
num_workers: int,
index: int,
use_tpu: bool,
use_gpu: bool,
resources_per_worker: dict,
):
"""Set up distributed Jax training information.
This function should be called on each worker. It sets JAX environment
variables and initializes JAX distributed training.
Args:
master_addr_with_port: The master address with port for coordination.
num_workers: Total number of workers.
index: Index of this worker.
use_tpu: Whether to configure for TPU. If True and JAX_PLATFORMS is not
already set, it will be set to "tpu".
use_gpu: Whether to configure for GPU. If True and JAX_PLATFORMS is not
already set, it will be set to "cuda".
resources_per_worker: The resources per worker.
"""
# Get JAX_PLATFORMS from environment if already set
jax_platforms = os.environ.get("JAX_PLATFORMS", "").lower()
if not jax_platforms and use_tpu:
os.environ["JAX_PLATFORMS"] = "tpu"
jax_platforms = "tpu"
if not jax_platforms and use_gpu:
os.environ["JAX_PLATFORMS"] = "cuda"
jax_platforms = "cuda"
if "cuda" in jax_platforms.split(","):
num_gpus_per_worker = resources_per_worker.get("GPU", 0)
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
str(i) for i in range(num_gpus_per_worker)
)
import jax
if "tpu" in jax_platforms.split(","):
jax.distributed.initialize(master_addr_with_port, num_workers, index)
logger.info("Initialized JAX distributed on TPU.")
if "cuda" in jax_platforms.split(","):
if num_gpus_per_worker > 0:
local_device_ids = list(range(num_gpus_per_worker))
else:
local_device_ids = 0
jax.distributed.initialize(
master_addr_with_port, num_workers, index, local_device_ids
)
logger.info("Initialized JAX distributed on CUDA.")
def _shutdown_jax_distributed():
"""Shutdown JAX distributed environment.
This function should be called on each worker during cleanup.
If JAX distributed was not initialized, this is a no-op.
"""
try:
import jax
jax.distributed.shutdown()
except Exception as e:
logger.warning(f"Error during JAX distributed shutdown: {e}")
| JaxConfig |
python | RaRe-Technologies__gensim | gensim/models/basemodel.py | {
"start": 0,
"end": 1554
} | class ____:
def print_topic(self, topicno, topn=10):
"""Get a single topic as a formatted string.
Parameters
----------
topicno : int
Topic id.
topn : int
Number of words from topic that will be used.
Returns
-------
str
String representation of topic, like '-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + ... '.
"""
return ' + '.join('%.3f*"%s"' % (v, k) for k, v in self.show_topic(topicno, topn))
def print_topics(self, num_topics=20, num_words=10):
"""Get the most significant topics (alias for `show_topics()` method).
Parameters
----------
num_topics : int, optional
The number of topics to be selected, if -1 - all topics will be in result (ordered by significance).
num_words : int, optional
The number of words to be included per topics (ordered by significance).
Returns
-------
list of (int, list of (str, float))
Sequence with (topic_id, [(word, value), ... ]).
"""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def get_topics(self):
"""Get words X topics matrix.
Returns
--------
numpy.ndarray:
The term topic matrix learned during inference, shape (`num_topics`, `vocabulary_size`).
Raises
------
NotImplementedError
"""
raise NotImplementedError
| BaseTopicModel |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 283580,
"end": 284231
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("DiscussionCommentEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("DiscussionComment"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| DiscussionCommentConnection |
python | tensorflow__tensorflow | third_party/xla/xla/backends/cpu/codegen/computation_kernel_emitter_test.py | {
"start": 1735,
"end": 3303
} | class ____(parameterized.TestCase):
def test_basic_call(self):
dtype = np.dtype(np.float32)
lhs_literal = base_utilities.create_scalar_literal(1.0, dtype)
lhs_parameter = testlib_base.HloInstruction.create_parameter(
0, lhs_literal.shape(), "lhs"
)
rhs_literal = base_utilities.create_scalar_literal(2.0, dtype)
rhs_parameter = testlib_base.HloInstruction.create_parameter(
1, rhs_literal.shape(), "rhs"
)
result_literal = base_utilities.create_scalar_literal(0.0, dtype)
add_computation = create_trivial_add_computation(dtype)
call_instruction = testlib_base.HloInstruction.create_call(
result_literal.shape(),
[lhs_parameter, rhs_parameter],
add_computation,
)
hlo_module, buffer_assignment = utilities.build_hlo_module(
call_instruction,
lhs_parameter,
rhs_parameter,
extra_computations=[add_computation],
)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
computation_emitter = testlib_cpu.ComputationKernelEmitter(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
kernel_definition = computation_emitter.emit_kernel_definition()
self.assertIsNotNone(kernel_definition)
runner = testlib_cpu.KernelRunner.create(kernel_definition, jit_compiler)
runner.call([lhs_literal, rhs_literal, result_literal])
self.assertEqual(np.asarray(result_literal).item(), 3.0)
if __name__ == "__main__":
absltest.main()
| CallKernelTest |
python | langchain-ai__langchain | libs/core/langchain_core/messages/tool.py | {
"start": 8194,
"end": 12584
} | class ____(TypedDict):
"""A chunk of a tool call (yielded when streaming).
When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
all string attributes are concatenated. Chunks are only merged if their
values of `index` are equal and not None.
Example:
```python
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
```
"""
name: str | None
"""The name of the tool to be called."""
args: str | None
"""The arguments to the tool call."""
id: str | None
"""An identifier associated with the tool call."""
index: int | None
"""The index of the tool call in a sequence."""
type: NotRequired[Literal["tool_call_chunk"]]
def tool_call_chunk(
*,
name: str | None = None,
args: str | None = None,
id: str | None = None,
index: int | None = None,
) -> ToolCallChunk:
"""Create a tool call chunk.
Args:
name: The name of the tool to be called.
args: The arguments to the tool call.
id: An identifier associated with the tool call.
index: The index of the tool call in a sequence.
Returns:
The created tool call chunk.
"""
return ToolCallChunk(
name=name, args=args, id=id, index=index, type="tool_call_chunk"
)
def invalid_tool_call(
*,
name: str | None = None,
args: str | None = None,
id: str | None = None,
error: str | None = None,
) -> InvalidToolCall:
"""Create an invalid tool call.
Args:
name: The name of the tool to be called.
args: The arguments to the tool call.
id: An identifier associated with the tool call.
error: An error message associated with the tool call.
Returns:
The created invalid tool call.
"""
return InvalidToolCall(
name=name, args=args, id=id, error=error, type="invalid_tool_call"
)
def default_tool_parser(
raw_tool_calls: list[dict],
) -> tuple[list[ToolCall], list[InvalidToolCall]]:
"""Best-effort parsing of tools.
Args:
raw_tool_calls: List of raw tool call dicts to parse.
Returns:
A list of tool calls and invalid tool calls.
"""
tool_calls = []
invalid_tool_calls = []
for raw_tool_call in raw_tool_calls:
if "function" not in raw_tool_call:
continue
function_name = raw_tool_call["function"]["name"]
try:
function_args = json.loads(raw_tool_call["function"]["arguments"])
parsed = tool_call(
name=function_name or "",
args=function_args or {},
id=raw_tool_call.get("id"),
)
tool_calls.append(parsed)
except json.JSONDecodeError:
invalid_tool_calls.append(
invalid_tool_call(
name=function_name,
args=raw_tool_call["function"]["arguments"],
id=raw_tool_call.get("id"),
error=None,
)
)
return tool_calls, invalid_tool_calls
def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
"""Best-effort parsing of tool chunks.
Args:
raw_tool_calls: List of raw tool call dicts to parse.
Returns:
List of parsed ToolCallChunk objects.
"""
tool_call_chunks = []
for tool_call in raw_tool_calls:
if "function" not in tool_call:
function_args = None
function_name = None
else:
function_args = tool_call["function"]["arguments"]
function_name = tool_call["function"]["name"]
parsed = tool_call_chunk(
name=function_name,
args=function_args,
id=tool_call.get("id"),
index=tool_call.get("index"),
)
tool_call_chunks.append(parsed)
return tool_call_chunks
def _merge_status(
left: Literal["success", "error"], right: Literal["success", "error"]
) -> Literal["success", "error"]:
return "error" if "error" in {left, right} else "success"
| ToolCallChunk |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 13033,
"end": 13150
} | class ____(models.Model):
fk = models.ForeignKey(SecondLevelInheritedModel, on_delete=models.CASCADE)
| MultiOneToOne |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1598236,
"end": 1598737
} | class ____(sgqlc.types.Union):
"""Types which can be parameters for `RepositoryRule` objects."""
__schema__ = github_schema
__types__ = (
BranchNamePatternParameters,
CommitAuthorEmailPatternParameters,
CommitMessagePatternParameters,
CommitterEmailPatternParameters,
PullRequestParameters,
RequiredDeploymentsParameters,
RequiredStatusChecksParameters,
TagNamePatternParameters,
UpdateParameters,
)
| RuleParameters |
python | paramiko__paramiko | paramiko/rsakey.py | {
"start": 1222,
"end": 7546
} | class ____(PKey):
"""
Representation of an RSA key which can be used to sign and verify SSH2
data.
"""
name = "ssh-rsa"
HASHES = {
"ssh-rsa": hashes.SHA1,
"ssh-rsa-cert-v01@openssh.com": hashes.SHA1,
"rsa-sha2-256": hashes.SHA256,
"rsa-sha2-256-cert-v01@openssh.com": hashes.SHA256,
"rsa-sha2-512": hashes.SHA512,
"rsa-sha2-512-cert-v01@openssh.com": hashes.SHA512,
}
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
key=None,
file_obj=None,
):
self.key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if key is not None:
self.key = key
else:
self._check_type_and_load_cert(
msg=msg,
# NOTE: this does NOT change when using rsa2 signatures; it's
# purely about key loading, not exchange or verification
key_type=self.name,
cert_type="ssh-rsa-cert-v01@openssh.com",
)
self.key = rsa.RSAPublicNumbers(
e=msg.get_mpint(), n=msg.get_mpint()
).public_key(default_backend())
@classmethod
def identifiers(cls):
return list(cls.HASHES.keys())
@property
def size(self):
return self.key.key_size
@property
def public_numbers(self):
if isinstance(self.key, rsa.RSAPrivateKey):
return self.key.private_numbers().public_numbers
else:
return self.key.public_numbers()
def asbytes(self):
m = Message()
m.add_string(self.name)
m.add_mpint(self.public_numbers.e)
m.add_mpint(self.public_numbers.n)
return m.asbytes()
def __str__(self):
# NOTE: see #853 to explain some legacy behavior.
# TODO 4.0: replace with a nice clean fingerprint display or something
return self.asbytes().decode("utf8", errors="ignore")
@property
def _fields(self):
return (self.get_name(), self.public_numbers.e, self.public_numbers.n)
def get_name(self):
return self.name
def get_bits(self):
return self.size
def can_sign(self):
return isinstance(self.key, rsa.RSAPrivateKey)
def sign_ssh_data(self, data, algorithm=None):
if algorithm is None:
algorithm = self.name
sig = self.key.sign(
data,
padding=padding.PKCS1v15(),
# HASHES being just a map from long identifier to either SHA1 or
# SHA256 - cert'ness is not truly relevant.
algorithm=self.HASHES[algorithm](),
)
m = Message()
# And here again, cert'ness is irrelevant, so it is stripped out.
m.add_string(algorithm.replace("-cert-v01@openssh.com", ""))
m.add_string(sig)
return m
def verify_ssh_sig(self, data, msg):
sig_algorithm = msg.get_text()
if sig_algorithm not in self.HASHES:
return False
key = self.key
if isinstance(key, rsa.RSAPrivateKey):
key = key.public_key()
# NOTE: pad received signature with leading zeros, key.verify()
# expects a signature of key size (e.g. PuTTY doesn't pad)
sign = msg.get_binary()
diff = key.key_size - len(sign) * 8
if diff > 0:
sign = b"\x00" * ((diff + 7) // 8) + sign
try:
key.verify(
sign, data, padding.PKCS1v15(), self.HASHES[sig_algorithm]()
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@staticmethod
def generate(bits, progress_func=None):
"""
Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.RSAKey` private key
"""
key = rsa.generate_private_key(
public_exponent=65537, key_size=bits, backend=default_backend()
)
return RSAKey(key=key)
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("RSA", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("RSA", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
pkformat, data = data
if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except (ValueError, TypeError, UnsupportedAlgorithm) as e:
raise SSHException(str(e))
elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
n, e, d, iqmp, p, q = self._uint32_cstruct_unpack(data, "iiiiii")
public_numbers = rsa.RSAPublicNumbers(e=e, n=n)
key = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=d % (p - 1),
dmq1=d % (q - 1),
iqmp=iqmp,
public_numbers=public_numbers,
).private_key(default_backend())
else:
self._got_bad_key_format_id(pkformat)
assert isinstance(key, rsa.RSAPrivateKey)
self.key = key
| RSAKey |
python | pytorch__pytorch | test/mobile/model_test/quantization_ops.py | {
"start": 1805,
"end": 4389
} | class ____:
def __init__(self) -> None:
super().__init__()
self.module = self.M()
def getModule(self):
return torch.ao.quantization.quantize_dynamic(self.module, dtype=torch.qint8)
class M(torch.nn.Module):
def __init__(self) -> None:
super(DynamicQuantModule.M, self).__init__()
self.rnn = nn.RNN(4, 8, 2)
self.rnncell = nn.RNNCell(4, 8)
self.gru = nn.GRU(4, 8, 2)
self.grucell = nn.GRUCell(4, 8)
self.lstm = nn.LSTM(4, 8, 2)
self.lstmcell = nn.LSTMCell(4, 8)
self.linears = nn.ModuleList(
[
nn.Identity(54),
nn.Linear(20, 20),
nn.Bilinear(20, 20, 40),
]
)
self.transformers = nn.ModuleList(
[
nn.Transformer(
d_model=2, nhead=2, num_encoder_layers=1, num_decoder_layers=1
),
nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=2, nhead=2), num_layers=1
),
nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=2, nhead=2), num_layers=1
),
]
)
# self.a = torch.nn.utils.rnn.pad_sequence([torch.tensor([1,2,3]), torch.tensor([3,4])], batch_first=True)
def forward(self):
input = torch.randn(5, 3, 4)
h = torch.randn(2, 3, 8)
c = torch.randn(2, 3, 8)
linear_input = torch.randn(32, 20)
trans_input = torch.randn(1, 16, 2)
tgt = torch.rand(1, 16, 2)
return len(
(
self.rnn(input, h),
self.rnncell(input[0], h[0]),
self.gru(input, h),
self.grucell(input[0], h[0]),
self.lstm(input, (h, c)),
# self.lstm(torch.nn.utils.rnn.pack_padded_sequence(self.a, lengths=torch.tensor([3,2,1])), (h, c)),
self.lstmcell(input[0], (h[0], c[0])),
self.transformers[0](trans_input, tgt),
self.transformers[1](trans_input),
self.transformers[2](trans_input, tgt),
self.linears[0](linear_input),
self.linears[1](linear_input),
self.linears[2](linear_input, linear_input),
)
)
| DynamicQuantModule |
python | numpy__numpy | numpy/f2py/_backends/_distutils.py | {
"start": 289,
"end": 2385
} | class ____(Backend):
def __init__(sef, *args, **kwargs):
warnings.warn(
"\ndistutils has been deprecated since NumPy 1.26.x\n"
"Use the Meson backend instead, or generate wrappers"
" without -c and use a custom build script",
VisibleDeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def compile(self):
num_info = {}
if num_info:
self.include_dirs.extend(num_info.get("include_dirs", []))
ext_args = {
"name": self.modulename,
"sources": self.sources,
"include_dirs": self.include_dirs,
"library_dirs": self.library_dirs,
"libraries": self.libraries,
"define_macros": self.define_macros,
"undef_macros": self.undef_macros,
"extra_objects": self.extra_objects,
"f2py_options": self.f2py_flags,
}
if self.sysinfo_flags:
for n in self.sysinfo_flags:
i = get_info(n)
if not i:
print(
f"No {n!r} resources found"
"in system (try `f2py --help-link`)"
)
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + self.setup_flags
sys.argv.extend(
[
"build",
"--build-temp",
self.build_dir,
"--build-base",
self.build_dir,
"--build-platlib",
".",
"--disable-optimization",
]
)
if self.fc_flags:
sys.argv.extend(["config_fc"] + self.fc_flags)
if self.flib_flags:
sys.argv.extend(["build_ext"] + self.flib_flags)
setup(ext_modules=[ext])
if self.remove_build_dir and os.path.exists(self.build_dir):
print(f"Removing build directory {self.build_dir}")
shutil.rmtree(self.build_dir)
| DistutilsBackend |
python | fluentpython__example-code | 17-futures/countries/flags2_await.py | {
"start": 443,
"end": 3083
} | class ____(Exception): # <1>
def __init__(self, country_code):
self.country_code = country_code
async def get_flag(base_url, cc): # <2>
url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower())
with closing(await aiohttp.request('GET', url)) as resp:
if resp.status == 200:
image = await resp.read()
return image
elif resp.status == 404:
raise web.HTTPNotFound()
else:
raise aiohttp.HttpProcessingError(
code=resp.status, message=resp.reason,
headers=resp.headers)
async def download_one(cc, base_url, semaphore, verbose): # <3>
try:
with (await semaphore): # <4>
image = await get_flag(base_url, cc) # <5>
except web.HTTPNotFound: # <6>
status = HTTPStatus.not_found
msg = 'not found'
except Exception as exc:
raise FetchError(cc) from exc # <7>
else:
save_flag(image, cc.lower() + '.gif') # <8>
status = HTTPStatus.ok
msg = 'OK'
if verbose and msg:
print(cc, msg)
return Result(status, cc)
# END FLAGS2_ASYNCIO_TOP
# BEGIN FLAGS2_ASYNCIO_DOWNLOAD_MANY
async def downloader_coro(cc_list, base_url, verbose, concur_req): # <1>
counter = collections.Counter()
semaphore = asyncio.Semaphore(concur_req) # <2>
to_do = [download_one(cc, base_url, semaphore, verbose)
for cc in sorted(cc_list)] # <3>
to_do_iter = asyncio.as_completed(to_do) # <4>
if not verbose:
to_do_iter = tqdm.tqdm(to_do_iter, total=len(cc_list)) # <5>
for future in to_do_iter: # <6>
try:
res = await future # <7>
except FetchError as exc: # <8>
country_code = exc.country_code # <9>
try:
error_msg = exc.__cause__.args[0] # <10>
except IndexError:
error_msg = exc.__cause__.__class__.__name__ # <11>
if verbose and error_msg:
msg = '*** Error for {}: {}'
print(msg.format(country_code, error_msg))
status = HTTPStatus.error
else:
status = res.status
counter[status] += 1 # <12>
return counter # <13>
def download_many(cc_list, base_url, verbose, concur_req):
loop = asyncio.get_event_loop()
coro = downloader_coro(cc_list, base_url, verbose, concur_req)
counts = loop.run_until_complete(coro) # <14>
loop.close() # <15>
return counts
if __name__ == '__main__':
main(download_many, DEFAULT_CONCUR_REQ, MAX_CONCUR_REQ)
# END FLAGS2_ASYNCIO_DOWNLOAD_MANY
| FetchError |
python | ray-project__ray | doc/source/tune/doc_code/trial_checkpoint.py | {
"start": 4409,
"end": 5081
} | class ____(tune.Callback):
def __init__(self, iterations_per_checkpoint: int):
self.steps_per_checkpoint = iterations_per_checkpoint
self._trials_last_checkpoint = {}
def on_trial_result(
self, iteration: int, trials: list[Trial], trial: Trial, result: dict, **info
):
current_iteration = result[TRAINING_ITERATION]
if (
current_iteration - self._trials_last_checkpoint.get(trial, -1)
>= self.steps_per_checkpoint
):
result[SHOULD_CHECKPOINT] = True
self._trials_last_checkpoint[trial] = current_iteration
# __callback_api_checkpointing_end__
| CheckpointByStepsTaken |
python | doocs__leetcode | solution/1500-1599/1525.Number of Good Ways to Split a String/Solution.py | {
"start": 0,
"end": 298
} | class ____:
def numSplits(self, s: str) -> int:
cnt = Counter(s)
vis = set()
ans = 0
for c in s:
vis.add(c)
cnt[c] -= 1
if cnt[c] == 0:
cnt.pop(c)
ans += len(vis) == len(cnt)
return ans
| Solution |
python | has2k1__plotnine | plotnine/themes/seaborn_rcmod.py | {
"start": 15337,
"end": 15502
} | class ____(_RCAesthetics):
"""Light wrapper on a dict to set context temporarily."""
_keys = _context_keys
_set = staticmethod(set_context)
| _PlottingContext |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 4295,
"end": 4399
} | class ____(BaseStringEnumerationType):
"""Set of enumerated xsd:string values."""
| XsdStringEnumeration |
python | kamyu104__LeetCode-Solutions | Python/sorted-gcd-pair-queries.py | {
"start": 146,
"end": 779
} | class ____(object):
def gcdValues(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[int]
:rtype: List[int]
"""
cnt1 = collections.Counter(nums)
cnt2 = [0]*(max(nums)+1)
for g in reversed(xrange(1, len(cnt2))):
c = sum(cnt1[ng] for ng in xrange(g, len(cnt2), g))
cnt2[g] = c*(c-1)//2-sum(cnt2[ng] for ng in xrange(g+g, len(cnt2), g))
prefix = [0]*(len(cnt2)+1)
for i in xrange(len(prefix)-1):
prefix[i+1] = prefix[i]+cnt2[i]
return [bisect.bisect_right(prefix, q)-1 for q in queries]
| Solution |
python | encode__django-rest-framework | tests/browsable_api/views.py | {
"start": 327,
"end": 524
} | class ____(BasePermission):
def has_object_permission(self, request, view, obj):
return request.user.is_staff or (request.user == obj.owner.organization_user.user)
| OrganizationPermissions |
python | weaviate__weaviate-python-client | weaviate/gql/aggregate.py | {
"start": 1791,
"end": 17410
} | class ____(GraphQL):
"""AggregateBuilder class used to aggregate Weaviate objects."""
def __init__(self, class_name: str):
"""Initialize a AggregateBuilder class instance.
Args:
class_name: Class name of the objects to be aggregated.
"""
self._class_name: str = _capitalize_first_letter(class_name)
self._object_limit: Optional[int] = None
self._with_meta_count: bool = False
self._fields: List[str] = []
self._where: Optional[Where] = None
self._group_by_properties: Optional[List[str]] = None
self._uses_filter: bool = False
self._near: Optional[Filter] = None
self._tenant: Optional[str] = None
self._limit: Optional[int] = None
self._hybrid: Optional[Hybrid] = None
def with_tenant(self, tenant: str) -> "AggregateBuilder":
"""Sets a tenant for the query."""
if not isinstance(tenant, str):
raise TypeError("tenant must be of type str")
self._tenant = tenant
self._uses_filter = True
return self
def with_meta_count(self) -> "AggregateBuilder":
"""Set Meta Count to True.
Returns:
Updated AggregateBuilder.
"""
self._with_meta_count = True
return self
def with_object_limit(self, limit: int) -> "AggregateBuilder":
"""Set objectLimit to limit vector search results used within the aggregation query only when with near<MEDIA> filter.
Args:
limit: The object limit.
Returns:
Updated AggregateBuilder.
"""
self._object_limit = limit
return self
def with_limit(self, limit: int) -> "AggregateBuilder":
"""Set limit to limit the number of returned results from the aggregation query.
Args:
limit: The limit.
Returns:
Updated AggregateBuilder.
"""
self._limit = limit
return self
def with_fields(self, field: str) -> "AggregateBuilder":
"""Include a field in the aggregate query.
Args:
field: Field to include in the aggregate query. e.g. '<property_name> { count }'
Returns:
Updated AggregateBuilder.
"""
self._fields.append(field)
return self
def with_where(self, content: dict) -> "AggregateBuilder":
"""Set 'where' filter.
Args:
content: The where filter to include in the aggregate query. See examples below.
Returns:
Updated AggregateBuilder.
"""
self._where = Where(content)
self._uses_filter = True
return self
def with_hybrid(self, content: dict) -> "AggregateBuilder":
"""Get objects using bm25 and vector, then combine the results using a reciprocal ranking algorithm.
Args:
content: The content of the `hybrid` filter to set.
Returns:
Updated AggregateBuilder.
"""
if self._near is not None:
raise AttributeError("Cannot use 'hybrid' and 'near' filters simultaneously.")
self._hybrid = Hybrid(content)
self._uses_filter = True
return self
def with_group_by_filter(self, properties: List[str]) -> "AggregateBuilder":
"""Add a group by filter to the query. Might requires the user to set an additional group by clause using `with_fields(..)`.
Args:
properties: The list of properties that are included in the group by filter.
Generates a filter like: 'groupBy: ["property1", "property2"]'
from a list ["property1", "property2"]
Returns:
Updated AggregateBuilder.
"""
self._group_by_properties = properties
self._uses_filter = True
return self
def with_near_text(self, content: dict) -> "AggregateBuilder":
"""Set `nearText` filter.
This filter can be used with text modules (text2vec).
E.g.: text2vec-contextionary, text2vec-transformers.
NOTE: The 'autocorrect' field is enabled only with the `text-spellcheck` Weaviate module.
Args:
content: The content of the `nearText` filter to set. See examples below.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
if self._near is not None:
raise AttributeError("Cannot use multiple 'near' filters.")
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
self._near = NearText(content)
self._uses_filter = True
return self
def with_near_vector(self, content: dict) -> "AggregateBuilder":
"""Set `nearVector` filter.
Args:
content: The content of the `nearVector` filter to set. See examples below.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
if self._near is not None:
raise AttributeError("Cannot use multiple 'near' filters.")
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
self._near = NearVector(content)
self._uses_filter = True
return self
def with_near_object(self, content: dict) -> "AggregateBuilder":
"""Set `nearObject` filter.
Args:
content: The content of the `nearObject` filter to set. See examples below.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
if self._near is not None:
raise AttributeError("Cannot use multiple 'near' filters.")
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
self._near = NearObject(content, True)
self._uses_filter = True
return self
def with_near_image(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearImage` filter.
Args:
content: The content of the `nearImage` filter to set. See examples below.
encode: Whether to encode the `content["image"]` to base64 and convert to string. If True, the
`content["image"]` can be an image path or a file opened in binary read mode. If False,
the `content["image"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.IMAGE
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content["image"] = file_encoder_b64(content["image"])
self._near = NearImage(content)
self._uses_filter = True
return self
def with_near_audio(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearAudio` filter.
Args:
content: The content of the `nearAudio` filter to set. See examples below.
encode: Whether to encode the `content["audio"]` to base64 and convert to string. If True, the
`content["audio"]` can be an audio path or a file opened in binary read mode. If False,
the `content["audio"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.AUDIO
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content[self._media_type.value] = file_encoder_b64(content[self._media_type.value])
self._near = NearAudio(content)
self._uses_filter = True
return self
def with_near_video(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearVideo` filter.
Args:
content: The content of the `nearVideo` filter to set. See examples below.
encode: Whether to encode the `content["video"]` to base64 and convert to string. If True, the
`content["video"]` can be an video path or a file opened in binary read mode. If False,
the `content["video"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.VIDEO
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content[self._media_type.value] = file_encoder_b64(content[self._media_type.value])
self._near = NearVideo(content)
self._uses_filter = True
return self
def with_near_depth(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearDepth` filter.
Args:
content: The content of the `nearDepth` filter to set. See examples below.
encode: Whether to encode the `content["depth"]` to base64 and convert to string. If True, the
`content["depth"]` can be an depth path or a file opened in binary read mode. If False,
the `content["depth"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.DEPTH
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content[self._media_type.value] = file_encoder_b64(content[self._media_type.value])
self._near = NearDepth(content)
self._uses_filter = True
return self
def with_near_thermal(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearThermal` filter.
Args:
content: The content of the `nearThermal` filter to set. See examples below.
encode: Whether to encode the `content["thermal"]` to base64 and convert to string. If True, the
`content["thermal"]` can be an thermal path or a file opened in binary read mode. If False,
the `content["thermal"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.THERMAL
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content[self._media_type.value] = file_encoder_b64(content[self._media_type.value])
self._near = NearThermal(content)
self._uses_filter = True
return self
def with_near_imu(self, content: dict, encode: bool = True) -> "AggregateBuilder":
"""Set `nearIMU` filter.
Args:
content: The content of the `nearIMU` filter to set. See examples below.
encode: Whether to encode the `content["thermal"]` to base64 and convert to string. If True, the
`content["thermal"]` can be an thermal path or a file opened in binary read mode. If False,
the `content["thermal"]` MUST be a base64 encoded string (NOT bytes, i.e. NOT binary
string that looks like this: b'BASE64ENCODED' but simple 'BASE64ENCODED').
By default True.
Returns:
Updated AggregateBuilder.
Raises:
AttributeError: If another 'near' filter was already set.
"""
self._media_type = MediaType.IMU
if self._near is not None:
raise AttributeError(
"Cannot use multiple 'near' filters, or a 'near' filter along with a 'ask' filter!"
)
if self._hybrid is not None:
raise AttributeError("Cannot use 'near' and 'hybrid' filters simultaneously.")
if encode:
content[self._media_type.value] = file_encoder_b64(content[self._media_type.value])
self._near = NearIMU(content)
self._uses_filter = True
return self
def build(self) -> str:
"""Build the query and return the string.
Returns:
The GraphQL query as a string.
"""
# Path
query = f"{{Aggregate{{{self._class_name}"
# Filter
if self._uses_filter:
query += "("
if self._where is not None:
query += str(self._where)
if self._group_by_properties is not None:
query += f"groupBy: {json.dumps(self._group_by_properties)}"
if self._near is not None:
query += str(self._near)
if self._object_limit:
query += f"objectLimit: {self._object_limit}"
if self._tenant is not None:
query += f'tenant: "{self._tenant}"'
if self._limit is not None:
query += f"limit: {self._limit}"
if self._hybrid is not None:
query += str(self._hybrid)
query += ")"
# Body
query += "{"
if self._with_meta_count:
query += "meta{count}"
for field in self._fields:
query += field
# close
query += "}}}"
return query
| AggregateBuilder |
python | mlflow__mlflow | mlflow/types/llm.py | {
"start": 13802,
"end": 17816
} | class ____(_BaseDataclass):
"""
Common parameters used for chat inference
Args:
temperature (float): A param used to control randomness and creativity during inference.
**Optional**, defaults to ``1.0``
max_tokens (int): The maximum number of new tokens to generate.
**Optional**, defaults to ``None`` (unlimited)
stop (List[str]): A list of tokens at which to stop generation.
**Optional**, defaults to ``None``
n (int): The number of responses to generate.
**Optional**, defaults to ``1``
stream (bool): Whether to stream back responses as they are generated.
**Optional**, defaults to ``False``
top_p (float): An optional param to control sampling with temperature, the model considers
the results of the tokens with top_p probability mass. E.g., 0.1 means only the tokens
comprising the top 10% probability mass are considered.
top_k (int): An optional param for reducing the vocabulary size to top k tokens
(sorted in descending order by their probabilities).
frequency_penalty: (float): An optional param of positive or negative value,
positive values penalize new tokens based on
their existing frequency in the text so far, decreasing the model's likelihood to repeat
the same line verbatim.
presence_penalty: (float): An optional param of positive or negative value,
positive values penalize new tokens based on whether they appear in the text so far,
increasing the model's likelihood to talk about new topics.
custom_inputs (Dict[str, Any]): An optional param to provide arbitrary additional context
to the model. The dictionary values must be JSON-serializable.
tools (List[:py:class:`ToolDefinition`]): An optional list of tools that can be called by
the model.
.. warning::
In an upcoming MLflow release, default values for `temperature`, `n` and `stream` will be
removed. Please provide these values explicitly in your code if needed.
"""
temperature: float = 1.0
max_tokens: int | None = None
stop: list[str] | None = None
n: int = 1
stream: bool = False
top_p: float | None = None
top_k: int | None = None
frequency_penalty: float | None = None
presence_penalty: float | None = None
custom_inputs: dict[str, Any] | None = None
tools: list[ToolDefinition] | None = None
def __post_init__(self):
self._validate_field("temperature", float, True)
self._validate_field("max_tokens", int, False)
self._validate_list("stop", str, False)
self._validate_field("n", int, True)
self._validate_field("stream", bool, True)
self._validate_field("top_p", float, False)
self._validate_field("top_k", int, False)
self._validate_field("frequency_penalty", float, False)
self._validate_field("presence_penalty", float, False)
self._convert_dataclass_list("tools", ToolDefinition, False)
# validate that the custom_inputs field is a map from string to string
if self.custom_inputs is not None:
if not isinstance(self.custom_inputs, dict):
raise ValueError(
"Expected `custom_inputs` to be a dictionary, "
f"received `{type(self.custom_inputs).__name__}`"
)
for key, value in self.custom_inputs.items():
if not isinstance(key, str):
raise ValueError(
"Expected `custom_inputs` to be of type `Dict[str, Any]`, "
f"received key of type `{type(key).__name__}` (key: {key})"
)
@classmethod
def keys(cls) -> set[str]:
"""
Return the keys of the dataclass
"""
return {field.name for field in fields(cls)}
@dataclass()
| ChatParams |
python | lepture__authlib | authlib/integrations/flask_oauth2/requests.py | {
"start": 1281,
"end": 1480
} | class ____(JsonRequest):
def __init__(self, request: Request):
super().__init__(request.method, request.url, request.headers)
self.payload = FlaskJsonPayload(request)
| FlaskJsonRequest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 19152,
"end": 19270
} | class ____(test.TestCase, _LUSolve):
use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
| LUSolveStatic |
python | modin-project__modin | modin/config/envvars.py | {
"start": 1290,
"end": 2597
} | class ____(Parameter, type=str, abstract=True):
"""Base class for environment variables-based configuration."""
varname: Optional[str] = None
@classmethod
def _get_value_from_config(cls) -> Any:
"""
Read the value from environment variable.
Returns
-------
Any
Config raw value if it's set, otherwise `_UNSET`.
"""
if cls.varname is None:
raise TypeError("varname should not be None")
if cls.varname not in os.environ:
return _UNSET
raw = os.environ[cls.varname]
if not _TYPE_PARAMS[cls.type].verify(raw):
# TODO: use and test a better error message, like "Invalid value
# for {cls.varname}: {raw}"
raise ValueError(f"Unsupported raw value: {raw}")
return _TYPE_PARAMS[cls.type].decode(raw)
@classmethod
def get_help(cls) -> str:
"""
Generate user-presentable help for the config.
Returns
-------
str
"""
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
| EnvironmentVariable |
python | lxml__lxml | src/lxml/html/tests/test_select.py | {
"start": 47,
"end": 1771
} | class ____(unittest.TestCase):
@staticmethod
def _evaluate_select(options, multiple=False):
options = ''.join('<option' + (' selected="selected"' if selected else '') + '>' + option + '</option>'
for option, selected in options)
string = '<title>test</title><form><select%s>%s</select></form>' % \
(' multiple="multiple"' if multiple else '', options)
return lxml.html.fromstring(string).find('.//select').value
def test_single_select_value_no_options(self):
self.assertEqual(
self._evaluate_select([]),
None)
def test_single_select_value_no_selected_option(self):
# If no option is selected, the HTML5 specification requires the first option to get selected.
self.assertEqual(
self._evaluate_select([('a', False), ('b', False)]),
'a')
def test_single_select_value_multiple_selected_options(self):
# If multiple options are selected, the proposed HTML 5.1 specification
# requires all but the last selected options to get deselected.
self.assertEqual(
self._evaluate_select([('a', True), ('b', True)]),
'b')
def test_multiple_select_value_no_selected_option(self):
self.assertEqual(
self._evaluate_select([('a', False), ('b', False)], multiple=True),
set())
def test_multiple_select_value_multiple_selected_options(self):
self.assertEqual(
self._evaluate_select([('a', True), ('b', True)], multiple=True),
{'a', 'b'})
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromModule(sys.modules[__name__])
| SelectTest |
python | getsentry__sentry | tests/sentry/ratelimits/utils/test_get_ratelimit_key.py | {
"start": 1012,
"end": 1555
} | class ____(Endpoint):
permission_classes = (AllowAny,)
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(20, 1, CONCURRENT_RATE_LIMIT),
RateLimitCategory.USER: RateLimit(20, 1, CONCURRENT_RATE_LIMIT),
RateLimitCategory.ORGANIZATION: RateLimit(20, 1, CONCURRENT_RATE_LIMIT),
},
},
)
def get(self, request):
raise NotImplementedError
@all_silo_test
| APITestEndpoint |
python | getsentry__sentry | src/sentry/api/endpoints/organization_releases.py | {
"start": 37459,
"end": 39421
} | class ____(OrganizationReleasesBaseEndpoint):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
def get(self, request: Request, organization: Organization) -> Response:
"""
List an Organization's Releases specifically for building timeseries
```````````````````````````````
Return a list of releases for a given organization, sorted for most recent releases.
:pparam string organization_id_or_slug: the id or slug of the organization
"""
query = request.GET.get("query")
try:
filter_params = self.get_filter_params(request, organization, date_filter_optional=True)
except NoProjects:
return Response([])
queryset = (
Release.objects.filter(
organization=organization, projects__id__in=filter_params["project_id"]
)
.annotate(
date=F("date_added"),
)
.values("version", "date")
.order_by("-date")
.distinct()
)
queryset = add_date_filter_to_queryset(queryset, filter_params)
queryset = add_environment_to_queryset(queryset, filter_params)
if query:
try:
queryset = _filter_releases_by_query(queryset, organization, query, filter_params)
except InvalidSearchQuery as e:
return Response(
{"detail": str(e)},
status=400,
)
return self.paginate(
request=request,
queryset=queryset,
paginator_cls=OffsetPaginator,
on_results=lambda x: [
{"version": release["version"], "date": serialize(release["date"])} for release in x
],
default_per_page=1000,
max_per_page=1000,
max_limit=1000,
order_by="-date",
)
| OrganizationReleasesStatsEndpoint |
python | sqlalchemy__sqlalchemy | test/orm/test_attributes.py | {
"start": 40549,
"end": 47660
} | class ____(fixtures.ORMTest):
def _fixture(self):
class Post:
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
return other is not None and other.name == self.name
class Blog:
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
return other is not None and other.name == self.name
lazy_posts = Mock()
instrumentation.register_class(Post)
instrumentation.register_class(Blog)
_register_attribute(
Post,
"blog",
uselist=False,
backref="posts",
trackparent=True,
useobject=True,
)
_register_attribute(
Blog,
"posts",
uselist=True,
backref="blog",
callable_=lazy_posts,
trackparent=True,
useobject=True,
)
return Post, Blog, lazy_posts
def test_lazy_add(self):
Post, Blog, lazy_posts = self._fixture()
p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3")
lazy_posts.return_value = attributes.PASSIVE_NO_RESULT
b = Blog("blog 1")
b1_state = attributes.instance_state(b)
p = Post("post 4")
p.blog = b
eq_(
lazy_posts.mock_calls,
[call(b1_state, attributes.PASSIVE_NO_FETCH)],
)
p = Post("post 5")
# setting blog doesn't call 'posts' callable, calls with no fetch
p.blog = b
eq_(
lazy_posts.mock_calls,
[
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_NO_FETCH),
],
)
lazy_posts.return_value = [p1, p2, p3]
# calling backref calls the callable, populates extra posts
eq_(b.posts, [p1, p2, p3, Post("post 4"), Post("post 5")])
eq_(
lazy_posts.mock_calls,
[
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_OFF),
],
)
def test_lazy_history_collection(self):
Post, Blog, lazy_posts = self._fixture()
p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3")
lazy_posts.return_value = [p1, p2, p3]
b = Blog("blog 1")
p = Post("post 4")
p.blog = b
p4 = Post("post 5")
p4.blog = b
eq_(lazy_posts.call_count, 1)
eq_(
attributes.instance_state(b).get_history(
"posts", attributes.PASSIVE_OFF
),
([p, p4], [p1, p2, p3], []),
)
eq_(lazy_posts.call_count, 1)
def test_passive_history_collection_no_value(self):
Post, Blog, lazy_posts = self._fixture()
lazy_posts.return_value = attributes.PASSIVE_NO_RESULT
b = Blog("blog 1")
p = Post("post 1")
state, dict_ = (
attributes.instance_state(b),
attributes.instance_dict(b),
)
# this sets up NO_VALUE on b.posts
p.blog = b
eq_(state.committed_state, {"posts": attributes.NO_VALUE})
assert "posts" not in dict_
# then suppose the object was made transient again,
# the lazy loader would return this
lazy_posts.return_value = attributes.ATTR_EMPTY
p2 = Post("asdf")
p2.blog = b
eq_(state.committed_state, {"posts": attributes.NO_VALUE})
eq_(dict_["posts"], [p2])
# then this would fail.
eq_(
Blog.posts.impl.get_history(
state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE
),
([p2], (), ()),
)
eq_(
Blog.posts.impl.get_all_pending(state, dict_),
[(attributes.instance_state(p2), p2)],
)
def test_state_on_add_remove(self):
Post, Blog, lazy_posts = self._fixture()
lazy_posts.return_value = attributes.PASSIVE_NO_RESULT
b = Blog("blog 1")
b1_state = attributes.instance_state(b)
p = Post("post 1")
p.blog = b
eq_(
lazy_posts.mock_calls,
[call(b1_state, attributes.PASSIVE_NO_FETCH)],
)
p.blog = None
eq_(
lazy_posts.mock_calls,
[
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_NO_FETCH),
],
)
lazy_posts.return_value = []
eq_(b.posts, [])
eq_(
lazy_posts.mock_calls,
[
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_NO_FETCH),
call(b1_state, attributes.PASSIVE_OFF),
],
)
def test_pending_combines_with_lazy(self):
Post, Blog, lazy_posts = self._fixture()
lazy_posts.return_value = attributes.PASSIVE_NO_RESULT
b = Blog("blog 1")
p = Post("post 1")
p2 = Post("post 2")
p.blog = b
eq_(lazy_posts.call_count, 1)
lazy_posts.return_value = [p, p2]
# lazy loaded + pending get added together.
# This isn't seen often with the ORM due
# to usual practices surrounding the
# load/flush/load cycle.
eq_(b.posts, [p, p2, p])
eq_(lazy_posts.call_count, 2)
def test_normal_load(self):
Post, Blog, lazy_posts = self._fixture()
lazy_posts.return_value = (p1, p2, p3) = [
Post("post 1"),
Post("post 2"),
Post("post 3"),
]
b = Blog("blog 1")
# assign without using backref system
p2.__dict__["blog"] = b
eq_(b.posts, [Post("post 1"), Post("post 2"), Post("post 3")])
eq_(lazy_posts.call_count, 1)
p2.blog = None
p4 = Post("post 4")
p4.blog = b
eq_(b.posts, [Post("post 1"), Post("post 3"), Post("post 4")])
b_state = attributes.instance_state(b)
eq_(lazy_posts.call_count, 1)
eq_(lazy_posts.mock_calls, [call(b_state, attributes.PASSIVE_OFF)])
def test_commit_removes_pending(self):
Post, Blog, lazy_posts = self._fixture()
p1 = Post("post 1")
lazy_posts.return_value = attributes.PASSIVE_NO_RESULT
b = Blog("blog 1")
p1.blog = b
b_state = attributes.instance_state(b)
p1_state = attributes.instance_state(p1)
b_state._commit_all(attributes.instance_dict(b))
p1_state._commit_all(attributes.instance_dict(p1))
lazy_posts.return_value = [p1]
eq_(b.posts, [Post("post 1")])
eq_(
lazy_posts.mock_calls,
[
call(b_state, attributes.PASSIVE_NO_FETCH),
call(b_state, attributes.PASSIVE_OFF),
],
)
| PendingBackrefTest |
python | allegroai__clearml | clearml/binding/frameworks/lightgbm_bind.py | {
"start": 344,
"end": 5299
} | class ____(PatchBaseModelIO):
_current_task = None
__patched = None
@staticmethod
def update_current_task(task: Any, **kwargs: Any) -> None:
PatchLIGHTgbmModelIO._current_task = task
if not task:
return
PatchLIGHTgbmModelIO._patch_model_io()
PostImportHookPatching.add_on_import("lightgbm", PatchLIGHTgbmModelIO._patch_model_io)
@staticmethod
def _patch_model_io() -> None:
if PatchLIGHTgbmModelIO.__patched:
return
if "lightgbm" not in sys.modules:
return
PatchLIGHTgbmModelIO.__patched = True
# noinspection PyBroadException
try:
import lightgbm as lgb # noqa
lgb.Booster.save_model = _patched_call(lgb.Booster.save_model, PatchLIGHTgbmModelIO._save)
lgb.train = _patched_call(lgb.train, PatchLIGHTgbmModelIO._train)
lgb.Booster = _patched_call(lgb.Booster, PatchLIGHTgbmModelIO._load)
except ImportError:
pass
except Exception:
pass
@staticmethod
def _save(original_fn: Callable, obj: Any, f: Union[str, IO], *args: Any, **kwargs: Any) -> Any:
ret = original_fn(obj, f, *args, **kwargs)
if not PatchLIGHTgbmModelIO._current_task:
return ret
if isinstance(f, six.string_types):
filename = f
elif hasattr(f, "name"):
filename = f.name
# noinspection PyBroadException
try:
f.flush()
except Exception:
pass
else:
filename = None
# give the model a descriptive name based on the file name
# noinspection PyBroadException
try:
model_name = Path(filename).stem
except Exception:
model_name = None
WeightsFileHandler.create_output_model(
obj,
filename,
Framework.lightgbm,
PatchLIGHTgbmModelIO._current_task,
singlefile=True,
model_name=model_name,
)
return ret
@staticmethod
def _load(original_fn: Callable, model_file: Optional[Union[str, IO]] = None, *args: Any, **kwargs: Any) -> Any:
if not PatchLIGHTgbmModelIO._current_task:
return original_fn(model_file, *args, **kwargs)
if isinstance(model_file, six.string_types):
filename = model_file
elif hasattr(model_file, "name"):
filename = model_file.name
elif len(args) == 1 and isinstance(args[0], six.string_types):
filename = args[0]
else:
filename = None
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
filename = WeightsFileHandler.restore_weights_file(
empty, filename, Framework.xgboost, PatchLIGHTgbmModelIO._current_task
)
model = original_fn(model_file=filename or model_file, *args, **kwargs)
else:
# try to load model before registering, in case we fail
model = original_fn(model_file=model_file, *args, **kwargs)
WeightsFileHandler.restore_weights_file(
empty, filename, Framework.lightgbm, PatchLIGHTgbmModelIO._current_task
)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _train(original_fn: Callable, *args: Any, **kwargs: Any) -> Any:
def trains_lightgbm_callback() -> Callable[[Any], None]:
def callback(env: Any) -> None:
# logging the results to scalars section
# noinspection PyBroadException
try:
logger = PatchLIGHTgbmModelIO._current_task.get_logger()
iteration = env.iteration
for data_title, data_series, value, _ in env.evaluation_result_list:
logger.report_scalar(
title=data_title,
series=data_series,
value="{:.6f}".format(value),
iteration=iteration,
)
except Exception:
pass
return callback
kwargs.setdefault("callbacks", []).append(trains_lightgbm_callback())
ret = original_fn(*args, **kwargs)
if not PatchLIGHTgbmModelIO._current_task:
return ret
params = args[0] if args else kwargs.get("params", {})
for k, v in params.items():
if isinstance(v, set):
params[k] = list(v)
if params:
PatchLIGHTgbmModelIO._current_task.connect(params)
return ret
| PatchLIGHTgbmModelIO |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_natural_language.py | {
"start": 1398,
"end": 5656
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudNaturalLanguageHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_credentials"
)
@mock.patch("airflow.providers.google.cloud.hooks.natural_language.LanguageServiceClient")
def test_language_service_client_creation(self, mock_client, mock_get_creds):
result = self.hook.get_conn()
mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO)
assert mock_client.return_value == result
assert self.hook._conn == result
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_entities(self, get_conn):
get_conn.return_value.analyze_entities.return_value = API_RESPONSE
result = self.hook.analyze_entities(document=DOCUMENT, encoding_type=ENCODING_TYPE)
assert result == API_RESPONSE
get_conn.return_value.analyze_entities.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_entity_sentiment(self, get_conn):
get_conn.return_value.analyze_entity_sentiment.return_value = API_RESPONSE
result = self.hook.analyze_entity_sentiment(document=DOCUMENT, encoding_type=ENCODING_TYPE)
assert result == API_RESPONSE
get_conn.return_value.analyze_entity_sentiment.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_sentiment(self, get_conn):
get_conn.return_value.analyze_sentiment.return_value = API_RESPONSE
result = self.hook.analyze_sentiment(document=DOCUMENT, encoding_type=ENCODING_TYPE)
assert result == API_RESPONSE
get_conn.return_value.analyze_sentiment.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_syntax(self, get_conn):
get_conn.return_value.analyze_syntax.return_value = API_RESPONSE
result = self.hook.analyze_syntax(document=DOCUMENT, encoding_type=ENCODING_TYPE)
assert result == API_RESPONSE
get_conn.return_value.analyze_syntax.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=DEFAULT, timeout=None, metadata=()
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_annotate_text(self, get_conn):
get_conn.return_value.annotate_text.return_value = API_RESPONSE
result = self.hook.annotate_text(document=DOCUMENT, encoding_type=ENCODING_TYPE, features=None)
assert result == API_RESPONSE
get_conn.return_value.annotate_text.assert_called_once_with(
document=DOCUMENT,
encoding_type=ENCODING_TYPE,
features=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_classify_text(self, get_conn):
get_conn.return_value.classify_text.return_value = API_RESPONSE
result = self.hook.classify_text(document=DOCUMENT)
assert result == API_RESPONSE
get_conn.return_value.classify_text.assert_called_once_with(
document=DOCUMENT, retry=DEFAULT, timeout=None, metadata=()
)
| TestCloudNaturalLanguageHook |
python | ray-project__ray | python/ray/data/preprocessor.py | {
"start": 534,
"end": 698
} | class ____(RuntimeError):
"""Error raised when the preprocessor needs to be fitted first."""
pass
@PublicAPI(stability="beta")
| PreprocessorNotFittedException |
python | getsentry__sentry | tests/apidocs/endpoints/releases/test_deploys.py | {
"start": 321,
"end": 2457
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
project = self.create_project(name="foo")
release = self.create_release(project=project, version="1")
release.add_project(project)
prod_deploy = Deploy.objects.create(
environment_id=Environment.objects.create(
organization_id=project.organization_id, name="production"
).id,
organization_id=project.organization_id,
release=release,
date_finished=datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1),
)
staging_deploy = Deploy.objects.create(
environment_id=Environment.objects.create(
organization_id=project.organization_id, name="staging"
).id,
organization_id=project.organization_id,
release=release,
)
ReleaseProjectEnvironment.objects.create(
project=project,
release_id=release.id,
environment_id=prod_deploy.environment_id,
last_deploy_id=prod_deploy.id,
)
ReleaseProjectEnvironment.objects.create(
project=project,
release_id=release.id,
environment_id=staging_deploy.environment_id,
last_deploy_id=staging_deploy.id,
)
self.url = reverse(
"sentry-api-0-organization-release-deploys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"version": release.version,
},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_post(self) -> None:
data = {
"name": "foo",
"environment": "production",
"url": "https://www.example.com",
}
response = self.client.post(self.url, data)
request = RequestFactory().post(self.url, data)
self.validate_schema(request, response)
| ReleaseDeploysDocs |
python | spack__spack | lib/spack/spack/vendor/jinja2/lexer.py | {
"start": 13075,
"end": 13414
} | class ____(tuple):
"""A special tuple for marking a point in the state that can have
lstrip applied.
"""
__slots__ = ()
# Even though it looks like a no-op, creating instances fails
# without this.
def __new__(cls, *members, **kwargs): # type: ignore
return super().__new__(cls, members)
| OptionalLStrip |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ltc_address.py | {
"start": 1891,
"end": 4641
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid Litecoin addresses."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"LeF6vC9k1qfFDEj6UGjM5e4fwHtiKsakTd",
"mkwV3DZkgYwKaXkphBtcXAjsYQEqZ8aB3x",
"ltc1qs54v679auflz9y88nleyy6qknalwwmfx6kcf8z",
"M8T1B2Z97gVdvmfkQcAtYbEepune1tzGua",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_ltc_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidLtcAddress().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidLtcAddress |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 132550,
"end": 134245
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
username: str,
password: Optional[str] = None,
jdbc_url_params: Optional[str] = None,
ssl: Optional[bool] = None,
):
"""Airbyte Source for Cockroachdb.
Documentation can be found at https://docs.airbyte.com/integrations/sources/cockroachdb
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): Port of the database.
database (str): Name of the database.
username (str): Username to use to access the database.
password (Optional[str]): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (Eg. key1=value1&key2=value2&key3=value3). For more information read about JDBC URL parameters.
ssl (Optional[bool]): Encrypt client/server communications for increased security.
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
self.ssl = check.opt_bool_param(ssl, "ssl")
super().__init__("Cockroachdb", name)
| CockroachdbSource |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 2582,
"end": 5521
} | class ____(ASTNode):
"""
AST node representing a text sequence.
The sequence is composed of one or more LeafNodes or any ASTNode.
"""
KIND = NodeKind.TEXT
def __init__(self, *tokens):
ASTNode.__init__(self)
self._tokens = tokens
for i, token in enumerate(tokens):
token.index_in_parent = i
token.parent = self
token.depth = self.depth + 1
@property
def tokens(self):
return self._tokens
@tokens.setter
def tokens(self, tokens):
self._tokens = tokens
for i, token in enumerate(tokens):
token.index_in_parent = i
token.depth = self.depth + 1
token.parent = self
def compute_position(self, offset):
polygon = []
current_offset = offset
for i, token in enumerate(self._tokens):
token.depth = self.depth + 1
current_offset = token.compute_position(current_offset)
if token.mark_for_position:
position = token.position
if i == len(self._tokens) - 1:
if len(position) == 1:
if isinstance(position, list):
position = position[0]
x, y = position[0]
position = ((x, y), (x, y + 1))
if isinstance(token, LeafNode):
if token.name == 'EPSILON':
position = ((x, y), (x, y))
polygon += list(position)
flatten_polygon = []
for segment in polygon:
if isinstance(segment, list):
flatten_polygon += segment
else:
flatten_polygon.append(segment)
segments = []
current_segment = []
current_x = None
current_y = None
previous_x = None
for x, y in flatten_polygon:
if current_x is None:
previous_x = x
current_segment.append((x, y))
elif x == current_x + 1:
current_segment.append((current_x, current_y))
segments.append(current_segment)
current_segment = [(x, y)]
previous_x = x
current_x, current_y = x, y
if current_x == previous_x:
if len(current_segment) > 0:
current_segment.append((current_x, current_y))
if len(current_segment) > 0:
segments.append(current_segment)
self.position = segments
return current_offset
def text(self):
return ''.join([token.text() for token in self._tokens])
def accept(self, visitor):
visitor.visit(self)
for token in self._tokens:
token.accept(visitor)
def delete(self):
self.to_delete = True
for token in self.tokens:
token.delete()
| TextNode |
python | facebook__pyre-check | scripts/callgraph_utilities.py | {
"start": 4536,
"end": 5321
} | class ____(InputFormat):
def extract_caller(self, qualifier: str) -> str:
return self.format_qualifier(qualifier)
@staticmethod
def format_qualifier(qualifier: str) -> str:
qualifier = qualifier.replace("<locals>.", "")
split = qualifier.split(":")
if len(split) != 2:
return qualifier
module_qualifier, callable = split
return f"{module_qualifier}.{callable}"
def extract_callee(self, callee: JSON) -> str:
if not isinstance(callee, str):
raise ValueError(
f"Expected value for individual callee to be a string, got {type(callee)}: {callee}"
)
mapped_qualifier = self.format_qualifier(callee)
return mapped_qualifier
| DynamicCallGraphInputFormat |
python | spack__spack | lib/spack/spack/llnl/util/tty/log.py | {
"start": 4407,
"end": 11397
} | class ____(preserve_terminal_settings):
"""Context manager to disable line editing and echoing.
Use this with ``sys.stdin`` for keyboard input, e.g.::
with keyboard_input(sys.stdin) as kb:
while True:
kb.check_fg_bg()
r, w, x = select.select([sys.stdin], [], [])
# ... do something with keypresses ...
The ``keyboard_input`` context manager disables canonical
(line-based) input and echoing, so that keypresses are available on
the stream immediately, and they are not printed to the
terminal. Typically, standard input is line-buffered, which means
keypresses won't be sent until the user hits return. In this mode, a
user can hit, e.g., ``v``, and it will be read on the other end of the
pipe immediately but not printed.
The handler takes care to ensure that terminal changes only take
effect when the calling process is in the foreground. If the process
is backgrounded, canonical mode and echo are re-enabled. They are
disabled again when the calling process comes back to the foreground.
This context manager works through a single signal handler for
``SIGTSTP``, along with a poolling routine called ``check_fg_bg()``.
Here are the relevant states, transitions, and POSIX signals::
[Running] -------- Ctrl-Z sends SIGTSTP ------------.
[ in FG ] <------- fg sends SIGCONT --------------. |
^ | |
| fg (no signal) | |
| | v
[Running] <------- bg sends SIGCONT ---------- [Stopped]
[ in BG ] [ in BG ]
We handle all transitions exept for ``SIGTSTP`` generated by Ctrl-Z
by periodically calling ``check_fg_bg()``. This routine notices if
we are in the background with canonical mode or echo disabled, or if
we are in the foreground without canonical disabled and echo enabled,
and it fixes the terminal settings in response.
``check_fg_bg()`` works *except* for when the process is stopped with
``SIGTSTP``. We cannot rely on a periodic timer in this case, as it
may not rrun before the process stops. We therefore restore terminal
settings in the ``SIGTSTP`` handler.
Additional notes:
* We mostly use polling here instead of a SIGARLM timer or a
thread. This is to avoid the complexities of many interrupts, which
seem to make system calls (like I/O) unreliable in older Python
versions (2.6 and 2.7). See these issues for details:
1. https://www.python.org/dev/peps/pep-0475/
2. https://bugs.python.org/issue8354
There are essentially too many ways for asynchronous signals to go
wrong if we also have to support older Python versions, so we opt
not to use them.
* ``SIGSTOP`` can stop a process (in the foreground or background),
but it can't be caught. Because of this, we can't fix any terminal
settings on ``SIGSTOP``, and the terminal will be left with
``ICANON`` and ``ECHO`` disabled until it is resumes execution.
* Technically, a process *could* be sent ``SIGTSTP`` while running in
the foreground, without the shell backgrounding that process. This
doesn't happen in practice, and we assume that ``SIGTSTP`` always
means that defaults should be restored.
* We rely on ``termios`` support. Without it, or if the stream isn't
a TTY, ``keyboard_input`` has no effect.
"""
def __init__(self, stdin: Optional[IO[str]]) -> None:
"""Create a context manager that will enable keyboard input on stream.
Args:
stdin: text io wrapper of stdin (keyboard input)
Note that stdin can be None, in which case ``keyboard_input`` will do nothing.
"""
super().__init__(stdin)
def _is_background(self) -> bool:
"""True iff calling process is in the background."""
assert self.stdin is not None, "stdin should be available"
return _is_background_tty(self.stdin)
def _get_canon_echo_flags(self) -> Tuple[bool, bool]:
"""Get current termios canonical and echo settings."""
assert termios is not None and self.stdin is not None
cfg = termios.tcgetattr(self.stdin)
return (bool(cfg[3] & termios.ICANON), bool(cfg[3] & termios.ECHO))
def _enable_keyboard_input(self) -> None:
"""Disable canonical input and echoing on ``self.stdin``."""
# "enable" input by disabling canonical mode and echo
assert termios is not None and self.stdin is not None
new_cfg = termios.tcgetattr(self.stdin)
new_cfg[3] &= ~termios.ICANON
new_cfg[3] &= ~termios.ECHO
# Apply new settings for terminal
with ignore_signal(signal.SIGTTOU):
termios.tcsetattr(self.stdin, termios.TCSANOW, new_cfg)
def _tstp_handler(self, signum, frame):
self._restore_default_terminal_settings()
os.kill(os.getpid(), signal.SIGSTOP)
def check_fg_bg(self) -> None:
# old_cfg is set up in __enter__ and indicates that we have
# termios and a valid stream.
if not self.old_cfg:
return
# query terminal flags and fg/bg status
flags = self._get_canon_echo_flags()
bg = self._is_background()
# restore sanity if flags are amiss -- see diagram in class docs
if not bg and any(flags): # fg, but input not enabled
self._enable_keyboard_input()
elif bg and not all(flags): # bg, but input enabled
self._restore_default_terminal_settings()
def __enter__(self) -> "keyboard_input":
"""Enable immediate keypress input, while this process is foreground.
If the stream is not a TTY or the system doesn't support termios,
do nothing.
"""
super().__enter__()
self.old_handlers = {}
# Ignore all this if the input stream is not a tty.
if not self.stdin or not self.stdin.isatty():
return self
if termios:
# Install a signal handler to disable/enable keyboard input
# when the process moves between foreground and background.
self.old_handlers[signal.SIGTSTP] = signal.signal(signal.SIGTSTP, self._tstp_handler)
# enable keyboard input initially (if foreground)
if not self._is_background():
self._enable_keyboard_input()
return self
def __exit__(self, exc_type, exception, traceback):
"""If termios was available, restore old settings."""
super().__exit__(exc_type, exception, traceback)
# restore SIGSTP and SIGCONT handlers
if self.old_handlers:
for signum, old_handler in self.old_handlers.items():
signal.signal(signum, old_handler)
| keyboard_input |
python | nedbat__coveragepy | coverage/plugin_support.py | {
"start": 5263,
"end": 7025
} | class ____(CoveragePlugin):
"""Wrap a plugin, and use debug to report on what it's doing."""
def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
super().__init__()
self.plugin = plugin
self.debug = debug
def file_tracer(self, filename: str) -> FileTracer | None:
tracer = self.plugin.file_tracer(filename)
self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
if tracer:
debug = self.debug.add_label(f"file {filename!r}")
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
def file_reporter(self, filename: str) -> FileReporter | str:
reporter = self.plugin.file_reporter(filename)
assert isinstance(reporter, FileReporter)
self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
if reporter:
debug = self.debug.add_label(f"file {filename!r}")
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
def dynamic_context(self, frame: FrameType) -> str | None:
context = self.plugin.dynamic_context(frame)
self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
return context
def find_executable_files(self, src_dir: str) -> Iterable[str]:
executable_files = self.plugin.find_executable_files(src_dir)
self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}")
return executable_files
def configure(self, config: TConfigurable) -> None:
self.debug.write(f"configure({config!r})")
self.plugin.configure(config)
def sys_info(self) -> Iterable[tuple[str, Any]]:
return self.plugin.sys_info()
| DebugPluginWrapper |
python | kamyu104__LeetCode-Solutions | Python/power-of-two.py | {
"start": 29,
"end": 179
} | class ____(object):
# @param {integer} n
# @return {boolean}
def isPowerOfTwo(self, n):
return n > 0 and (n & (n - 1)) == 0
| Solution |
python | tensorflow__tensorflow | tensorflow/python/util/fast_module_type_test.py | {
"start": 908,
"end": 1223
} | class ____(FastModuleType):
def _getattribute1(self, name): # pylint: disable=unused-argument
return 2
def _getattribute2(self, name): # pylint: disable=unused-argument
raise AttributeError("Pass to getattr")
def _getattr(self, name): # pylint: disable=unused-argument
return 3
| ChildFastModule |
python | PrefectHQ__prefect | src/prefect/runner/_observers.py | {
"start": 677,
"end": 777
} | class ____(Protocol):
def __call__(self, flow_run_id: uuid.UUID) -> None: ...
| OnCancellingCallback |
python | PrefectHQ__prefect | src/prefect/task_runners.py | {
"start": 1404,
"end": 7861
} | class ____(abc.ABC, Generic[F]):
"""
Abstract base class for task runners.
A task runner is responsible for submitting tasks to the task run engine running
in an execution environment. Submitted tasks are non-blocking and return a future
object that can be used to wait for the task to complete and retrieve the result.
Task runners are context managers and should be used in a `with` block to ensure
proper cleanup of resources.
"""
def __init__(self):
self.logger: "logging.Logger" = get_logger(f"task_runner.{self.name}")
self._started = False
@property
def name(self) -> str:
"""The name of this task runner"""
return type(self).__name__.lower().replace("taskrunner", "")
@abc.abstractmethod
def duplicate(self) -> Self:
"""Return a new instance of this task runner with the same configuration."""
...
@overload
@abc.abstractmethod
def submit(
self,
task: "Task[P, CoroutineType[Any, Any, R]]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
dependencies: dict[str, set[RunInput]] | None = None,
) -> F: ...
@overload
@abc.abstractmethod
def submit(
self,
task: "Task[Any, R]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
dependencies: dict[str, set[RunInput]] | None = None,
) -> F: ...
@abc.abstractmethod
def submit(
self,
task: "Task[P, R | CoroutineType[Any, Any, R]]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
dependencies: dict[str, set[RunInput]] | None = None,
) -> F: ...
def map(
self,
task: "Task[P, R | CoroutineType[Any, Any, R]]",
parameters: dict[str, Any | unmapped[Any] | allow_failure[Any]],
wait_for: Iterable[PrefectFuture[R]] | None = None,
) -> PrefectFutureList[F]:
"""
Submit multiple tasks to the task run engine.
Args:
task: The task to submit.
parameters: The parameters to use when running the task.
wait_for: A list of futures that the task depends on.
Returns:
An iterable of future objects that can be used to wait for the tasks to
complete and retrieve the results.
"""
if not self._started:
raise RuntimeError(
"The task runner must be started before submitting work."
)
from prefect.utilities.engine import (
collect_task_run_inputs_sync,
resolve_inputs_sync,
)
# We need to resolve some futures to map over their data, collect the upstream
# links beforehand to retain relationship tracking.
task_inputs = {
k: collect_task_run_inputs_sync(v, max_depth=0)
for k, v in parameters.items()
}
# Resolve the top-level parameters in order to get mappable data of a known length.
# Nested parameters will be resolved in each mapped child where their relationships
# will also be tracked.
parameters = resolve_inputs_sync(parameters, max_depth=0)
# Ensure that any parameters in kwargs are expanded before this check
parameters = explode_variadic_parameter(task.fn, parameters)
iterable_parameters: dict[str, Any] = {}
static_parameters: dict[str, Any] = {}
annotated_parameters: dict[str, Any] = {}
for key, val in parameters.items():
if isinstance(val, (allow_failure, quote)):
# Unwrap annotated parameters to determine if they are iterable
annotated_parameters[key] = val
val = val.unwrap()
if isinstance(val, unmapped):
static_parameters[key] = val.value
elif isiterable(val):
iterable_parameters[key] = list(val)
else:
static_parameters[key] = val
if not len(iterable_parameters):
raise MappingMissingIterable(
"No iterable parameters were received. Parameters for map must "
f"include at least one iterable. Parameters: {parameters}"
)
iterable_parameter_lengths = {
key: len(val) for key, val in iterable_parameters.items()
}
lengths = set(iterable_parameter_lengths.values())
if len(lengths) > 1:
raise MappingLengthMismatch(
"Received iterable parameters with different lengths. Parameters for map"
f" must all be the same length. Got lengths: {iterable_parameter_lengths}"
)
map_length = list(lengths)[0]
futures: list[PrefectFuture[Any]] = []
for i in range(map_length):
call_parameters: dict[str, Any] = {
key: value[i] for key, value in iterable_parameters.items()
}
call_parameters.update(
{key: value for key, value in static_parameters.items()}
)
# Add default values for parameters; these are skipped earlier since they should
# not be mapped over
for key, value in get_parameter_defaults(task.fn).items():
call_parameters.setdefault(key, value)
# Re-apply annotations to each key again
for key, annotation in annotated_parameters.items():
call_parameters[key] = annotation.rewrap(call_parameters[key])
# Collapse any previously exploded kwargs
call_parameters = collapse_variadic_parameters(task.fn, call_parameters)
futures.append(
self.submit(
task=task,
parameters=call_parameters,
wait_for=wait_for,
dependencies=task_inputs,
)
)
return PrefectFutureList(futures)
def __enter__(self) -> Self:
if self._started:
raise RuntimeError("This task runner is already started")
self.logger.debug("Starting task runner")
self._started = True
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.logger.debug("Stopping task runner")
self._started = False
| TaskRunner |
python | python-visualization__folium | folium/plugins/feature_group_sub_group.py | {
"start": 107,
"end": 2705
} | class ____(JSCSSMixin, Layer):
"""
Creates a Feature Group that adds its child layers into a parent group when
added to a map (e.g. through LayerControl). Useful to create nested groups,
or cluster markers from multiple overlays. From [0].
[0] https://github.com/ghybs/Leaflet.FeatureGroup.SubGroup
Parameters
----------
group : Layer
The MarkerCluster or FeatureGroup containing this subgroup.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
Examples
-------
Nested groups
=============
>>> fg = folium.FeatureGroup() # Main group
>>> g1 = folium.plugins.FeatureGroupSubGroup(fg, "g1") # First subgroup of fg
>>> g2 = folium.plugins.FeatureGroupSubGroup(fg, "g2") # Second subgroup of fg
>>> m.add_child(fg)
>>> m.add_child(g1)
>>> m.add_child(g2)
>>> g1.add_child(folium.Marker([0, 0]))
>>> g2.add_child(folium.Marker([0, 1]))
>>> folium.LayerControl().add_to(m)
Multiple overlays part of the same cluster group
=====================================================
>>> mcg = folium.plugins.MarkerCluster(
... control=False
... ) # Marker Cluster, hidden in controls
>>> g1 = folium.plugins.FeatureGroupSubGroup(mcg, "g1") # First group, in mcg
>>> g2 = folium.plugins.FeatureGroupSubGroup(mcg, "g2") # Second group, in mcg
>>> m.add_child(mcg)
>>> m.add_child(g1)
>>> m.add_child(g2)
>>> g1.add_child(folium.Marker([0, 0]))
>>> g2.add_child(folium.Marker([0, 1]))
>>> folium.LayerControl().add_to(m)
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.featureGroup.subGroup(
{{ this._group.get_name() }}
);
{% endmacro %}
"""
)
default_js = [
(
"featuregroupsubgroupjs",
"https://unpkg.com/leaflet.featuregroup.subgroup@1.0.2/dist/leaflet.featuregroup.subgroup.js",
),
]
def __init__(self, group, name=None, overlay=True, control=True, show=True):
super().__init__(name=name, overlay=overlay, control=control, show=show)
self._group = group
self._name = "FeatureGroupSubGroup"
| FeatureGroupSubGroup |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow_multiprotocol.py | {
"start": 283,
"end": 4816
} | class ____(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(
port=[random_port(), random_port()], protocol=['grpc', 'http', 'websocket']
)
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
port_monitoring = random_port()
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(
port=ports, protocol=protocols, monitoring=True, port_monitoring=port_monitoring
)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
resp = req.get(f'http://localhost:{port_monitoring}/')
assert resp.status_code == 200
assert (
'jina_successful_requests_total{runtime_name="gateway/rep-0"} 3.0'
in str(resp.content)
)
def test_flow_multiprotocol_with_tracing():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, tracing=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
| MyExecutor |
python | ethereum__web3.py | web3/_utils/encoding.py | {
"start": 4734,
"end": 7487
} | class ____:
"""
Friendly JSON serializer & deserializer
When encoding or decoding fails, this class collects
information on which fields failed, to show more
helpful information in the raised error messages.
"""
def _json_mapping_errors(self, mapping: dict[Any, Any]) -> Iterable[str]:
for key, val in mapping.items():
try:
self._friendly_json_encode(val)
except TypeError as exc:
yield f"{key!r}: because ({exc})"
def _json_list_errors(self, iterable: Iterable[Any]) -> Iterable[str]:
for index, element in enumerate(iterable):
try:
self._friendly_json_encode(element)
except TypeError as exc:
yield f"{index}: because ({exc})"
def _friendly_json_encode(
self, obj: dict[Any, Any], cls: type[json.JSONEncoder] | None = None
) -> str:
try:
encoded = json.dumps(obj, cls=cls)
return encoded
except TypeError as full_exception:
if hasattr(obj, "items"):
item_errors = "; ".join(self._json_mapping_errors(obj))
raise Web3TypeError(
f"dict had unencodable value at keys: {{{item_errors}}}"
)
elif is_list_like(obj):
element_errors = "; ".join(self._json_list_errors(obj))
raise Web3TypeError(
f"list had unencodable value at index: [{element_errors}]"
)
else:
raise full_exception
def json_decode(self, json_str: str) -> dict[Any, Any]:
try:
decoded = json.loads(json_str)
return decoded
except json.decoder.JSONDecodeError as exc:
err_msg = f"Could not decode {json_str!r} because of {exc}."
# Calling code may rely on catching JSONDecodeError to recognize bad json
# so we have to re-raise the same type.
raise json.decoder.JSONDecodeError(err_msg, exc.doc, exc.pos)
def json_encode(
self, obj: dict[Any, Any], cls: type[json.JSONEncoder] | None = None
) -> str:
try:
return self._friendly_json_encode(obj, cls=cls)
except TypeError as exc:
raise Web3TypeError(f"Could not encode to JSON: {exc}")
def to_4byte_hex(hex_or_str_or_bytes: HexStr | str | bytes | int) -> HexStr:
size_of_4bytes = 4 * 8
byte_str = hexstr_if_str(to_bytes, hex_or_str_or_bytes)
if len(byte_str) > 4:
raise Web3ValueError(
f"expected value of size 4 bytes. Got: {len(byte_str)} bytes"
)
hex_str = encode_hex(byte_str)
return pad_hex(hex_str, size_of_4bytes)
| FriendlyJsonSerde |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 17693,
"end": 18324
} | class ____(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
| SocketTestBase |
python | walkccc__LeetCode | solutions/2996. Smallest Missing Integer Greater Than Sequential Prefix Sum/2996.py | {
"start": 0,
"end": 275
} | class ____:
def missingInteger(self, nums: list[int]) -> int:
numsSet = set(nums)
ans = nums[0]
for i in range(1, len(nums)):
if nums[i] != nums[i - 1] + 1:
break
ans += nums[i]
while ans in numsSet:
ans += 1
return ans
| Solution |
python | google__pytype | pytype/metrics.py | {
"start": 5102,
"end": 5624
} | class ____(Metric):
"""A monotonically increasing metric."""
def __init__(self, name):
super().__init__(name)
self._total = 0
def inc(self, count=1):
"""Increment the metric by the specified amount."""
if count < 0:
raise ValueError("Counter must be monotonically increasing.")
if not _enabled:
return
self._total += count
def _summary(self):
return str(self._total)
def _merge(self, other):
# pylint: disable=protected-access
self._total += other._total
| Counter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec3.py | {
"start": 1576,
"end": 1657
} | class ____:
def __call__(self, x: int | str, y: int = 3) -> None: ...
| Callback1 |
python | kamyu104__LeetCode-Solutions | Python/maximize-sum-of-weights-after-edge-removals.py | {
"start": 2590,
"end": 4509
} | class ____(object):
def maximizeSumOfWeights(self, edges, k):
"""
:type edges: List[List[int]]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target):
i = left
while i <= right:
if compare(nums[i], target):
nums[i], nums[left] = nums[left], nums[i]
left += 1
i += 1
elif compare(target, nums[i]):
nums[i], nums[right] = nums[right], nums[i]
right -= 1
else:
i += 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx])
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def dfs(u, p):
result = 0
diff = []
for v, w in adj[u]:
if v == p:
continue
cnt = dfs(v, u)
result += cnt[0]
diff.append(max((cnt[1]+w)-cnt[0], 0))
if k-1 < len(diff):
nth_element(diff, k-1, lambda a, b: a > b)
return (result+sum(diff[i] for i in xrange(min(k, len(diff)))), result+sum(diff[i] for i in xrange(min(k-1, len(diff)))))
adj = [[] for _ in xrange(len(edges)+1)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
return dfs(0, -1)[0]
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_image_ops_test.py | {
"start": 1306,
"end": 2454
} | class ____(test.TestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session():
x = _get_weak_tensor(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertIsInstance(y, WeakTensor)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.0
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.0
self._testBrightness(x_np, y_np, delta=10.0 / 255.0)
def testPositiveDeltaFloat64(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.0
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.0
self._testBrightness(x_np, y_np, delta=10.0 / 255.0, tol=1e-3)
| AdjustBrightnessTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/discogs/views.py | {
"start": 357,
"end": 1050
} | class ____(OAuthAdapter):
provider_id = "discogs"
request_token_url = "https://api.discogs.com/oauth/request_token" # nosec
access_token_url = "https://api.discogs.com/oauth/access_token" # nosec
authorize_url = "https://discogs.com/oauth/authorize"
def complete_login(self, request, app, token, response):
client = DiscogsAPI(request, app.client_id, app.secret, self.request_token_url)
extra_data = client.get_user_info()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth_login = OAuthLoginView.adapter_view(DiscogsOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(DiscogsOAuthAdapter)
| DiscogsOAuthAdapter |
python | vyperlang__vyper | tests/evm_backends/revm_env.py | {
"start": 235,
"end": 4725
} | class ____(BaseEnv):
invalid_opcode_error = "InvalidFEOpcode"
out_of_gas_error = "OutOfGas"
contract_size_limit_error = "CreateContractSizeLimit"
initcode_size_limit_error = "CreateInitCodeSizeLimit"
def __init__(
self,
gas_limit: int,
account_keys: list[PrivateKey],
tracing: bool,
block_number: int,
evm_version: str,
) -> None:
super().__init__(gas_limit, account_keys)
self._evm = EVM(
gas_limit=gas_limit,
tracing=tracing,
spec_id=evm_version,
env=Env(block=BlockEnv(number=block_number)),
)
@contextmanager
def anchor(self):
snapshot_id = self._evm.snapshot()
block = BlockEnv(number=self._evm.env.block.number, timestamp=self._evm.env.block.timestamp)
try:
yield
finally:
try:
self._evm.revert(snapshot_id)
except OverflowError:
# snapshot_id is reverted by the transaction already.
# revm updates are needed to make the journal more robust.
pass
self._evm.set_block_env(block)
# self._evm.set_tx_env(tx)
def get_balance(self, address: str) -> int:
return self._evm.get_balance(address)
def set_balance(self, address: str, value: int):
self._evm.set_balance(address, value)
@property
def block_number(self) -> int:
return self._evm.env.block.number
@block_number.setter
def block_number(self, value: int):
block = self._evm.env.block
block.number = value
self._evm.set_block_env(block)
@property
def timestamp(self) -> int | None:
return self._evm.env.block.timestamp
@timestamp.setter
def timestamp(self, value: int):
block = self._evm.env.block
block.timestamp = value
self._evm.set_block_env(block)
@property
def last_result(self) -> ExecutionResult:
result = self._evm.result
return ExecutionResult(
gas_refunded=result.gas_refunded,
gas_used=result.gas_used,
is_success=result.is_success,
logs=result.logs,
)
@property
def blob_hashes(self):
return self._evm.env.tx.blob_hashes
@blob_hashes.setter
def blob_hashes(self, value):
tx = self._evm.env.tx
tx.blob_hashes = value
self._evm.set_tx_env(tx)
def message_call(
self,
to: str,
sender: str | None = None,
data: bytes | str = b"",
value: int = 0,
gas: int | None = None,
gas_price: int = 0,
is_modifying: bool = True,
blob_hashes: Optional[list[bytes]] = None, # for blobbasefee >= Cancun
):
if isinstance(data, str):
data = bytes.fromhex(data.removeprefix("0x"))
try:
return self._evm.message_call(
to=to,
caller=sender or self.deployer,
calldata=data,
value=value,
gas=self.gas_limit if gas is None else gas,
gas_price=gas_price,
is_static=not is_modifying,
)
except RuntimeError as e:
self._parse_error(e)
raise EvmError(*e.args) from e
def clear_transient_storage(self) -> None:
self._evm.reset_transient_storage()
def get_code(self, address: str):
return self._evm.basic(address).code.rstrip(b"\0")
def get_excess_blob_gas(self) -> Optional[int]:
return self._evm.env.block.excess_blob_gas
def get_blob_gasprice(self) -> Optional[int]:
return self._evm.env.block.blob_gasprice
def set_excess_blob_gas(self, value):
self._evm.env.block.excess_blob_gas = value
def _deploy(self, code: bytes, value: int, gas: int = None) -> str:
try:
return self._evm.deploy(self.deployer, code, value, gas)
except RuntimeError as e:
self._parse_error(e)
raise EvmError(*e.args) from e
def _parse_error(self, e: RuntimeError):
# TODO: Create a custom error in pyrevm instead parsing strings
if match := re.match(r"Revert \{ gas_used: (\d+), output: 0x([0-9a-f]*) }", e.args[0]):
gas_used, output_str = match.groups()
output_bytes = bytes.fromhex(output_str)
super()._parse_revert(output_bytes, e, int(gas_used))
| RevmEnv |
python | getsentry__sentry | tests/sentry/sentry_apps/external_requests/test_select_requester.py | {
"start": 837,
"end": 13224
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(name="foo")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(slug="boop", organization=self.org)
self.sentry_app = self.create_sentry_app(
name="foo", organization=self.org, webhook_url="https://example.com", scopes=()
)
self.orm_install = self.create_sentry_app_installation(
slug="foo", organization=self.org, user=self.user
)
self.install = app_service.get_many(filter=dict(installation_ids=[self.orm_install.id]))[0]
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_makes_request(self, mock_record: MagicMock) -> None:
options = [
{"label": "An Issue", "value": "123", "default": True},
{"label": "Another Issue", "value": "456"},
]
responses.add(
method=responses.GET,
url=f"https://example.com/get-issues?installationId={self.install.uuid}&projectSlug={self.project.slug}",
json=options,
status=200,
content_type="application/json",
)
result = SelectRequester(
install=self.install, project_slug=self.project.slug, uri="/get-issues"
).run()
assert result == {
"choices": [["123", "An Issue"], ["456", "Another Issue"]],
"defaultValue": "123",
}
request = responses.calls[0].request
assert request.headers["Sentry-App-Signature"] == self.sentry_app.build_signature("")
buffer = SentryAppWebhookRequestsBuffer(self.sentry_app)
requests = buffer.get_requests()
assert len(requests) == 1
assert requests[0]["response_code"] == 200
assert requests[0]["event_type"] == "select_options.requested"
# SLO assertions
assert_success_metric(mock_record)
# EXTERNAL_REQUEST (success) -> EXTERNAL_REQUEST (success)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=2
)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_invalid_response_missing_label(self, mock_record: MagicMock) -> None:
# missing 'label'
url = f"https://example.com/get-issues?installationId={self.install.uuid}&projectSlug={self.project.slug}"
uri = "/get-issues"
invalid_format = {"value": "12345"}
responses.add(
method=responses.GET,
url=url,
json=invalid_format,
status=200,
content_type="application/json",
)
with pytest.raises(SentryAppIntegratorError) as exception_info:
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri=uri,
).run()
assert (
exception_info.value.message
== f"Invalid response format for Select FormField in {self.sentry_app.slug} from uri: {uri}"
)
assert exception_info.value.webhook_context == {
"error_type": f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestHaltReason.MISSING_FIELDS}",
"response": invalid_format,
"sentry_app_slug": self.sentry_app.slug,
"install_uuid": self.install.uuid,
"project_slug": self.project.slug,
"url": url,
}
# SLO assertions
assert_halt_metric(
mock_record,
f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestHaltReason.MISSING_FIELDS}",
)
# EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (success)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
)
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_invalid_response_missing_value(self, mock_record: MagicMock) -> None:
# missing 'label' and 'value'
invalid_format = [
{"project": "ACME", "webUrl": "foo"},
]
responses.add(
method=responses.GET,
url=f"https://example.com/get-issues?installationId={self.install.uuid}&projectSlug={self.project.slug}",
json=invalid_format,
status=200,
content_type="application/json",
)
with pytest.raises(SentryAppIntegratorError) as exception_info:
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri="/get-issues",
).run()
assert (
exception_info.value.message
== "Missing `value` or `label` in option data for Select FormField"
)
assert exception_info.value.webhook_context == {
"error_type": f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestHaltReason.MISSING_FIELDS}",
"response": invalid_format,
}
# SLO assertions
assert_halt_metric(
mock_record,
SentryAppIntegratorError(
message="Missing `value` or `label` in option data for Select FormField"
),
)
# EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (success)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
)
@responses.activate
def test_500_response(self) -> None:
responses.add(
method=responses.GET,
url=f"https://example.com/get-issues?installationId={self.install.uuid}&projectSlug={self.project.slug}",
body="Something failed",
status=500,
)
with pytest.raises(SentryAppIntegratorError):
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri="/get-issues",
).run()
buffer = SentryAppWebhookRequestsBuffer(self.sentry_app)
requests = buffer.get_requests()
assert len(requests) == 1
assert requests[0]["response_code"] == 500
assert requests[0]["event_type"] == "select_options.requested"
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_api_error_message(self, mock_record: MagicMock) -> None:
url = f"https://example.com/get-issues?installationId={self.install.uuid}&projectSlug={self.project.slug}"
responses.add(
method=responses.GET,
url=url,
body="Something failed",
status=500,
)
with pytest.raises(SentryAppIntegratorError) as exception_info:
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri="/get-issues",
).run()
assert (
exception_info.value.message
== f"Something went wrong while getting options for Select FormField from {self.sentry_app.slug}"
)
assert exception_info.value.webhook_context == {
"error_type": f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestHaltReason.BAD_RESPONSE}",
"sentry_app_slug": self.sentry_app.slug,
"install_uuid": self.install.uuid,
"project_slug": self.project.slug,
"url": url,
}
# SLO assertions
assert_many_halt_metrics(
mock_record,
[HTTPError(), HTTPError()],
)
# EXTERNAL_REQUEST (halt) -> EXTERNAL_REQUEST (halt)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=2
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=2
)
@responses.activate
@patch("sentry.sentry_apps.external_requests.select_requester.SelectRequester._build_url")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_url_fail_error(self, mock_record: MagicMock, mock_build_url: MagicMock) -> None:
mock_build_url.side_effect = Exception()
uri = "asdhbaljkdnaklskand"
with pytest.raises(SentryAppSentryError) as exception_info:
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri=uri,
).run()
assert (
exception_info.value.message
== "Something went wrong while preparing to get Select FormField options"
)
assert exception_info.value.webhook_context == {
"error_type": f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestFailureReason.MISSING_URL}",
"sentry_app_slug": self.sentry_app.slug,
"install_uuid": self.install.uuid,
"project_slug": self.project.slug,
"uri": uri,
"dependent_data": None,
"webhook_url": self.sentry_app.webhook_url,
}
# SLO assertions
assert_failure_metric(
mock_record,
SentryAppSentryError(
message="Something went wrong while preparing to get Select FormField options"
),
)
# EXTERNAL_REQUEST (failure)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
@responses.activate
@patch("sentry.sentry_apps.external_requests.select_requester.send_and_save_sentry_app_request")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unexpected_exception(
self, mock_record: MagicMock, mock_send_request: MagicMock
) -> None:
mock_send_request.side_effect = Exception()
uri = "asdhbaljkdnaklskand"
with pytest.raises(SentryAppSentryError) as exception_info:
SelectRequester(
install=self.install,
project_slug=self.project.slug,
uri=uri,
).run()
assert (
exception_info.value.message
== "Something went wrong while preparing to get Select FormField options"
)
assert exception_info.value.webhook_context == {
"error_type": f"{SentryAppEventType.SELECT_OPTIONS_REQUESTED}.{SentryAppExternalRequestFailureReason.UNEXPECTED_ERROR}",
"sentry_app_slug": self.sentry_app.slug,
"install_uuid": self.install.uuid,
"project_slug": self.project.slug,
"url": f"https://example.com/{uri}?installationId={self.install.uuid}&projectSlug={self.project.slug}",
}
# SLO assertions
assert_failure_metric(
mock_record,
SentryAppSentryError(
message="Something went wrong while preparing to get Select FormField options"
),
)
# EXTERNAL_REQUEST (failure)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
| TestSelectRequester |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/generative_model.py | {
"start": 18097,
"end": 19485
} | class ____(GoogleBaseHook):
"""Use the Vertex AI SDK for Python to create and manage your experiment runs."""
@GoogleBaseHook.fallback_to_default_project_id
def delete_experiment_run(
self,
experiment_run_name: str,
experiment_name: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
delete_backing_tensorboard_run: bool = False,
) -> None:
"""
Delete experiment run from the experiment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param delete_backing_tensorboard_run: Whether to delete the backing Vertex AI TensorBoard run
that stores time series metrics for this run.
"""
self.log.info("Next experiment run will be deleted: %s", experiment_run_name)
experiment_run = aiplatform.ExperimentRun(
run_name=experiment_run_name, experiment=experiment_name, project=project_id, location=location
)
experiment_run.delete(delete_backing_tensorboard_run=delete_backing_tensorboard_run)
| ExperimentRunHook |
python | kamyu104__LeetCode-Solutions | Python/put-boxes-into-the-warehouse-i.py | {
"start": 506,
"end": 1068
} | class ____(object):
def maxBoxesInWarehouse(self, boxes, warehouse):
"""
:type boxes: List[int]
:type warehouse: List[int]
:rtype: int
"""
boxes.sort()
for i in xrange(1, len(warehouse)):
warehouse[i] = min(warehouse[i], warehouse[i-1])
result, curr = 0, 0
for h in reversed(warehouse):
if boxes[curr] > h:
continue
result += 1
curr += 1
if curr == len(boxes):
break
return result
| Solution2 |
python | numpy__numpy | tools/swig/test/testFlat.py | {
"start": 3668,
"end": 3925
} | class ____(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
######################################################################
| intTestCase |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_exception_variations.py | {
"start": 1535,
"end": 8776
} | class ____(__TestCase):
def test_try_except_else_finally(self):
hit_except = False
hit_else = False
hit_finally = False
try:
raise Exception('nyaa!')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
self.assertFalse(hit_else)
def test_try_except_else_finally_no_exception(self):
hit_except = False
hit_else = False
hit_finally = False
try:
pass
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
self.assertTrue(hit_else)
def test_try_except_finally(self):
hit_except = False
hit_finally = False
try:
raise Exception('yarr!')
except:
hit_except = True
finally:
hit_finally = True
self.assertTrue(hit_except)
self.assertTrue(hit_finally)
def test_try_except_finally_no_exception(self):
hit_except = False
hit_finally = False
try:
pass
except:
hit_except = True
finally:
hit_finally = True
self.assertFalse(hit_except)
self.assertTrue(hit_finally)
def test_try_except(self):
hit_except = False
try:
raise Exception('ahoy!')
except:
hit_except = True
self.assertTrue(hit_except)
def test_try_except_no_exception(self):
hit_except = False
try:
pass
except:
hit_except = True
self.assertFalse(hit_except)
def test_try_except_else(self):
hit_except = False
hit_else = False
try:
raise Exception('foo!')
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_else)
self.assertTrue(hit_except)
def test_try_except_else_no_exception(self):
hit_except = False
hit_else = False
try:
pass
except:
hit_except = True
else:
hit_else = True
self.assertFalse(hit_except)
self.assertTrue(hit_else)
def test_try_finally_no_exception(self):
hit_finally = False
try:
pass
finally:
hit_finally = True
self.assertTrue(hit_finally)
def test_nested(self):
hit_finally = False
hit_inner_except = False
hit_inner_finally = False
try:
try:
raise Exception('inner exception')
except:
hit_inner_except = True
finally:
hit_inner_finally = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertTrue(hit_inner_finally)
self.assertTrue(hit_finally)
def test_nested_else(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
raise Exception('outer exception')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_nested_exception_in_except(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
raise Exception('inner exception')
except:
hit_inner_except = True
raise Exception('outer exception')
else:
hit_inner_else = True
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertFalse(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_nested_exception_in_else(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
raise Exception('outer exception')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_nested_exception_in_finally_no_exception(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
hit_inner_finally = False
try:
try:
pass
except:
hit_inner_except = True
else:
hit_inner_else = True
finally:
hit_inner_finally = True
raise Exception('outer exception')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertFalse(hit_inner_except)
self.assertTrue(hit_inner_else)
self.assertTrue(hit_inner_finally)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
def test_nested_exception_in_finally_with_exception(self):
hit_else = False
hit_finally = False
hit_except = False
hit_inner_except = False
hit_inner_else = False
hit_inner_finally = False
try:
try:
raise Exception('inner exception')
except:
hit_inner_except = True
else:
hit_inner_else = True
finally:
hit_inner_finally = True
raise Exception('outer exception')
except:
hit_except = True
else:
hit_else = True
finally:
hit_finally = True
self.assertTrue(hit_inner_except)
self.assertFalse(hit_inner_else)
self.assertTrue(hit_inner_finally)
self.assertFalse(hit_else)
self.assertTrue(hit_finally)
self.assertTrue(hit_except)
if __name__ == '__main__':
run_tests()
| ExceptTestCases |
python | apache__airflow | providers/dbt/cloud/tests/unit/dbt/cloud/utils/test_openlineage.py | {
"start": 5270,
"end": 8063
} | class ____:
@patch("importlib.metadata.version", return_value="2.3.0")
@patch("airflow.providers.openlineage.plugins.listener.get_openlineage_listener")
@patch("airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter.build_task_instance_run_id")
@patch("airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter.build_dag_run_id")
@patch.object(DbtCloudHook, "get_job_run")
@patch.object(DbtCloudHook, "get_project")
@patch.object(DbtCloudHook, "get_job_run_artifact")
def test_generate_events(
self,
mock_get_job_run_artifact,
mock_get_project,
mock_get_job_run,
mock_build_dag_run_id,
mock_build_task_instance_run_id,
mock_get_openlineage_listener,
mock_version,
):
mock_operator = MagicMock(spec=DbtCloudRunJobOperator)
mock_operator.account_id = None
mock_hook = DbtCloudHook()
mock_operator.hook = mock_hook
mock_get_job_run.return_value.json.return_value = read_file_json(
Path(__file__).parents[1] / "test_data" / "job_run.json"
)
mock_get_project.return_value.json.return_value = {
"data": {
"connection": {
"type": "snowflake",
"name": "conn_name",
"details": {
"account": "gp21411.us-east-1",
"database": "SANDBOX",
"warehouse": "HUMANS",
"allow_sso": False,
"client_session_keep_alive": False,
"role": None,
},
}
}
}
mock_get_job_run_artifact.side_effect = get_dbt_artifact
mock_operator.task_id = TASK_ID
mock_operator.run_id = 188471607
mock_task_instance = MagicMock()
mock_task_instance.task_id = TASK_ID
mock_task_instance.dag_id = DAG_ID
mock_task_instance.dag_run.clear_number = 0
mock_adapter = MagicMock()
mock_adapter.emit.side_effect = emit_event
mock_get_openlineage_listener.return_value.adapter = mock_adapter
mock_build_task_instance_run_id.return_value = TASK_UUID
mock_build_dag_run_id.return_value = DAG_UUID
generate_openlineage_events_from_dbt_cloud_run(mock_operator, task_instance=mock_task_instance)
assert mock_adapter.emit.call_count == 4
def test_do_not_raise_error_if_runid_not_set_on_operator(self):
operator = DbtCloudRunJobOperator(task_id="dbt-job-runid-taskid", job_id=1500)
assert operator.run_id is None
assert operator.get_openlineage_facets_on_complete(MagicMock()) == OperatorLineage()
| TestGenerateOpenLineageEventsFromDbtCloudRun |
python | scipy__scipy | scipy/interpolate/_fitpack2.py | {
"start": 56420,
"end": 61350
} | class ____(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
Evaluated points outside the data range will be extrapolated.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain, which means the start and end spline knots of
each dimension are set by these values. By default,
``bbox=[min(x), max(x), min(y), max(y)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
function. Default is ``s=0``, which is for interpolation.
maxit : int, optional
The maximal number of iterations maxit allowed for finding a
smoothing spline with fp=s. Default is ``maxit=20``.
See Also
--------
BivariateSpline :
a base class for bivariate splines.
UnivariateSpline :
a smooth univariate spline to fit a given set of data points.
SmoothBivariateSpline :
a smoothing bivariate spline through the given points
LSQBivariateSpline :
a bivariate spline using weighted least-squares fitting
RectSphereBivariateSpline :
a bivariate spline over a rectangular mesh on a sphere
SmoothSphereBivariateSpline :
a smoothing bivariate spline in spherical coordinates
LSQSphereBivariateSpline :
a bivariate spline in spherical coordinates using weighted
least-squares fitting
bisplrep :
a function to find a bivariate B-spline representation of a surface
bisplev :
a function to evaluate a bivariate B-spline and its derivatives
Notes
-----
If the input data is such that input dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolating.
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0, maxit=20):
x, y, bbox = ravel(x), ravel(y), ravel(bbox)
z = np.asarray(z)
if not np.all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
if not np.all(diff(y) > 0.0):
raise ValueError('y must be strictly increasing')
if not x.size == z.shape[0]:
raise ValueError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise ValueError('y dimension of z must have same number of '
'elements as y')
if not bbox.shape == (4,):
raise ValueError('bbox shape should be (4,)')
if s is not None and not s >= 0.0:
raise ValueError("s should be s >= 0.0")
z = ravel(z)
xb, xe, yb, ye = bbox
with FITPACK_LOCK:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s, maxit)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, f'ier={ier}')
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
@xp_capabilities(out_of_scope=True)
| RectBivariateSpline |
python | google__jax | tests/pjit_test.py | {
"start": 3399,
"end": 44770
} | class ____(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
    """pjit identity on a trivial 1-device mesh yields a usable ArrayImpl."""
    @partial(pjit, in_shardings=None, out_shardings=P('x'))
    def f(x):
        return x

    shape = (2, 2)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    actual = f(x)
    expected = x
    self.assertAllClose(actual, expected, check_dtypes=False)
    self.assertIsInstance(actual, array.ArrayImpl)
    # Mesh has a single device, so there is exactly one addressable shard
    # holding the full array.
    self.assertLen(actual.addressable_shards, 1)
    self.assertAllClose(
        np.asarray(actual.addressable_shards[0].data), expected, check_dtypes=False)
    # Repro for a bug on addressable_shards aval
    _ = repr(actual.addressable_shards)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
    """Elementwise add under pjit with both inputs sharded along 'x'."""
    @partial(pjit,
             in_shardings=(P('x'), P('x')),
             out_shardings=None)
    def add(a, b):
        return a + b

    dims = (8, 8)
    lhs = np.arange(math.prod(dims), dtype=np.float32).reshape(dims)
    rhs = lhs + 1
    result = add(lhs, rhs)
    want = lhs + rhs
    self.assertAllClose(result, want, check_dtypes=False)
    self.assertIsInstance(result, array.ArrayImpl)
    self.assertLen(result.addressable_shards, 2)
    first_shard = np.asarray(result.addressable_shards[0].data)
    self.assertAllClose(first_shard, want, check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testJitOfPjitDisallowed(self):
    # NOTE(review): the name is historical — jit-of-pjit is no longer
    # disallowed; this test asserts that the composition works and
    # produces the correct value.
    @partial(pjit,
             in_shardings=(P('x'), P('x')),
             out_shardings=None)
    def f(x, y):
        return x + y

    shape = (8, 8)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    out = jax.jit(f)(x, x + 1)
    self.assertArraysEqual(out, x + x + 1)
@jtu.with_mesh([('x', 2)])
def testUnevenShardingConstraint(self):
@partial(pjit,
in_shardings=(P('x'), P('x')),
out_shardings=None)
def f(x, y):
x = x[:3]
y = y[:3]
x = with_sharding_constraint(x, P('x'))
y = with_sharding_constraint(y, P('x'))
out = x + y
return jnp.pad(out, [[0, 1]])
shape = (4,)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual[:3], expected[:3], check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 2)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data)[:3],
expected[:3], check_dtypes=False)
def testBasic1DWithMeshContextManager(self):
@partial(pjit,
in_shardings=(P('x'), P('x')),
out_shardings=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
with jtu.create_mesh((2,), ('x')) as mesh:
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertEqual(mesh, jtu.create_mesh((2,), ('x')))
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 2)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_shardings=(P(None, 'x', 'y'), P('y')),
out_shardings=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(math.prod(x_shape)).reshape(x_shape)
y = jnp.arange(math.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data), split0,
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[1].data), split0,
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[2].data), split1,
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[3].data), split1,
check_dtypes=False)
def testDifferentNestedMesh(self):
    """Nested mesh contexts install/restore the ambient physical mesh."""
    current = lambda: mesh_lib.thread_resources.env.physical_mesh
    with jtu.create_mesh((2, 1), ("x", "y")) as outer:
        with jtu.create_mesh((2, 2), ("a", "b")) as inner:
            self.assertEqual(current(), inner)
        self.assertEqual(current(), outer)
    # After the outermost exit, the empty environment is back in effect.
    self.assertEqual(current(), mesh_lib.EMPTY_ENV.physical_mesh)
def testSameNestedMesh(self):
    """Re-entering the same mesh keeps it active until the outermost exit."""
    same_mesh = jtu.create_mesh((2, 1), ("a", "b"))
    tr = mesh_lib.thread_resources
    with same_mesh as outer:
        with same_mesh as inner:
            self.assertEqual(tr.env.physical_mesh, inner)
        self.assertEqual(tr.env.physical_mesh, outer)
    self.assertEqual(tr.env.physical_mesh,
                     mesh_lib.EMPTY_ENV.physical_mesh)
def testMeshDecorator(self):
    """A Mesh object can be applied directly as a decorator, installing
    itself as the ambient mesh while the decorated function runs."""
    x = jnp.arange(8)
    mesh_shape = (2, 2)
    size = math.prod(mesh_shape)
    if len(jax.devices()) < size:
        raise unittest.SkipTest(f"Test requires {size} global devices.")
    mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)

    @jax.sharding.Mesh(mesh_devices, ('x', 'y'))
    def dec():
        return pjit(lambda x: x, in_shardings=P('x'), out_shardings=None)(x)
    out = dec()
    self.assertArraysEqual(out, x)
def testMeshHashRace(self):
    """Hashing a mesh concurrently must be safe and must cache the hash."""
    mesh = jtu.create_mesh((2, 1), ('a', 'testMeshHashRace'))
    self.assertFalse(hasattr(mesh, '_hash'))
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(lambda: hash(mesh)) for _ in range(5)]
        for fut in concurrent.futures.as_completed(futures):
            fut.result()  # re-raise any exception from a worker thread
    self.assertTrue(hasattr(mesh, '_hash'))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=jax.sharding.PartitionSpec(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 4)
splits = np.split(expected, 4)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data), splits[0],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[1].data), splits[1],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[2].data), splits[2],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[3].data), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
@jtu.run_on_devices('cpu', 'gpu', 'tpu')
def testBufferDonation(self):
    """donate_argnums=0 invalidates only the donated input buffer."""
    @partial(pjit, in_shardings=P('x'), out_shardings=P('x'), donate_argnums=0)
    def f(x, y):
        return x + y

    # Pre-shard both inputs so donation operates on device arrays.
    shard = pjit(lambda x: x, in_shardings=P('x'), out_shardings=P('x'))
    x = shard(jnp.ones((2, 5)) * 4)
    y = shard(jnp.ones((2, 5)) * 2)
    expected = x + y
    self.assertAllClose(f(x, y), expected)
    # Only x (argnum 0) was donated; y must still be alive.
    self.assertNotDeleted(y)
    self.assertDeleted(x)
@jtu.run_on_devices('cpu', 'gpu', 'tpu')
def testBufferDonationWithNames(self):
mesh = jtu.create_mesh((2,), ('x'))
s = NamedSharding(mesh, P('x'))
@partial(pjit, out_shardings=s, donate_argnames='inp2')
def f(inp1, inp2):
return inp1 + inp2
x = jax.device_put(np.ones((2, 5)) * 4, s)
y = jax.device_put(np.ones((2, 5)) * 2, s)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(x)
self.assertDeleted(y)
@jtu.run_on_devices('cpu', 'gpu', 'tpu')
def testBufferDonationWithKwargs(self):
mesh = jtu.create_mesh((2,), ('x'))
s = NamedSharding(mesh, P('x'))
@partial(pjit, out_shardings=s, donate_argnames=('inp2', 'inp3'))
def f(inp1, inp2, inp3):
return inp1 + inp2 + inp3, inp3
x = jax.device_put(np.ones((2, 5)) * 4, s)
y = jax.device_put(np.ones((2, 5)) * 2, s)
z = jax.device_put(np.ones((2, 5)), s)
expected = x + y + z
self.assertAllClose(f(x, inp2=y, inp3=z)[0], expected)
self.assertNotDeleted(x)
self.assertDeleted(y)
self.assertDeleted(z)
@jtu.run_on_devices('cpu', 'gpu', 'tpu')
def testBufferDonationWithPyTreeKwargs(self):
mesh = jtu.create_mesh((2,), ('x'))
s = NamedSharding(mesh, P('x'))
@partial(pjit, out_shardings=s, donate_argnames='inp2')
def f(inp1, inp2, inp3):
return jax.tree.map(lambda x, y, z: x + y + z, inp1, inp2, inp3)
x = np.ones((2, 5)) * 4
x_tree = jax.device_put({"a": {"b": x}, "c": x}, s)
y = np.ones((2, 5)) * 2
y_tree = jax.device_put({"a": {"b": y}, "c": y}, s)
z = np.ones((2, 5))
z_tree = jax.device_put({"a": {"b": z}, "c": z}, s)
expected = x + y + z
out = f(x_tree, inp2=y_tree, inp3=z_tree)
jax.tree.map(lambda o: self.assertAllClose(o, expected), out)
jax.tree.map(self.assertNotDeleted, x_tree)
jax.tree.map(self.assertDeleted, y_tree)
jax.tree.map(self.assertNotDeleted, z_tree)
@jtu.run_on_devices('tpu', 'cpu', 'gpu')
def testBufferDonationWithOutputShardingInference(self):
mesh = jtu.create_mesh((2,), 'x')
s = NamedSharding(mesh, P('x'))
rs = NamedSharding(mesh, P())
@partial(pjit, donate_argnames=('inp2', 'inp3'))
def f(inp1, inp2, inp3):
return (
jax.lax.with_sharding_constraint(inp1, rs),
inp1,
jax.lax.with_sharding_constraint(inp2, rs),
inp2,
jax.lax.with_sharding_constraint(inp3, rs),
inp3,
)
x = np.ones((2, 5)) * 4
x_tree = jax.device_put({'a': {'b': x}, 'c': x}, s)
y = np.ones((2, 7)) * 2
y_tree = jax.device_put({'a': {'b': y}, 'c': y}, s)
z = np.ones((2, 11))
z_tree = jax.device_put({'a': {'b': z}, 'c': z}, s)
out = f(x_tree, y_tree, z_tree)
jax.tree.map(self.assertNotDeleted, x_tree)
jax.tree.map(self.assertDeleted, y_tree)
jax.tree.map(self.assertDeleted, z_tree)
@jtu.run_on_devices('tpu')
def testBufferDonationWithOutputShardingInferenceAndTokens(self):
mesh = jtu.create_mesh((2,), 'x')
s = NamedSharding(mesh, P('x'))
def _callback(x):
self.assertIsInstance(x, jax.Array)
@partial(pjit, donate_argnames=('x'))
def f(x):
# Just to get tokens.
jax.experimental.io_callback(_callback, None, x, ordered=True)
jax.experimental.io_callback(_callback, None, x, ordered=True)
return x * x
x = np.ones((2, 5)) * 4
x = jax.device_put(x, s)
f(x)
jax.effects_barrier()
self.assertDeleted(x)
@jtu.run_on_devices('tpu', 'cpu', 'gpu')
def testBufferDonationNotDonated(self):
mesh = jtu.create_mesh((2,), 'x')
s = NamedSharding(mesh, P('x'))
@partial(pjit, donate_argnames=('x'))
def f(x):
return x @ x.T
x = jax.device_put(np.arange(16).reshape(8, 2), s)
f(x)
self.assertNotDeleted(x)
@jtu.run_on_devices('tpu', 'cpu', 'gpu')
def testBufferDonationDifferentIOShapes(self):
mesh = jtu.create_mesh((2,), 'x')
s1 = NamedSharding(mesh, P('x'))
s2 = NamedSharding(mesh, P(None, 'x', None))
x = jax.device_put(np.arange(16), s1)
y = jax.device_put(np.arange(16).reshape(16, 1), s1)
z = jax.device_put(np.arange(16).reshape(2, 2, 4), s1)
@partial(
jax.jit,
out_shardings=(s1, s1, s2),
donate_argnames=('x', 'y', 'z'),
)
def f(x, y, z):
return x, jnp.reshape(y, (16,)), z
f(x, y, z)
self.assertDeleted(x)
self.assertDeleted(y)
self.assertDeleted(z)
@jtu.run_on_devices('tpu', 'cpu', 'gpu')
def testBufferDonationMixedConstrainedness(self):
mesh = jtu.create_mesh((2,), 'x')
s = NamedSharding(mesh, P())
s2 = NamedSharding(mesh, P(P.UNCONSTRAINED, P.UNCONSTRAINED))
@partial(pjit, donate_argnames=('x', 'y'), out_shardings=(s2, s))
def f(x, y):
return x * 2, y * 2
x1 = jax.device_put(np.arange(16).reshape(8, 2), s)
x2 = jax.device_put(np.arange(16).reshape(8, 2), s)
txt = f.lower(x1, x2).as_text()
self.assertIn("jax.buffer_donor = true", txt)
self.assertIn("tf.aliasing_output = 1 : i32", txt)
f(x1, x2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintStablehlo(self):
@partial(pjit, in_shardings=None, out_shardings=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(math.prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 2)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data), expected,
check_dtypes=False)
hlo = f.lower(np.ones(shape)).compiler_ir()
if config.use_shardy_partitioner.value:
# Annotation from with_sharding_constraint
self.assertIn('<@mesh, [{"x"}, {"y"}]>', str(hlo))
# Annotation from pjit
self.assertIn('sharding = #sdy.sharding<@mesh, [{}, {}]>}', str(hlo))
else:
# Annotation from with_sharding_constraint
self.assertIn('sharding = "{devices=[2,1]<=[2]}"', str(hlo))
# Annotation from pjit
self.assertIn('sharding = "{replicated}"', str(hlo))
def testShardingConstraintWithArray(self):
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
s = NamedSharding(mesh, P(None))
@partial(pjit, in_shardings=s, out_shardings=s)
def f(x):
y = x + 1
y = with_sharding_constraint(y, NamedSharding(mesh, P('x', 'y')))
return y * 2
shape = (8, 8)
x = np.arange(math.prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 2)
self.assertAllClose(actual, expected, check_dtypes=False)
hlo = f.lower(np.ones(shape)).compiler_ir(dialect="hlo")
# Annotation from with_sharding_constraint
self.assertIn('sharding={devices=[2,1]<=[2]}', hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testShardingConstraintWithArrayOpSharding(self):
shape = (8, 8)
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
s = NamedSharding(mesh, P(None))
ops = pxla.to_gspmd_sharding(
NamedSharding(mesh, P('x', 'y')), len(shape))
@partial(pjit, in_shardings=s, out_shardings=s)
def f(x):
y = x + 1
y = with_sharding_constraint(y, ops)
return y * 2
x = np.arange(math.prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, array.ArrayImpl)
self.assertLen(actual.addressable_shards, 2)
self.assertAllClose(actual, expected, check_dtypes=False)
hlo = f.lower(np.ones(shape)).compiler_ir(dialect="hlo")
# Annotation from with_sharding_constraint
self.assertIn('sharding={devices=[2,1]<=[2]}', hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testShardingConstraintPyTreeWithArray(self):
mesh = jtu.create_mesh((2, 1), ('x', 'y'))
@jax.jit
def f(x):
return with_sharding_constraint(x, NamedSharding(mesh, P('x', 'y')))
shape = (8, 8)
v = np.arange(math.prod(shape)).reshape(shape)
x = [v, v * 2]
out = f(x)
self.assertArraysEqual(out[0], v)
self.assertArraysEqual(out[1], v * 2)
self.assertLen(out[0].addressable_shards, 2)
self.assertLen(out[1].addressable_shards, 2)
hlo = f.lower(x).compiler_ir(dialect="hlo")
# Annotations from with_sharding_constraint
self.assertIn('sharding={devices=[2,1]<=[2]}', hlo.as_hlo_text())
self.assertIn('sharding={devices=[2,1]<=[2]}', hlo.as_hlo_text())
def testShardingConstraintPyTreeWithUnconstrainedDimsWithJit(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
@jax.jit
def f(x):
x = with_sharding_constraint(
x, [NamedSharding(mesh, P(P.UNCONSTRAINED, 'y', None)),
NamedSharding(mesh, P('x', P.UNCONSTRAINED, None))])
x = x.copy()
x[0]['a'] *= 2
return x
shape = (2, 8, 8)
v = np.arange(math.prod(shape)).reshape(shape)
x = [{'a': v, 'b': v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]['a'] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]['a'].addressable_shards, 4)
mlir_str = str(f.lower(x).compiler_ir())
if config.use_shardy_partitioner.value:
self.assertIn('<@mesh, [{?}, {"y"}, {}]>', mlir_str)
self.assertIn('<@mesh, [{"x"}, {?}, {}]>', mlir_str)
else:
self.assertIn("unspecified_dims=[0]", mlir_str)
self.assertIn("unspecified_dims=[1]", mlir_str)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testShardingConstraintPyTreeVmapWithUnconstrainedDims(self):
@partial(pjit, in_shardings=None, out_shardings=None)
def f(x):
x = jax.vmap(lambda x: with_sharding_constraint(
x, [P(P.UNCONSTRAINED, 'y'),
P('x', P.UNCONSTRAINED)]))(x)
x = x.copy()
x[0]['a'] *= 2
return x
shape = (2, 8, 8)
v = np.arange(math.prod(shape)).reshape(shape)
x = [{'a': v, 'b': v * 2}, v * 3]
mlir_str = str(f.lower(x).compiler_ir())
if config.use_shardy_partitioner.value:
self.assertIn('<@mesh, [{?}, {?}, {"y"}]>', mlir_str)
self.assertIn('<@mesh, [{?}, {"x"}, {?}]>', mlir_str)
else:
self.assertIn("unspecified_dims=[0,1]", mlir_str)
self.assertIn("unspecified_dims=[0,2]", mlir_str)
def testCaching(self):
    """Tracing happens once per (fun, shardings) even across mesh re-creation."""
    # `should_be_tracing` is read from the enclosing scope at trace time, so
    # the assert below fires only if pjit actually re-traces `f`.
    def f(x):
        assert should_be_tracing
        return jnp.sin(x) * 2

    x = np.arange(16).reshape(4, 4)
    devices = np.array(list(jax.local_devices())[:4])
    if devices.size < 4:
        raise unittest.SkipTest("Test requires 4 devices")
    devices = devices.reshape((2, 2))
    with jax.sharding.Mesh(devices, ('x', 'y')):
        should_be_tracing = True
        pjit(f, in_shardings=P(('x', 'y')), out_shardings=None)(x)
        should_be_tracing = False
        pjit(f, in_shardings=P(('x', 'y')), out_shardings=None)(x)
    # Re-create the mesh to make sure that has no influence on caching
    with jax.sharding.Mesh(devices, ('x', 'y')):
        should_be_tracing = False
        pjit(f, in_shardings=P(('x', 'y')), out_shardings=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4.)
f = pjit(
lambda x: x.sum() + h.sum(),
in_shardings=P('x', 'y'),
out_shardings=None,
)
g = pjit(
lambda x: f(jnp.sin(x)), in_shardings=P('x', None), out_shardings=None
)
x = jnp.arange(16.).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertIsInstance(y, array.ArrayImpl)
@check_1d_2d_mesh(set_mesh=True)
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4.)
f = pjit(
lambda x: x.sum(1) * h.sum(),
in_shardings=P('x', 'y'),
out_shardings=P(('x', 'y')),
)
g = pjit(
lambda x: f(jnp.sin(x * 4 + 2)),
in_shardings=P('x', None),
out_shardings=P(('x', 'y')),
)
jtu.check_grads(g, (jnp.arange(16.).reshape((4, 4)) / 100,), order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testAutodiffCache(self):
f = pjit(lambda x: jnp.sin(x).sum(), in_shardings=P('x'), out_shardings=None)
x = jnp.arange(16, dtype=jnp.float32)
jax.grad(f)(x) # Warm up the cache.
with jtu.count_pjit_cpp_cache_miss() as count:
jax.grad(f)(x)
self.assertEqual(count(), 0) # no cache miss i.e. cache hit
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4.), jnp.arange(5.)
f = pjit(
lambda x, y: x.sum() + jnp.sin(y),
in_shardings=(P('x'), P('y')),
out_shardings=P('y'),
)
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
    """pjit accepts a plain Python scalar as an argument."""
    plus_two = pjit(lambda x: x + 2, in_shardings=None, out_shardings=None)
    self.assertEqual(plus_two(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(
lambda x: {'b': x['a'] + 2},
in_shardings=({'a': P('x')},),
out_shardings={'b': P('x')},
)({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x), in_shardings=None, out_shardings=None)
x = jnp.arange(8, dtype=jnp.float32)
out = f(x)
self.assertAllClose(out, jnp.cos(x))
self.assertLen(out.devices(), 2)
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
    """Every spelling of a fully-replicated PartitionSpec behaves alike."""
    replicated_specs = [P(), P(None), P(()), P((), None), P(None, None, ())]
    data = jnp.arange(8).reshape((2, 2, 2))
    double = lambda x: x * 2
    for spec in replicated_specs:
        out = pjit(double, in_shardings=spec, out_shardings=spec)(data)
        self.assertAllClose(out, data * 2)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_shardings=P('x'), out_shardings=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x[jnp.newaxis] + y)
self.assertAllClose(w, x)
self.assertEqual(
z.sharding._to_xla_hlo_sharding(z.ndim).tile_assignment_dimensions(),
[1, 2])
self.assertEqual(
w.sharding._to_xla_hlo_sharding(w.ndim).tile_assignment_dimensions(), [2])
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(
lambda x: with_sharding_constraint(x, P('x')),
in_shardings=P(),
out_shardings=P('x'),
)
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
op = constraint_eqn.params['sharding']._to_xla_hlo_sharding(x.ndim)
self.assertTrue(op.is_tiled())
self.assertListEqual(op.tile_assignment_dimensions(), [1, 2])
self.assertListEqual(op.tile_assignment_devices(), [0, 1])
self.assertFalse(op_shardings.is_hlo_sharding_replicated(op))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraintWithSpmdAxis(self):
f = pjit(
jax.vmap(
lambda x: with_sharding_constraint(x, P(None)),
spmd_axis_name='x',
),
in_shardings=P('x'),
out_shardings=P('x'),
)
x = jnp.arange(16 * 4).reshape((16, 4))
jaxpr = jax.make_jaxpr(f)(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
op = constraint_eqn.params['sharding']._to_xla_hlo_sharding(x.ndim)
self.assertTrue(op.is_tiled())
self.assertListEqual(op.tile_assignment_dimensions(), [2, 1])
self.assertListEqual(op.tile_assignment_devices(), [0, 1])
self.assertFalse(op_shardings.is_hlo_sharding_replicated(op))
@jtu.with_mesh([('x', 2)])
def testLowerWithDuckTyping(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4, in_shardings=P('x'), out_shardings=P('x')).lower(x)
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailable(self):
    """Lowered and compiled objects must both expose donate_argnums=(0,)."""
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)

    def f(*args):
        x, *_ = args
        return x

    f_low = pjit(f, donate_argnums=(0,),
                 in_shardings=P('x'), out_shardings=P('x')).lower(x)
    f_com = f_low.compile()
    # The original line was a bare chained comparison whose result was
    # discarded, so this test could never fail; assert explicitly instead.
    self.assertEqual(f_low.donate_argnums, (0,))
    self.assertEqual(f_com.donate_argnums, (0,))
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailableWithNames(self):
    """donate_argnames must surface as donate_argnums on lower()/compile()."""
    x = jax.ShapeDtypeStruct((2, 2), jnp.float32)

    def f(inp1):
        return inp1

    f_low = pjit(f, in_shardings=P('x'), out_shardings=P('x'),
                 donate_argnames=('inp1',)).lower(x)
    f_com = f_low.compile()
    # The original line was a bare chained comparison whose result was
    # discarded, so this test could never fail; assert explicitly instead.
    self.assertEqual(f_low.donate_argnums, (0,))
    self.assertEqual(f_com.donate_argnums, (0,))
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.enable_custom_prng.value:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = prng.random_seed(87, impl=prng.rbg_prng_impl)
# Make sure this doesn't crash
pjit(lambda x: x, in_shardings=None, out_shardings=None)(key)
def test_lower_with_wrapper_error(self):
    """functools.wraps must not make a plain wrapper look lowerable."""
    @jax.jit
    def identity(x):
        return x

    self.assertAllClose(1., identity(1.))
    self.assertAllClose(1., identity.lower(1.).compile()(1.))

    # The wrapper is a plain function; copying metadata with wraps() must
    # not copy jit-only attributes such as `lower`.
    wrapper = wraps(identity)(lambda x: identity(x + 1))
    with self.assertRaisesRegex(AttributeError, "has no attribute 'lower'"):
        wrapper.lower(1.)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
expected = x @ (x + 1)
lowered = f.lower(x, x + 1)
compiled = lowered.compile()
actual = compiled(x, x + 1)
self.assertEqual(lowered.in_avals, compiled.in_avals)
self.assertEqual(
lowered.in_avals,
((core.ShapedArray(x.shape, x.dtype, weak_type=False),) * 2, {}))
splits = np.split(expected, 4)
self.assertAllClose(np.asarray(actual.addressable_shards[0].data), splits[0],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[1].data), splits[1],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[2].data), splits[2],
check_dtypes=False)
self.assertAllClose(np.asarray(actual.addressable_shards[3].data), splits[3],
check_dtypes=False)
for obj in [lowered, compiled]:
self.assertFalse(obj._no_kwargs)
self.assertEqual(obj.in_tree, jax.tree.flatten(((0, 0), {}))[1])
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileWithKwargs(self):
@pjit
def f(x, y, **kwargs):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1, a=1, b=2).compile()
out = exe(x, x + 1, a=1, b=2)
self.assertArraysEqual(out, x @ (x + 1))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileInTreeMismatch(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
TypeError,
'Function compiled with input pytree does not match the input pytree it'
' was called with',
lambda: exe([x], [x + 1]),
)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileArgTypeMismatch(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
exe = f.lower(x_f32, x_f32).compile()
with self.assertRaisesRegex(
TypeError,
r"Argument types differ .*"
r"The mismatches are:\n"
r"Argument 'x' compiled with.*float32.*and called with.*int32.*\n"
r"Argument 'y' compiled with.*float32.*and called with.*int32.*"):
exe(x_i32, x_i32)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerAsText(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1)
self.assertIsInstance(f.as_text(), str)
self.assertIsInstance(f.as_text(dialect='hlo'), str)
self.assertIsInstance(f.as_text(dialect='stablehlo'), str)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompilerIR(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1)
self.assertIsNotNone(f.compiler_ir())
self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
self.assertIsNotNone(f.compiler_ir(dialect='stablehlo'))
@jtu.with_mesh([('x', 2)])
def testLowerPartitionsAttribute(self):
@partial(pjit,
in_shardings=(P('x'), P('x')),
out_shardings=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
hlo = f.lower(x, x + 1).as_text("stablehlo")
self.assertIn("mhlo.num_replicas = 1", hlo)
self.assertIn("mhlo.num_partitions = 2", hlo)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileCompilerIR(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
self.assertIsNotNone(f.runtime_executable())
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileAsText(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
self.assertIsInstance(f.as_text(), (str, type(None)))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCostAnalysis(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1)
f.cost_analysis() # doesn't raise
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileCostAnalysis(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
f.cost_analysis() # doesn't raise
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileMemoryAnalysis(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
f.memory_analysis() # doesn't raise
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileExecutable(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(math.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
self.assertIsNotNone(f.runtime_executable())
@jtu.with_mesh([('x', 2)])
def test_static_argnums(self):
    """A static (non-array) argument may drive Python-level control flow."""
    @partial(pjit, in_shardings=None, out_shardings=None,
             static_argnums=(1,))
    def add_offset(x, tag):
        offset = 3 if tag == 'hi' else 4
        return x + offset

    self.assertEqual(add_offset(1, 'hi'), 4)
    self.assertEqual(add_offset(1, 'bye'), 5)
@jtu.with_mesh([('x', 4), ('y', 2)])
def testLowerCompileWithAvals(self):
@partial(pjit,
in_shardings=P(('x', 'y'),),
out_shardings=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
aval = core.ShapedArray(shape, dtypes.default_int_dtype())
x = jnp.arange(math.prod(shape)).reshape(shape)
exe = f.lower(aval, x).compile()
self.assertIsInstance(exe, stages.Compiled)
self.assertArraysEqual(exe(x, x), x @ x)
def test_local_sharded_key_array_sda(self):
input_shape = (8, 4)
mesh = jtu.create_mesh((4, 2), ('x', 'y'))
seeds = jnp.arange(
math.prod(input_shape), dtype=np.uint32).reshape(input_shape)
with mesh:
def make_keys(seeds):
make_key = partial(prng.random_seed, impl=prng.threefry_prng_impl)
return make_key(seeds)
f = pjit(make_keys, in_shardings=P(None), out_shardings=P(None))
out = f(seeds)
self.assertTrue(jax.dtypes.issubdtype(out.dtype, jax.dtypes.prng_key))
self.assertEqual(out.shape, input_shape)
jax.random.key_data(out) # doesn't crash
def test_with_sharding_constraint_is_compatible_error(self):
mesh = jtu.create_mesh((1, 1, 2), ('replica', 'data', 'mdl'))
with mesh:
def f(x):
y = with_sharding_constraint(x, P(None, ('mdl',), None, None))
z = y + 2
return z
pjit_f = pjit(f, in_shardings=P(None), out_shardings=P(None))
with self.assertRaisesRegex(
ValueError,
r"One of with_sharding_constraint.*Sharding "
r"NamedSharding.*PartitionSpec\(None, 'mdl', None, None\).*\) is only "
"valid for values of rank at least 4, but was applied to a value of rank 1"):
pjit_f(jnp.array([1, 2, 3]))
def test_pretty_print(self):
f = pjit(lambda x: x**2)
g = pjit(lambda x: f(x) + f(x))
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(g)(x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
let lambda = { lambda ; a:f32[1]. let b:f32[1] = integer_pow[y=2] a in (b,) } in
{ lambda ; c:f32[1]. let
d:f32[1] = jit[
name=<lambda>
jaxpr={ lambda ; c:f32[1]. let
e:f32[1] = jit[name=<lambda> jaxpr=lambda] c
f:f32[1] = jit[name=<lambda> jaxpr=lambda] c
d:f32[1] = add e f
in (d,) }
] c
in (d,) }
""").strip(),
)
def test_pretty_print_pjit_id(self):
f = pjit(lambda x, y: x)
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(lambda y: y + f(y, y))(x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
{ lambda ; a:f32[1]. let
b:f32[1] = jit[
name=<lambda>
jaxpr={ lambda ; a:f32[1] c:f32[1]. let in (a,) }
] a a
d:f32[1] = add a b
in (d,) }
""").strip(),
)
def test_pretty_print_with_constant_pjit_arg(self):
f = pjit(lambda x, y: x * y)
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(lambda x: f(x, np.float32(1.0)))(x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
{ lambda ; a:f32[1]. let
b:f32[1] = jit[
name=<lambda>
jaxpr={ lambda ; a:f32[1] c:f32[]. let b:f32[1] = mul a c in (b,) }
] a 1.0:f32[]
in (b,) }
""").strip(),
)
def test_pretty_print_with_aliased_args(self):
f = pjit(lambda x, y, z: x * y * z)
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(lambda x: f(x, x, x))(x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
{ lambda ; a:f32[1]. let
b:f32[1] = jit[
name=<lambda>
jaxpr={ lambda ; a:f32[1] c:f32[1] d:f32[1]. let
e:f32[1] = mul a c
b:f32[1] = mul e d
in (b,) }
] a a a
in (b,) }
""").strip(),
)
def test_pretty_print_with_literal_outvar(self):
f = pjit(lambda x: (np.int32(2), x))
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(f)(x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
{ lambda ; a:f32[1]. let
b:i32[] c:f32[1] = jit[
name=<lambda>
jaxpr={ lambda ; a:f32[1]. let in (2:i32[], a) }
] a
in (b, c) }
""").strip(),
)
def test_pretty_print_with_closure(self):
@pjit
def g(x, y):
@pjit
def f(x):
return x * y
return f(x) + f(y)
x = jnp.array([4.2], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(g)(x, x)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
let f = { lambda ; a:f32[1] b:f32[1]. let c:f32[1] = mul b a in (c,) } in
{ lambda ; d:f32[1] e:f32[1]. let
g:f32[1] = jit[
name=g
jaxpr={ lambda ; d:f32[1] e:f32[1]. let
h:f32[1] = jit[name=f jaxpr=f] e d
i:f32[1] = jit[name=f jaxpr=f] e e
g:f32[1] = add h i
in (g,) }
] d e
in (g,) }
""").strip(),
)
def test_pretty_print_with_name_clash(self):
@pjit
def g(x, y):
@pjit
def f(x):
return x
return f(x)*f(x) + f(y)*f(y)
x = jnp.array([4.2], dtype=jnp.float32)
y = jnp.array([4.2, 2.4], dtype=jnp.float32)
jaxpr = jax.make_jaxpr(g)(x, y)
self.assertEqual(
jaxpr.pretty_print(use_color=False),
textwrap.dedent("""
let f = { lambda ; a:f32[1]. let in (a,) } in
let f1 = { lambda ; b:f32[2]. let in (b,) } in
{ lambda ; c:f32[1] d:f32[2]. let
e:f32[2] = jit[
name=g
jaxpr={ lambda ; c:f32[1] d:f32[2]. let
g:f32[1] = jit[name=f jaxpr=f] c
h:f32[1] = jit[name=f jaxpr=f] c
i:f32[1] = mul g h
j:f32[2] = jit[name=f jaxpr=f1] d
k:f32[2] = jit[name=f jaxpr=f1] d
l:f32[2] = mul j k
e:f32[2] = add i l
in (e,) }
] c d
in (e,) }
""").strip(),
)
def test_with_sharding_constraint_vmap_spmd_axis_name_error(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
def f(x):
return jax.lax.with_sharding_constraint(x, NamedSharding(mesh, P('x')))
xs = jnp.arange(4 * 16.).reshape(4, 16)
with self.assertRaisesRegex(ValueError, "spmd_axis_name"):
jax.vmap(f, spmd_axis_name='x')(xs)
def test_cache_bug(self):
devices = list(jax.devices())
if len(devices) < 2:
raise unittest.SkipTest("Test requires 2 devices")
def under_jvp(f):
return jax.jvp(f, (), ())
x0 = jnp.zeros(1, device=devices[0])
x1 = jnp.zeros(1, device=devices[1])
# comments describe how caches worked under the old `_most_recent_pjit_call_executable` system
under_jvp(lambda: jnp.sin(x0)) # cpp_pjit miss, pjit_call_impl miss
jnp.sin(x1) # cpp_pjit miss, pjit_call_impl miss
ans1 = jnp.sin(x0) # cpp_pjit miss, pjit_call_impl hit. Bad cpp_pjit entry created
ans2 = jnp.sin(x0) # cpp_pjit hit with bad cache entry
assert(ans1.devices() == ans2.devices())
def test_zero_literal_equality(self):
# This test verifies that we don't accidentally conflate positive and
# negative zeros when deduplicating literals in the IR.
f = jax.jit(lambda x: (x / np.float32(-0.0), x / np.float32(0.0)))
a, b = f(np.float32(1.0))
self.assertEqual(a, -np.inf)
self.assertEqual(b, np.inf)
ir = f.lower(np.float32(1.0)).as_text()
self.assertIn("stablehlo.constant dense<0.000000e+00>", ir)
self.assertIn("stablehlo.constant dense<-0.000000e+00>", ir)
def test_device_put_copy_donate(self):
x = np.arange(1000)
y = jax.device_put(x, device=jax.devices()[0], may_alias=False, donate=False)
z = jax.device_put(y, device=jax.devices()[0], may_alias=False, donate=False)
a = jax.jit(lambda y: y * 2, donate_argnums=0)(y)
self.assertDeleted(y)
self.assertNotDeleted(z)
self.assertArraysEqual(a, x * 2)
def test_basic_vjp3(self):
f = jax.jit(lambda x: jnp.sin(jnp.sin(x)))
_, f_vjp = vjp3(f, 1.)
g, = f_vjp(1.0)
self.assertAllClose(g, jnp.cos(jnp.sin(1.)) * jnp.cos(1.), check_dtypes=False)
@jtu.pytest_mark_if_available('multiaccelerator')
| PJitTest |
python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {
"start": 80117,
"end": 82783
} | class ____(SocketDummyServerTestCase):
def test_stream_none_unchunked_response_does_not_hang(self) -> None:
done_event = Event()
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.send(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 12\r\n"
b"Content-type: text/plain\r\n"
b"\r\n"
b"hello, world"
)
done_event.wait(5)
sock.close()
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
r = pool.request("GET", "/", timeout=LONG_TIMEOUT, preload_content=False)
# Stream should read to the end.
assert [b"hello, world"] == list(r.stream(None))
done_event.set()
def test_large_compressed_stream(self) -> None:
done_event = Event()
expected_total_length = 296085
def socket_handler(listener: socket.socket) -> None:
compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
data = compress.compress(b"x" * expected_total_length)
data += compress.flush()
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: %d\r\n"
b"Content-Encoding: gzip\r\n"
b"\r\n" % (len(data),) + data
)
done_event.wait(5)
sock.close()
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
r = pool.request("GET", "/", timeout=LONG_TIMEOUT, preload_content=False)
# Chunks must all be equal or less than 10240
# and only the last chunk is allowed to be smaller
# than 10240.
total_length = 0
chunks_smaller_than_10240 = 0
for chunk in r.stream(10240, decode_content=True):
assert 0 < len(chunk) <= 10240
if len(chunk) < 10240:
chunks_smaller_than_10240 += 1
else:
assert chunks_smaller_than_10240 == 0
total_length += len(chunk)
assert chunks_smaller_than_10240 == 1
assert expected_total_length == total_length
done_event.set()
| TestStream |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 421636,
"end": 421902
} | class ____(BatchRequest):
"""
Updates a batch of tasks.
Headers Content type should be 'application/json-lines'.
"""
_service = "tasks"
_action = "update_batch"
_version = "2.20"
_batched_request_cls = UpdateRequest
| UpdateBatchRequest |
python | apache__airflow | airflow-core/src/airflow/jobs/scheduler_job_runner.py | {
"start": 6160,
"end": 7716
} | class ____:
"""
Dataclass to represent concurrency maps.
It contains a map from (dag_id, task_id) to # of task instances, a map from (dag_id, task_id)
to # of task instances in the given state list and a map from (dag_id, run_id, task_id)
to # of task instances in the given state list in each DAG run.
"""
def __init__(self):
self.dag_run_active_tasks_map: Counter[tuple[str, str]] = Counter()
self.task_concurrency_map: Counter[tuple[str, str]] = Counter()
self.task_dagrun_concurrency_map: Counter[tuple[str, str, str]] = Counter()
def load(self, session: Session) -> None:
self.dag_run_active_tasks_map.clear()
self.task_concurrency_map.clear()
self.task_dagrun_concurrency_map.clear()
query = session.execute(
select(TI.dag_id, TI.task_id, TI.run_id, func.count("*"))
.where(TI.state.in_(EXECUTION_STATES))
.group_by(TI.task_id, TI.run_id, TI.dag_id)
)
for dag_id, task_id, run_id, c in query:
self.dag_run_active_tasks_map[dag_id, run_id] += c
self.task_concurrency_map[(dag_id, task_id)] += c
self.task_dagrun_concurrency_map[(dag_id, run_id, task_id)] += c
def _is_parent_process() -> bool:
"""
Whether this is a parent process.
Return True if the current process is the parent process.
False if the current process is a child process started by multiprocessing.
"""
return multiprocessing.current_process().name == "MainProcess"
| ConcurrencyMap |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-pebblo/llama_index/readers/pebblo/base.py | {
"start": 510,
"end": 9182
} | class ____(BaseReader):
"""
Pebblo Safe Loader class is a wrapper around document loaders enabling the data
to be scrutinized.
"""
_discover_sent: bool = False
_loader_sent: bool = False
def __init__(
self,
llama_reader: BaseReader,
name: str,
owner: str = "",
description: str = "",
):
if not name or not isinstance(name, str):
raise NameError("Must specify a valid name.")
self.app_name = name
self.load_id = str(uuid.uuid4())
self.reader = llama_reader
self.owner = owner
self.description = description
self.reader_name = str(type(self.reader)).split(".")[-1].split("'")[0]
self.source_type = get_reader_type(self.reader_name)
self.docs: List[Document] = []
self.source_aggr_size = 0
# generate app
self.app = self._get_app_details()
self._send_discover()
def load_data(self, **kwargs) -> List[Document]:
"""
Load Documents.
Returns:
list: Documents fetched from load method of the wrapped `reader`.
"""
self.docs = self.reader.load_data(**kwargs)
self._send_reader_doc(loading_end=True, **kwargs)
return self.docs
@classmethod
def set_discover_sent(cls) -> None:
cls._discover_sent = True
@classmethod
def set_reader_sent(cls) -> None:
cls._reader_sent = True
def _set_reader_details(self, **kwargs) -> None:
self.source_path = get_reader_full_path(self.reader, self.reader_name, **kwargs)
self.source_owner = PebbloSafeReader.get_file_owner_from_path(self.source_path)
self.source_path_size = self.get_source_size(self.source_path)
self.reader_details = {
"loader": self.reader_name,
"source_path": self.source_path,
"source_type": self.source_type,
**(
{"source_path_size": str(self.source_path_size)}
if self.source_path_size > 0
else {}
),
}
def _send_reader_doc(self, loading_end: bool = False, **kwargs) -> None:
"""
Send documents fetched from reader to pebblo-server. Internal method.
Args:
loading_end (bool, optional): Flag indicating the halt of data
loading by reader. Defaults to False.
"""
headers = {"Accept": "application/json", "Content-Type": "application/json"}
docs = []
self._set_reader_details(**kwargs)
for doc in self.docs:
page_content = str(doc.get_content(metadata_mode=MetadataMode.NONE))
page_content_size = self.calculate_content_size(page_content)
self.source_aggr_size += page_content_size
docs.append(
{
"doc": page_content,
"source_path": self.source_path,
"last_modified": doc.metadata.get("last_modified", None),
"file_owner": self.source_owner,
**(
{"source_path_size": self.source_path_size}
if self.source_path_size is not None
else {}
),
}
)
payload: Dict[str, Any] = {
"name": self.app_name,
"owner": self.owner,
"docs": docs,
"plugin_version": PLUGIN_VERSION,
"load_id": self.load_id,
"loader_details": self.reader_details,
"loading_end": "false",
"source_owner": self.source_owner,
}
if loading_end is True:
payload["loading_end"] = "true"
if "loader_details" in payload:
payload["loader_details"]["source_aggr_size"] = self.source_aggr_size
payload = Doc(**payload).dict(exclude_unset=True)
load_doc_url = f"{CLASSIFIER_URL}/v1/loader/doc"
try:
resp = requests.post(
load_doc_url, headers=headers, json=payload, timeout=20
)
if resp.status_code not in [HTTPStatus.OK, HTTPStatus.BAD_GATEWAY]:
logger.warning(
f"Received unexpected HTTP response code: {resp.status_code}"
)
logger.debug(
f"send_loader_doc: request \
url {resp.request.url}, \
body {str(resp.request.body)[:999]} \
len {len(resp.request.body if resp.request.body else [])} \
response status{resp.status_code} body {resp.json()}"
)
except requests.exceptions.RequestException:
logger.warning("Unable to reach pebblo server.")
except Exception:
logger.warning("An Exception caught in _send_loader_doc.")
if loading_end is True:
PebbloSafeReader.set_reader_sent()
@staticmethod
def calculate_content_size(page_content: str) -> int:
"""
Calculate the content size in bytes:
- Encode the string to bytes using a specific encoding (e.g., UTF-8)
- Get the length of the encoded bytes.
Args:
page_content (str): Data string.
Returns:
int: Size of string in bytes.
"""
# Encode the content to bytes using UTF-8
encoded_content = page_content.encode("utf-8")
return len(encoded_content)
def _send_discover(self) -> None:
"""Send app discovery payload to pebblo-server. Internal method."""
headers = {"Accept": "application/json", "Content-Type": "application/json"}
payload = self.app.dict(exclude_unset=True)
app_discover_url = f"{CLASSIFIER_URL}/v1/app/discover"
try:
resp = requests.post(
app_discover_url, headers=headers, json=payload, timeout=20
)
logger.debug(
f"send_discover: request \
url {resp.request.url}, \
headers {resp.request.headers}, \
body {str(resp.request.body)[:999]} \
len {len(resp.request.body if resp.request.body else [])} \
response status{resp.status_code} body {resp.json()}"
)
if resp.status_code in [HTTPStatus.OK, HTTPStatus.BAD_GATEWAY]:
PebbloSafeReader.set_discover_sent()
else:
logger.warning(
f"Received unexpected HTTP response code: {resp.status_code}"
)
except requests.exceptions.RequestException:
logger.warning("Unable to reach pebblo server.")
except Exception:
logger.warning("An Exception caught in _send_discover.")
def _get_app_details(self) -> App:
"""
Fetch app details. Internal method.
Returns:
App: App details.
"""
framework, runtime = get_runtime()
return App(
name=self.app_name,
owner=self.owner,
description=self.description,
load_id=self.load_id,
runtime=runtime,
framework=framework,
plugin_version=PLUGIN_VERSION,
)
@staticmethod
def get_file_owner_from_path(file_path: str) -> str:
"""
Fetch owner of local file path.
Args:
file_path (str): Local file path.
Returns:
str: Name of owner.
"""
try:
import pwd
file_owner_uid = os.stat(file_path).st_uid
file_owner_name = pwd.getpwuid(file_owner_uid).pw_name
except Exception:
file_owner_name = "unknown"
return file_owner_name
def get_source_size(self, source_path: str) -> int:
"""
Fetch size of source path. Source can be a directory or a file.
Args:
source_path (str): Local path of data source.
Returns:
int: Source size in bytes.
"""
if not source_path:
return 0
size = 0
if os.path.isfile(source_path):
size = os.path.getsize(source_path)
elif os.path.isdir(source_path):
total_size = 0
for dirpath, _, filenames in os.walk(source_path):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
size = total_size
return size
| PebbloSafeReader |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_ignore_error04.py | {
"start": 315,
"end": 973
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("ignore_error04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string("A1", "123")
worksheet.write_string("C3", "123")
worksheet.write_string("E5", "123")
worksheet.ignore_errors({"number_stored_as_text": "A1 C3 E5"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__gym | gym/wrappers/transform_reward.py | {
"start": 115,
"end": 1332
} | class ____(RewardWrapper):
"""Transform the reward via an arbitrary function.
Warning:
If the base environment specifies a reward range which is not invariant under :attr:`f`, the :attr:`reward_range` of the wrapped environment will be incorrect.
Example:
>>> import gym
>>> env = gym.make('CartPole-v1')
>>> env = TransformReward(env, lambda r: 0.01*r)
>>> env.reset()
>>> observation, reward, terminated, truncated, info = env.step(env.action_space.sample())
>>> reward
0.01
"""
def __init__(self, env: gym.Env, f: Callable[[float], float]):
"""Initialize the :class:`TransformReward` wrapper with an environment and reward transform function :param:`f`.
Args:
env: The environment to apply the wrapper
f: A function that transforms the reward
"""
super().__init__(env)
assert callable(f)
self.f = f
def reward(self, reward):
"""Transforms the reward using callable :attr:`f`.
Args:
reward: The reward to transform
Returns:
The transformed reward
"""
return self.f(reward)
| TransformReward |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 35888,
"end": 36837
} | class ____(nn.Module):
def __init__(self, config: Kosmos2_5TextConfig):
super().__init__()
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim)
self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim)
self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.ffn_layernorm(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
| Kosmos2_5TextFFN |
python | scipy__scipy | scipy/sparse/linalg/_eigen/tests/test_svds.py | {
"start": 3501,
"end": 4028
} | class ____(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
# --- Test Input Validation ---
# Tests input validation on parameters `k` and `which`.
# Needs better input validation checks for all other parameters.
| CheckingLinearOperator |
python | dask__distributed | distributed/pytest_resourceleaks.py | {
"start": 5397,
"end": 5935
} | class ____(ResourceChecker, name="threads"):
def measure(self) -> set[threading.Thread]:
return set(threading.enumerate())
def has_leak(
self, before: set[threading.Thread], after: set[threading.Thread]
) -> bool:
return not after <= before
def format(
self, before: set[threading.Thread], after: set[threading.Thread]
) -> str:
leaked = after - before
assert leaked
return f"leaked {len(leaked)} Python thread(s): {sorted(leaked, key=str)}"
| ActiveThreadsChecker |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 26707,
"end": 36506
} | class ____(DefinedFunction):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
This function can be extended to complex `n` and `m` where `n` is not a
negative integer or `m` is a nonpositive integer as
.. math:: \operatorname{H}_{n,m} = \begin{cases} \zeta(m) - \zeta(m, n+1)
& m \ne 1 \\ \psi(n+1) + \gamma & m = 1 \end{cases}
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can evaluate harmonic numbers for all integral and positive
rational arguments:
>>> from sympy import S, expand_func, simplify
>>> harmonic(8)
761/280
>>> harmonic(11)
83711/27720
>>> H = harmonic(1/S(3))
>>> H
harmonic(1/3)
>>> He = expand_func(H)
>>> He
-log(6) - sqrt(3)*pi/6 + 2*Sum(log(sin(_k*pi/3))*cos(2*_k*pi/3), (_k, 1, 1))
+ 3*Sum(1/(3*_k + 1), (_k, 0, 0))
>>> He.doit()
-log(6) - sqrt(3)*pi/6 - log(sqrt(3)/2) + 3
>>> H = harmonic(25/S(7))
>>> He = simplify(expand_func(H).doit())
>>> He
log(sin(2*pi/7)**(2*cos(16*pi/7))/(14*sin(pi/7)**(2*cos(pi/7))*cos(pi/14)**(2*sin(pi/14)))) + pi*tan(pi/14)/2 + 30247/9900
>>> He.n(40)
1.983697455232980674869851942390639915940
>>> harmonic(25/S(7)).n(40)
1.983697455232980674869851942390639915940
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m", integer=True, positive=True)
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 + zeta(3)
>>> simplify(harmonic(n,m).rewrite(polygamma))
Piecewise((polygamma(0, n + 1) + EulerGamma, Eq(m, 1)),
(-(-1)**m*polygamma(m - 1, n + 1)/factorial(m - 1) + zeta(m), True))
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
zeta(3)
For `m > 1`, `H_{n,m}` tends to `\zeta(m)` in the limit of infinite `n`:
>>> m = Symbol("m", positive=True)
>>> limit(harmonic(n, m+1), n, oo)
zeta(m + 1)
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas, genocchi, partition, tribonacci
References
==========
.. [1] https://en.wikipedia.org/wiki/Harmonic_number
.. [2] https://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] https://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
"""
# This prevents redundant recalculations and speeds up harmonic number computations.
harmonic_cache: dict[Integer, Callable[[int], Rational]] = {}
@classmethod
def eval(cls, n, m=None):
from sympy.functions.special.zeta_functions import zeta
if m is S.One:
return cls(n)
if m is None:
m = S.One
if n.is_zero:
return S.Zero
elif m.is_zero:
return n
elif n is S.Infinity:
if m.is_negative:
return S.NaN
elif is_le(m, S.One):
return S.Infinity
elif is_gt(m, S.One):
return zeta(m)
elif m.is_Integer and m.is_nonpositive:
return (bernoulli(1-m, n+1) - bernoulli(1-m)) / (1-m)
elif n.is_Integer:
if n.is_negative and (m.is_integer is False or m.is_nonpositive is False):
return S.ComplexInfinity if m is S.One else S.NaN
if n.is_nonnegative:
if m.is_Integer:
if m not in cls.harmonic_cache:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls.harmonic_cache[m] = f
return cls.harmonic_cache[m](int(n))
return Add(*(k**(-m) for k in range(1, int(n) + 1)))
def _eval_rewrite_as_polygamma(self, n, m=S.One, **kwargs):
from sympy.functions.special.gamma_functions import gamma, polygamma
if m.is_integer and m.is_positive:
return Piecewise((polygamma(0, n+1) + S.EulerGamma, Eq(m, 1)),
(S.NegativeOne**m * (polygamma(m-1, 1) - polygamma(m-1, n+1)) /
gamma(m), True))
def _eval_rewrite_as_digamma(self, n, m=1, **kwargs):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1, **kwargs):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None, **kwargs):
from sympy.concrete.summations import Sum
k = Dummy("k", integer=True)
if m is None:
m = S.One
return Sum(k**(-m), (k, 1, n))
def _eval_rewrite_as_zeta(self, n, m=S.One, **kwargs):
from sympy.functions.special.zeta_functions import zeta
from sympy.functions.special.gamma_functions import digamma
return Piecewise((digamma(n + 1) + S.EulerGamma, Eq(m, 1)),
(zeta(m) - zeta(m, n+1), True))
def _eval_expand_func(self, **hints):
from sympy.concrete.summations import Sum
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in range(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in range(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
if n.is_Rational:
# Expansions for harmonic numbers at general rational arguments (u + p/q)
# Split n as u + p/q with p < q
p, q = n.as_numer_denom()
u = p // q
p = p - u * q
if u.is_nonnegative and p.is_positive and q.is_positive and p < q:
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.trigonometric import sin, cos, cot
k = Dummy("k")
t1 = q * Sum(1 / (q * k + p), (k, 0, u))
t2 = 2 * Sum(cos((2 * pi * p * k) / S(q)) *
log(sin((pi * k) / S(q))),
(k, 1, floor((q - 1) / S(2))))
t3 = (pi / 2) * cot((pi * p) / q) + log(2 * q)
return t1 + t2 - t3
return self
def _eval_rewrite_as_tractable(self, n, m=1, limitvar=None, **kwargs):
from sympy.functions.special.zeta_functions import zeta
from sympy.functions.special.gamma_functions import polygamma
pg = self.rewrite(polygamma)
if not isinstance(pg, harmonic):
return pg.rewrite("tractable", deep=True)
arg = m - S.One
if arg.is_nonzero:
return (zeta(m) - zeta(m, n+1)).rewrite("tractable", deep=True)
def _eval_evalf(self, prec):
if not all(x.is_number for x in self.args):
return
n = self.args[0]._to_mpmath(prec)
m = (self.args[1] if len(self.args) > 1 else S.One)._to_mpmath(prec)
if mp.isint(n) and n < 0:
return S.NaN
with workprec(prec):
if m == 1:
res = mp.harmonic(n)
else:
res = mp.zeta(m) - mp.zeta(m, n+1)
return Expr._from_mpmath(res, prec)
def fdiff(self, argindex=1):
from sympy.functions.special.zeta_functions import zeta
if len(self.args) == 2:
n, m = self.args
else:
n, m = self.args + (1,)
if argindex == 1:
return m * zeta(m+1, n+1)
else:
raise ArgumentIndexError
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
| harmonic |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 23146,
"end": 23544
} | class ____(sgqlc.types.Enum):
"""The possible GitHub Enterprise deployments where this user can
exist.
Enumeration Choices:
* `CLOUD`: The user is part of a GitHub Enterprise Cloud
deployment.
* `SERVER`: The user is part of a GitHub Enterprise Server
deployment.
"""
__schema__ = github_schema
__choices__ = ("CLOUD", "SERVER")
| EnterpriseUserDeployment |
python | django__django | tests/template_tests/filter_tests/test_slice.py | {
"start": 843,
"end": 1738
} | class ____(SimpleTestCase):
def test_zero_length(self):
self.assertEqual(slice_filter("abcdefg", "0"), "")
def test_index(self):
self.assertEqual(slice_filter("abcdefg", "1"), "a")
def test_index_integer(self):
self.assertEqual(slice_filter("abcdefg", 1), "a")
def test_negative_index(self):
self.assertEqual(slice_filter("abcdefg", "-1"), "abcdef")
def test_range(self):
self.assertEqual(slice_filter("abcdefg", "1:2"), "b")
def test_range_multiple(self):
self.assertEqual(slice_filter("abcdefg", "1:3"), "bc")
def test_range_step(self):
self.assertEqual(slice_filter("abcdefg", "0::2"), "aceg")
def test_fail_silently(self):
obj = object()
self.assertEqual(slice_filter(obj, "0::2"), obj)
def test_empty_dict(self):
self.assertEqual(slice_filter({}, "1"), {})
| FunctionTests |
python | tensorflow__tensorflow | tensorflow/python/data/ops/shuffle_op.py | {
"start": 1330,
"end": 2826
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that randomly shuffles the elements of its input."""
def __init__(
self,
input_dataset,
buffer_size,
seed=None,
reshuffle_each_iteration=True,
name=None,
):
"""See `Dataset.shuffle()` for details."""
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
self._seed, self._seed2 = random_seed.get_seed(seed)
self._reshuffle_each_iteration = reshuffle_each_iteration
self._name = name
if (tf2.enabled() and
(context.executing_eagerly() or ops.inside_function())):
variant_tensor = gen_dataset_ops.shuffle_dataset_v3(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
seed_generator=gen_dataset_ops.dummy_seed_generator(),
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._common_args)
else:
variant_tensor = gen_dataset_ops.shuffle_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._common_args)
super().__init__(input_dataset, variant_tensor)
| _ShuffleDataset |
python | kamyu104__LeetCode-Solutions | Python/sum-of-k-mirror-numbers.py | {
"start": 1384,
"end": 2274
} | class ____(object):
def kMirror(self, k, n):
"""
:type k: int
:type n: int
:rtype: int
"""
def num_gen(k):
digits = ['0']
while True:
for i in xrange(len(digits)//2, len(digits)):
if int(digits[i])+1 < k:
digits[i] = digits[-1-i] = str(int(digits[i])+1)
break
digits[i] = digits[-1-i] = '0'
else:
digits.insert(0, '1')
digits[-1] = '1'
yield "".join(digits)
def mirror_num(gen):
while True:
x = int(next(gen, k), k)
if str(x) == str(x)[::-1]:
break
return x
gen = num_gen(k)
return sum(mirror_num(gen) for _ in xrange(n))
| Solution2 |
python | ray-project__ray | python/ray/util/scheduling_strategies.py | {
"start": 5524,
"end": 7616
} | class ____:
"""
Label based node affinity scheduling strategy
scheduling_strategy=NodeLabelSchedulingStrategy({
"region": In("us"),
"gpu_type": Exists(),
})
"""
def __init__(
self, hard: LabelMatchExpressionsT, *, soft: LabelMatchExpressionsT = None
):
self.hard = _convert_map_to_expressions(hard, "hard")
self.soft = _convert_map_to_expressions(soft, "soft")
self._check_usage()
def _check_usage(self):
if not (self.hard or self.soft):
raise ValueError(
"The `hard` and `soft` parameter "
"of NodeLabelSchedulingStrategy cannot both be empty."
)
def _convert_map_to_expressions(map_expressions: LabelMatchExpressionsT, param: str):
expressions = []
if map_expressions is None:
return expressions
if not isinstance(map_expressions, Dict):
raise ValueError(
f'The {param} parameter must be a map (e.g. {{"key1": In("value1")}}) '
f"but got type {type(map_expressions)}."
)
for key, value in map_expressions.items():
if not isinstance(key, str):
raise ValueError(
f"The map key of the {param} parameter must "
f'be of type str (e.g. {{"key1": In("value1")}}) '
f"but got {str(key)} of type {type(key)}."
)
if not isinstance(value, (In, NotIn, Exists, DoesNotExist)):
raise ValueError(
f"The map value for key {key} of the {param} parameter "
f"must be one of the `In`, `NotIn`, `Exists` or `DoesNotExist` "
f'operator (e.g. {{"key1": In("value1")}}) '
f"but got {str(value)} of type {type(value)}."
)
expressions.append(_LabelMatchExpression(key, value))
return expressions
SchedulingStrategyT = Union[
None,
str, # Literal["DEFAULT", "SPREAD"]
PlacementGroupSchedulingStrategy,
NodeAffinitySchedulingStrategy,
NodeLabelSchedulingStrategy,
]
| NodeLabelSchedulingStrategy |
python | pennersr__django-allauth | allauth/account/migrations/0005_emailaddress_idx_upper_email.py | {
"start": 131,
"end": 530
} | class ____(migrations.Migration):
dependencies = [
("account", "0004_alter_emailaddress_drop_unique_email"),
]
operations = [
migrations.AddIndex(
model_name="emailaddress",
index=models.Index(
django.db.models.functions.text.Upper("email"),
name="account_emailaddress_upper",
),
),
]
| Migration |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 12054,
"end": 15915
} | class ____:
"""
Global optimisation tests with Simplicial sampling:
"""
def test_f1_1_simplicial(self):
"""Multivariate test function 1:
x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
run_test(test1_1, n=1, sampling_method='simplicial')
def test_f1_2_simplicial(self):
"""Multivariate test function 1:
x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
run_test(test1_2, n=1, sampling_method='simplicial')
def test_f1_3_simplicial(self):
"""Multivariate test function 1: x[0]**2 + x[1]**2
with bounds=[(None, None),(None, None)]"""
run_test(test1_3, n=5, sampling_method='simplicial')
def test_f2_1_simplicial(self):
"""Univariate test function on
f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
options = {'minimize_every_iter': False}
run_test(test2_1, n=200, iters=7, options=options,
sampling_method='simplicial')
def test_f2_2_simplicial(self):
"""Univariate test function on
f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
run_test(test2_2, n=1, sampling_method='simplicial')
def test_f3_simplicial(self):
"""NLP: Hock and Schittkowski problem 18"""
run_test(test3_1, n=1, sampling_method='simplicial')
@pytest.mark.slow
def test_f4_simplicial(self):
"""NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
run_test(test4_1, n=1, sampling_method='simplicial')
def test_lj_symmetry_old(self):
"""LJ: Symmetry-constrained test function"""
options = {'symmetry': True,
'disp': True}
args = (6,) # Number of atoms
run_test(testLJ, args=args, n=300,
options=options, iters=1,
sampling_method='simplicial')
def test_f5_1_lj_symmetry(self):
"""LJ: Symmetry constrained test function"""
options = {'symmetry': [0, ] * 6,
'disp': True}
args = (6,) # No. of atoms
run_test(testLJ, args=args, n=300,
options=options, iters=1,
sampling_method='simplicial')
def test_f5_2_cons_symmetry(self):
"""Symmetry constrained test function"""
options = {'symmetry': [0, 0],
'disp': True}
run_test(test1_1, n=200,
options=options, iters=1,
sampling_method='simplicial')
@pytest.mark.fail_slow(10)
def test_f5_3_cons_symmetry(self):
"""Asymmetrically constrained test function"""
options = {'symmetry': [0, 0, 0, 3],
'disp': True}
run_test(test_s, n=10000,
options=options,
iters=1,
sampling_method='simplicial')
@pytest.mark.skip("Not a test")
def test_f0_min_variance(self):
"""Return a minimum on a perfectly symmetric problem, based on
gh10429"""
avg = 0.5 # Given average value of x
cons = {'type': 'eq', 'fun': lambda x: np.mean(x) - avg}
# Minimize the variance of x under the given constraint
res = shgo(np.var, bounds=6 * [(0, 1)], constraints=cons)
assert res.success
assert_allclose(res.fun, 0, atol=1e-15)
assert_allclose(res.x, 0.5)
@pytest.mark.skip("Not a test")
def test_f0_min_variance_1D(self):
"""Return a minimum on a perfectly symmetric 1D problem, based on
gh10538"""
def fun(x):
return x * (x - 1.0) * (x - 0.5)
bounds = [(0, 1)]
res = shgo(fun, bounds=bounds)
ref = minimize_scalar(fun, bounds=bounds[0])
assert res.success
assert_allclose(res.fun, ref.fun)
assert_allclose(res.x, ref.x, rtol=1e-6)
# Argument test functions
| TestShgoSimplicialTestFunctions |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.