language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1501452,
"end": 1501788
} | class ____(sgqlc.types.Type, GitSignature):
"""Represents an SSH signature on a Commit or Tag."""
__schema__ = github_schema
__field_names__ = ("key_fingerprint",)
key_fingerprint = sgqlc.types.Field(String, graphql_name="keyFingerprint")
"""Hex-encoded fingerprint of the key that signed this object."""
| SshSignature |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 8519,
"end": 8639
} | class ____(Message):
message = "'...'.format(...) mixes automatic and manual numbering"
| StringDotFormatMixingAutomatic |
python | kamyu104__LeetCode-Solutions | Python/find-indices-of-stable-mountains.py | {
"start": 37,
"end": 300
} | class ____(object):
def stableMountains(self, height, threshold):
"""
:type height: List[int]
:type threshold: int
:rtype: List[int]
"""
return [i for i in xrange(1, len(height)) if height[i-1] > threshold]
| Solution |
python | PrefectHQ__prefect | src/prefect/_experimental/sla/objects.py | {
"start": 1549,
"end": 1995
} | class ____(ServiceLevelAgreement):
"""An SLA that triggers when a completed flow run is not detected in the specified time.
For example, if stale_after is 1 hour, if a flow run does not complete
within an hour of the previous flow run, the SLA will trigger.
"""
stale_after: timedelta = Field(
default=...,
description="The amount of time after which a flow run is considered in violation.",
)
| FrequencySla |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/persistence/file_format.py | {
"start": 146,
"end": 794
} | class ____:
format_version: int
width: int
height: int
@classmethod
def read(cls, file: BinaryIO) -> "FileHeader":
assert file.read(len(MAGIC_NUMBER)) == MAGIC_NUMBER, (
"Unknown file type"
)
(format_version,) = struct.unpack("B", file.read(1))
width, height = struct.unpack("<2I", file.read(2 * 4))
return cls(format_version, width, height)
def write(self, file: BinaryIO) -> None:
file.write(MAGIC_NUMBER)
file.write(struct.pack("B", self.format_version))
file.write(struct.pack("<2I", self.width, self.height))
@dataclass(frozen=True)
| FileHeader |
python | python-openxml__python-docx | src/docx/image/png.py | {
"start": 7115,
"end": 8214
} | class ____(_Chunk):
"""PYHs chunk, contains the image dpi information."""
def __init__(self, chunk_type, horz_px_per_unit, vert_px_per_unit, units_specifier):
super(_pHYsChunk, self).__init__(chunk_type)
self._horz_px_per_unit = horz_px_per_unit
self._vert_px_per_unit = vert_px_per_unit
self._units_specifier = units_specifier
@classmethod
def from_offset(cls, chunk_type, stream_rdr, offset):
"""Return a _pHYsChunk instance containing the image resolution extracted from
the pHYs chunk in `stream` at `offset`."""
horz_px_per_unit = stream_rdr.read_long(offset)
vert_px_per_unit = stream_rdr.read_long(offset, 4)
units_specifier = stream_rdr.read_byte(offset, 8)
return cls(chunk_type, horz_px_per_unit, vert_px_per_unit, units_specifier)
@property
def horz_px_per_unit(self):
return self._horz_px_per_unit
@property
def vert_px_per_unit(self):
return self._vert_px_per_unit
@property
def units_specifier(self):
return self._units_specifier
| _pHYsChunk |
python | django__django | django/db/models/fields/__init__.py | {
"start": 1915,
"end": 3025
} | class ____:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
| NOT_PROVIDED |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_numeric_tower.py | {
"start": 867,
"end": 1323
} | class ____(int):
"""Dummy Integral class to test conversion of the Rational to float."""
def __mul__(self, other):
return DummyIntegral(super().__mul__(other))
__rmul__ = __mul__
def __truediv__(self, other):
return NotImplemented
__rtruediv__ = __truediv__
@property
def numerator(self):
return DummyIntegral(self)
@property
def denominator(self):
return DummyIntegral(1)
| DummyIntegral |
python | Lightning-AI__lightning | src/lightning/pytorch/plugins/layer_sync.py | {
"start": 1189,
"end": 3516
} | class ____(LayerSync):
"""A plugin that wraps all batch normalization layers of a model with synchronization logic for multiprocessing.
This plugin has no effect in single-device operation.
"""
@override
def apply(self, model: Module) -> Module:
"""Add global batchnorm for a model spread across multiple GPUs and nodes.
Override this method to synchronize batchnorm layers between specific process groups instead
of the whole world.
Args:
model: Reference to the current LightningModule
Return:
LightningModule with batchnorm layers synchronized within the process groups.
"""
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
@override
def revert(self, model: Module) -> Module:
"""Convert the wrapped batchnorm layers back to regular batchnorm layers.
Args:
model: Reference to the current LightningModule
Return:
LightningModule with regular batchnorm layers that will no longer sync across processes.
"""
# Code adapted from https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547
# Original author: Kapil Yedidi (@kapily)
converted_module = model
if isinstance(model, torch.nn.modules.batchnorm.SyncBatchNorm):
# Unfortunately, LayerSync does not store the original class - if it did
# we could return the one that was originally created.
converted_module = _BatchNormXd(
model.num_features, model.eps, model.momentum, model.affine, model.track_running_stats
)
if model.affine:
with torch.no_grad():
converted_module.weight = model.weight
converted_module.bias = model.bias
converted_module.running_mean = model.running_mean
converted_module.running_var = model.running_var
converted_module.num_batches_tracked = model.num_batches_tracked
if hasattr(model, "qconfig"):
converted_module.qconfig = model.qconfig
for name, child in model.named_children():
converted_module.add_module(name, self.revert(child))
del model
return converted_module
| TorchSyncBatchNorm |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 12050,
"end": 13050
} | class ____(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o):
if not all(elem in self.vocab.o2i.keys() for elem in o):
diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
diff_str = "', '".join(diff)
raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
# %% ../../nbs/05_data.transforms.ipynb 85
| MultiCategorize |
python | mlflow__mlflow | mlflow/legacy_databricks_cli/configure/provider.py | {
"start": 11591,
"end": 14327
} | class ____(DatabricksConfigProvider):
"""Loads from OAuth credentials in the Databricks Model Serving environment."""
def get_config(self):
from mlflow.utils.databricks_utils import should_fetch_model_serving_environment_oauth
try:
if should_fetch_model_serving_environment_oauth():
config = DatabricksModelServingConfigProvider._get_databricks_model_serving_config()
if config.is_valid:
return config
else:
return None
except Exception as e:
_logger.warning("Unexpected error resolving Databricks Model Serving config: %s", e)
@staticmethod
def _get_databricks_model_serving_config():
from mlflow.utils.databricks_utils import get_model_dependency_oauth_token
# Since we do not record OAuth expiration time in OAuth file, perform periodic refresh
# of OAuth environment variable cache here. As currently configured (02/24) OAuth token
# in model serving environment is guaranteed to have at least 30 min remaining on TTL
# at any point in time but refresh at higher rate of every 5 min here to be safe
# and conform with refresh logic for Brickstore tables.
OAUTH_CACHE_REFRESH_DURATION_SEC = 5 * 60
OAUTH_CACHE_ENV_VAR = "DB_DEPENDENCY_OAUTH_CACHE"
OAUTH_CACHE_EXPIRATION_ENV_VAR = "DB_DEPENDENCY_OAUTH_CACHE_EXPIRY_TS"
MODEL_SERVING_HOST_ENV_VAR = "DATABRICKS_MODEL_SERVING_HOST_URL"
DB_MODEL_SERVING_HOST_ENV_VAR = "DB_MODEL_SERVING_HOST_URL"
# read from DB_MODEL_SERVING_HOST_ENV_VAR if available otherwise MODEL_SERVING_HOST_ENV_VAR
host = os.environ.get(DB_MODEL_SERVING_HOST_ENV_VAR) or os.environ.get(
MODEL_SERVING_HOST_ENV_VAR
)
# check if dependency is cached in env var before reading from file
oauth_token = ""
if (
OAUTH_CACHE_ENV_VAR in os.environ
and OAUTH_CACHE_EXPIRATION_ENV_VAR in os.environ
and float(os.environ[OAUTH_CACHE_EXPIRATION_ENV_VAR]) > time.time()
):
oauth_token = os.environ[OAUTH_CACHE_ENV_VAR]
else:
oauth_token = get_model_dependency_oauth_token()
os.environ[OAUTH_CACHE_ENV_VAR] = oauth_token
os.environ[OAUTH_CACHE_EXPIRATION_ENV_VAR] = str(
time.time() + OAUTH_CACHE_REFRESH_DURATION_SEC
)
return DatabricksConfig(
host=host,
token=oauth_token,
username=None,
password=None,
refresh_token=None,
insecure=None,
jobs_api_version=None,
)
| DatabricksModelServingConfigProvider |
python | OmkarPathak__pygorithm | tests/test_geometry.py | {
"start": 16747,
"end": 22527
} | class ____(unittest.TestCase):
def setUp(self):
self.vec_1_1 = vector2.Vector2(1, 1)
def test_constructor(self):
_aal = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
self.assertIsNotNone(_aal.axis)
self.assertIsNotNone(_aal.min)
self.assertIsNotNone(_aal.max)
self.assertEqual(1, _aal.axis.x)
self.assertEqual(1, _aal.axis.y)
self.assertEqual(0, _aal.min)
self.assertEqual(1, _aal.max)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 1, 0)
self.assertEqual(0, _aal.min)
self.assertEqual(1, _aal.max)
def test_intersects_false(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 2, 3)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal1, _aal2)
self.assertFalse(touching)
self.assertFalse(overlapping)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal2, _aal1)
self.assertFalse(touching)
self.assertFalse(overlapping)
def test_intersects_touching(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 1, 2)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal1, _aal2)
self.assertTrue(touching)
self.assertFalse(overlapping)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal2, _aal1)
self.assertTrue(touching)
self.assertFalse(overlapping)
def test_intersects_overlapping(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, -1, -3)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, -2, 5)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal1, _aal2)
self.assertFalse(touching)
self.assertTrue(overlapping)
touching, overlapping = axisall.AxisAlignedLine.intersects(_aal2, _aal1)
self.assertFalse(touching)
self.assertTrue(overlapping)
def test_find_intersection_false(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 2, 3)
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal1, _aal2)
self.assertFalse(touching)
self.assertIsNone(mtv)
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal2, _aal1)
self.assertFalse(touching)
self.assertIsNone(mtv)
def test_find_intersection_touching(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 1, 2)
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal1, _aal2)
self.assertTrue(touching)
self.assertIsNotNone(mtv)
self.assertIsNone(mtv[0])
self.assertEqual(1, mtv[1])
self.assertEqual(1, mtv[2])
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal2, _aal1)
self.assertTrue(touching)
self.assertIsNotNone(mtv)
self.assertIsNone(mtv[0])
self.assertEqual(1, mtv[1])
self.assertEqual(1, mtv[2])
def test_find_intersection_overlapping(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, -3, -1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, -2, 5)
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal1, _aal2)
self.assertTrue(touching)
self.assertEqual(-1, mtv[0])
self.assertEqual(-2, mtv[1])
self.assertEqual(-1, mtv[2])
touching, mtv = axisall.AxisAlignedLine.find_intersection(_aal2, _aal1)
self.assertTrue(touching)
self.assertEqual(1, mtv[0])
self.assertEqual(-2, mtv[1])
self.assertEqual(-1, mtv[2])
def test_contains_point_false(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, -1)
self.assertFalse(outer)
self.assertFalse(inner)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, 1.5)
self.assertFalse(outer)
self.assertFalse(inner)
def test_contains_point_outer(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, 0)
self.assertTrue(outer)
self.assertFalse(inner)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, 1)
self.assertTrue(outer)
self.assertFalse(inner)
def test_contains_point_inner(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, 0.25)
self.assertFalse(outer)
self.assertTrue(inner)
outer, inner = axisall.AxisAlignedLine.contains_point(_aal1, 0.75)
self.assertFalse(outer)
self.assertTrue(inner)
def test_repr(self):
_aal = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
exp = "AxisAlignedLine(axis=vector2(x=1, y=1), min=0, max=1)"
self.assertEqual(exp, repr(_aal))
def test_str(self):
_aal1 = axisall.AxisAlignedLine(self.vec_1_1, 0, 1)
_aal2 = axisall.AxisAlignedLine(self.vec_1_1, 0.707123, 0.707123)
exp1 = "axisall(along <1, 1> from 0 to 1)"
exp2 = "axisall(along <1, 1> from 0.707 to 0.707)"
self.assertEqual(exp1, str(_aal1))
self.assertEqual(exp2, str(_aal2))
| TestAxisAlignedLine |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 10392,
"end": 10561
} | class ____(models.Model):
random_char_field = RandomCharField(length=8, unique=True)
class Meta:
app_label = "django_extensions"
| RandomCharTestModelUnique |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 4475,
"end": 4830
} | class ____(Blockwise):
_parameters = [
"frame",
"window",
"kwargs",
"how",
"how_args",
"how_kwargs",
"groupby_kwargs",
"groupby_slice",
]
operation = staticmethod(_rolling_agg)
@functools.cached_property
def _meta(self):
return self.frame._meta
| RollingAggregation |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 19981,
"end": 29213
} | class ____(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"secondary",
metadata,
Column(
"left_id", Integer, ForeignKey("parent.id"), nullable=False
),
Column(
"right_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("cls", String(50)),
)
Table(
"child1",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
)
Table(
"child2",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child1(Parent):
pass
class Child2(Parent):
pass
@classmethod
def setup_mappers(cls):
child1 = cls.tables.child1
child2 = cls.tables.child2
Parent = cls.classes.Parent
parent = cls.tables.parent
Child1 = cls.classes.Child1
Child2 = cls.classes.Child2
secondary = cls.tables.secondary
cls.mapper_registry.map_imperatively(
Parent, parent, polymorphic_on=parent.c.cls
)
cls.mapper_registry.map_imperatively(
Child1,
child1,
inherits=Parent,
polymorphic_identity="child1",
properties={
"left_child2": relationship(
Child2,
secondary=secondary,
primaryjoin=parent.c.id == secondary.c.right_id,
secondaryjoin=parent.c.id == secondary.c.left_id,
uselist=False,
backref="right_children",
)
},
)
cls.mapper_registry.map_imperatively(
Child2, child2, inherits=Parent, polymorphic_identity="child2"
)
def test_query_crit(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = fixture_session()
c11, c12, c13 = Child1(), Child1(), Child1()
c21, c22, c23 = Child2(), Child2(), Child2()
c11.left_child2 = c22
c12.left_child2 = c22
c13.left_child2 = c23
sess.add_all([c11, c12, c13, c21, c22, c23])
sess.flush()
# auto alias test:
# test that the join to Child2 doesn't alias Child1 in the select
stmt = select(Child1).join(Child1.left_child2)
with _aliased_join_warning(r"Child2\(child2\)"):
eq_(
set(sess.execute(stmt).scalars().unique()),
{c11, c12, c13},
)
with _aliased_join_warning(r"Child2\(child2\)"):
eq_(
set(sess.query(Child1, Child2).join(Child1.left_child2)),
{(c11, c22), (c12, c22), (c13, c23)},
)
# manual alias test:
c2 = aliased(Child2)
stmt = select(Child1).join(Child1.left_child2.of_type(c2))
eq_(
set(sess.execute(stmt).scalars().unique()),
{c11, c12, c13},
)
eq_(
set(sess.query(Child1, c2).join(Child1.left_child2.of_type(c2))),
{(c11, c22), (c12, c22), (c13, c23)},
)
# test __eq__() on property is annotating correctly
stmt = (
select(Child2)
.join(Child2.right_children)
.where(Child1.left_child2 == c22)
)
with _aliased_join_warning(r"Child1\(child1\)"):
eq_(
set(sess.execute(stmt).scalars().unique()),
{c22},
)
# manual aliased version
c1 = aliased(Child1, flat=True)
stmt = (
select(Child2)
.join(Child2.right_children.of_type(c1))
.where(c1.left_child2 == c22)
)
eq_(
set(sess.execute(stmt).scalars().unique()),
{c22},
)
# test the same again
with _aliased_join_warning(r"Child1\(child1\)"):
self.assert_compile(
sess.query(Child2)
.join(Child2.right_children)
.filter(Child1.left_child2 == c22)
.statement,
"SELECT child2.id, parent.id AS id_1, parent.cls "
"FROM parent "
"JOIN child2 ON parent.id = child2.id "
"JOIN secondary AS secondary_1 ON parent.id = "
"secondary_1.left_id "
"JOIN (parent AS parent_1 JOIN child1 AS child1_1 "
"ON parent_1.id = child1_1.id) ON parent_1.id = "
"secondary_1.right_id, secondary AS secondary_2 "
"WHERE parent_1.id = secondary_2.right_id "
"AND :param_1 = secondary_2.left_id",
)
# non aliased version
self.assert_compile(
sess.query(Child2)
.join(Child2.right_children.of_type(c1))
.filter(c1.left_child2 == c22)
.statement,
"SELECT child2.id, parent.id AS id_1, parent.cls "
"FROM parent "
"JOIN child2 ON parent.id = child2.id "
"JOIN secondary AS secondary_1 ON parent.id = secondary_1.left_id "
"JOIN (parent AS parent_1 JOIN child1 AS child1_1 "
"ON parent_1.id = child1_1.id) ON parent_1.id = "
"secondary_1.right_id, secondary AS secondary_2 "
"WHERE parent_1.id = secondary_2.right_id "
"AND :param_1 = secondary_2.left_id",
)
def test_query_crit_core_workaround(self):
# do a test in the style of orm/test_core_compilation.py
Child1, Child2 = self.classes.Child1, self.classes.Child2
secondary = self.tables.secondary
configure_mappers()
from sqlalchemy.sql import join
C1 = aliased(Child1, flat=True)
# this was "figure out all the things we need to do in Core to make
# the identical query that the ORM renders.", however as of
# I765a0b912b3dcd0e995426427d8bb7997cbffd51 this is using the ORM
# to create the query in any case
salias = secondary.alias()
stmt = (
select(Child2)
.select_from(
join(
Child2,
salias,
Child2.id.expressions[1] == salias.c.left_id,
).join(C1, salias.c.right_id == C1.id.expressions[1])
)
.where(C1.left_child2 == Child2(id=1))
)
self.assert_compile(
stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT child2.id AS child2_id, parent.id AS parent_id, "
"parent.cls AS parent_cls "
"FROM "
"parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
"secondary_1 ON parent.id = secondary_1.left_id JOIN "
"(parent AS parent_1 JOIN child1 AS child1_1 "
"ON parent_1.id = child1_1.id) "
"ON parent_1.id = secondary_1.right_id, secondary AS secondary_2 "
"WHERE "
"parent_1.id = secondary_2.right_id AND :param_1 = "
"secondary_2.left_id",
)
def test_eager_join(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = fixture_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
# test that the splicing of the join works here, doesn't break in
# the middle of "parent join child1"
q = sess.query(Child1).options(joinedload(Child1.left_child2))
self.assert_compile(
q.limit(1).statement,
"SELECT child1.id, parent.id AS id_1, parent.cls, "
"child2_1.id AS id_2, parent_1.id AS id_3, parent_1.cls AS cls_1 "
"FROM parent JOIN child1 ON parent.id = child1.id "
"LEFT OUTER JOIN (secondary AS secondary_1 "
"JOIN (parent AS parent_1 JOIN child2 AS child2_1 "
"ON parent_1.id = child2_1.id) ON parent_1.id = "
"secondary_1.left_id) ON parent.id = secondary_1.right_id "
"LIMIT :param_1",
checkparams={"param_1": 1},
)
# another way to check
eq_(
sess.scalar(
select(func.count("*")).select_from(q.limit(1).subquery())
),
1,
)
assert q.first() is c1
def test_subquery_load(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = fixture_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
sess.expunge_all()
query_ = sess.query(Child1).options(subqueryload(Child1.left_child2))
for row in query_.all():
assert row.left_child2
| SelfReferentialM2MTest |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 60460,
"end": 66191
} | class ____(StatNode):
# C variable definition or forward/extern function declaration.
#
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarators [CDeclaratorNode]
# in_pxd boolean
# api boolean
# overridable boolean whether it is a cpdef
# modifiers ['inline']
# decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
decorators = None
directive_locals = None
def analyse_declarations(self, env, dest_scope=None):
if self.directive_locals is None:
self.directive_locals = {}
if not dest_scope:
dest_scope = env
self.dest_scope = dest_scope
if self.declarators:
templates = self.declarators[0].analyse_templates()
else:
templates = None
if templates is not None:
if self.visibility != 'extern':
error(self.pos, "Only extern functions allowed")
if len(self.declarators) > 1:
error(self.declarators[1].pos, "Can't multiply declare template types")
env = TemplateScope('func_template', env)
env.directives = env.outer_scope.directives
for template_param in templates:
env.declare_type(template_param.name, template_param, self.pos)
base_type = self.base_type.analyse(env)
# Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
modifiers = None
if self.base_type.is_templated_type_node:
modifiers = self.base_type.analyse_pytyping_modifiers(env)
if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
env.is_module_scope):
error(self.pos, "Fused types not allowed here")
return error_type
self.entry = None
visibility = self.visibility
for declarator in self.declarators:
if (len(self.declarators) > 1
and not isinstance(declarator, CNameDeclaratorNode)
and env.directives['warn.multiple_declarators']):
warning(
declarator.pos,
"Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). "
"Each pointer declaration should be on its own line.", 1)
create_extern_wrapper = (self.overridable
and self.visibility == 'extern'
and env.is_module_scope)
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
name_declarator, type = declarator.analyse(
base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
else:
name_declarator, type = declarator.analyse(
base_type, env, visibility=visibility, in_pxd=self.in_pxd)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos, "Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
error(declarator.pos, "Python object cannot be declared extern")
name = name_declarator.name
cname = name_declarator.cname
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
if type.is_reference and self.visibility != 'extern':
error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
if type.is_rvalue_reference and self.visibility != 'extern':
error(declarator.pos, "C++ rvalue-references cannot be declared")
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
self.entry = dest_scope.declare_cfunction(
name, type, declarator.pos,
cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
api=self.api, modifiers=self.modifiers, overridable=self.overridable)
if self.entry is not None:
self.entry.directive_locals = copy.copy(self.directive_locals)
if create_extern_wrapper:
self.entry.type.create_to_py_utility_code(env)
self.entry.create_wrapper = True
else:
if self.overridable:
if env.is_c_class_scope:
error(self.pos,
"Variables cannot be declared with 'cpdef'. Use 'cdef', 'cdef public' or 'cdef readonly' instead.")
else:
error(self.pos, "Variables cannot be declared with 'cpdef'. Use 'cdef' instead.")
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(
name, type, declarator.pos,
cname=cname, visibility=visibility, in_pxd=self.in_pxd,
api=self.api, is_cdef=True, pytyping_modifiers=modifiers)
if Options.docstrings:
self.entry.doc = embed_position(self.pos, self.doc)
| CVarDefNode |
python | davidhalter__jedi | test/completion/generators.py | {
"start": 685,
"end": 1031
} | class ____():
def __iter__(self):
if random.choice([0, 1]):
yield 1
else:
yield ""
b = []
for a in Get():
#? int() str()
a
b += [a]
#? list()
b
#? int() str()
b[0]
g = iter(Get())
#? int() str()
next(g)
g = iter([1.0])
#? float()
next(g)
x, y = Get()
#? int() str()
x
#? int() str()
x
| Get |
python | walkccc__LeetCode | solutions/2282. Number of People That Can Be Seen in a Grid/2282.py | {
"start": 0,
"end": 994
} | class ____:
def seePeople(self, heights: list[list[int]]) -> list[list[int]]:
m = len(heights)
n = len(heights[0])
ans = [[0] * n for _ in range(m)]
for i, row in enumerate(heights):
stack = []
for j, height in enumerate(row):
hasEqualHeight = False
while stack and row[stack[-1]] <= height:
if row[stack[-1]] == height:
# edge case: [4, 2, 1, 1, 3]
hasEqualHeight = True
ans[i][stack.pop()] += 1
if stack and not hasEqualHeight:
ans[i][stack[-1]] += 1
stack.append(j)
for j, col in enumerate(zip(*heights)):
stack = []
for i, height in enumerate(col):
hasEqualHeight = False
while stack and col[stack[-1]] <= height:
if col[stack[-1]] == height:
hasEqualHeight = True
ans[stack.pop()][j] += 1
if stack and not hasEqualHeight:
ans[stack[-1]][j] += 1
stack.append(i)
return ans
| Solution |
python | python-poetry__poetry | src/poetry/repositories/lockfile_repository.py | {
"start": 188,
"end": 549
} | class ____(Repository):
"""
Special repository that distinguishes packages not only by name and version,
but also by source type, url, etc.
"""
def __init__(self) -> None:
super().__init__("poetry-lockfile")
def has_package(self, package: Package) -> bool:
return any(p == package for p in self.packages)
| LockfileRepository |
python | spyder-ide__spyder | spyder/widgets/reporterror.py | {
"start": 4062,
"end": 4652
} | class ____(TracebackLinksMixin, ConsoleBaseWidget, BaseEditMixin,
SpyderFontsMixin):
"""Widget to show errors as they appear in the Internal console."""
QT_CLASS = QPlainTextEdit
sig_go_to_error_requested = Signal(str)
def __init__(self, parent=None):
ConsoleBaseWidget.__init__(self, parent)
BaseEditMixin.__init__(self)
TracebackLinksMixin.__init__(self)
self.setReadOnly(True)
self.set_pythonshell_font(
self.get_font(SpyderFontType.MonospaceInterface, font_size_delta=1)
)
| ShowErrorWidget |
python | rapidsai__cudf | python/cudf/cudf/core/column/column.py | {
"start": 3712,
"end": 4204
} | class ____:
# A wrapper that exposes the __cuda_array_interface__ of a mask that accounts for
# the mask being a bitmask in the mask size calculation.
def __init__(self, mask: Any) -> None:
self._mask = mask
@property
def __cuda_array_interface__(self) -> Mapping:
cai = self._mask.__cuda_array_interface__.copy()
cai["shape"] = (
plc.null_mask.bitmask_allocation_size_bytes(cai["shape"][0]),
)
return cai
| MaskCAIWrapper |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 87309,
"end": 88028
} | class ____(INT8MMTemplateConfigMixin, ROCmConfigHeuristic):
"""Int8 MM template heuristic for ROCm"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use int8_mm_configs
self.mm_configs = self.int8_mm_configs
# NOTE: overriding exhaustive configs here to be the same as mm_configs
# as we haven't validated exhaustive support here yet
# TODO(coconutruben): remove this once we have validated exhaustive support
# for scaled_mm
self.exhaustive_configs = self.int8_mm_configs
@register_template_heuristic(
mm_plus_mm_template.uid,
"cuda",
register=torch.version.hip is not None,
)
| ROCmInt8MMTemplateConfigHeuristic |
python | coleifer__peewee | playhouse/cockroachdb.py | {
"start": 7472,
"end": 9083
} | class ____(_atomic):
def __enter__(self):
if self.db.transaction_depth() > 0:
if not isinstance(self.db.top_transaction(), _manual):
raise NotImplementedError(TXN_ERR_MSG)
return super(_crdb_atomic, self).__enter__()
def run_transaction(db, callback, max_attempts=None, system_time=None,
priority=None):
"""
Run transactional SQL in a transaction with automatic retries.
User-provided `callback`:
* Must accept one parameter, the `db` instance representing the connection
the transaction is running under.
* Must not attempt to commit, rollback or otherwise manage transactions.
* May be called more than once.
* Should ideally only contain SQL operations.
Additionally, the database must not have any open transaction at the time
this function is called, as CRDB does not support nested transactions.
"""
max_attempts = max_attempts or -1
with db.atomic(system_time=system_time, priority=priority) as txn:
db.execute_sql('SAVEPOINT cockroach_restart')
while max_attempts != 0:
try:
result = callback(db)
db.execute_sql('RELEASE SAVEPOINT cockroach_restart')
return result
except OperationalError as exc:
if exc.orig.pgcode == '40001':
max_attempts -= 1
db.execute_sql('ROLLBACK TO SAVEPOINT cockroach_restart')
continue
raise
raise ExceededMaxAttempts(None, 'unable to commit transaction')
| _crdb_atomic |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 31448,
"end": 34888
} | class ____:
def test_add_role(self, db_request):
role_name = "Maintainer"
project = ProjectFactory.create(name="foo")
UserFactory.create(username="admin")
user = UserFactory.create(username="bar")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST["username"] = user.username
db_request.POST["role_name"] = role_name
db_request.user = UserFactory.create()
views.add_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call(f"Added 'bar' as '{role_name}' on 'foo'", queue="success")
]
role = db_request.db.query(Role).one()
assert role.role_name == role_name
assert role.user == user
assert role.project == project
def test_add_role_no_username(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.POST = {}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
with pytest.raises(HTTPSeeOther):
views.add_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Provide a username", queue="error")
]
def test_add_role_no_user(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.POST = {"username": "bar"}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
with pytest.raises(HTTPSeeOther):
views.add_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Unknown username 'bar'", queue="error")
]
def test_add_role_no_role_name(self, db_request):
project = ProjectFactory.create(name="foo")
UserFactory.create(username="bar")
db_request.POST = {"username": "bar"}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
with pytest.raises(HTTPSeeOther):
views.add_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Provide a role", queue="error")
]
def test_add_role_with_existing_role(self, db_request):
project = ProjectFactory.create(name="foo")
user = UserFactory.create(username="bar")
role = RoleFactory.create(project=project, user=user)
db_request.POST = {"username": "bar", "role_name": role.role_name}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
with pytest.raises(HTTPSeeOther):
views.add_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("User 'bar' already has a role on this project", queue="error")
]
| TestAddRole |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 10329,
"end": 10918
} | class ____(ABC):
"""Hold the shared rendezvous state synced with other nodes."""
@property
@abstractmethod
def state(self) -> _RendezvousState:
"""Get the local state."""
@abstractmethod
def sync(self) -> bool | None:
"""Read or writes the latest state.
Returns:
A boolean value indicating whether the local state, in case marked
as dirty, was successfully synced with other nodes.
"""
@abstractmethod
def mark_dirty(self) -> None:
"""Mark the local state as dirty."""
| _RendezvousStateHolder |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_asyncio.py | {
"start": 2014,
"end": 2686
} | class ____(TestCase):
# In principle, these tests could indeed run on emscripten if we grab the existing
# event loop and run them there. However, that seems to have hit an infinite loop
# and so we're just skipping them for now and will revisit later.
def execute_example(self, f):
asyncio.run(f())
@skipif_emscripten
@given(x=st.text())
@coro_decorator
def test_foo_yield_from(self, x):
assume(x)
yield from asyncio.sleep(0.001)
assert x
@skipif_emscripten
@given(st.text())
async def test_foo_await(self, x):
assume(x)
await asyncio.sleep(0.001)
assert x
| TestAsyncioRun |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 48823,
"end": 50599
} | class ____(fixtures.MappedTest):
"""Syncrules on foreign keys that are also primary"""
@classmethod
def define_tables(cls, metadata):
Table(
"tableA",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
test_needs_fk=True,
)
Table(
"tableB",
metadata,
Column("id", Integer, primary_key=True),
Column("_a_id", Integer, key="a_id", primary_key=True),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(cls.Basic):
@property
def a_id(self):
return self._a_id
def test_synonym_fk(self):
"""test that active history is enabled on a
one-to-many/one that has use_get==True"""
tableB, A, B, tableA = (
self.tables.tableB,
self.classes.A,
self.classes.B,
self.tables.tableA,
)
self.mapper_registry.map_imperatively(
B, tableB, properties={"a_id": synonym("_a_id", map_column=True)}
)
self.mapper_registry.map_imperatively(
A,
tableA,
properties={
"b": relationship(
B,
primaryjoin=(tableA.c.id == foreign(B.a_id)),
uselist=False,
)
},
)
sess = fixture_session()
b = B(id=0)
a = A(id=0, b=b)
sess.add(a)
sess.add(b)
sess.flush()
sess.expunge_all()
assert a.b == b
assert a.id == b.a_id
assert a.id == b._a_id
| SynonymsAsFKsTest |
python | pallets__jinja | src/jinja2/ext.py | {
"start": 20565,
"end": 20931
} | class ____(Extension):
"""Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
tags = {"do"}
def parse(self, parser: "Parser") -> nodes.ExprStmt:
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
| ExprStmtExtension |
python | kamyu104__LeetCode-Solutions | Python/convert-a-number-to-hexadecimal.py | {
"start": 32,
"end": 510
} | class ____(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
if not num:
return "0"
result = []
while num and len(result) != 8:
h = num & 15
if h < 10:
result.append(str(chr(ord('0') + h)))
else:
result.append(str(chr(ord('a') + h-10)))
num >>= 4
result.reverse()
return "".join(result)
| Solution |
python | numba__numba | numba/tests/test_globals.py | {
"start": 2586,
"end": 7329
} | class ____(unittest.TestCase):
def check_global_ndarray(self, **jitargs):
# (see github issue #448)
ctestfunc = jit(**jitargs)(global_ndarray_func)
self.assertEqual(ctestfunc(1), 11)
def test_global_ndarray(self):
# This also checks we can access an unhashable global value
# (see issue #697)
self.check_global_ndarray(forceobj=True)
def test_global_ndarray_npm(self):
self.check_global_ndarray(nopython=True)
def check_global_complex_arr(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_cplx_arr_copy)
arr = np.zeros(len(cplx_X), dtype=np.complex128)
ctestfunc(arr)
np.testing.assert_equal(arr, cplx_X)
def test_global_complex_arr(self):
self.check_global_complex_arr(forceobj=True)
def test_global_complex_arr_npm(self):
self.check_global_complex_arr(nopython=True)
def check_global_rec_arr(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_rec_arr_copy)
arr = np.zeros(rec_X.shape, dtype=x_dt)
ctestfunc(arr)
np.testing.assert_equal(arr, rec_X)
def test_global_rec_arr(self):
self.check_global_rec_arr(forceobj=True)
def test_global_rec_arr_npm(self):
self.check_global_rec_arr(nopython=True)
def check_global_rec_arr_extract(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_rec_arr_extract_fields)
arr1 = np.zeros(rec_X.shape, dtype=np.int32)
arr2 = np.zeros(rec_X.shape, dtype=np.float32)
ctestfunc(arr1, arr2)
np.testing.assert_equal(arr1, rec_X.a)
np.testing.assert_equal(arr2, rec_X.b)
def test_global_rec_arr_extract(self):
self.check_global_rec_arr_extract(forceobj=True)
def test_global_rec_arr_extract_npm(self):
self.check_global_rec_arr_extract(nopython=True)
def check_two_global_rec_arrs(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_two_rec_arrs)
arr1 = np.zeros(rec_X.shape, dtype=np.int32)
arr2 = np.zeros(rec_X.shape, dtype=np.float32)
arr3 = np.zeros(rec_Y.shape, dtype=np.int16)
arr4 = np.zeros(rec_Y.shape, dtype=np.float64)
ctestfunc(arr1, arr2, arr3, arr4)
np.testing.assert_equal(arr1, rec_X.a)
np.testing.assert_equal(arr2, rec_X.b)
np.testing.assert_equal(arr3, rec_Y.c)
np.testing.assert_equal(arr4, rec_Y.d)
def test_two_global_rec_arrs(self):
self.check_two_global_rec_arrs(forceobj=True)
def test_two_global_rec_arrs_npm(self):
self.check_two_global_rec_arrs(nopython=True)
def test_global_module(self):
# (see github issue #1059)
res = global_module_func(5, 6)
self.assertEqual(True, res)
def test_global_record(self):
# (see github issue #1081)
x = np.recarray(1, dtype=x_dt)[0]
x.a = 1
res = global_record_func(x)
self.assertEqual(True, res)
x.a = 2
res = global_record_func(x)
self.assertEqual(False, res)
def test_global_int_tuple(self):
pyfunc = global_int_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_str_tuple(self):
pyfunc = global_str_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_mixed_tuple(self):
pyfunc = global_mixed_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_float_tuple(self):
pyfunc = global_float_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_npy_int_tuple(self):
pyfunc = global_npy_int_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_write_to_arr_in_tuple(self):
# Test writing to an array in a global tuple
# See issue https://github.com/numba/numba/issues/7120
for func in (global_write_to_arr_in_tuple,
global_write_to_arr_in_mixed_tuple):
jitfunc = njit(func)
with self.assertRaises(errors.TypingError) as e:
jitfunc()
msg = "Cannot modify readonly array of type:"
self.assertIn(msg, str(e.exception))
def test_global_npy_bool(self):
# Test global NumPy bool
# See issue https://github.com/numba/numba/issues/6979
pyfunc = global_npy_bool
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
if __name__ == '__main__':
unittest.main()
| TestGlobals |
python | getsentry__sentry | tests/sentry/api/serializers/test_fields.py | {
"start": 618,
"end": 1620
} | class ____(unittest.TestCase):
def test_simple(self) -> None:
data = {"a_field": [{"b_field": "abcdefg", "d_field": "gfedcba"}]}
serializer = DummySerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {
"a_field": [{"b_field": "abcdefg", "d_field": "gfedcba"}]
}
def test_allow_null(self) -> None:
data = {"a_field": [None]}
serializer = DummySerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {
"a_field": [ErrorDetail(string="This field may not be null.", code="null")]
}
def test_child_validates(self) -> None:
data = {"a_field": [{"b_field": "abcdefg"}]}
serializer = DummySerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {
"a_field": {"d_field": [ErrorDetail(string="This field is required.", code="required")]}
}
| TestListField |
python | doocs__leetcode | solution/1700-1799/1774.Closest Dessert Cost/Solution.py | {
"start": 0,
"end": 866
} | class ____:
def closestCost(
self, baseCosts: List[int], toppingCosts: List[int], target: int
) -> int:
def dfs(i, t):
if i >= len(toppingCosts):
arr.append(t)
return
dfs(i + 1, t)
dfs(i + 1, t + toppingCosts[i])
arr = []
dfs(0, 0)
arr.sort()
d = ans = inf
# 选择一种冰激淋基料
for x in baseCosts:
# 枚举子集和
for y in arr:
# 二分查找
i = bisect_left(arr, target - x - y)
for j in (i, i - 1):
if 0 <= j < len(arr):
t = abs(x + y + arr[j] - target)
if d > t or (d == t and ans > x + y + arr[j]):
d = t
ans = x + y + arr[j]
return ans
| Solution |
python | openai__openai-python | src/openai/types/file_create_params.py | {
"start": 1021,
"end": 1394
} | class ____(TypedDict, total=False):
anchor: Required[Literal["created_at"]]
"""Anchor timestamp after which the expiration policy applies.
Supported anchors: `created_at`.
"""
seconds: Required[int]
"""The number of seconds after the anchor time that the file will expire.
Must be between 3600 (1 hour) and 2592000 (30 days).
"""
| ExpiresAfter |
python | mahmoud__glom | glom/grouping.py | {
"start": 7948,
"end": 9163
} | class ____:
"""
Limits the number of values passed to sub-accumulator
>>> glom([1, 2, 3], Group(Limit(2)))
[1, 2]
To override the default untransformed list output, set the subspec kwarg:
>>> glom(range(10), Group(Limit(3, subspec={(lambda x: x % 2): [T]})))
{0: [0, 2], 1: [1]}
You can even nest Limits in other ``Group`` specs:
>>> glom(range(10), Group(Limit(5, {(lambda x: x % 2): Limit(2)})))
{0: [0, 2], 1: [1, 3]}
"""
__slots__ = ('n', 'subspec')
def __init__(self, n, subspec=_MISSING):
if subspec is _MISSING:
subspec = [T]
self.n = n
self.subspec = subspec
def glomit(self, target, scope):
if scope[MODE] is not GROUP:
raise BadSpec("Limit() only valid in Group mode")
tree = scope[ACC_TREE] # current accumulator support structure
if self not in tree:
tree[self] = [0, {}]
scope[ACC_TREE] = tree[self][1]
tree[self][0] += 1
if tree[self][0] > self.n:
return STOP
return scope[glom](target, self.subspec, scope)
def __repr__(self):
return f'{self.__class__.__name__}({self.n!r}, {self.subspec!r})'
| Limit |
python | ray-project__ray | python/ray/dag/tests/experimental/test_compiled_graphs.py | {
"start": 16401,
"end": 29749
} | class ____:
def test_multi_args_basic(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i[0])
branch2 = a2.inc.bind(i[1])
dag = c.collect_two.bind(branch2, branch1)
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(2, 3)
result = ray.get(ref)
assert result == [3, 2]
def test_multi_args_single_actor(self, ray_start_regular):
c = Collector.remote()
with InputNode() as i:
dag = c.collect_three.bind(i[0], i[1], i[0])
compiled_dag = dag.experimental_compile()
expected = [[0, 1, 0], [0, 1, 0, 1, 2, 1], [0, 1, 0, 1, 2, 1, 2, 3, 2]]
for i in range(3):
ref = compiled_dag.execute(i, i + 1)
result = ray.get(ref)
assert result == expected[i]
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) must be called with 2 "
"positional args, got 1",
):
compiled_dag.execute((2, 3))
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) must be called with 2 "
"positional args, got 0",
):
compiled_dag.execute()
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) must be called with 2 "
"positional args, got 0",
):
compiled_dag.execute(args=(2, 3))
def test_multi_args_branch(self, ray_start_regular):
a = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch = a.inc.bind(i[0])
dag = c.collect_two.bind(branch, i[1])
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(2, 3)
result = ray.get(ref)
assert result == [2, 3]
def test_kwargs_basic(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i.x)
branch2 = a2.inc.bind(i.y)
dag = c.collect_two.bind(branch2, branch1)
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(x=2, y=3)
result = ray.get(ref)
assert result == [3, 2]
def test_kwargs_single_actor(self, ray_start_regular):
c = Collector.remote()
with InputNode() as i:
dag = c.collect_two.bind(i.y, i.x)
compiled_dag = dag.experimental_compile()
for i in range(3):
ref = compiled_dag.execute(x=2, y=3)
result = ray.get(ref)
assert result == [3, 2] * (i + 1)
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) must be called with kwarg",
):
compiled_dag.execute()
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) "
"must be called with kwarg `x`",
):
compiled_dag.execute(y=3)
with pytest.raises(
ValueError,
match=r"dag.execute\(\) or dag.execute_async\(\) "
"must be called with kwarg `y`",
):
compiled_dag.execute(x=3)
def test_kwargs_branch(self, ray_start_regular):
a = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch = a.inc.bind(i.x)
dag = c.collect_two.bind(i.y, branch)
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(x=2, y=3)
result = ray.get(ref)
assert result == [3, 2]
def test_multi_args_and_kwargs(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i[0])
branch2 = a2.inc.bind(i.y)
dag = c.collect_three.bind(branch2, i.z, branch1)
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(2, y=3, z=4)
result = ray.get(ref)
assert result == [3, 4, 2]
def test_multi_args_and_torch_type(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
i.with_tensor_transport()
branch1 = a1.echo.bind(i[0])
branch1.with_tensor_transport()
branch2 = a2.echo.bind(i[1])
branch2.with_tensor_transport()
dag = c.collect_two.bind(branch2, branch1)
dag.with_tensor_transport()
compiled_dag = dag.experimental_compile()
cpu_tensors = [torch.tensor([0, 0, 0, 0, 0]), torch.tensor([1, 1, 1, 1, 1])]
ref = compiled_dag.execute(cpu_tensors[0], cpu_tensors[1])
tensors = ray.get(ref)
assert len(tensors) == len(cpu_tensors)
assert torch.equal(tensors[0], cpu_tensors[1])
assert torch.equal(tensors[1], cpu_tensors[0])
def test_mix_entire_input_and_args(self, ray_start_regular):
"""
It is not allowed to consume both the entire input and a partial
input (i.e., an InputAttributeNode) as arguments.
"""
a = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch = a.inc_two.bind(i[0], i[1])
dag = c.collect_two.bind(i, branch)
with pytest.raises(
ValueError,
match=re.escape(
"All tasks must either use InputNode() directly, "
"or they must index to specific args or kwargs."
),
):
dag.experimental_compile()
def test_multi_args_same_actor(self, ray_start_regular):
a1 = Actor.remote(0)
with InputNode() as i:
branch1 = a1.inc.bind(i[0])
branch2 = a1.inc.bind(i[1])
dag = MultiOutputNode([branch1, branch2])
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(1, 2)
result = ray.get(ref)
assert result == [1, 3]
def test_multi_args_basic_asyncio(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i[0])
branch2 = a2.inc.bind(i[1])
dag = c.collect_two.bind(branch2, branch1)
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut = await compiled_dag.execute_async(2, 3)
result = await fut
assert result == [3, 2]
loop = get_or_create_event_loop()
loop.run_until_complete(asyncio.gather(main()))
def test_multi_args_branch_asyncio(self, ray_start_regular):
a = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch = a.inc.bind(i[0])
dag = c.collect_two.bind(branch, i[1])
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut = await compiled_dag.execute_async(2, 3)
result = await fut
assert result == [2, 3]
loop = get_or_create_event_loop()
loop.run_until_complete(asyncio.gather(main()))
def test_kwargs_basic_asyncio(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i.x)
branch2 = a2.inc.bind(i.y)
dag = c.collect_two.bind(branch2, branch1)
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut = await compiled_dag.execute_async(x=2, y=3)
result = await fut
assert result == [3, 2]
loop = get_or_create_event_loop()
loop.run_until_complete(asyncio.gather(main()))
def test_kwargs_branch_asyncio(self, ray_start_regular):
a = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch = a.inc.bind(i.x)
dag = c.collect_two.bind(i.y, branch)
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut = await compiled_dag.execute_async(x=2, y=3)
result = await fut
assert result == [3, 2]
loop = get_or_create_event_loop()
loop.run_until_complete(asyncio.gather(main()))
def test_multi_args_and_kwargs_asyncio(self, ray_start_regular):
a1 = Actor.remote(0)
a2 = Actor.remote(0)
c = Collector.remote()
with InputNode() as i:
branch1 = a1.inc.bind(i[0])
branch2 = a2.inc.bind(i.y)
dag = c.collect_three.bind(branch2, i.z, branch1)
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut = await compiled_dag.execute_async(2, y=3, z=4)
result = await fut
assert result == [3, 4, 2]
loop = get_or_create_event_loop()
loop.run_until_complete(asyncio.gather(main()))
@pytest.mark.parametrize("num_actors", [1, 4])
@pytest.mark.parametrize("single_fetch", [True, False])
def test_scatter_gather_dag(ray_start_regular, num_actors, single_fetch):
actors = [Actor.remote(0) for _ in range(num_actors)]
with InputNode() as i:
out = [a.inc.bind(i) for a in actors]
dag = MultiOutputNode(out)
compiled_dag = dag.experimental_compile()
for i in range(3):
refs = compiled_dag.execute(1)
if single_fetch:
assert isinstance(refs, list)
for j in range(num_actors):
result = ray.get(refs[j])
assert result == i + 1
else:
results = ray.get(refs)
assert results == [i + 1] * num_actors
@pytest.mark.parametrize("num_actors", [1, 4])
def test_chain_dag(ray_start_regular, num_actors):
actors = [Actor.remote(i) for i in range(num_actors)]
with InputNode() as inp:
dag = inp
for a in actors:
dag = a.append_to.bind(dag)
compiled_dag = dag.experimental_compile()
for i in range(3):
ref = compiled_dag.execute([])
result = ray.get(ref)
assert result == list(range(num_actors))
def test_compiled_dag_ref_del(ray_start_regular):
a = Actor.remote(0)
with InputNode() as inp:
dag = a.inc.bind(inp)
compiled_dag = dag.experimental_compile()
# Test that when ref is deleted or goes out of scope, the corresponding
# execution result is retrieved and immediately discarded. This is confirmed
# when future execute() methods do not block.
for _ in range(10):
ref = compiled_dag.execute(1)
del ref
def test_asyncio(ray_start_regular):
a = Actor.remote(0)
with InputNode() as i:
dag = a.echo.bind(i)
loop = get_or_create_event_loop()
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main(i):
# Use numpy so that the return value will be zero-copy deserialized. If
# there is a memory leak in the DAG backend, then only the first task
# will succeed.
val = np.ones(100) * i
fut = await compiled_dag.execute_async(val)
result = await fut
assert (result == val).all()
loop.run_until_complete(asyncio.gather(*[main(i) for i in range(10)]))
def test_asyncio_out_of_order_get(ray_start_regular):
c = Collector.remote()
with InputNode() as i:
dag = c.collect.bind(i)
loop = get_or_create_event_loop()
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main():
fut_a = await compiled_dag.execute_async("a")
fut_b = await compiled_dag.execute_async("b")
result_b = await fut_b
assert result_b == ["a", "b"]
result_a = await fut_a
assert result_a == ["a"]
loop.run_until_complete(main())
@pytest.mark.parametrize("gather_futs", [True, False])
def test_asyncio_multi_output(ray_start_regular, gather_futs):
a = Actor.remote(0)
b = Actor.remote(0)
with InputNode() as i:
dag = MultiOutputNode([a.echo.bind(i), b.echo.bind(i)])
loop = get_or_create_event_loop()
compiled_dag = dag.experimental_compile(enable_asyncio=True)
async def main(i):
# Use numpy so that the return value will be zero-copy deserialized. If
# there is a memory leak in the DAG backend, then only the first task
# will succeed.
val = np.ones(100) * i
futs = await compiled_dag.execute_async(val)
assert len(futs) == 2
if gather_futs:
results = await asyncio.gather(*futs)
assert len(results) == 2
for result in results:
assert (result == val).all()
else:
for fut in futs:
result = await fut
assert (result == val).all()
loop.run_until_complete(asyncio.gather(*[main(i) for i in range(10)]))
| TestMultiArgs |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/abc.py | {
"start": 22,
"end": 905
} | class ____(ABC):
"""An abstract base class for Rich renderables.
Note that there is no need to extend this class, the intended use is to check if an
object supports the Rich renderable protocol. For example::
if isinstance(my_object, RichRenderable):
console.print(my_object)
"""
@classmethod
def __subclasshook__(cls, other: type) -> bool:
"""Check if this class supports the rich render protocol."""
return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich.text import Text
t = Text()
print(isinstance(Text, RichRenderable))
print(isinstance(t, RichRenderable))
class Foo:
pass
f = Foo()
print(isinstance(f, RichRenderable))
print(isinstance("", RichRenderable))
| RichRenderable |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchExhaustion1.py | {
"start": 1173,
"end": 1383
} | class ____:
def method1(self) -> str:
match self:
case ClassA():
return ""
def func7() -> int:
match [10]:
case [*values]:
return values[0]
| ClassA |
python | walkccc__LeetCode | solutions/1272. Remove Interval/1272.py | {
"start": 0,
"end": 476
} | class ____:
def removeInterval(self, intervals: list[list[int]],
toBeRemoved: list[int]) -> list[list[int]]:
ans = []
for a, b in intervals:
if a >= toBeRemoved[1] or b <= toBeRemoved[0]:
ans.append([a, b])
else: # a < toBeRemoved[1] and b > toBeRemoved[0]
if a < toBeRemoved[0]:
ans.append([a, toBeRemoved[0]])
if b > toBeRemoved[1]:
ans.append([toBeRemoved[1], b])
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/smolvlm/modeling_smolvlm.py | {
"start": 12023,
"end": 14604
} | class ____(SmolVLMPreTrainedModel):
config: SmolVLMVisionConfig
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_can_record_outputs = {
"hidden_states": SmolVLMEncoderLayer,
"attentions": SmolVLMVisionAttention,
}
def __init__(self, config: SmolVLMVisionConfig):
super().__init__(config)
embed_dim = config.hidden_size
self.embeddings = SmolVLMVisionEmbeddings(config)
self.encoder = SmolVLMEncoder(config)
self.patch_size = config.patch_size
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings = value
@check_model_inputs(tie_last_hidden_states=False)
def forward(
self,
pixel_values,
patch_attention_mask: Optional[torch.BoolTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
batch_size = pixel_values.size(0)
if patch_attention_mask is None:
patch_size = self.patch_size
patch_attention_mask = torch.ones(
(
batch_size,
pixel_values.size(2) // patch_size,
pixel_values.size(3) // patch_size,
)
)
patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
patch_attention_mask = patch_attention_mask.view(batch_size, -1)
# Create the correct attention mask based on the attention implementation
patch_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=patch_attention_mask,
)
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
attention_mask=patch_attention_mask,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
return BaseModelOutput(
last_hidden_state=last_hidden_state,
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for SmolVLM model's outputs that may also contain a past key/values (to speed up sequential decoding).
"""
)
| SmolVLMVisionTransformer |
python | Lightning-AI__lightning | tests/tests_fabric/helpers/datasets.py | {
"start": 132,
"end": 429
} | class ____(Dataset):
def __init__(self, size: int, length: int) -> None:
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index: int) -> Tensor:
return self.data[index]
def __len__(self) -> int:
return self.len
| RandomDataset |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py | {
"start": 29972,
"end": 34072
} | class ____(CloudSQLBaseOperator):
"""
Update resource containing information about a database using patch semantics.
See: https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLPatchInstanceDatabaseOperator`
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database to be updated in the instance.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/patch#request-body
:param project_id: Optional, Google Cloud Project ID.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_db_patch_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"database",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_db_patch_template_fields]
ui_color = "#ECF4D9"
operator_extra_links = (CloudSQLInstanceDatabaseLink(),)
def __init__(
self,
*,
instance: str,
database: str,
body: dict,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.database = database
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
if not self.database:
raise AirflowException("The required parameter 'database' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_DATABASE_PATCH_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
self._validate_body_fields()
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_db_exists(self.database, hook):
raise AirflowException(
f"Cloud SQL instance with ID {self.instance} does not contain database '{self.database}'. "
"Please specify another database to patch."
)
CloudSQLInstanceDatabaseLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return hook.patch_database(
project_id=self.project_id, instance=self.instance, database=self.database, body=self.body
)
| CloudSQLPatchInstanceDatabaseOperator |
python | huggingface__transformers | tests/models/olmo/test_modeling_olmo.py | {
"start": 1500,
"end": 5835
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="silu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return OlmoConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = OlmoModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| OlmoModelTester |
python | getsentry__sentry | src/sentry/utils/marketo_client.py | {
"start": 151,
"end": 220
} | class ____(TypedDict):
errors: list[ErrorDict]
| MarketoErrorResponse |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-modelscope/llama_index/embeddings/modelscope/base.py | {
"start": 725,
"end": 3753
} | class ____(BaseEmbedding):
"""ModelScope Embedding."""
model_name: str = Field(
default=DEFAULT_MODELSCOPE_MODEL,
description=(
"The model name to use from ModelScope. "
"Unused if `model` is passed in directly."
),
)
model_revision: str = Field(
default=DEFAULT_MODELSCOPE_MODEL_REVISION,
description=(
"The model revision to use from ModelScope. "
"Unused if `model` is passed in directly."
),
)
task_name: str = Field(
default=DEFAULT_MODELSCOPE_TASK,
description=(
"The ModelScope task type, for embedding use default sentence_embedding."
),
)
sequence_length: int = Field(
default=128,
description="The maximum length of the input sequence. Defaults to 128.",
)
model_kwargs: dict = Field(
default_factory=dict,
description="The kwargs to pass to the model during initialization.",
)
generate_kwargs: dict = Field(
default_factory=dict,
description="The kwargs to pass to the model during generation.",
)
_pipeline: Any = PrivateAttr()
def __init__(
self,
model_name: str = DEFAULT_MODELSCOPE_MODEL,
model_revision: str = DEFAULT_MODELSCOPE_MODEL_REVISION,
task_name: str = DEFAULT_MODELSCOPE_TASK,
sequence_length: int = DEFAULT_MODELSCOPE_SEQUENCE_LENGTH,
model: Optional[Any] = None,
model_kwargs: Optional[dict] = None,
generate_kwargs: Optional[dict] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
) -> None:
"""Initialize params."""
model_kwargs = model_kwargs or {}
if model:
pipeline = model
else:
pipeline = pipeline_builder(
task=task_name,
model=model_name,
model_revision=model_revision,
sequence_length=sequence_length,
)
super().__init__(
model_kwargs=model_kwargs or {},
generate_kwargs=generate_kwargs or {},
pydantic_program_mode=pydantic_program_mode,
)
self._pipeline = pipeline
def _get_query_embedding(self, query: str) -> Embedding:
"""Get the embedding for a query."""
return output_to_embedding(self._pipeline(sentence_to_input(query)))
async def _aget_query_embedding(self, query: str) -> Embedding:
"""Get the embedding for a query."""
return output_to_embedding(self._pipeline(sentence_to_input(query)))
def _get_text_embedding(self, text: str) -> Embedding:
"""Get the embedding for a text."""
return output_to_embedding(self._pipeline(sentence_to_input(text)))
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""Get the embeddings for a list of texts."""
return outputs_to_embeddings(self._pipeline(sentences_to_input(texts)))
| ModelScopeEmbedding |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 120225,
"end": 120411
} | class ____(Numeric[_N]):
"""The SQL NUMERIC type.
.. seealso::
:class:`_types.Numeric` - documentation for the base type.
"""
__visit_name__ = "NUMERIC"
| NUMERIC |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 24653,
"end": 24794
} | class ____(models.Model):
name = models.CharField(max_length=15, unique=True)
history = HistoricalRecords()
| TestOrganizationWithHistory |
python | PrefectHQ__prefect | tests/server/models/test_saved_searches.py | {
"start": 2174,
"end": 2918
} | class ____:
async def test_read_saved_search_by_id(self, session):
saved_search = await models.saved_searches.create_saved_search(
session=session,
saved_search=schemas.core.SavedSearch(
name="My SavedSearch",
),
)
read_saved_search = await models.saved_searches.read_saved_search(
session=session, saved_search_id=saved_search.id
)
assert read_saved_search.name == saved_search.name
async def test_read_saved_search_by_id_returns_none_if_does_not_exist(
self, session
):
assert not await models.saved_searches.read_saved_search(
session=session, saved_search_id=uuid4()
)
| TestReadSavedSearch |
python | ray-project__ray | python/ray/serve/_private/benchmarks/streaming/streaming_handle_throughput.py | {
"start": 175,
"end": 240
} | class ____(Endpoint):
pass
@serve.deployment
| EndpointDeployment |
python | django__django | django/views/generic/edit.py | {
"start": 6280,
"end": 6479
} | class ____(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object, with a response rendered by a template.
"""
template_name_suffix = "_form"
| CreateView |
python | vyperlang__vyper | vyper/semantics/types/primitives.py | {
"start": 3536,
"end": 8120
} | class ____(_PrimT):
_is_signed: bool
_bits: int
_invalid_ops: tuple
# the type this can assume in the AST
ast_type: type
@property
def ast_bounds(self):
raise NotImplementedError("should be overridden!")
# get the integer bounds on IR values of this type.
# note the distinction for decimals: ast_bounds will return a Decimal,
# int_bounds will return the fully expanded int range.
@cached_property
def int_bounds(self) -> Tuple[int, int]:
return int_bounds(signed=self.is_signed, bits=self.bits)
@cached_property
def bits(self) -> int:
return self._bits
@cached_property
def is_signed(self) -> bool:
return self._is_signed
def validate_literal(self, node: vy_ast.Constant) -> None:
super().validate_literal(node)
lower, upper = self.ast_bounds
if node.value < lower:
raise OverflowException(f"Value is below lower bound for given type ({lower})", node)
if node.value > upper:
raise OverflowException(f"Value exceeds upper bound for given type ({upper})", node)
def validate_numeric_op(
self, node: Union[vy_ast.UnaryOp, vy_ast.BinOp, vy_ast.AugAssign]
) -> None:
if isinstance(node.op, self._invalid_ops):
self._raise_invalid_op(node)
def _get_lr():
if isinstance(node, vy_ast.BinOp):
return node.left.reduced(), node.right.reduced()
elif isinstance(node, vy_ast.AugAssign):
return node.target.reduced(), node.value.reduced()
else:
raise CompilerPanic(f"Unexpected node type for numeric op: {type(node).__name__}")
if isinstance(node.op, (vy_ast.LShift, vy_ast.RShift)):
if self._bits != 256:
raise InvalidOperation(
f"Cannot perform {node.op.description} on non-int256/uint256 type!", node
)
if isinstance(node.op, vy_ast.Pow):
left, right = _get_lr()
value_bits = self._bits - (1 if self._is_signed else 0)
# TODO double check: this code seems duplicated with constant eval
# constant folding ensures one of `(left, right)` is never a literal
if isinstance(left, vy_ast.Int):
if left.value >= 2**value_bits:
raise OverflowException(
f"Base is too large for {self}, calculation will always overflow", left
)
elif left.value < -(2**value_bits):
raise OverflowException(
f"Base is too small for {self}, calculation will always underflow", left
)
elif isinstance(right, vy_ast.Int):
if right.value < 0:
raise InvalidOperation("Cannot calculate a negative power", right)
if right.value > value_bits:
raise OverflowException(
"Power is too large, calculation will always overflow", right
)
else:
msg = (
"Cannot apply an overflow check on exponentiation when both "
"the base and power are unknown at compile-time."
)
if not self._is_signed:
msg = (
f"{msg} To perform this operation without an overflow check, use "
f"`pow_mod256({left.node_source_code}, {right.node_source_code})`"
)
raise InvalidOperation(msg, node)
def validate_comparator(self, node: vy_ast.Compare) -> None:
# all comparators are valid on numeric types
return
def _add_div_hint(node, e):
if isinstance(node.op, vy_ast.Div):
suggested = vy_ast.FloorDiv._pretty
elif isinstance(node.op, vy_ast.FloorDiv):
suggested = vy_ast.Div._pretty
else:
return e
def _get_source(node):
source = node.node_source_code
if isinstance(node, vy_ast.BinOp):
# parenthesize, to preserve precedence
return f"({source})"
return source
if isinstance(node, vy_ast.BinOp):
e._hint = f"did you mean `{_get_source(node.left)} "
e._hint += f"{suggested} {_get_source(node.right)}`?"
elif isinstance(node, vy_ast.AugAssign):
e._hint = f"did you mean `{node.target.node_source_code} "
e._hint += f"{suggested}= {node.value.node_source_code}`?"
return e
| NumericT |
python | falconry__falcon | examples/things_advanced_asgi.py | {
"start": 2803,
"end": 4693
} | class ____:
# NOTE: Normally you would simply use req.get_media() and resp.media for
# this particular use case; this example serves only to illustrate
# what is possible.
async def process_request(self, req, resp):
# NOTE: Test explicitly for 0, since this property could be None in
# the case that the Content-Length header is missing (in which case we
# can't know if there is a body without actually attempting to read
# it from the request stream.)
if req.content_length == 0:
# Nothing to do
return
body = await req.stream.read()
if not body:
raise falcon.HTTPBadRequest(
title='Empty request body',
description='A valid JSON document is required.',
)
try:
req.context.doc = json.loads(body.decode('utf-8'))
except (ValueError, UnicodeDecodeError):
description = (
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.'
)
raise falcon.HTTPBadRequest(title='Malformed JSON', description=description)
async def process_response(self, req, resp, resource, req_succeeded):
if not hasattr(resp.context, 'result'):
return
resp.text = json.dumps(resp.context.result)
def max_body(limit):
async def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = (
'The size of the request is too large. The body must not '
'exceed ' + str(limit) + ' bytes in length.'
)
raise falcon.HTTPContentTooLarge(
title='Request body is too large', description=msg
)
return hook
| JSONTranslator |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/endpoints/pre_configured.py | {
"start": 1069,
"end": 5449
} | class ____(
AuthorizationEndpoint,
IntrospectEndpoint,
TokenEndpoint,
ResourceEndpoint,
RevocationEndpoint,
UserInfoEndpoint,
):
"""
An all-in-one endpoint featuring all four major grant types
and extension grants.
"""
def __init__(
self,
request_validator,
token_expires_in=None,
token_generator=None,
refresh_token_generator=None,
*args,
**kwargs,
):
"""Construct a new all-grants-in-one server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.auth_grant = OAuth2AuthorizationCodeGrant(request_validator)
self.implicit_grant = OAuth2ImplicitGrant(request_validator)
self.password_grant = ResourceOwnerPasswordCredentialsGrant(request_validator)
self.credentials_grant = ClientCredentialsGrant(request_validator)
self.refresh_grant = RefreshTokenGrant(request_validator)
self.openid_connect_auth = AuthorizationCodeGrant(request_validator)
self.openid_connect_implicit = ImplicitGrant(request_validator)
self.openid_connect_hybrid = HybridGrant(request_validator)
self.device_code_grant = DeviceCodeGrant(request_validator, **kwargs)
self.bearer = BearerToken(
request_validator, token_generator, token_expires_in, refresh_token_generator
)
self.jwt = JWTToken(
request_validator, token_generator, token_expires_in, refresh_token_generator
)
self.auth_grant_choice = AuthorizationCodeGrantDispatcher(
default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth
)
self.implicit_grant_choice = ImplicitTokenGrantDispatcher(
default_grant=self.implicit_grant, oidc_grant=self.openid_connect_implicit
)
# See http://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#Combinations for valid combinations
# internally our AuthorizationEndpoint will ensure they can appear in any order for any valid combination
AuthorizationEndpoint.__init__(
self,
default_response_type="code",
response_types={
"code": self.auth_grant_choice,
"token": self.implicit_grant_choice,
"id_token": self.openid_connect_implicit,
"id_token token": self.openid_connect_implicit,
"code token": self.openid_connect_hybrid,
"code id_token": self.openid_connect_hybrid,
"code id_token token": self.openid_connect_hybrid,
"none": self.auth_grant,
},
default_token_type=self.bearer,
)
self.token_grant_choice = AuthorizationTokenGrantDispatcher(
request_validator, default_grant=self.auth_grant, oidc_grant=self.openid_connect_auth
)
TokenEndpoint.__init__(
self,
default_grant_type="authorization_code",
grant_types={
"authorization_code": self.token_grant_choice,
"password": self.password_grant,
"client_credentials": self.credentials_grant,
"refresh_token": self.refresh_grant,
"urn:ietf:params:oauth:grant-type:device_code": self.device_code_grant,
},
default_token_type=self.bearer,
)
ResourceEndpoint.__init__(
self, default_token="Bearer", token_types={"Bearer": self.bearer, "JWT": self.jwt}
)
RevocationEndpoint.__init__(self, request_validator)
IntrospectEndpoint.__init__(self, request_validator)
UserInfoEndpoint.__init__(self, request_validator)
| Server |
python | sqlalchemy__sqlalchemy | test/sql/test_selectable.py | {
"start": 94730,
"end": 97270
} | class ____(fixtures.TestBase, AssertsExecutionResults):
def test_table(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.is_derived_from(t1)
assert not t2.is_derived_from(t1)
def test_alias(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.alias().is_derived_from(t1)
assert not t2.alias().is_derived_from(t1)
assert not t1.is_derived_from(t1.alias())
assert not t1.is_derived_from(t2.alias())
def test_select(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
assert t1.select().is_derived_from(t1)
assert not t2.select().is_derived_from(t1)
assert select(t1, t2).is_derived_from(t1)
assert t1.select().alias("foo").is_derived_from(t1)
assert select(t1, t2).alias("foo").is_derived_from(t1)
assert not t2.select().alias("foo").is_derived_from(t1)
def test_join(self):
meta = MetaData()
t1 = Table(
"t1",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t2 = Table(
"t2",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
t3 = Table(
"t3",
meta,
Column("c1", Integer, primary_key=True),
Column("c2", String(30)),
)
j1 = t1.join(t2, t1.c.c1 == t2.c.c1)
assert j1.is_derived_from(j1)
assert j1.is_derived_from(t1)
assert j1._annotate({"foo": "bar"}).is_derived_from(j1)
assert not j1.is_derived_from(t3)
| DerivedTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 533427,
"end": 534176
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateAttributionInvitation"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "owner", "source", "target")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
owner = sgqlc.types.Field("Organization", graphql_name="owner")
"""The owner scoping the reattributable data."""
source = sgqlc.types.Field("Claimable", graphql_name="source")
"""The account owning the data to reattribute."""
target = sgqlc.types.Field("Claimable", graphql_name="target")
"""The account which may claim the data."""
| CreateAttributionInvitationPayload |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_autofit04.py | {
"start": 315,
"end": 945
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("autofit04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "Hello")
worksheet.write(0, 1, "World")
worksheet.write(0, 2, 123)
worksheet.write(0, 3, 1234567)
worksheet.autofit()
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | walkccc__LeetCode | solutions/1639. Number of Ways to Form a Target String Given a Dictionary/1639-2.py | {
"start": 0,
"end": 951
} | class ____:
def numWays(self, words: list[str], target: str) -> int:
MOD = 1_000_000_007
wordLength = len(words[0])
# dp[i][j] := the number of ways to form the first i characters of the
# `target` using the j first characters in each word
dp = [[0] * (wordLength + 1) for _ in range(len(target) + 1)]
# counts[j] := the count map of words[i][j], where 0 <= i < |words|
counts = [collections.Counter() for _ in range(wordLength)]
for i in range(wordLength):
for word in words:
counts[i][word[i]] += 1
dp[0][0] = 1
for i in range(len(target) + 1):
for j in range(wordLength):
if i < len(target):
# Pick the character target[i] from word[j].
dp[i + 1][j + 1] = dp[i][j] * counts[j][target[i]]
dp[i + 1][j + 1] %= MOD
# Skip the word[j].
dp[i][j + 1] += dp[i][j]
dp[i][j + 1] %= MOD
return dp[len(target)][wordLength]
| Solution |
python | plotly__plotly.py | plotly/graph_objs/scatterpolar/marker/colorbar/_title.py | {
"start": 233,
"end": 4056
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar.marker.colorbar"
_path_str = "scatterpolar.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatterpolar.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolar.m
arker.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py | {
"start": 12155,
"end": 15915
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2_5_VLModel`]. It is used to instantiate a
Qwen2-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen2_5_VLTextConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen2_5_VLVisionConfig`):
The config object or dictionary of the vision backbone.
image_token_id (`int`, *optional*, defaults to 151655):
The image token index to encode the image prompt.
video_token_id (`int`, *optional*, defaults to 151656):
The video token index to encode the image prompt.
vision_start_token_id (`int`, *optional*, defaults to 151652):
The token index to denote start of vision input.
vision_end_token_id (`int`, *optional*, defaults to 151653):
The token index to denote end of vision input.
```python
>>> from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLConfig
>>> # Initializing a Qwen2_5_VL style configuration
>>> configuration = Qwen2_5_VLConfig()
>>> # Initializing a model from the Qwen2-VL-7B style configuration
>>> model = Qwen2_5_VLForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2_5_vl"
sub_configs = {"vision_config": Qwen2_5_VLVisionConfig, "text_config": Qwen2_5_VLTextConfig}
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
text_config=None,
vision_config=None,
image_token_id=151655,
video_token_id=151656,
vision_start_token_id=151652,
vision_end_token_id=151653,
**kwargs,
):
if isinstance(vision_config, dict):
self.vision_config = self.sub_configs["vision_config"](**vision_config)
elif vision_config is None:
self.vision_config = self.sub_configs["vision_config"]()
if isinstance(text_config, dict):
self.text_config = self.sub_configs["text_config"](**text_config)
elif text_config is None:
# Hub configs are saved as flat dicts so we pop some of kwargs to init `TextConfig`
text_params = inspect.signature(self.sub_configs["text_config"].__init__).parameters.keys()
text_params = list(text_params) + ["rope_scaling", "rope_theta"]
text_config = {key: kwargs.pop(key) for key in text_params if key in kwargs}
text_config["dtype"] = kwargs.get("torch_dtype", kwargs.get("dtype")) # don't pop the dtype
self.text_config = self.sub_configs["text_config"](**text_config)
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.vision_start_token_id = vision_start_token_id
self.vision_end_token_id = vision_end_token_id
# FIXME: arthur/cyril - tying has to be used from the text config
kwargs["tie_word_embeddings"] = self.text_config.tie_word_embeddings
super().__init__(**kwargs)
__all__ = ["Qwen2_5_VLConfig", "Qwen2_5_VLTextConfig"]
| Qwen2_5_VLConfig |
python | ray-project__ray | python/ray/_private/runtime_env/_clonevirtualenv.py | {
"start": 365,
"end": 10970
} | class ____(Exception):
pass
def _dirmatch(path, matchwith):
"""Check if path is within matchwith's tree.
>>> _dirmatch('/home/foo/bar', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar/', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar2', '/home/foo/bar')
False
>>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')
False
"""
matchlen = len(matchwith)
if path.startswith(matchwith) and path[matchlen : matchlen + 1] in [os.sep, ""]:
return True
return False
def _virtualenv_sys(venv_path):
"""obtain version and path info from a virtualenv."""
executable = os.path.join(venv_path, env_bin_dir, "python")
if _WIN32:
env = os.environ.copy()
else:
env = {}
# Must use "executable" as the first argument rather than as the
# keyword argument "executable" to get correct value from sys.path
p = subprocess.Popen(
[
executable,
"-c",
"import sys;"
'print ("%d.%d" % (sys.version_info.major, sys.version_info.minor));'
'print ("\\n".join(sys.path));',
],
env=env,
stdout=subprocess.PIPE,
)
stdout, err = p.communicate()
assert not p.returncode and stdout
lines = stdout.decode("utf-8").splitlines()
return lines[0], list(filter(bool, lines[1:]))
def clone_virtualenv(src_dir, dst_dir):
if not os.path.exists(src_dir):
raise UserError("src dir %r does not exist" % src_dir)
if os.path.exists(dst_dir):
raise UserError("dest dir %r exists" % dst_dir)
# sys_path = _virtualenv_syspath(src_dir)
logger.info("cloning virtualenv '%s' => '%s'..." % (src_dir, dst_dir))
shutil.copytree(
src_dir, dst_dir, symlinks=True, ignore=shutil.ignore_patterns("*.pyc")
)
version, sys_path = _virtualenv_sys(dst_dir)
logger.info("fixing scripts in bin...")
fixup_scripts(src_dir, dst_dir, version)
has_old = lambda s: any(i for i in s if _dirmatch(i, src_dir)) # noqa: E731
if has_old(sys_path):
# only need to fix stuff in sys.path if we have old
# paths in the sys.path of new python env. right?
logger.info("fixing paths in sys.path...")
fixup_syspath_items(sys_path, src_dir, dst_dir)
v_sys = _virtualenv_sys(dst_dir)
remaining = has_old(v_sys[1])
assert not remaining, v_sys
fix_symlink_if_necessary(src_dir, dst_dir)
def fix_symlink_if_necessary(src_dir, dst_dir):
# sometimes the source virtual environment has symlinks that point to itself
# one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib
# this function makes sure
# $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib
# usually this goes unnoticed unless one tries to upgrade a package though pip,
# so this bug is hard to find.
logger.info("scanning for internal symlinks that point to the original virtual env")
for dirpath, dirnames, filenames in os.walk(dst_dir):
for a_file in itertools.chain(filenames, dirnames):
full_file_path = os.path.join(dirpath, a_file)
if os.path.islink(full_file_path):
target = os.path.realpath(full_file_path)
if target.startswith(src_dir):
new_target = target.replace(src_dir, dst_dir)
logger.debug("fixing symlink in %s" % (full_file_path,))
os.remove(full_file_path)
os.symlink(new_target, full_file_path)
def fixup_scripts(old_dir, new_dir, version, rewrite_env_python=False):
bin_dir = os.path.join(new_dir, env_bin_dir)
root, dirs, files = next(os.walk(bin_dir))
pybinre = re.compile(r"pythonw?([0-9]+(\.[0-9]+(\.[0-9]+)?)?)?$")
for file_ in files:
filename = os.path.join(root, file_)
if file_ in ["python", "python%s" % version, "activate_this.py"]:
continue
elif file_.startswith("python") and pybinre.match(file_):
# ignore other possible python binaries
continue
elif file_.endswith(".pyc"):
# ignore compiled files
continue
elif file_ == "activate" or file_.startswith("activate."):
fixup_activate(os.path.join(root, file_), old_dir, new_dir)
elif os.path.islink(filename):
fixup_link(filename, old_dir, new_dir)
elif os.path.isfile(filename):
fixup_script_(
root,
file_,
old_dir,
new_dir,
version,
rewrite_env_python=rewrite_env_python,
)
def fixup_script_(root, file_, old_dir, new_dir, version, rewrite_env_python=False):
    """Rewrite the shebang line of one script in the cloned env.

    Shebangs pointing at *old_dir*'s interpreter are repointed at
    *new_dir*'s interpreter, preserving any version suffix (e.g. ``3`` or
    ``3.5``).  When *rewrite_env_python* is true, ``#!/usr/bin/env python``
    shebangs are rewritten as well.  Binary files, empty files and files
    without a shebang are left untouched.
    """
    old_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(old_dir))
    new_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(new_dir))
    env_shebang = "#!/usr/bin/env python"

    filename = os.path.join(root, file_)
    with open(filename, "rb") as f:
        if f.read(2) != b"#!":
            # no shebang
            return
        f.seek(0)
        lines = f.readlines()

    if not lines:
        # warn: empty script
        return

    def rewrite_shebang(version=None):
        """Replace line 1 with the new interpreter path (+ version suffix)."""
        logger.debug("fixing %s" % filename)
        shebang = new_shebang
        if version:
            shebang = shebang + version
        shebang = (shebang + "\n").encode("utf-8")
        with open(filename, "wb") as f:
            f.write(shebang)
            f.writelines(lines[1:])

    try:
        bang = lines[0].decode("utf-8").strip()
    except UnicodeDecodeError:
        # binary file
        return

    if not bang.startswith("#!"):
        return
    if bang.startswith(old_shebang):
        # Keep whatever suffix followed the old interpreter path:
        # '' -> bare shebang, '3' / '3.5' -> same suffix on the new path.
        # This collapses the previous elif chain, whose last branch compared
        # the suffix against itself (a tautology) — behavior is unchanged.
        rewrite_shebang(bang[len(old_shebang) :] or None)
    elif rewrite_env_python and bang.startswith(env_shebang):
        if bang == env_shebang:
            rewrite_shebang()
        elif bang[len(env_shebang) :] == version:
            rewrite_shebang(version)
    else:
        # can't do anything
        return
def fixup_activate(filename, old_dir, new_dir):
    """Rewrite an activate script so references to *old_dir* point at *new_dir*."""
    logger.debug("fixing %s" % filename)
    with open(filename, "rb") as fp:
        contents = fp.read().decode("utf-8")
    patched = contents.replace(old_dir, new_dir)
    with open(filename, "wb") as fp:
        fp.write(patched.encode("utf-8"))
def fixup_link(filename, old_dir, new_dir, target=None):
    """Re-point one symlink inside the clone.

    Links into *old_dir* are rewritten to the equivalent location in
    *new_dir* (relative links stay relative); links pointing outside the
    virtualenv are recreated with the absolute path of their target.
    """
    logger.debug("fixing %s" % filename)
    if target is None:
        target = os.readlink(filename)

    # Directory the link lives in, expressed in terms of the source env.
    origdir = os.path.dirname(os.path.abspath(filename)).replace(new_dir, old_dir)
    was_relative = not os.path.isabs(target)
    if was_relative:
        target = os.path.abspath(os.path.join(origdir, target))

    if _dirmatch(target, old_dir):
        if was_relative:
            # keep relative links, but don't keep original in case it
            # traversed up out of, then back into the venv.
            # so, recreate a relative link from absolute.
            target = target[len(origdir) :].lstrip(os.sep)
        else:
            target = target.replace(old_dir, new_dir, 1)
    # else: links outside the venv, replaced with absolute path to target.
    _replace_symlink(filename, target)
def _replace_symlink(filename, newtarget):
    """Swap *filename* for a symlink pointing at *newtarget*.

    The link is first created under a temporary name next to *filename*
    and then renamed over it, so the path is replaced atomically.
    """
    scratch = filename + ".new"
    os.symlink(newtarget, scratch)
    os.rename(scratch, filename)
def fixup_syspath_items(syspath, old_dir, new_dir):
    """Fix .pth and .egg-link files found in each sys.path directory.

    Entries under *old_dir* are translated to their *new_dir* counterpart
    first; entries belonging to neither virtualenv are skipped.
    """
    for entry in syspath:
        if not os.path.isdir(entry):
            continue
        entry = os.path.normcase(os.path.abspath(entry))
        if _dirmatch(entry, old_dir):
            entry = entry.replace(old_dir, new_dir, 1)
            if not os.path.exists(entry):
                continue
        elif not _dirmatch(entry, new_dir):
            continue
        # Only the top level of each directory is scanned.
        top, _dirs, names = next(os.walk(entry))
        for name in names:
            candidate = os.path.join(top, name)
            if candidate.endswith(".pth"):
                fixup_pth_file(candidate, old_dir, new_dir)
            elif candidate.endswith(".egg-link"):
                fixup_egglink_file(candidate, old_dir, new_dir)
def fixup_pth_file(filename, old_dir, new_dir):
    """Rewrite path entries in a .pth file that point into *old_dir*.

    Comment lines and ``import`` lines are left alone.  The file is only
    rewritten when at least one entry actually changed; on rewrite every
    line is stripped and joined with the platform line separator.
    """
    logger.debug("fixup_pth_file %s" % filename)

    with open(filename, "r") as fp:
        entries = fp.readlines()

    changed = False
    for idx, raw in enumerate(entries):
        # The .decode() shim keeps Python 2 byte-strings working.
        text = (raw.decode("utf-8") if hasattr(raw, "decode") else raw).strip()
        if not text or text.startswith("#") or text.startswith("import "):
            continue
        if _dirmatch(text, old_dir):
            entries[idx] = text.replace(old_dir, new_dir, 1)
            changed = True

    if changed:
        with open(filename, "w") as fp:
            payload = os.linesep.join(e.strip() for e in entries) + os.linesep
            fp.write(payload)
def fixup_egglink_file(filename, old_dir, new_dir):
    """Re-point an .egg-link file whose target lives inside *old_dir*."""
    logger.debug("fixing %s" % filename)
    with open(filename, "rb") as fp:
        link = fp.read().decode("utf-8").strip()
    if _dirmatch(link, old_dir):
        link = link.replace(old_dir, new_dir, 1)
    # The file is rewritten unconditionally (stripped, newline-terminated).
    with open(filename, "wb") as fp:
        fp.write((link + "\n").encode("utf-8"))
def main():
    """Command-line entry point: clone an existing virtualenv.

    Usage: virtualenv-clone [-v] /path/to/existing/venv /path/to/cloned/venv
    """
    parser = optparse.OptionParser(
        "usage: %prog [options] /path/to/existing/venv /path/to/cloned/venv"
    )
    # default=0 (not False) so repeated -v counts from a proper integer.
    parser.add_option(
        "-v", action="count", dest="verbose", default=0, help="verbosity"
    )
    options, args = parser.parse_args()

    try:
        old_dir, new_dir = args
    except ValueError:
        print("virtualenv-clone %s" % (__version__,))
        # ValueError fires for too few *or* too many positional arguments,
        # so the message must not claim "not enough".
        parser.error("wrong number of arguments given.")

    # Resolve symlinks so path comparisons inside the clone are reliable.
    old_dir = os.path.realpath(old_dir)
    new_dir = os.path.realpath(new_dir)

    # 0 -> WARNING, -v -> INFO, -vv (or more) -> DEBUG
    loglevel = (logging.WARNING, logging.INFO, logging.DEBUG)[min(2, options.verbose)]
    logging.basicConfig(level=loglevel, format="%(message)s")

    try:
        clone_virtualenv(old_dir, new_dir)
    except UserError as exc:
        parser.error(str(exc))


if __name__ == "__main__":
    main()
| UserError |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 75696,
"end": 77266
} | class ____:
def test_timeframes(self):
assert self.locale._format_timeframe("hours", 2) == "2 ore"
assert self.locale._format_timeframe("months", 2) == "2 luni"
assert self.locale._format_timeframe("days", 2) == "2 zile"
assert self.locale._format_timeframe("years", 2) == "2 ani"
assert self.locale._format_timeframe("hours", 3) == "3 ore"
assert self.locale._format_timeframe("months", 4) == "4 luni"
assert self.locale._format_timeframe("days", 3) == "3 zile"
assert self.locale._format_timeframe("years", 5) == "5 ani"
def test_relative_timeframes(self):
assert self.locale._format_relative("acum", "now", 0) == "acum"
assert self.locale._format_relative("o oră", "hour", 1) == "peste o oră"
assert self.locale._format_relative("o oră", "hour", -1) == "o oră în urmă"
assert self.locale._format_relative("un minut", "minute", 1) == "peste un minut"
assert (
self.locale._format_relative("un minut", "minute", -1) == "un minut în urmă"
)
assert (
self.locale._format_relative("câteva secunde", "seconds", -1)
== "câteva secunde în urmă"
)
assert (
self.locale._format_relative("câteva secunde", "seconds", 1)
== "peste câteva secunde"
)
assert self.locale._format_relative("o zi", "day", -1) == "o zi în urmă"
assert self.locale._format_relative("o zi", "day", 1) == "peste o zi"
@pytest.mark.usefixtures("lang_locale")
| TestRomanianLocale |
python | numba__numba | numba/tests/test_svml.py | {
"start": 6474,
"end": 11118
} | class ____(TestCase):
""" Tests all SVML-generating functions produce desired calls """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
# RE for a generic symbol reference and for each particular SVML function
asm_filter = re.compile('|'.join([r'\$[a-z_]\w+,']+list(svml_funcs)))
@classmethod
def mp_runner(cls, testname, outqueue):
method = getattr(cls, testname)
try:
ok, msg = method()
except Exception:
msg = traceback.format_exc()
ok = False
outqueue.put({'status': ok, 'msg': msg})
@classmethod
def _inject_test(cls, dtype, mode, vlen, flags):
# unsupported combinations
if dtype.startswith('complex') and mode != 'numpy':
return
# TODO: address skipped tests below
skipped = dtype.startswith('int') and vlen == 2
sig = (numba.int64,)
# unit test body template
@staticmethod
def run_template():
fn, contains, avoids = combo_svml_usecase(dtype, mode, vlen,
flags['fastmath'],
flags['name'])
# look for specific patterns in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \
override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]):
# recompile for overridden CPU
try:
jitted_fn = njit(sig, fastmath=flags['fastmath'],
error_model=flags['error_model'],)(fn)
except:
raise Exception("raised while compiling "+fn.__doc__)
asm = jitted_fn.inspect_asm(sig)
missed = [pattern for pattern in contains if not pattern in asm]
found = [pattern for pattern in avoids if pattern in asm]
ok = not missed and not found
detail = '\n'.join(
[line for line in asm.split('\n')
if cls.asm_filter.search(line) and not '"' in line])
msg = (
f"While expecting {missed} and not {found},\n"
f"it contains:\n{detail}\n"
f"when compiling {fn.__doc__}"
)
return ok, msg
# inject it into the class
postfix = usecase_name(dtype, mode, vlen, flags['name'])
testname = f"run_{postfix}"
setattr(cls, testname, run_template)
@unittest.skipUnless(not skipped, "Not implemented")
def test_runner(self):
ctx = mp.get_context("spawn")
q = ctx.Queue()
p = ctx.Process(target=type(self).mp_runner, args=[testname, q])
p.start()
# timeout to avoid hanging and long enough to avoid bailing too
# early. Note: this was timeout=10 but that seemed to caused
# intermittent failures on heavily loaded machines.
term_or_timeout = p.join(timeout=30)
exitcode = p.exitcode
if term_or_timeout is None:
if exitcode is None:
self.fail("Process timed out.")
elif exitcode < 0:
self.fail(f"Process terminated with signal {-exitcode}.")
self.assertEqual(exitcode, 0, msg="process ended unexpectedly")
out = q.get()
status = out['status']
msg = out['msg']
self.assertTrue(status, msg=msg)
setattr(cls, f"test_{postfix}", test_runner)
@classmethod
def autogenerate(cls):
flag_list = [{'fastmath':False, 'error_model':'numpy',
'name':'usecase'},
{'fastmath':True, 'error_model':'numpy',
'name':'fastmath_usecase'},]
# main loop covering all the modes and use-cases
for dtype in ('complex64', 'float64', 'float32', 'int32', ):
for vlen in vlen2cpu:
for flags in flag_list:
for mode in "scalar", "range", "prange", "numpy":
cls._inject_test(dtype, mode, vlen, dict(flags))
# mark important
for n in ( "test_int32_range4_usecase", # issue #3016
):
setattr(cls, n, tag("important")(getattr(cls, n)))
TestSVMLGeneration.autogenerate()
def math_sin_scalar(x):
return math.sin(x)
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
@needs_svml
| TestSVMLGeneration |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial005.py | {
"start": 110,
"end": 1386
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item = Body(
openapi_examples={
"normal": {
"summary": "A normal example",
"description": "A **normal** item works correctly.",
"value": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
},
"converted": {
"summary": "An example with converted data",
"description": "FastAPI can convert price `strings` to actual `numbers` automatically",
"value": {
"name": "Bar",
"price": "35.4",
},
},
"invalid": {
"summary": "Invalid data is rejected with an error",
"value": {
"name": "Baz",
"price": "thirty five point four",
},
},
},
),
):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | walkccc__LeetCode | solutions/1943. Describe the Painting/1943.py | {
"start": 42,
"end": 500
} | class ____:
def splitPainting(self, segments: list[list[int]]) -> list[list[int]]:
ans = []
prevIndex = 0
runningMix = 0
line = SortedDict()
for start, end, color in segments:
line[start] = line.get(start, 0) + color
line[end] = line.get(end, 0) - color
for i, mix in line.items():
if runningMix > 0:
ans.append([prevIndex, i, runningMix])
runningMix += mix
prevIndex = i
return ans
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py | {
"start": 6630,
"end": 8608
} | class ____(Step):
context: PublishConnectorContext
title = "Push connector image to registry"
@property
def latest_docker_image_name(self) -> str:
return f"{self.context.docker_repository}:latest"
@property
def should_push_latest_tag(self) -> bool:
"""
We don't want to push the latest tag for release candidates or pre-releases.
Returns:
bool: True if the latest tag should be pushed, False otherwise.
"""
is_release_candidate = "-rc" in self.context.connector.version
is_pre_release = self.context.pre_release
return not (is_release_candidate or is_pre_release)
async def _run(self, built_containers_per_platform: List[Container], attempts: int = 3) -> StepResult:
try:
image_ref = await built_containers_per_platform[0].publish(
f"docker.io/{self.context.docker_image}",
platform_variants=built_containers_per_platform[1:],
forced_compression=ImageLayerCompression.Gzip,
)
if self.should_push_latest_tag:
image_ref = await built_containers_per_platform[0].publish(
f"docker.io/{self.latest_docker_image_name}",
platform_variants=built_containers_per_platform[1:],
forced_compression=ImageLayerCompression.Gzip,
)
return StepResult(step=self, status=StepStatus.SUCCESS, stdout=f"Published {image_ref}")
except QueryError as e:
if attempts > 0:
self.context.logger.error(str(e))
self.context.logger.warn(f"Failed to publish {self.context.docker_image}. Retrying. {attempts} attempts left.")
await anyio.sleep(5)
return await self._run(built_containers_per_platform, attempts - 1)
return StepResult(step=self, status=StepStatus.FAILURE, stderr=str(e))
| PushConnectorImageToRegistry |
python | spack__spack | lib/spack/docs/conf.py | {
"start": 7969,
"end": 20377
} | class ____(RSTParser):
def parse(self, inputstring, document):
if isinstance(inputstring, str):
lines = inputstring.splitlines()
inputstring = StringList(lines, document.current_source)
super().parse(inputstring, document)
def add_package_api_version_line(app, what, name: str, obj, options, lines: List[str]):
"""Add versionadded directive to package API docstrings"""
# We're adding versionadded directive here instead of in spack/package.py because most symbols
# are re-exported, and we don't want to modify __doc__ of symbols we don't own.
if name.startswith("spack.package."):
symbol = name[len("spack.package.") :]
for version, symbols in spack.package.api.items():
if symbol in symbols:
lines.extend(["", f".. versionadded:: {version}"])
break
def skip_member(app, what, name, obj, skip, options):
# Do not skip (Make)Executable.__call__
if name == "__call__" and "Executable" in obj.__qualname__:
return False
return skip
def setup(sphinx):
# autodoc-process-docstring
sphinx.connect("autodoc-process-docstring", add_package_api_version_line)
sphinx.connect("autodoc-skip-member", skip_member)
sphinx.add_domain(PatchedPythonDomain, override=True)
sphinx.add_source_parser(NoTabExpansionRSTParser, override=True)
sphinx.add_lexer("spec", SpecLexer)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.4"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_last_updated_by_git",
"sphinx_sitemap",
"sphinxcontrib.inkscapeconverter",
"sphinxcontrib.programoutput",
]
copybutton_exclude = ".linenos, .gp, .go"
# Set default graphviz options
graphviz_dot_args = [
"-Grankdir=LR",
"-Gbgcolor=transparent",
"-Nshape=box",
"-Nfontname=monaco",
"-Nfontsize=10",
]
# Get nice vector graphics
graphviz_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Spack"
copyright = "Spack Project Developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import spack
import spack.package
version = ".".join(str(s) for s in spack.spack_version_info[:2])
# The full version, including alpha/beta/rc tags.
release = spack.spack_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Places to look for .po/.mo files for doc translations
# locale_dirs = []
# Sphinx gettext settings
gettext_compact = True
gettext_uuid = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_spack_root", ".spack-env", ".spack", ".venv"]
autodoc_mock_imports = ["llnl"]
autodoc_default_options = {"no-value": True}
nitpicky = True
nitpick_ignore = [
# Python classes that intersphinx is unable to resolve
("py:class", "argparse.HelpFormatter"),
("py:class", "contextlib.contextmanager"),
("py:class", "module"),
("py:class", "_io.BufferedReader"),
("py:class", "_io.BytesIO"),
("py:class", "unittest.case.TestCase"),
("py:class", "_frozen_importlib_external.SourceFileLoader"),
("py:class", "clingo.Control"),
("py:class", "six.moves.urllib.parse.ParseResult"),
("py:class", "TextIO"),
("py:class", "hashlib._Hash"),
("py:class", "concurrent.futures._base.Executor"),
("py:class", "multiprocessing.context.Process"),
# Spack classes that are private and we don't want to expose
("py:class", "spack.repo._PrependFileLoader"),
("py:class", "spack_repo.builtin.build_systems._checks.BuilderWithDefaults"),
# Spack classes that intersphinx is unable to resolve
("py:class", "spack.version.StandardVersion"),
("py:class", "spack.spec.DependencySpec"),
("py:class", "spack.spec.ArchSpec"),
("py:class", "spack.spec.InstallStatus"),
("py:class", "spack.spec.SpecfileReaderBase"),
("py:class", "spack.filesystem_view.SimpleFilesystemView"),
("py:class", "spack.traverse.EdgeAndDepth"),
("py:class", "spack.vendor.archspec.cpu.microarchitecture.Microarchitecture"),
("py:class", "spack.compiler.CompilerCache"),
# TypeVar that is not handled correctly
("py:class", "spack.llnl.util.lang.T"),
("py:class", "spack.llnl.util.lang.KT"),
("py:class", "spack.llnl.util.lang.VT"),
("py:class", "spack.llnl.util.lang.K"),
("py:class", "spack.llnl.util.lang.V"),
("py:class", "spack.llnl.util.lang.ClassPropertyType"),
("py:obj", "spack.llnl.util.lang.KT"),
("py:obj", "spack.llnl.util.lang.VT"),
("py:obj", "spack.llnl.util.lang.ClassPropertyType"),
("py:obj", "spack.llnl.util.lang.K"),
("py:obj", "spack.llnl.util.lang.V"),
]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ["_themes"]
# Google Search Console verification file
html_extra_path = ["google5fda5f94b4ffb8de.html"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_theme_options = {
"sidebar_hide_name": True,
"light_logo": "spack-logo-text.svg",
"dark_logo": "spack-logo-white-text.svg",
}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_spack_root/share/spack/logo/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
pygments_style = "default"
pygments_dark_style = "monokai"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Base URL for the documentation, used to generate <link rel="canonical"/> for better indexing
html_baseurl = "https://spack.readthedocs.io/en/latest/"
# Output file base name for HTML help builder.
htmlhelp_basename = "Spackdoc"
# Sitemap settings
sitemap_show_lastmod = True
sitemap_url_scheme = "{link}"
sitemap_excludes = ["search.html", "_modules/*"]
# -- Options for LaTeX output --------------------------------------------------
latex_engine = "lualatex"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [("index", "Spack.tex", "Spack Documentation", "", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "spack", "Spack Documentation", ["Todd Gamblin"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Spack",
"Spack Documentation",
"Todd Gamblin",
"Spack",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Extension configuration -------------------------------------------------
# sphinx.ext.intersphinx
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
rst_epilog = f"""
.. |package_api_version| replace:: v{spack.package_api_version[0]}.{spack.package_api_version[1]}
.. |min_package_api_version| replace:: v{spack.min_package_api_version[0]}.{spack.min_package_api_version[1]}
.. |spack_version| replace:: {spack.spack_version}
"""
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_context = {}
if os.environ.get("READTHEDOCS", "") == "True":
html_context["READTHEDOCS"] = True
| NoTabExpansionRSTParser |
python | wandb__wandb | wandb/sdk/internal/thread_local_settings.py | {
"start": 197,
"end": 527
} | class ____(threading.local):
api_key: Optional[str]
cookies: Optional[Dict]
headers: Optional[Dict]
def __init__(self) -> None:
self.api_key = None
self.cookies = None
self.headers = None
_thread_local_api_settings: _ThreadLocalApiSettings = _ThreadLocalApiSettings()
| _ThreadLocalApiSettings |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_test.py | {
"start": 26943,
"end": 32577
} | class ____(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in range(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
| BenchmarkRNN |
python | google__pytype | pytype/tests/test_utils.py | {
"start": 6369,
"end": 6833
} | class ____:
"""Mixin providing a method to check in-place operators."""
_HAS_DYNAMIC_ATTRIBUTES = True
def _check_inplace(self, op, assignments, expected_return):
"""Check the inplace operator."""
assignments = "; ".join(assignments)
src = f"""
def f(x, y):
{assignments}
x {op}= y
return x
"""
ty = self.Infer(src)
self.assertTypesMatchPytd(ty, f"def f(x, y) -> {expected_return}: ...")
| InplaceTestMixin |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 17942,
"end": 20256
} | class ____(testing.TestCase):
def test_unweighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 1.4, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 2.8, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(loss, [1.2, 1.6])
# Bad reduction
with self.assertRaisesRegex(ValueError, "Invalid value for argument"):
losses.CategoricalHinge(reduction="abc")
def test_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = [1, 0]
# Reduction = "sum_over_batch_size"
hinge_obj = losses.CategoricalHinge(reduction="sum_over_batch_size")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.6, 3)
# Reduction = "sum"
hinge_obj = losses.CategoricalHinge(reduction="sum")
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.2, 3)
# Reduction = None
hinge_obj = losses.CategoricalHinge(reduction=None)
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, [1.2, 0.0])
def test_zero_weighted(self):
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
sample_weight = 0.0
hinge_obj = losses.CategoricalHinge()
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(loss, 0.0)
def test_dtype_arg(self):
hinge_obj = losses.CategoricalHinge(dtype="bfloat16")
y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]])
loss = hinge_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
| CategoricalHingeTest |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_url_is_available.py | {
"start": 883,
"end": 1870
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.url_is_available"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_available(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesUrlIsAvailable |
python | ray-project__ray | python/ray/tests/test_runtime_env_plugin.py | {
"start": 11308,
"end": 11941
} | class ____(RuntimeEnvPlugin):
name = PRIORITY_TEST_PLUGIN1_NAME
priority = 11
env_value = " world"
@staticmethod
def validate(runtime_env_dict: dict) -> str:
return None
def modify_context(
self,
uris: List[str],
plugin_config_dict: dict,
ctx: RuntimeEnvContext,
logger: logging.Logger,
) -> None:
if PRIORITY_TEST_ENV_VAR_NAME in ctx.env_vars:
ctx.env_vars[PRIORITY_TEST_ENV_VAR_NAME] += PriorityTestPlugin1.env_value
else:
ctx.env_vars[PRIORITY_TEST_ENV_VAR_NAME] = PriorityTestPlugin1.env_value
| PriorityTestPlugin1 |
python | plotly__plotly.py | codegen/utils.py | {
"start": 31597,
"end": 32793
} | class ____(PlotlyNode):
"""
Class representing datatypes in the frames hierarchy
"""
# Constructor
def __init__(self, plotly_schema, node_path=(), parent=None):
super().__init__(plotly_schema, node_path, parent)
@property
def name_base_datatype(self):
return "BaseFrameHierarchyType"
@property
def root_name(self):
return ""
@property
def plotly_name(self) -> str:
if len(self.node_path) < 2:
return self.root_name
elif len(self.node_path) == 2:
return "frame" # override frames_entry
else:
return self.node_path[-1]
def tidy_path_part(self, p):
return "frame" if p == "frames_entry" else p
# Description
@property
def description(self) -> str:
desc = self.node_data.get("description", "")
if isinstance(desc, list):
desc = "".join(desc)
return format_description(desc)
# Raw data
@property
def node_data(self) -> dict:
node_data = self.plotly_schema["frames"]
for prop_name in self.node_path:
node_data = node_data[prop_name]
return node_data
| FrameNode |
python | huggingface__transformers | tests/models/dpt/test_image_processing_dpt.py | {
"start": 1157,
"end": 3537
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_reduce_labels=False,
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_reduce_labels = do_reduce_labels
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"do_reduce_labels": self.do_reduce_labels,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
| DPTImageProcessingTester |
python | redis__redis-py | redis/multidb/failover.py | {
"start": 1796,
"end": 3575
} | class ____(FailoverStrategyExecutor):
"""
Executes given failover strategy.
"""
def __init__(
self,
strategy: FailoverStrategy,
failover_attempts: int = DEFAULT_FAILOVER_ATTEMPTS,
failover_delay: float = DEFAULT_FAILOVER_DELAY,
):
self._strategy = strategy
self._failover_attempts = failover_attempts
self._failover_delay = failover_delay
self._next_attempt_ts: int = 0
self._failover_counter: int = 0
@property
def failover_attempts(self) -> int:
return self._failover_attempts
@property
def failover_delay(self) -> float:
return self._failover_delay
@property
def strategy(self) -> FailoverStrategy:
return self._strategy
def execute(self) -> SyncDatabase:
try:
database = self._strategy.database()
self._reset()
return database
except NoValidDatabaseException as e:
if self._next_attempt_ts == 0:
self._next_attempt_ts = time.time() + self._failover_delay
self._failover_counter += 1
elif time.time() >= self._next_attempt_ts:
self._next_attempt_ts += self._failover_delay
self._failover_counter += 1
if self._failover_counter > self._failover_attempts:
self._reset()
raise e
else:
raise TemporaryUnavailableException(
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
def _reset(self) -> None:
self._next_attempt_ts = 0
self._failover_counter = 0
| DefaultFailoverStrategyExecutor |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 60716,
"end": 64610
} | class ____(SimpleElement):
"""
TIMESYS_ element: defines a time system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ["ID", "timeorigin", "timescale", "refposition"]
_element_name = "TIMESYS"
def __init__(
self,
ID=None,
timeorigin=None,
timescale=None,
refposition=None,
id=None,
config=None,
pos=None,
**extra,
):
if config is None:
config = {}
self._config = config
self._pos = pos
# TIMESYS is supported starting in version 1.4
if not config.get("version_1_4_or_later"):
warn_or_raise(W54, W54, config.get("version", "unknown"), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.timeorigin = timeorigin
self.timescale = timescale
self.refposition = refposition
warn_unknown_attrs(
"TIMESYS",
extra.keys(),
config,
pos,
["ID", "timeorigin", "timescale", "refposition"],
)
@property
def ID(self):
"""
[*required*] The XML ID of the TIMESYS_ element, used for
cross-referencing. Must be a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if ID is None:
vo_raise(E22, (), self._config, self._pos)
xmlutil.check_id(ID, "ID", self._config, self._pos)
self._ID = ID
@property
def timeorigin(self):
"""
Specifies the time origin of the time coordinate,
given as a Julian Date for the time scale and
reference point defined. It is usually given as a
floating point literal; for convenience, the magic
strings "MJD-origin" (standing for 2400000.5) and
"JD-origin" (standing for 0) are also allowed.
The timeorigin attribute MUST be given unless the
time’s representation contains a year of a calendar
era, in which case it MUST NOT be present. In VOTables,
these representations currently are Gregorian calendar
years with xtype="timestamp", or years in the Julian
or Besselian calendar when a column has yr, a, or Ba as
its unit and no time origin is given.
"""
return self._timeorigin
@timeorigin.setter
def timeorigin(self, timeorigin):
if (
timeorigin is not None
and timeorigin != "MJD-origin"
and timeorigin != "JD-origin"
):
try:
timeorigin = float(timeorigin)
except ValueError:
warn_or_raise(E23, E23, timeorigin, self._config, self._pos)
self._timeorigin = timeorigin
@timeorigin.deleter
def timeorigin(self):
self._timeorigin = None
@property
def timescale(self):
"""
[*required*] String specifying the time scale used. Values
should be taken from the IVOA timescale vocabulary (documented
at http://www.ivoa.net/rdf/timescale).
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
@timescale.deleter
def timescale(self):
self._timescale = None
@property
def refposition(self):
"""
[*required*] String specifying the reference position. Values
should be taken from the IVOA refposition vocabulary (documented
at http://www.ivoa.net/rdf/refposition).
"""
return self._refposition
@refposition.setter
def refposition(self, refposition):
self._refposition = refposition
@refposition.deleter
def refposition(self):
self._refposition = None
| TimeSys |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/utils/constants.py | {
"start": 822,
"end": 3090
} | class ____:
"""Define constants for Teradata Provider."""
CC_CREATE_OPR = "CREATE"
CC_CREATE_SUSPEND_OPR = "CREATE_SUSPEND"
CC_DROP_OPR = "DROP"
CC_SUSPEND_OPR = "SUSPEND"
CC_RESUME_OPR = "RESUME"
CC_INITIALIZE_DB_STATUS = "Initializing"
CC_SUSPEND_DB_STATUS = "Suspended"
CC_RESUME_DB_STATUS = "Running"
CC_OPR_SUCCESS_STATUS_MSG = "Compute Cluster %s %s operation completed successfully."
CC_OPR_EMPTY_PROFILE_ERROR_MSG = "Failed to %s the Vantage Cloud Lake Compute Cluster Instance due to an invalid compute cluster profile name."
CC_GRP_PRP_NON_EXISTS_MSG = "Failed to %s the Vantage Cloud Lake Compute Cluster Instance because the specified compute cluster does not exist or the user lacks the necessary permissions to access the Compute Cluster Instance."
CC_GRP_LAKE_SUPPORT_ONLY_MSG = "Failed to %s the Vantage Cloud Lake Compute Cluster Instance because the Compute Cluster feature is supported only on the Vantage Cloud Lake system."
CC_OPR_TIMEOUT_ERROR = "Failed to %s the Vantage Cloud Lake Compute Cluster Instance `%s`. Please contact the administrator for assistance."
CC_ERR_VERSION_GET = "Failed to manage the Vantage Cloud Lake Compute Cluster Instance due to an error while getting the Teradata database version."
BTEQ_REMOTE_ERROR_MSG = (
"Failed to establish a SSH connection to the remote machine for executing the BTEQ script."
)
BTEQ_UNEXPECTED_ERROR_MSG = "Failure while executing BTEQ script due to unexpected error."
BTEQ_TIMEOUT_ERROR_MSG = "Failed to execute BTEQ script due to timeout after %s seconds."
BTEQ_MISSED_PARAMS = "Failed to execute BTEQ script due to missing required parameters: either 'sql' or 'file_path' must be provided."
BTEQ_INVALID_PATH = (
"Failed to execute BTEQ script due to invalid file path: '%s' does not exist or is inaccessible."
)
BTEQ_INVALID_CHARSET = "Failed to execute BTEQ script because the provided file '%s' encoding differs from the specified BTEQ I/O encoding %s"
BTEQ_REMOTE_FILE_PATH_INVALID = "Failed to execute BTEQ script due to invalid remote file path: '%s' does not exist or is inaccessible on the remote machine."
CC_OPR_TIME_OUT = 1200
CC_POLL_INTERVAL = 60
| Constants |
python | ray-project__ray | rllib/examples/_old_api_stack/policy/cliff_walking_wall_policy.py | {
"start": 500,
"end": 4181
} | class ____(Policy):
"""Optimal RLlib policy for the CliffWalkingWallEnv environment, defined in
ray/rllib/examples/env/cliff_walking_wall_env.py, with epsilon-greedy exploration.
The policy takes a random action with probability epsilon, specified
by `config["epsilon"]`, and the optimal action with probability 1 - epsilon.
"""
@override(Policy)
def __init__(
self,
observation_space: gym.Space,
action_space: gym.Space,
config: AlgorithmConfigDict,
):
update_global_seed_if_necessary(seed=config.get("seed"))
super().__init__(observation_space, action_space, config)
# Known optimal action dist for each of the 48 states and 4 actions
self.action_dist = np.zeros((48, 4), dtype=float)
# Starting state: go up
self.action_dist[36] = (1, 0, 0, 0)
# Cliff + Goal: never actually used, set to random
self.action_dist[37:] = (0.25, 0.25, 0.25, 0.25)
# Row 2; always go right
self.action_dist[24:36] = (0, 1, 0, 0)
# Row 0 and Row 1; go down or go right
self.action_dist[0:24] = (0, 0.5, 0.5, 0)
# Col 11; always go down, supercedes previous values
self.action_dist[[11, 23, 35]] = (0, 0, 1, 0)
assert np.allclose(self.action_dist.sum(-1), 1)
# Epsilon-Greedy action selection
epsilon = config.get("epsilon", 0.0)
self.action_dist = self.action_dist * (1 - epsilon) + epsilon / 4
assert np.allclose(self.action_dist.sum(-1), 1)
# Attributes required for RLlib; note that while CliffWalkingWallPolicy
# inherits from Policy, it actually implements TorchPolicyV2.
self.view_requirements[SampleBatch.ACTION_PROB] = ViewRequirement()
self.device = "cpu"
self.model = None
self.dist_class = TorchCategorical
@override(Policy)
def compute_actions(
self,
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
obs = np.array(obs_batch, dtype=int)
action_probs = self.action_dist[obs]
actions = np.zeros(len(obs), dtype=int)
for i in range(len(obs)):
actions[i] = np.random.choice(4, p=action_probs[i])
return (
actions,
[],
{SampleBatch.ACTION_PROB: action_probs[np.arange(len(obs)), actions]},
)
@override(Policy)
def compute_log_likelihoods(
self,
actions: Union[List[TensorType], TensorType],
obs_batch: Union[List[TensorType], TensorType],
**kwargs,
) -> TensorType:
obs = np.array(obs_batch, dtype=int)
actions = np.array(actions, dtype=int)
# Compute action probs for all possible actions
action_probs = self.action_dist[obs]
# Take the action_probs corresponding to the specified actions
action_probs = action_probs[np.arange(len(obs)), actions]
# Ignore RuntimeWarning thrown by np.log(0) if action_probs is 0
with np.errstate(divide="ignore"):
return np.log(action_probs)
def action_distribution_fn(
self, model, obs_batch: TensorStructType, **kwargs
) -> Tuple[TensorType, type, List[TensorType]]:
obs = np.array(obs_batch[SampleBatch.OBS], dtype=int)
action_probs = self.action_dist[obs]
# Ignore RuntimeWarning thrown by np.log(0) if action_probs is 0
with np.errstate(divide="ignore"):
return np.log(action_probs), TorchCategorical, None
| CliffWalkingWallPolicy |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_supervisor.py | {
"start": 4580,
"end": 6294
} | class ____:
@pytest.mark.parametrize(
("server", "dry_run", "expectation"),
[
("/execution/", False, pytest.raises(ValueError, match="Invalid execution API server URL")),
("", False, pytest.raises(ValueError, match="Invalid execution API server URL")),
("http://localhost:8080", True, pytest.raises(ValueError, match="Can only specify one of")),
(None, True, nullcontext()),
("http://localhost:8080/execution/", False, nullcontext()),
("https://localhost:8080/execution/", False, nullcontext()),
],
)
def test_supervise(
self,
server,
dry_run,
expectation,
test_dags_dir,
client_with_ti_start,
):
"""
Test that the supervisor validates server URL and dry_run parameter combinations correctly.
"""
ti = TaskInstance(
id=uuid7(),
task_id="async",
dag_id="super_basic_deferred_run",
run_id="d",
try_number=1,
dag_version_id=uuid7(),
)
bundle_info = BundleInfo(name="my-bundle", version=None)
kw = {
"ti": ti,
"dag_rel_path": "super_basic_deferred_run.py",
"token": "",
"bundle_info": bundle_info,
"dry_run": dry_run,
"server": server,
}
if isinstance(expectation, nullcontext):
kw["client"] = client_with_ti_start
with patch.dict(os.environ, local_dag_bundle_cfg(test_dags_dir, bundle_info.name)):
with expectation:
supervise(**kw)
@pytest.mark.usefixtures("disable_capturing")
| TestSupervisor |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 88802,
"end": 89321
} | class ____(_PrintableStructure):
_fields_ = [("version", c_uint),
("revision", c_uint),
("hostDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE),
("pgpuVirtualizationCaps", c_uint),
("reserved", c_uint * 5),
("hostSupportedVgpuRange", c_nvmlVgpuVersion_t),
("opaqueDataSize", c_uint),
("opaqueData", c_char * NVML_VGPU_PGPU_METADATA_OPAQUE_DATA_SIZE)
]
| c_nvmlVgpuPgpuMetadata_t |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 87807,
"end": 88421
} | class ____:
xlPivotCellBlankCell = 9 # from enum XlPivotCellType
xlPivotCellCustomSubtotal = 7 # from enum XlPivotCellType
xlPivotCellDataField = 4 # from enum XlPivotCellType
xlPivotCellDataPivotField = 8 # from enum XlPivotCellType
xlPivotCellGrandTotal = 3 # from enum XlPivotCellType
xlPivotCellPageFieldItem = 6 # from enum XlPivotCellType
xlPivotCellPivotField = 5 # from enum XlPivotCellType
xlPivotCellPivotItem = 1 # from enum XlPivotCellType
xlPivotCellSubtotal = 2 # from enum XlPivotCellType
xlPivotCellValue = 0 # from enum XlPivotCellType
| PivotCellType |
python | lazyprogrammer__machine_learning_examples | hmm_class/hmmd_theano2.py | {
"start": 803,
"end": 4851
} | class ____:
def __init__(self, M):
self.M = M # number of hidden states
def fit(self, X, learning_rate=0.001, max_iter=10, V=None, print_period=1):
# train the HMM model using stochastic gradient descent
# print "X to train:", X
# determine V, the vocabulary size
# assume observables are already integers from 0..V-1
# X is a jagged array of observed sequences
if V is None:
V = max(max(x) for x in X) + 1
N = len(X)
print("number of train samples:", N)
preSoftmaxPi0 = np.zeros(self.M) # initial state distribution
preSoftmaxA0 = np.random.randn(self.M, self.M) # state transition matrix
preSoftmaxB0 = np.random.randn(self.M, V) # output distribution
thx, cost = self.set(preSoftmaxPi0, preSoftmaxA0, preSoftmaxB0)
pi_update = self.preSoftmaxPi - learning_rate*T.grad(cost, self.preSoftmaxPi)
A_update = self.preSoftmaxA - learning_rate*T.grad(cost, self.preSoftmaxA)
B_update = self.preSoftmaxB - learning_rate*T.grad(cost, self.preSoftmaxB)
updates = [
(self.preSoftmaxPi, pi_update),
(self.preSoftmaxA, A_update),
(self.preSoftmaxB, B_update),
]
train_op = theano.function(
inputs=[thx],
updates=updates,
allow_input_downcast=True,
)
costs = []
for it in range(max_iter):
if it % print_period == 0:
print("it:", it)
for n in range(N):
# this would of course be much faster if we didn't do this on
# every iteration of the loop
c = self.get_cost_multi(X).sum()
costs.append(c)
train_op(X[n])
# print "A:", self.A.get_value()
# print "B:", self.B.get_value()
# print "pi:", self.pi.get_value()
plt.plot(costs)
plt.show()
def get_cost(self, x):
# returns log P(x | model)
# using the forward part of the forward-backward algorithm
# print "getting cost for:", x
return self.cost_op(x)
def log_likelihood(self, x):
return -self.cost_op(x)
def get_cost_multi(self, X):
return np.array([self.get_cost(x) for x in X])
def set(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
self.preSoftmaxPi = theano.shared(preSoftmaxPi)
self.preSoftmaxA = theano.shared(preSoftmaxA)
self.preSoftmaxB = theano.shared(preSoftmaxB)
pi = T.nnet.softmax(self.preSoftmaxPi).flatten()
# softmax returns 1xD if input is a 1-D array of size D
A = T.nnet.softmax(self.preSoftmaxA)
B = T.nnet.softmax(self.preSoftmaxB)
# define cost
thx = T.ivector('thx')
def recurrence(t, old_a, x):
a = old_a.dot(A) * B[:, x[t]]
s = a.sum()
return (a / s), s
[alpha, scale], _ = theano.scan(
fn=recurrence,
sequences=T.arange(1, thx.shape[0]),
outputs_info=[pi*B[:,thx[0]], None],
n_steps=thx.shape[0]-1,
non_sequences=thx
)
cost = -T.log(scale).sum()
self.cost_op = theano.function(
inputs=[thx],
outputs=cost,
allow_input_downcast=True,
)
return thx, cost
def fit_coin():
X = []
for line in open('coin_data.txt'):
# 1 for H, 0 for T
x = [1 if e == 'H' else 0 for e in line.rstrip()]
X.append(x)
hmm = HMM(2)
hmm.fit(X)
L = hmm.get_cost_multi(X).sum()
print("LL with fitted params:", L)
# try true values
# remember these must be in their "pre-softmax" forms
pi = np.log( np.array([0.5, 0.5]) )
A = np.log( np.array([[0.1, 0.9], [0.8, 0.2]]) )
B = np.log( np.array([[0.6, 0.4], [0.3, 0.7]]) )
hmm.set(pi, A, B)
L = hmm.get_cost_multi(X).sum()
print("LL with true params:", L)
if __name__ == '__main__':
fit_coin()
| HMM |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 16847,
"end": 16955
} | class ____(PydanticValueError):
msg_template = 'could not interpret byte unit: {unit}'
| InvalidByteSizeUnit |
python | vyperlang__vyper | vyper/venom/passes/cfg_normalization.py | {
"start": 251,
"end": 4826
} | class ____(IRPass):
"""
This pass splits basic blocks when there are multiple conditional predecessors.
The code generator expect a normalized CFG, that has the property that
each basic block has at most one conditional predecessor.
"""
cfg: CFGAnalysis
changes = 0
def _get_phi_instructions(self, bb: IRBasicBlock) -> Iterator[IRInstruction]:
"""Get all phi instructions in a basic block."""
for inst in bb.instructions:
if inst.opcode != "phi":
break # phis are always at the beginning
yield inst
def _process_block_predecessors(self, bb: IRBasicBlock) -> None:
"""Check if any predecessors need split blocks inserted."""
# iterate over the predecessors to this basic block
for pred_bb in list(self.cfg.cfg_in(bb)):
assert bb in self.cfg.cfg_out(pred_bb)
# handle branching in the predecessor bb
if len(self.cfg.cfg_out(pred_bb)) > 1:
self._insert_split_basicblock(bb, pred_bb)
self.changes += 1
break
def _insert_split_basicblock(self, bb: IRBasicBlock, pred_bb: IRBasicBlock) -> IRBasicBlock:
# create an intermediary basic block and append it
fn = self.function
split_label = IRLabel(f"{pred_bb.label.value}_split_{bb.label.value}")
split_bb = IRBasicBlock(split_label, fn)
pred_terminal = pred_bb.instructions[-1]
pred_terminal.replace_label_operands({bb.label: split_label})
# variables referenced in the phi node from pred_bb might be defined
# either by a phi in pred_bb, or in a block that dominates pred_bb.
# these need forwarding through a store instruction in the split block.
var_replacements: dict[IRVariable, IRVariable] = {}
for inst in self._get_phi_instructions(bb):
for label, var in inst.phi_operands:
if label != pred_bb.label:
continue
assert isinstance(var, IRVariable) # help mypy
if var in var_replacements:
continue
if self._needs_forwarding_store(var, pred_bb):
new_var = split_bb.append_instruction("assign", var)
assert new_var is not None # help mypy
var_replacements[var] = new_var
split_bb.append_instruction("jmp", bb.label)
fn.append_basic_block(split_bb)
# update phi nodes in bb to reference split_bb instead of pred_bb
self._update_phi_nodes(bb, pred_bb, split_bb, var_replacements)
return split_bb
def _needs_forwarding_store(self, var: IRVariable, pred_bb: IRBasicBlock) -> bool:
for inst in pred_bb.instructions:
if var in inst.get_outputs():
# variable defined by phi needs forwarding
return inst.opcode == "phi"
# variable not defined in predecessor needs forwarding
return True
def _update_phi_nodes(
self,
bb: IRBasicBlock,
old_pred: IRBasicBlock,
new_pred: IRBasicBlock,
var_replacements: dict[IRVariable, IRVariable],
) -> None:
for inst in self._get_phi_instructions(bb):
# manually update operands since phi_operands is read-only
for i in range(0, len(inst.operands), 2):
if inst.operands[i] == old_pred.label:
inst.operands[i] = new_pred.label
# update variable if it was forwarded
var = inst.operands[i + 1]
assert isinstance(var, IRVariable) # help mypy
if var in var_replacements:
inst.operands[i + 1] = var_replacements[var]
def _run_pass(self) -> int:
fn = self.function
self.changes = 0
self.cfg = self.analyses_cache.request_analysis(CFGAnalysis)
# split blocks that need splitting
for bb in list(fn.get_basic_blocks()):
if len(self.cfg.cfg_in(bb)) > 1:
self._process_block_predecessors(bb)
# if we made changes, recalculate the cfg
if self.changes > 0:
self.analyses_cache.invalidate_analysis(CFGAnalysis)
return self.changes
def run_pass(self):
fn = self.function
for _ in range(fn.num_basic_blocks * 2):
if self._run_pass() == 0:
break
else:
raise CompilerPanic("Normalization pass did not converge")
| CFGNormalization |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py | {
"start": 1499,
"end": 6511
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def _test(self, global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static):
"""Test that all constraints are met for given parameters."""
if not is_batch_size_static:
# Adding a constant value here prevents downstream computation from
# statically deriving the value of global batch size when running
# in graph mode.
global_batch_size += constant_op.constant(0, dtypes.int64)
batch_sizes_list = []
for i in range(num_workers):
batch_sizes_list.append(
self.evaluate(
distribute.batch_sizes_for_worker(global_batch_size, num_workers,
num_replicas_per_worker, i)))
for batch_sizes in batch_sizes_list:
# Constraint (A): for any worker, len(batch_sizes) == W * R
self.assertLen(batch_sizes, num_workers * num_replicas_per_worker)
# Constraint (B): for any worker, sum(batch_sizes) == G
self.assertAllEqual(np.sum(batch_sizes), global_batch_size)
# Each per-worker batch is split into num_workers global steps
for step_index in range(num_workers):
actual_global_batch = 0
offset = step_index * num_replicas_per_worker
for batch_sizes in batch_sizes_list:
actual_global_batch += np.sum(batch_sizes[offset:offset +
num_replicas_per_worker])
# Constraint (C): for any step, batch size across all workers add up to G.
self.assertAllEqual(
global_batch_size,
actual_global_batch,
)
# Constraint (D): Batch size of any two replicas differs by at most one
self.assertLessEqual(np.max(batch_sizes_list) - np.min(batch_sizes_list), 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBasic(self, is_batch_size_static):
# Manually verify basic test case.
global_batch_size = 8
num_workers = 2
num_replicas_per_worker = 2
for worker_index in range(4):
batch_sizes = distribute.batch_sizes_for_worker(global_batch_size,
num_workers,
num_replicas_per_worker,
worker_index)
self.assertAllEqual([2, 2, 2, 2],
tensor_util.constant_value(batch_sizes))
self._test(global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeIndivisibleByNumWorkers(self, is_batch_size_static):
global_batch_size = 4
num_workers = 3
num_replicas_per_worker = 1
def get_batch_sizes_for_worker(worker_index):
return tensor_util.constant_value(
distribute.batch_sizes_for_worker(global_batch_size, num_workers,
num_replicas_per_worker,
worker_index))
# Manually verify this test case.
self.assertAllEqual([2, 1, 1], get_batch_sizes_for_worker(0))
self.assertAllEqual([1, 1, 2], get_batch_sizes_for_worker(1))
self.assertAllEqual([1, 2, 1], get_batch_sizes_for_worker(2))
self._test(global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeIndivisibleByNumReplicas(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=1,
num_replicas_per_worker=5,
is_batch_size_static=is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeSmallerThanNumReplicas(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=2,
num_replicas_per_worker=5,
is_batch_size_static=is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeSmallerThanNumWorkers(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=5,
num_replicas_per_worker=1,
is_batch_size_static=is_batch_size_static)
def _flat_shapes(dataset):
return [
ts.as_list()
for ts in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
]
| BatchSizesForWorkerTest |
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 6037,
"end": 6801
} | class ____(nn.Module):
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileViTV2InvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if i == 0 else 1,
)
self.layer.append(layer)
in_channels = out_channels
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
features = layer_module(features)
return features
| MobileViTV2MobileNetLayer |
python | buildout__buildout | src/zc/buildout/buildout.py | {
"start": 59017,
"end": 90057
} | class ____(DictMixin):
def __init__(self, buildout, section, data):
self.buildout = buildout
self.name = section
self._raw = data
self._cooked = {}
self._data = {}
def _initialize(self):
name = self.name
__doing__ = 'Initializing section %s.', name
if '<' in self._raw:
self._raw = self._do_extend_raw(name, self._raw, [])
# force substitutions
for k, v in sorted(self._raw.items()):
if '${' in v:
self._dosub(k, v)
if name == 'buildout':
return # buildout section can never be a part
for dname in self.get('<part-dependencies>', '').split():
# force use of dependencies in buildout:
self.buildout[dname]
if self.get('recipe'):
self.initialize()
self.buildout._parts.append(name)
def initialize(self):
reqs, entry = _recipe(self._data)
buildout = self.buildout
recipe_class = _install_and_load(reqs, 'zc.buildout', entry, buildout)
name = self.name
self.recipe = recipe_class(buildout, name, self)
def _do_extend_raw(self, name, data, doing):
if name == 'buildout':
return data
if name in doing:
raise zc.buildout.UserError("Infinite extending loop %r" % name)
doing.append(name)
try:
to_do = data.get('<', None)
if to_do is None:
return data
__doing__ = 'Loading input sections for %r', name
result = {}
for iname in to_do.split('\n'):
iname = iname.strip()
if not iname:
continue
raw = self.buildout._raw.get(iname)
if raw is None:
raise zc.buildout.UserError("No section named %r" % iname)
result.update(self._do_extend_raw(iname, raw, doing))
result = _annotate_section(result, "")
data = _annotate_section(copy.deepcopy(data), "")
result = _update_section(result, data)
result = _unannotate_section(result)
result.pop('<', None)
return result
finally:
assert doing.pop() == name
def _dosub(self, option, v):
__doing__ = 'Getting option %s:%s.', self.name, option
seen = [(self.name, option)]
v = '$$'.join([self._sub(s, seen) for s in v.split('$$')])
self._cooked[option] = v
def get(self, option, default=None, seen=None):
try:
return self._data[option]
except KeyError:
pass
v = self._cooked.get(option)
if v is None:
v = self._raw.get(option)
if v is None:
return default
__doing__ = 'Getting option %s:%s.', self.name, option
if '${' in v:
key = self.name, option
if seen is None:
seen = [key]
elif key in seen:
raise zc.buildout.UserError(
"Circular reference in substitutions.\n"
)
else:
seen.append(key)
v = '$$'.join([self._sub(s, seen) for s in v.split('$$')])
seen.pop()
self._data[option] = v
return v
_template_split = re.compile('([$]{[^}]*})').split
_simple = re.compile('[-a-zA-Z0-9 ._]+$').match
_valid = re.compile(r'\${[-a-zA-Z0-9 ._]*:[-a-zA-Z0-9 ._]+}$').match
    def _sub(self, template, seen):
        """Expand ${section:option} references in *template*.

        *seen* is the chain of (section, option) pairs being substituted,
        used by get() to detect circular references.
        """
        value = self._template_split(template)
        subs = []
        # Odd-indexed elements of the split are the ${...} markers.
        for ref in value[1::2]:
            s = tuple(ref[2:-1].split(':'))
            if not self._valid(ref):
                # Malformed reference: report the most specific problem.
                if len(s) < 2:
                    raise zc.buildout.UserError("The substitution, %s,\n"
                                                "doesn't contain a colon."
                                                % ref)
                if len(s) > 2:
                    raise zc.buildout.UserError("The substitution, %s,\n"
                                                "has too many colons."
                                                % ref)
                if not self._simple(s[0]):
                    raise zc.buildout.UserError(
                        "The section name in substitution, %s,\n"
                        "has invalid characters."
                        % ref)
                if not self._simple(s[1]):
                    raise zc.buildout.UserError(
                        "The option name in substitution, %s,\n"
                        "has invalid characters."
                        % ref)
            section, option = s
            if not section:
                # ${:option} refers to the current section.
                section = self.name
            v = self.buildout[section].get(option, None, seen)
            if v is None:
                if option == '_buildout_section_name_':
                    # Pseudo-option that expands to the section's own name.
                    v = self.name
                else:
                    raise MissingOption("Referenced option does not exist:",
                                        section, option)
            subs.append(v)
        subs.append('')
        # Re-interleave the literal text with the substituted values.
        return ''.join([''.join(v) for v in zip(value[::2], subs)])
def __getitem__(self, key):
try:
return self._data[key]
except KeyError:
pass
v = self.get(key)
if v is None:
raise MissingOption("Missing option: %s:%s" % (self.name, key))
return v
def __setitem__(self, option, value):
if not isinstance(value, str):
raise TypeError('Option values must be strings', value)
self._data[option] = value
def __delitem__(self, key):
if key in self._raw:
del self._raw[key]
if key in self._data:
del self._data[key]
if key in self._cooked:
del self._cooked[key]
elif key in self._data:
del self._data[key]
else:
raise KeyError(key)
def keys(self):
raw = self._raw
return list(self._raw) + [k for k in self._data if k not in raw]
    def __iter__(self):
        # Iterate option names in the same order as keys().
        return iter(self.keys())
    def __len__(self):
        # Number of distinct option names (raw plus computed).
        return len(self.keys())
def copy(self):
result = copy.deepcopy(self._raw)
result.update(self._cooked)
result.update(self._data)
return result
    def _call(self, f):
        """Call recipe callable *f* from the buildout directory, removing
        any paths registered via created() if it raises.
        """
        buildout_directory = self.buildout['buildout']['directory']
        self._created = []
        try:
            try:
                os.chdir(buildout_directory)
                return f()
            except:
                # Undo partial work: remove everything the recipe reported.
                for p in self._created:
                    if os.path.isdir(p):
                        rmtree(p)
                    elif os.path.isfile(p):
                        os.remove(p)
                    else:
                        self.buildout._logger.warning("Couldn't clean up %r.", p)
                raise
        finally:
            # Reset so created() fails outside install/update, and restore cwd.
            self._created = None
            os.chdir(buildout_directory)
    def created(self, *paths):
        """Register *paths* as created by the recipe; return the full list.

        Outside install/update, self._created is None (set by _call), so
        .extend raises AttributeError, converted to a TypeError here.
        """
        try:
            self._created.extend(paths)
        except AttributeError:
            raise TypeError(
                "Attempt to register a created path while not installing",
                self.name)
        return self._created
def __repr__(self):
return repr(dict(self))
Buildout.Options = Options  # expose Options as an attribute of Buildout
# Whitespace runs that need quoting when serializing option values:
# spaces around newlines, plus leading/trailing whitespace.
_spacey_nl = re.compile('[ \t\r\f\v]*\n[ \t\r\f\v\n]*'
                        '|'
                        '^[ \t\r\f\v]+'
                        '|'
                        '[ \t\r\f\v]+$'
                        )
# Markers substituted back into serialized values when they are re-read.
_spacey_defaults = [
    ('%(__buildout_space__)s', ' '),
    ('%(__buildout_space_n__)s', '\n'),
    ('%(__buildout_space_r__)s', '\r'),
    ('%(__buildout_space_f__)s', '\f'),
    ('%(__buildout_space_v__)s', '\v'),
    ]
def _quote_spacey_nl(match):
match = match.group(0).split('\n', 1)
result = '\n\t'.join(
[(s
.replace(' ', '%(__buildout_space__)s')
.replace('\r', '%(__buildout_space_r__)s')
.replace('\f', '%(__buildout_space_f__)s')
.replace('\v', '%(__buildout_space_v__)s')
.replace('\n', '%(__buildout_space_n__)s')
)
for s in match]
)
return result
def _save_option(option, value, f):
    """Write ``option = value`` to stream *f*, quoting whitespace that
    would otherwise be lost on re-parse."""
    text = _spacey_nl.sub(_quote_spacey_nl, value)
    # A quoted run at the very start/end must not leave a bare '\n\t'.
    if text.startswith('\n\t'):
        text = '%(__buildout_space_n__)s' + text[2:]
    if text.endswith('\n\t'):
        text = text[:-2] + '%(__buildout_space_n__)s'
    print_(option, '=', text, file=f)
def _save_options(section, options, f):
    """Write one ``[section]`` with its options, sorted by name, to *f*."""
    print_('[%s]' % section, file=f)
    for option, value in sorted(options.items()):
        _save_option(option, value, f)
def _default_globals():
"""Return a mapping of default and precomputed expressions.
These default expressions are convenience defaults available when eveluating
section headers expressions.
NB: this is wrapped in a function so that the computing of these expressions
is lazy and done only if needed (ie if there is at least one section with
an expression) because the computing of some of these expressions can be
expensive.
"""
# partially derived or inspired from its.py
# Copyright (c) 2012, Kenneth Reitz All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# default available modules, explicitly re-imported locally here on purpose
import sys
import os
import platform
import re
globals_defs = {'sys': sys, 'os': os, 'platform': platform, 're': re,}
# major python major_python_versions as python2 and python3
major_python_versions = tuple(map(str, platform.python_version_tuple()))
globals_defs.update({'python2': major_python_versions[0] == '2',
'python3': major_python_versions[0] == '3'})
# minor python major_python_versions as python24, python25 ... python39
minor_python_versions = ('24', '25', '26', '27',
'30', '31', '32', '33', '34', '35', '36', '37', '38', '39',
'310', '311', '312', '313', '314', '315')
for v in minor_python_versions:
globals_defs['python' + v] = ''.join(major_python_versions[:2]) == v
# interpreter type
sys_version = sys.version.lower()
pypy = 'pypy' in sys_version
jython = 'java' in sys_version
ironpython ='iron' in sys_version
# assume CPython, if nothing else.
cpython = not any((pypy, jython, ironpython,))
globals_defs.update({'cpython': cpython,
'pypy': pypy,
'jython': jython,
'ironpython': ironpython})
# operating system
sys_platform = str(sys.platform).lower()
globals_defs.update({'linux': 'linux' in sys_platform,
'windows': 'win32' in sys_platform,
'cygwin': 'cygwin' in sys_platform,
'solaris': 'sunos' in sys_platform,
'macosx': 'darwin' in sys_platform,
'posix': 'posix' in os.name.lower()})
#bits and endianness
import struct
void_ptr_size = struct.calcsize('P') * 8
globals_defs.update({'bits32': void_ptr_size == 32,
'bits64': void_ptr_size == 64,
'little_endian': sys.byteorder == 'little',
'big_endian': sys.byteorder == 'big'})
return globals_defs
# Split a string on ${...} markers; odd-indexed pieces are the references.
variable_template_split = re.compile('([$]{[^}]*})').split
def _open(
    base, filename, seen, download_options,
    override, downloaded, user_defaults
):
    """Open configuration file *filename* (a path or URL resolved against
    *base*) and return its sections as an annotated dictionary.

    Files named in the ``extends`` (and ``optional-extends``) buildout
    options are opened recursively; earlier files serve as defaults for
    later ones.  *seen* guards against recursive includes, *downloaded*
    records files already fetched, and *override* carries command-line
    option overrides.
    """
    # Command-line overrides take precedence when deciding how to download.
    download_options = _update_section(download_options, override)
    raw_download_options = _unannotate_section(download_options)
    newest = bool_option(raw_download_options, 'newest', 'false')
    fallback = newest and not (filename in downloaded)
    extends_cache = raw_download_options.get('extends-cache')
    # ${...} cannot be expanded this early, so forbid it in extends-cache.
    if extends_cache and variable_template_split(extends_cache)[1::2]:
        raise ValueError(
            "extends-cache '%s' may not contain ${section:variable} to expand."
            % extends_cache
        )
    download = zc.buildout.download.Download(
        raw_download_options, cache=extends_cache,
        fallback=fallback, hash_name=True)
    is_temp = False
    downloaded_filename = None
    # Resolve the file: a URL, a path relative to a URL base, or a local path.
    if _isurl(filename):
        downloaded_filename, is_temp = download(filename)
        fp = open(downloaded_filename)
        base = filename[:filename.rfind('/')]
    elif _isurl(base):
        if os.path.isabs(filename):
            fp = open(filename)
            base = os.path.dirname(filename)
        else:
            filename = base + '/' + filename
            downloaded_filename, is_temp = download(filename)
            fp = open(downloaded_filename)
            base = filename[:filename.rfind('/')]
    else:
        filename = os.path.join(base, filename)
        fp = open(filename)
        base = os.path.dirname(filename)
    downloaded.add(filename)
    if filename in seen:
        if is_temp:
            fp.close()
            os.remove(downloaded_filename)
        raise zc.buildout.UserError("Recursive file include", seen, filename)
    # An empty *seen* stack means this is the top-level config file.
    root_config_file = not seen
    seen.append(filename)
    filename_for_logging = filename
    if downloaded_filename:
        filename_for_logging = '%s (downloaded as %s)' % (
            filename, downloaded_filename)
    result = zc.buildout.configparser.parse(
        fp, filename_for_logging, _default_globals)
    fp.close()
    if is_temp:
        os.remove(downloaded_filename)
    options = result.get('buildout', {})
    extends = options.pop('extends', None)
    if 'extended-by' in options:
        raise zc.buildout.UserError(
            'No-longer supported "extended-by" option found in %s.' %
            filename)
    # Record the origin filename of every option value.
    result = _annotate(result, filename)
    if root_config_file and 'buildout' in result:
        download_options = _update_section(
            download_options, result['buildout']
        )
    # Process extends to handle nested += and -=
    eresults = []
    if extends:
        extends = extends.split()
        for fname in extends:
            next_extend, user_defaults = _open(
                base, fname, seen, download_options, override,
                downloaded, user_defaults)
            eresults.extend(next_extend)
    else:
        # Leaf file: apply user defaults exactly once, at the deepest level.
        if user_defaults:
            result = _update(user_defaults, result)
            user_defaults = {}
    optional_extends = options.pop('optional-extends', None)
    if optional_extends:
        optional_extends = optional_extends.value.split()
        for fname in optional_extends:
            # Unlike 'extends', a missing optional file is not an error.
            if not os.path.exists(fname):
                print("optional-extends file not found: %s" % fname)
                continue
            next_extend, user_defaults = _open(
                base, fname, seen, download_options, override,
                downloaded, user_defaults)
            eresults.extend(next_extend)
    eresults.append(result)
    seen.pop()
    if root_config_file:
        # Merge the whole chain, later files overriding earlier ones.
        final_result = {}
        for eresult in eresults:
            final_result = _update(final_result, eresult)
        return final_result, user_defaults
    else:
        return eresults, user_defaults
# VCS/bytecode directories excluded from directory hashing.
ignore_directories = '.svn', 'CVS', '__pycache__', '.git'
# Cache of directory path -> content hash, filled by _dir_hash.
_dir_hashes = {}
def _dir_hash(dir):
    """Return (and cache) an MD5 hex digest of *dir*'s contents.

    VCS metadata, __pycache__, and compiled bytecode are skipped, and
    entry_points.txt is parsed so its unstable on-disk ordering does not
    change the hash.
    """
    dir_hash = _dir_hashes.get(dir, None)
    if dir_hash is not None:
        return dir_hash
    hash = md5()
    for (dirpath, dirnames, filenames) in os.walk(dir):
        # Sort for a stable walk order; prune ignored directories in place.
        dirnames[:] = sorted(n for n in dirnames if n not in ignore_directories)
        filenames[:] = sorted(f for f in filenames
                              if (not (f.endswith('pyc') or f.endswith('pyo'))
                                  and os.path.exists(os.path.join(dirpath, f)))
                              )
        # Fold the directory layout itself into the hash.
        for_hash = ' '.join(dirnames + filenames)
        if isinstance(for_hash, str):
            for_hash = for_hash.encode()
        hash.update(for_hash)
        for name in filenames:
            path = os.path.join(dirpath, name)
            if name == 'entry_points.txt':
                f = open(path)
                # Entry points aren't written in stable order. :(
                try:
                    sections = zc.buildout.configparser.parse(f, path)
                    data = repr([(sname, sorted(sections[sname].items()))
                                 for sname in sorted(sections)]).encode('utf-8')
                except Exception:
                    # Unparseable: fall back to hashing the raw bytes.
                    f.close()
                    f = open(path, 'rb')
                    data = f.read()
            else:
                f = open(path, 'rb')
                data = f.read()
            f.close()
            hash.update(data)
    _dir_hashes[dir] = dir_hash = hash.hexdigest()
    return dir_hash
def _dists_sig(dists):
    """Return a signature list for *dists*: a content hash for develop
    eggs, the archive basename for everything else."""
    processed = set()
    signatures = []
    for dist in sorted(dists):
        if dist in processed:
            continue
        processed.add(dist)
        if dist.precedence == pkg_resources.DEVELOP_DIST:
            # Develop eggs can change in place; hash their directory.
            signatures.append(dist.project_name + '-' + _dir_hash(dist.location))
        else:
            signatures.append(os.path.basename(dist.location))
    return signatures
def _update_section(in1, s2):
    """Return a copy of section *in1* overridden by section *s2*.

    Plain keys in s2 replace those in s1; ``key +=`` / ``key -=`` entries
    add items to or remove items from the inherited (newline-delimited)
    value.  Neither argument is mutated.
    """
    s1 = copy.deepcopy(in1)
    # Base section 2 on section 1; section 1 is copied, with key-value pairs
    # in section 2 overriding those in section 1. If there are += or -=
    # operators in section 2, process these to add or subtract items (delimited
    # by newlines) from the preexisting values.
    s2 = copy.deepcopy(s2) # avoid mutating the second argument, which is unexpected
    # Sort on key, then on the addition or subtraction operator (+ comes first)
    for k, v in sorted(s2.items(), key=lambda x: (x[0].rstrip(' +'), x[0][-1])):
        if k.endswith('+'):
            key = k.rstrip(' +')
            implicit_value = SectionKey("", "IMPLICIT_VALUE")
            # Find v1 in s2 first; it may have been defined locally too.
            section_key = s2.get(key, s1.get(key, implicit_value))
            section_key = copy.deepcopy(section_key)
            section_key.addToValue(v.value, v.source)
            s2[key] = section_key
            del s2[k]
        elif k.endswith('-'):
            key = k.rstrip(' -')
            implicit_value = SectionKey("", "IMPLICIT_VALUE")
            # Find v1 in s2 first; it may have been set by a += operation first
            section_key = s2.get(key, s1.get(key, implicit_value))
            section_key = copy.deepcopy(section_key)
            section_key.removeFromValue(v.value, v.source)
            s2[key] = section_key
            del s2[k]
    _update_verbose(s1, s2)
    return s1
def _update_verbose(s1, s2):
for key, v2 in s2.items():
if key in s1:
v1 = s1[key]
v1.overrideValue(v2)
else:
s1[key] = copy.deepcopy(v2)
def _update(in1, d2):
    """Return configuration *in1* updated by *d2*, section by section.
    Neither argument is mutated.
    """
    d1 = copy.deepcopy(in1)
    for section in d2:
        if section in d1:
            d1[section] = _update_section(d1[section], d2[section])
        elif '<' not in d2[section].keys():
            # Skip sections that extend in other sections (macros), as we don't
            # have all the data (these will be processed when the section is
            # extended)
            temp = copy.deepcopy(d2[section])
            # 641 - Process base definitions done with += and -=
            for k, v in sorted(temp.items(), key=lambda item: item[0]):
                # Process + before -, configparser resolves conflicts
                if k[-1] == '+' and k[:-2] not in temp:
                    # Turn += without a preceding = into an assignment
                    temp[k[:-2]] = temp[k]
                    del temp[k]
                elif k[-1] == '-' and k[:-2] not in temp:
                    # Turn -= without a preceding = into an empty assignment
                    temp[k[:-2]] = temp[k]
                    temp[k[:-2]].removeFromValue(
                        temp[k[:-2]].value, "IMPLICIT_VALUE"
                    )
                    del temp[k]
            # 656 - Handle multiple option assignments/extensions/removals
            # in the same file, which can happen with conditional sections
            d1[section] = _update_section({}, temp)
        else:
            d1[section] = copy.deepcopy(d2[section])
    return d1
def _recipe(options):
recipe = options['recipe']
if ':' in recipe:
recipe, entry = recipe.split(':')
else:
entry = 'default'
return recipe, entry
def _doing():
_, v, tb = sys.exc_info()
message = str(v)
doing = []
while tb is not None:
d = tb.tb_frame.f_locals.get('__doing__')
if d:
doing.append(d)
tb = tb.tb_next
if doing:
sys.stderr.write('While:\n')
for d in doing:
if not isinstance(d, str):
d = d[0] % d[1:]
sys.stderr.write(' %s\n' % d)
def _error(*message):
sys.stderr.write('Error: ' + ' '.join(message) +'\n')
sys.exit(1)
# Preamble written before the traceback of unexpected (non-user) errors.
_internal_error_template = """
An internal error occurred due to a bug in either zc.buildout or in a
recipe being used:
"""
def _check_for_unused_options_in_section(buildout, section):
    """Warn about options in *section* that were never read (present in
    the raw data but never pulled into the computed data)."""
    options = buildout[section]
    unused = [name for name in sorted(options._raw)
              if name not in options._data]
    if not unused:
        return
    buildout._logger.warning(
        "Section `%s` contains unused option(s): %s.\n"
        "This may be an indication for either a typo in the option's name "
        "or a bug in the used recipe." %
        (section, ' '.join(map(repr, unused)))
    )
_usage = """\
Usage: buildout [options] [assignments] [command [command arguments]]
Options:
-c config_file
Specify the path to the buildout configuration file to be used.
This defaults to the file named "buildout.cfg" in the current
working directory.
-D
Debug errors. If an error occurs, then the post-mortem debugger
will be started. This is especially useful for debugging recipe
problems.
-h, --help
Print this message and exit.
-N
Run in non-newest mode. This is equivalent to the assignment
buildout:newest=false. With this setting, buildout will not seek
new distributions if installed distributions satisfy it's
requirements.
-q
Decrease the level of verbosity. This option can be used multiple times.
-t socket_timeout
Specify the socket timeout in seconds.
-U
Don't read user defaults.
-v
Increase the level of verbosity. This option can be used multiple times.
--version
Print buildout version number and exit.
Assignments are of the form: section:option=value and are used to
provide configuration options that override those given in the
configuration file. For example, to run the buildout in offline mode,
use buildout:offline=true.
Options and assignments can be interspersed.
Commands:
install
Install the parts specified in the buildout configuration. This is
the default command if no command is specified.
bootstrap
Create a new buildout in the current working directory, copying
the buildout and setuptools eggs and, creating a basic directory
structure and a buildout-local buildout script.
init [requirements]
Initialize a buildout, creating a minimal buildout.cfg file if it doesn't
exist and then performing the same actions as for the bootstrap
command.
If requirements are supplied, then the generated configuration
will include an interpreter script that requires them. This
provides an easy way to quickly set up a buildout to experiment
with some packages.
setup script [setup command and options]
Run a given setup script arranging that setuptools is in the
script's path and and that it has been imported so that
setuptools-provided commands (like bdist_egg) can be used even if
the setup script doesn't import setuptools.
The script can be given either as a script path or a path to a
directory containing a setup.py script.
annotate
Display annotated sections. All sections are displayed, sorted
alphabetically. For each section, all key-value pairs are displayed,
sorted alphabetically, along with the origin of the value (file name or
COMPUTED_VALUE, DEFAULT_VALUE, COMMAND_LINE_VALUE).
query section:key
Display value of given section key pair.
"""
def _help():
    # Print the command-line usage message and exit successfully.
    print_(_usage)
    sys.exit(0)
def _version():
    # Print the installed zc.buildout version and exit successfully.
    version = pkg_resources.working_set.find(
        pkg_resources.Requirement.parse('zc.buildout')).version
    print_("buildout version %s" % version)
    sys.exit(0)
def main(args=None):
    """Command-line entry point: parse flags and section:option=value
    assignments, construct a Buildout, and run the requested command
    (default: install).
    """
    if args is None:
        args = sys.argv[1:]
    config_file = 'buildout.cfg'
    verbosity = 0
    options = []
    use_user_defaults = True
    debug = False
    while args:
        if args[0][0] == '-':
            op = orig_op = args.pop(0)
            op = op[1:]
            # Single-letter flags may be bundled, e.g. -vvN.
            while op and op[0] in 'vqhWUoOnNDA':
                if op[0] == 'v':
                    verbosity += 10
                elif op[0] == 'q':
                    verbosity -= 10
                elif op[0] == 'U':
                    use_user_defaults = False
                elif op[0] == 'o':
                    options.append(('buildout', 'offline', 'true'))
                elif op[0] == 'O':
                    options.append(('buildout', 'offline', 'false'))
                elif op[0] == 'n':
                    options.append(('buildout', 'newest', 'true'))
                elif op[0] == 'N':
                    options.append(('buildout', 'newest', 'false'))
                elif op[0] == 'D':
                    debug = True
                else:
                    _help()
                op = op[1:]
            # -c and -t take an argument, either attached or as the next arg.
            if op[:1] in ('c', 't'):
                op_ = op[:1]
                op = op[1:]
                if op_ == 'c':
                    if op:
                        config_file = op
                    else:
                        if args:
                            config_file = args.pop(0)
                        else:
                            _error("No file name specified for option", orig_op)
                elif op_ == 't':
                    try:
                        timeout_string = args.pop(0)
                        # int() validates; the option keeps the string form.
                        timeout = int(timeout_string)
                        options.append(
                            ('buildout', 'socket-timeout', timeout_string))
                    except IndexError:
                        _error("No timeout value specified for option", orig_op)
                    except ValueError:
                        _error("Timeout value must be numeric", orig_op)
            elif op:
                if orig_op == '--help':
                    _help()
                elif orig_op == '--version':
                    _version()
                else:
                    _error("Invalid option", '-'+op[0])
        elif '=' in args[0]:
            # section:option=value (or option=value for [buildout]) override.
            option, value = args.pop(0).split('=', 1)
            option = option.split(':')
            if len(option) == 1:
                option = 'buildout', option[0]
            elif len(option) != 2:
                _error('Invalid option:', option)
            section, option = option
            options.append((section.strip(), option.strip(), value.strip()))
        else:
            # We've run out of command-line options and option assignments
            # The rest should be commands, so we'll stop here
            break
    if verbosity:
        options.append(('buildout', 'verbosity', str(verbosity)))
    if args:
        command = args.pop(0)
        if command not in Buildout.COMMANDS:
            _error('invalid command:', command)
    else:
        command = 'install'
    try:
        try:
            buildout = Buildout(config_file, options,
                                use_user_defaults, command, args)
            getattr(buildout, command)(args)
        except SystemExit:
            logging.shutdown()
            # Make sure we properly propagate an exit code from a restarted
            # buildout process.
            raise
        except Exception:
            v = sys.exc_info()[1]
            _doing()
            exc_info = sys.exc_info()
            import pdb, traceback
            if debug:
                traceback.print_exception(*exc_info)
                sys.stderr.write('\nStarting pdb:\n')
                pdb.post_mortem(exc_info[2])
            else:
                # User-level errors get a message only; anything else is an
                # internal error and gets the full traceback.
                if isinstance(v, (zc.buildout.UserError,
                                  distutils.errors.DistutilsError
                                  )
                              ):
                    _error(str(v))
                else:
                    sys.stderr.write(_internal_error_template)
                    traceback.print_exception(*exc_info)
                    sys.exit(1)
    finally:
        logging.shutdown()
# Accept config-file strings as well as real booleans.
_bool_names = {'true': True, 'false': False, True: True, False: False}
def bool_option(options, name, default=None):
    """Return the boolean value of option *name* in mapping *options*.

    Raises KeyError when the option is missing and no default is given,
    and UserError when the value is not a recognized boolean.
    """
    value = options.get(name, default)
    if value is None:
        raise KeyError(name)
    if value in _bool_names:
        return _bool_names[value]
    raise zc.buildout.UserError(
        'Invalid value for %r option: %r' % (name, value))
| Options |
python | viewflow__viewflow | viewflow/conf.py | {
"start": 889,
"end": 1743
} | class ____(object):
def __init__(self, custom=None):
if custom is None:
custom = getattr(django_settings, "VIEWFLOW", {})
self.settings = deepcopy(DEFAULTS)
for key, value in custom.get("WIDGET_RENDERERS", {}).items():
widget_class, renderer_class = import_string(key), import_string(value)
self.settings["WIDGET_RENDERERS"][widget_class] = renderer_class
def __getattr__(self, attr):
if attr not in self.settings:
raise AttributeError("Invalid viewflow setting: '%s'" % attr)
return self.settings[attr]
settings = Settings()
def reload_settings(*args, **kwargs):
global settings
setting, value = kwargs["setting"], kwargs["value"]
if setting == "VIEWFLOW":
settings = Settings(value)
setting_changed.connect(reload_settings)
| Settings |
python | sympy__sympy | sympy/vector/deloperator.py | {
"start": 93,
"end": 3191
} | class ____(Basic):
"""
Represents the vector differential operator, usually represented in
mathematical expressions as the 'nabla' symbol.
"""
def __new__(cls):
obj = super().__new__(cls)
obj._name = "delop"
return obj
def gradient(self, scalar_field, doit=False):
"""
Returns the gradient of the given scalar field, as a
Vector instance.
Parameters
==========
scalar_field : SymPy expression
The scalar field to calculate the gradient of.
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> C = CoordSys3D('C')
>>> delop = Del()
>>> delop.gradient(9)
0
>>> delop(C.x*C.y*C.z).doit()
C.y*C.z*C.i + C.x*C.z*C.j + C.x*C.y*C.k
"""
return gradient(scalar_field, doit=doit)
__call__ = gradient
__call__.__doc__ = gradient.__doc__
def dot(self, vect, doit=False):
"""
Represents the dot product between this operator and a given
vector - equal to the divergence of the vector field.
Parameters
==========
vect : Vector
The vector whose divergence is to be calculated.
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> delop = Del()
>>> C = CoordSys3D('C')
>>> delop.dot(C.x*C.i)
Derivative(C.x, C.x)
>>> v = C.x*C.y*C.z * (C.i + C.j + C.k)
>>> (delop & v).doit()
C.x*C.y + C.x*C.z + C.y*C.z
"""
return divergence(vect, doit=doit)
__and__ = dot
__and__.__doc__ = dot.__doc__
def cross(self, vect, doit=False):
"""
Represents the cross product between this operator and a given
vector - equal to the curl of the vector field.
Parameters
==========
vect : Vector
The vector whose curl is to be calculated.
doit : bool
If True, the result is returned after calling .doit() on
each component. Else, the returned expression contains
Derivative instances
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> C = CoordSys3D('C')
>>> delop = Del()
>>> v = C.x*C.y*C.z * (C.i + C.j + C.k)
>>> delop.cross(v, doit = True)
(-C.x*C.y + C.x*C.z)*C.i + (C.x*C.y - C.y*C.z)*C.j +
(-C.x*C.z + C.y*C.z)*C.k
>>> (delop ^ C.i).doit()
0
"""
return curl(vect, doit=doit)
__xor__ = cross
__xor__.__doc__ = cross.__doc__
def _sympystr(self, printer):
return self._name
| Del |
python | PyCQA__pylint | doc/exts/pylint_options.py | {
"start": 795,
"end": 8140
} | class ____(NamedTuple):
name: str
optdict: OptionDict
checker: BaseChecker
extension: bool
OptionsDataDict = dict[str, list[OptionsData]]
PYLINT_BASE_PATH = Path(__file__).resolve().parent.parent.parent
"""Base path to the project folder."""
PYLINT_USERGUIDE_PATH = PYLINT_BASE_PATH / "doc" / "user_guide"
"""Path to the messages documentation folder."""
DYNAMICALLY_DEFINED_OPTIONS: dict[str, dict[str, str]] = {
# Option name, key / values we want to modify
"py-version": {"default": "sys.version_info[:2]"},
"spelling-dict": {
"choices": "Values from 'enchant.Broker().list_dicts()' depending on your local enchant installation",
"help": "Spelling dictionary name. Available dictionaries depends on your local enchant installation",
},
}
def _register_all_checkers_and_extensions(linter: PyLinter) -> None:
"""Registers all checkers and extensions found in the default folders."""
initialize_checkers(linter)
initialize_extensions(linter)
def _get_all_options(linter: PyLinter) -> OptionsDataDict:
"""Get all options registered to a linter and return the data."""
all_options: OptionsDataDict = defaultdict(list)
for checker in sorted(linter.get_checkers()):
for option_name, option_info in checker.options:
changes_to_do = DYNAMICALLY_DEFINED_OPTIONS.get(option_name, {})
if changes_to_do:
for key_to_change, new_value in changes_to_do.items():
print(
f"Doc value for {option_name!r}['{key_to_change}'] changed to "
f"{new_value!r} (instead of {option_info[key_to_change]!r})"
)
option_info[key_to_change] = new_value
all_options[checker.name].append(
OptionsData(
option_name,
option_info,
checker,
getmodule(checker).__name__.startswith("pylint.extensions."), # type: ignore[union-attr]
)
)
return all_options
def _create_checker_section(
checker: str, options: list[OptionsData], linter: PyLinter
) -> str:
checker_string = f".. _{checker}-options:\n\n"
checker_string += get_rst_title(f"``{checker.capitalize()}`` **Checker**", "-")
toml_doc = tomlkit.document()
tool_table = tomlkit.table(is_super_table=True)
toml_doc.add(tomlkit.key("tool"), tool_table)
pylint_tool_table = tomlkit.table(is_super_table=True)
tool_table.add(tomlkit.key("pylint"), pylint_tool_table)
checker_table = tomlkit.table()
for option in sorted(options, key=lambda x: x.name):
checker_string += get_rst_title(f"--{option.name}", '"')
checker_string += f"*{option.optdict.get('help')}*\n\n"
if option.optdict.get("default") == "":
checker_string += '**Default:** ``""``\n\n\n'
else:
checker_string += f"**Default:** ``{option.optdict.get('default')}``\n\n\n"
# Start adding the option to the toml example
if option.optdict.get("hide_from_config_file"):
continue
# Get current value of option
try:
value = DYNAMICALLY_DEFINED_OPTIONS[option.name]["default"]
except KeyError:
value = getattr(linter.config, option.name.replace("-", "_"))
# Create a comment if the option has no value
if value is None:
checker_table.add(tomlkit.comment(f"{option.name} ="))
checker_table.add(tomlkit.nl())
continue
# Display possible choices
choices = option.optdict.get("choices", "")
if choices:
checker_table.add(tomlkit.comment(f"Possible choices: {choices}"))
# Tomlkit doesn't support regular expressions
if isinstance(value, re.Pattern):
value = value.pattern
elif (
isinstance(value, (list, tuple))
and value
and isinstance(value[0], re.Pattern)
):
value = [i.pattern for i in value]
# Sorting in order for the output to be the same on all interpreters
# Don't sort everything here, alphabetical order do not make a lot of sense
# for options most of the time. Only dict based 'unstable' options need this
if isinstance(value, (list, tuple)) and option.name in ["disable"]:
value = sorted(value, key=lambda x: str(x))
# Add to table
checker_table.add(option.name, value)
checker_table.add(tomlkit.nl())
pylint_tool_table.add(options[0].checker.name.lower(), checker_table)
toml_string = "\n".join(
f" {i}" if i else "" for i in tomlkit.dumps(toml_doc).split("\n")
)
checker_string += f"""
.. raw:: html
<details>
<summary><a>Example configuration section</a></summary>
**Note:** Only ``tool.pylint`` is required, the section title is not. These are the default values.
.. code-block:: toml
{toml_string}
.. raw:: html
</details>
"""
return checker_string
def _write_options_page(options: OptionsDataDict, linter: PyLinter) -> None:
"""Create or overwrite the options page."""
sections: list[str] = [
".. This file is auto-generated. Make any changes to the associated\n"
".. docs extension in 'doc/exts/pylint_options.py'.\n\n"
".. _all-options:",
get_rst_title("Standard Checkers", "^"),
]
found_extensions = False
# We can't sort without using the 'key' keyword because if keys in 'options' were
# checkers then it wouldn't be possible to have a checker with the same name
# spanning multiple classes. It would make pylint plugin code less readable by
# forcing to use a single class / file.
for checker_name, checker_options in sorted(
options.items(), key=lambda x: x[1][0].checker
):
if not found_extensions and checker_options[0].extension:
sections.append(get_rst_title("Extensions", "^"))
found_extensions = True
sections.append(_create_checker_section(checker_name, checker_options, linter))
all_options_path = PYLINT_USERGUIDE_PATH / "configuration" / "all-options.rst"
sections_string = "\n\n".join(sections)
with open(all_options_path, "w", encoding="utf-8") as stream:
stream.write(f"\n\n{sections_string}")
# pylint: disable-next=unused-argument
def build_options_page(app: Sphinx | None) -> None:
"""Overwrite messages files by printing the documentation to a stream.
Documentation is written in ReST format.
"""
# Create linter, register all checkers and extensions and get all options
linter = PyLinter()
_register_all_checkers_and_extensions(linter)
options = _get_all_options(linter)
# Write options page
_write_options_page(options, linter)
def setup(app: Sphinx) -> dict[str, bool]:
"""Connects the extension to the Sphinx process."""
# Register callback at the builder-inited Sphinx event
# See https://www.sphinx-doc.org/en/master/extdev/appapi.html
app.connect("builder-inited", build_options_page)
return {"parallel_read_safe": True}
if __name__ == "__main__":
print("Uncomment the following line to allow running this script directly.")
# build_options_page(None)
| OptionsData |
python | getsentry__sentry | tests/sentry/rules/filters/test_latest_adopted_release.py | {
"start": 187,
"end": 7588
} | class ____(RuleTestCase):
rule_cls = LatestAdoptedReleaseFilter
def test_semver(self) -> None:
event = self.get_event()
now = datetime.now(UTC)
prod = self.create_environment(name="prod")
test = self.create_environment(name="test")
newest_release = self.create_release(
project=event.group.project,
version="test@2.0",
date_added=now - timedelta(days=2),
environments=[test],
adopted=now - timedelta(days=2),
)
oldest_release = self.create_release(
project=event.group.project,
version="test@1.0",
date_added=now - timedelta(days=1),
environments=[prod],
adopted=now - timedelta(days=1),
)
middle_release = self.create_release(
project=event.group.project,
version="test@1.5",
date_added=now,
environments=[prod],
adopted=now,
)
# Test no release
data = {"oldest_or_newest": "oldest", "older_or_newer": "newer", "environment": prod.name}
rule = self.get_rule(data=data)
self.assertDoesNotPass(rule, event)
self.create_group_release(group=self.event.group, release=newest_release)
rule = self.get_rule(data=data)
self.assertPasses(rule, event)
event_2 = self.store_event(data={"fingerprint": ["group2"]}, project_id=self.project.id)
group_2 = event_2.group
self.create_group_release(group=group_2, release=newest_release)
self.create_group_release(group=group_2, release=oldest_release)
self.assertDoesNotPass(rule, event_2)
event_3 = self.store_event(data={"fingerprint": ["group3"]}, project_id=self.project.id)
group_3 = event_3.group
self.create_group_release(group=group_3, release=middle_release)
self.assertDoesNotPass(rule, event_3)
# Check that the group cache invalidation works by adding an older release to the first group
self.create_group_release(group=self.event.group, release=oldest_release)
self.assertDoesNotPass(rule, event)
# Check that the project cache invalidation works by adding a newer release to the project
event_4 = self.store_event(data={"fingerprint": ["group4"]}, project_id=self.project.id)
group_4 = event_4.group
self.create_group_release(group=group_4, release=newest_release)
self.assertPasses(rule, event_4)
self.create_release(
project=event.group.project,
version="test@3.0",
date_added=now - timedelta(days=5),
environments=[prod],
adopted=now - timedelta(days=2),
)
self.assertDoesNotPass(rule, event_4)
def test_semver_with_release_without_adoption(self) -> None:
event = self.get_event()
now = datetime.now(UTC)
prod = self.create_environment(name="prod")
test = self.create_environment(name="test")
test_release = self.create_release(
project=event.group.project,
version="test@1.9",
date_added=now,
environments=[test],
adopted=now,
)
test_bad_release = self.create_release(
project=event.group.project,
version="test@0.9",
date_added=now - timedelta(days=2),
environments=[prod],
adopted=now - timedelta(days=2),
)
# Latest unadopted release
self.create_release(
project=event.group.project,
version="test@2.0",
date_added=now - timedelta(days=1),
environments=[prod],
adopted=None,
)
# Latest adopted release
self.create_release(
project=event.group.project,
version="test@1.0",
date_added=now - timedelta(days=3),
environments=[prod],
adopted=now - timedelta(days=3),
)
self.create_group_release(group=self.event.group, release=test_bad_release)
data = {"oldest_or_newest": "oldest", "older_or_newer": "newer", "environment": prod.name}
rule = self.get_rule(data=data)
# Oldest release for group is .9, latest adopted release for environment is 1.0
self.assertDoesNotPass(rule, event)
event_2 = self.store_event(data={"fingerprint": ["group2"]}, project_id=self.project.id)
group_2 = event_2.group
self.create_group_release(group=group_2, release=test_release)
# Oldest release for group is 1.9, latest adopted release for environment is 1.0
self.assertPasses(rule, event_2)
def test_no_adopted_release(self) -> None:
event = self.get_event()
now = datetime.now(UTC)
prod = self.create_environment(name="prod")
test = self.create_environment(name="test")
test_release = self.create_release(
project=event.group.project,
version="test@1.9",
date_added=now,
environments=[test],
adopted=now,
)
self.create_release(
project=event.group.project,
version="test@0.9",
date_added=now - timedelta(days=2),
environments=[prod],
adopted=None,
)
self.create_group_release(group=self.event.group, release=test_release)
data = {"oldest_or_newest": "oldest", "older_or_newer": "newer", "environment": prod.name}
rule = self.get_rule(data=data)
self.assertDoesNotPass(rule, event)
def test_date(self) -> None:
event = self.get_event()
now = datetime.now(UTC)
prod = self.create_environment(name="prod")
test = self.create_environment(name="test")
oldest_release = self.create_release(
project=event.group.project,
version="1",
date_added=now - timedelta(days=2),
environments=[prod],
adopted=now - timedelta(days=2),
)
middle_release = self.create_release(
project=event.group.project,
version="2",
date_added=now - timedelta(days=1),
environments=[prod],
adopted=now - timedelta(days=1),
)
newest_release = self.create_release(
project=event.group.project,
version="3",
date_added=now,
environments=[test],
adopted=now,
)
self.create_group_release(group=self.event.group, release=newest_release)
data = {"oldest_or_newest": "oldest", "older_or_newer": "newer", "environment": prod.name}
rule = self.get_rule(data=data)
self.assertPasses(rule, event)
event_2 = self.store_event(data={"fingerprint": ["group2"]}, project_id=self.project.id)
group_2 = event_2.group
self.create_group_release(group=group_2, release=newest_release)
self.create_group_release(group=group_2, release=oldest_release)
self.assertDoesNotPass(rule, event_2)
event_3 = self.store_event(data={"fingerprint": ["group3"]}, project_id=self.project.id)
group_3 = event_3.group
self.create_group_release(group=group_3, release=middle_release)
self.assertDoesNotPass(rule, event_3)
| LatestAdoptedReleaseFilterTest |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 52896,
"end": 57890
} | class ____(BaseConfigHeuristic):
"""
Placeholder child class for Intel GPU specific overrides.
"""
def __init__(self) -> None:
super().__init__()
self.xpu_default_flex_config = {
(torch.float32, 64): FlexConfig(128, 32, 1, 16),
(torch.float32, 128): FlexConfig(128, 32, 1, 16),
(torch.float32, 256): FlexConfig(64, 16, 1, 8),
(torch.bfloat16, 64): FlexConfig(128, 64, 1, 16),
(torch.bfloat16, 128): FlexConfig(128, 64, 1, 16),
(torch.bfloat16, 256): FlexConfig(32, 64, 1, 4),
(torch.float16, 64): FlexConfig(128, 64, 1, 16),
(torch.float16, 128): FlexConfig(128, 64, 1, 16),
(torch.float16, 256): FlexConfig(32, 64, 1, 4),
}
self.flex_attn_fwd_autotune_configs: list[FlexConfig] = [
FlexConfig(32, 16, 2, 4),
FlexConfig(128, 64, 2, 16),
FlexConfig(128, 64, 2, 8),
FlexConfig(128, 32, 2, 16),
FlexConfig(128, 32, 2, 8),
]
self.flex_attn_bwd_autotune_configs: list[FlexBwDConfig] = []
self.flex_decode_autotune_configs: list[FlexDecodeConfig] = []
if not bool(os.getenv("CI")):
self.flex_attn_bwd_autotune_configs += [
# See Note: flex bwd configs
FlexBwDConfig(BLOCK1, BLOCK2, BLOCK2, BLOCK1, s, w)
for BLOCK1 in [32, 64]
for BLOCK2 in [32, 64, 128]
for s in [1, 3, 4, 5] # num_stages
for w in ([4, 8] if BLOCK1 >= 128 or BLOCK2 >= 128 else [4])
if BLOCK2 % BLOCK1 == 0
]
self.flex_decode_autotune_configs += [
FlexDecodeConfig(32, 1, 2),
FlexDecodeConfig(32, 1, 1),
FlexDecodeConfig(32, 2, 2),
FlexDecodeConfig(32, 2, 1),
FlexDecodeConfig(64, 1, 2),
FlexDecodeConfig(64, 1, 1),
FlexDecodeConfig(64, 2, 2),
FlexDecodeConfig(64, 2, 1),
]
def get_flex_attn_fwd_configs(self, head_dim: int, dtype: Any) -> list[FlexConfig]:
flex_attn_fwd_configs: list[FlexConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_fwd_configs
flex_attn_fwd_configs += self.flex_attn_fwd_autotune_configs
if head_dim <= 256:
if dtype == torch.float32:
default_config = FlexConfig(64, 64, 1, 8)
else:
default_config = FlexConfig(128, 64, 1, 16)
default_config = self.xpu_default_flex_config.get(
(dtype, head_dim), default_config
)
else:
if dtype == torch.float32:
default_config = FlexConfig(32, 16, 1, 4)
else:
default_config = FlexConfig(64, 32, 1, 8)
if default_config not in flex_attn_fwd_configs:
flex_attn_fwd_configs.append(default_config)
return flex_attn_fwd_configs
def get_flex_attn_bwd_configs(
self, head_dim: int, dtype: Any
) -> list[FlexBwDConfig]:
flex_attn_bwd_configs: list[FlexBwDConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_bwd_configs
flex_attn_bwd_configs += self.flex_attn_bwd_autotune_configs
if dtype == torch.float32:
default_config = FlexBwDConfig(16, 16, 16, 16, 1, 4)
elif head_dim <= 256:
if head_dim == 64:
default_config = FlexBwDConfig(64, 64, 64, 64, 1, 8)
elif head_dim == 128:
default_config = FlexBwDConfig(64, 128, 64, 128, 1, 8)
else:
default_config = FlexBwDConfig(64, 64, 64, 64, 1, 8)
else: # modest hardware or extremely large head_dim
default_config = FlexBwDConfig(16, 16, 16, 16, 1, 4)
if default_config not in flex_attn_bwd_configs:
flex_attn_bwd_configs.append(default_config)
return flex_attn_bwd_configs
def get_flex_decode_configs(
self, head_dim: int, dtype: Any
) -> list[FlexDecodeConfig]:
flex_decode_configs: list[FlexDecodeConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_decode_configs
flex_decode_configs += self.flex_decode_autotune_configs
default_config = FlexDecodeConfig(64, 1, 2)
if default_config not in flex_decode_configs:
flex_decode_configs.append(default_config)
return flex_decode_configs
def _prune_exhaustive_configs(
self,
configs: list[BaseConfig],
dtype_size: int,
) -> list[BaseConfig]:
return configs
| XPUConfigHeuristic |
python | pyinstaller__pyinstaller | PyInstaller/archive/writers.py | {
"start": 14825,
"end": 20744
} | class ____:
"""
Writer for the splash screen resources archive.
The resulting archive is added as an entry into the CArchive with the typecode PKG_ITEM_SPLASH.
"""
# This struct describes the splash resources as it will be in an buffer inside the bootloader. All necessary parts
# are bundled, the *_len and *_offset fields describe the data beyond this header definition.
# Whereas script and image fields are binary data, the requirements fields describe an array of strings. Each string
# is null-terminated in order to easily iterate over this list from within C.
#
# typedef struct _splash_data_header
# {
# char tcl_shared_library_name[32];
# char tk_shared_library_name[32];
# char tcl_module_directory_name[16];
# char tk_module_directory_name[16];
#
# uint32_t script_len;
# uint32_t script_offset;
#
# uint32_t image_len;
# uint32_t image_offset;
#
# uint32_t requirements_len;
# uint32_t requirements_offset;
# } SPLASH_DATA_HEADER;
#
_HEADER_FORMAT = '!32s 32s 16s 16s II II II'
_HEADER_LENGTH = struct.calcsize(_HEADER_FORMAT)
# The created archive is compressed by the CArchive, so no need to compress the data here.
def __init__(
self,
filename,
requirements_list,
tcl_shared_library_name,
tk_shared_library_name,
tcl_module_directory_name,
tk_module_directory_name,
image,
script,
):
"""
Writer for splash screen resources that are bundled into the CArchive as a single archive/entry.
:param filename: The filename of the archive to create
:param requirements_list: List of filenames for the requirements array
:param str tcl_shared_library_name: Basename of the Tcl shared library
:param str tk_shared_library_name: Basename of the Tk shared library
:param str tcl_module_directory_name: Basename of the Tcl module directory (e.g., tcl/)
:param str tk_module_directory_name: Basename of the Tk module directory (e.g., tk/)
:param Union[str, bytes] image: Image like object
:param str script: The tcl/tk script to execute to create the screen.
"""
# Ensure forward slashes in dependency names are on Windows converted to back slashes '\\', as on Windows the
# bootloader works only with back slashes.
def _normalize_filename(filename):
filename = os.path.normpath(filename)
if is_win and os.path.sep == '/':
# When building under MSYS, the above path normalization uses Unix-style separators, so replace them
# manually.
filename = filename.replace(os.path.sep, '\\')
return filename
requirements_list = [_normalize_filename(name) for name in requirements_list]
with open(filename, "wb") as fp:
# Reserve space for the header.
fp.write(b'\0' * self._HEADER_LENGTH)
# Serialize the requirements list. This list (more an array) contains the names of all files the bootloader
# needs to extract before the splash screen can be started. The implementation terminates every name with a
# null-byte, that keeps the list short memory wise and makes it iterable from C.
requirements_len = 0
requirements_offset = fp.tell()
for name in requirements_list:
name = name.encode('utf-8') + b'\0'
fp.write(name)
requirements_len += len(name)
# Write splash script
script_offset = fp.tell()
script_len = len(script)
fp.write(script.encode("utf-8"))
# Write splash image. If image is a bytes buffer, it is written directly into the archive. Otherwise, it
# is assumed to be a path and the file is copied into the archive.
image_offset = fp.tell()
if isinstance(image, bytes):
# Image was converted by PIL/Pillow and is already in buffer
image_len = len(image)
fp.write(image)
else:
# Read image into buffer
with open(image, 'rb') as image_fp:
image_data = image_fp.read()
image_len = len(image_data)
fp.write(image_data)
del image_data
# The following strings are written to 16-character fields with zero-padding, which means that we need to
# ensure that their length is strictly below 16 characters (if it were exactly 16, the field would have no
# terminating NULL character!).
def _encode_str(value, field_name, limit):
enc_value = value.encode("utf-8")
if len(enc_value) >= limit:
raise ValueError(
f"Length of the encoded field {field_name!r} ({len(enc_value)}) is greater or equal to the "
f"limit of {limit} characters!"
)
return enc_value
# Write header
header_data = struct.pack(
self._HEADER_FORMAT,
_encode_str(tcl_shared_library_name, 'tcl_shared_library_name', 32),
_encode_str(tk_shared_library_name, 'tk_shared_library_name', 32),
_encode_str(tcl_module_directory_name, 'tcl_module_directory_name', 16),
_encode_str(tk_module_directory_name, 'tk_module_directory_name', 16),
script_len,
script_offset,
image_len,
image_offset,
requirements_len,
requirements_offset,
)
fp.seek(0, os.SEEK_SET)
fp.write(header_data)
| SplashWriter |
python | google__jax | tests/debug_nans_test.py | {
"start": 959,
"end": 7091
} | class ____(jtu.JaxTestCase):
def testSinc(self):
# Regression test for #6936
self.assertEqual(jnp.sinc(0.0), 1.0)
def testSingleResultPrimitiveNoNaN(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans = jnp.tanh(A)
ans.block_until_ready()
def testMultipleResultPrimitiveNoNaN(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans, _ = jnp.linalg.eigh(A)
ans.block_until_ready()
def testJitComputationNoNaN(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans = jax.jit(jnp.tanh)(A)
ans.block_until_ready()
def testJitComputationNaN(self):
A = jnp.array(0.)
with self.assertRaises(FloatingPointError):
ans = jax.jit(lambda x: 0. / x)(A)
ans.block_until_ready()
@jax.debug_nans(False)
def testJitComputationNaNContextManager(self):
A = jnp.array(0.)
f = jax.jit(lambda x: 0. / x)
ans = f(A)
ans = f(A)
with self.assertRaises(FloatingPointError):
with jax.debug_nans(True):
ans = f(A)
ans.block_until_ready()
def testSingleResultPrimitiveNaN(self):
A = jnp.array(0.)
with self.assertRaises(FloatingPointError):
ans = 0. / A
ans.block_until_ready()
@jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
def testCallDeoptimized(self, jit):
@jit
def f(x):
return jax.lax.cond(
x == 1, lambda _: np.nan, lambda _: 2., operand=None)
# This makes sure, when using the C++ jit, that the Python code has been
# run to compile, and the next call won't go through `cache_miss`.
f(2)
# 'cond' not 'xla_call'
msg = r"invalid value \(nan\) encountered in .*cond.*"
with self.assertRaisesRegex(FloatingPointError, msg):
f(1)
def testShardMap(self):
mesh = jtu.create_mesh((1,), ('x',))
f = shard_map(lambda x: 0. / x, mesh=mesh, in_specs=(P('x')), out_specs=P('x'))
# For the Cpp pmap, the first execution always goes through Python.
f(jnp.array([1.]))
with self.assertRaisesRegex(
FloatingPointError,
r"Invalid value \(nan\) encountered in sharded computation"):
ans = f(jnp.array([0.]))
ans.block_until_ready()
if jax.device_count() >= 2:
with self.assertRaisesRegex(
FloatingPointError,
r"Invalid value \(nan\) encountered in sharded computation"):
ans = f(jnp.array([1., 0.]))
ans.block_until_ready()
def testPmap(self):
pmap_funcs = [api._cpp_pmap]
for pmap in pmap_funcs:
f = pmap(lambda x: 0. / x)
# For the Cpp pmap, the first execution always goes through Python.
f(jnp.array([1.]))
with self.assertRaisesRegex(
FloatingPointError,
r"invalid value \(nan\) encountered in div"):
ans = f(jnp.array([0.]))
ans.block_until_ready()
if jax.device_count() >= 2:
with self.assertRaisesRegex(
FloatingPointError,
r"Invalid value \(nan\) encountered in parallel computation"):
ans = f(jnp.array([1., 0.]))
ans.block_until_ready()
def testGradPmap(self):
@jax.jit
def f(x):
y = x**2
return jnp.log(y)
_, f_vjp = jax.vjp(jax.pmap(f), jnp.zeros([1]))
if config.pmap_shmap_merge.value:
expected_regex = r"Invalid value \(nan\) encountered in sharded computation."
else:
expected_regex = r"invalid value \(nan\) encountered in mul\nWhen differentiating"
with self.assertRaisesRegex(
FloatingPointError, expected_regex):
ans, = f_vjp(jnp.ones([1]))
ans.block_until_ready()
def testGradShardMap(self):
@jax.jit
def f(x):
y = x**2
return jnp.log(y)
mesh = jtu.create_mesh((1,), ('x',))
shmap_f = shard_map(f, mesh=mesh, in_specs=(P('x')), out_specs=P('x'))
_, f_vjp = jax.vjp(shmap_f, jnp.zeros([1]))
with self.assertRaisesRegex(
FloatingPointError, r"Invalid value \(nan\) encountered"):
ans, = f_vjp(jnp.ones([1]))
ans.block_until_ready()
def testPmapNoNaN(self):
ans = jax.pmap(lambda x: 0. / x)(jnp.array([1.]))
ans.block_until_ready()
@jtu.ignore_warning(message=".*is an experimental.*")
def test_jit(self):
if jax.device_count() < 2:
raise SkipTest("test requires >=2 devices")
p = jax.sharding.PartitionSpec('x')
f = jax.jit(lambda x: 0. / x, in_shardings=p, out_shardings=p)
inp = jnp.array([0., 1.])
with jax.set_mesh(
jax.sharding.Mesh(np.array(jax.local_devices()[:2]), ('x',))):
with self.assertRaises(FloatingPointError):
ans = f(inp)
ans.block_until_ready()
def testDebugNansJitWithDonation(self):
# https://github.com/jax-ml/jax/issues/12514
a = jnp.array(0.)
with self.assertRaises(FloatingPointError):
ans = jax.jit(lambda x: 0. / x, donate_argnums=(0,))(a)
ans.block_until_ready()
def testDebugNansPmapWithDonation(self):
a = jnp.zeros((1,))
with self.assertRaises(FloatingPointError):
ans = jax.pmap(lambda x: 0. / x, donate_argnums=(0,))(a)
ans.block_until_ready()
def testDebugNansJitWithDonationSharded(self):
if jax.device_count() < 2:
raise SkipTest("test requires >=2 devices")
inp = jnp.array([0., 1.])
f = jax.jit(lambda x: 0. / x, in_shardings=jax.P('x'),
out_shardings=jax.P('x'), donate_argnums=(0,))
with jax.set_mesh(
jax.sharding.Mesh(np.array(jax.local_devices()[:2]), ('x',))):
with self.assertRaises(FloatingPointError):
ans = f(inp)
ans.block_until_ready()
def testDebugNansZeroDiv(self):
inp = jnp.zeros(())
def f(x, y):
return x / y
with self.assertRaisesRegex(
FloatingPointError,
r"invalid value \(nan\) encountered in div"):
f(inp, inp)
with self.assertRaisesRegex(
FloatingPointError,
r"invalid value \(nan\) encountered in div"):
jax.jit(f)(inp, inp)
def testDebugNansInput(self):
@jax.jit
def f(x):
return x * 3.
with self.assertRaisesRegex(FloatingPointError, "the de-optimized function did not .*input"):
f(np.nan)
@jtu.with_config(jax_debug_infs=True)
| DebugNaNsTest |
python | catalyst-team__catalyst | examples/detection/dataset.py | {
"start": 8272,
"end": 12795
} | class ____(Dataset):
def __init__(self, coco_json_path, images_dir=None, transforms=None, down_ratio=4):
self.file = coco_json_path
self.img_dir = images_dir
self.transforms = transforms
self.down_ratio = down_ratio
self.images, self.categories = load_coco_json(coco_json_path)
self.images_list = sorted(self.images.keys())
self.class_to_cid = {
cls_idx: cat_id
for cls_idx, cat_id in enumerate(sorted(self.categories.keys()))
}
self.cid_to_class = {v: k for k, v in self.class_to_cid.items()}
self.num_classes = len(self.class_to_cid)
self.class_labels = [
self.categories[self.class_to_cid[cls_idx]]
for cls_idx in range(len(self.class_to_cid))
]
def __len__(self):
return len(self.images_list)
def __getitem__(self, index):
img_id = self.images_list[index]
img_record = self.images[img_id]
path = img_record["file_name"]
if self.img_dir is not None:
path = os.path.join(self.img_dir, path)
image = read_image(path)
original_size = [image.shape[0], image.shape[1]] # height, width
boxes = [] # each element is a tuple of (x1, y1, x2, y2, "class")
for annotation in img_record["annotations"]:
pixel_xywh = annotation["bbox"]
# skip bounding boxes with 0 height or 0 width
if pixel_xywh[2] == 0 or pixel_xywh[3] == 0:
continue
xyxy = pixels_to_absolute(
pixel_xywh, width=img_record["width"], height=img_record["height"]
)
xyxy = clip(xyxy, 0.0, 1.0)
bbox_class = str(self.cid_to_class[annotation["category_id"]])
boxes.append(xyxy + [str(bbox_class)])
if self.transforms is not None:
transformed = self.transforms(image=image, bboxes=boxes)
image, boxes = transformed["image"], transformed["bboxes"]
else:
image = torch.from_numpy((image / 255.0).astype(np.float32)).permute(2, 0, 1)
labels = np.array([int(items[4]) for items in boxes])
boxes = np.array([items[:4] for items in boxes], dtype=np.float32)
# boxes = change_box_order(boxes, "xyxy2xywh") # (x1, y1, x2, y2) -> (cx, cy, w, h)
heatmap_height = image.shape[1] // self.down_ratio
heatmap_width = image.shape[2] // self.down_ratio
# draw class centers
heatmap = np.zeros(
(self.num_classes, heatmap_height, heatmap_width), dtype=np.float32
)
for (x1, y1, x2, y2), cls_channel in zip(boxes, labels):
w, h = abs(x2 - x1), abs(y2 - y1)
xc, yc = x1 + w // 2, y1 + h // 2
scaled_xc = int(xc * heatmap_width)
scaled_yc = int(yc * heatmap_height)
draw_msra_gaussian(
heatmap, cls_channel, (scaled_xc, scaled_yc), sigma=np.clip(w * h, 2, 4)
)
# draw regression squares
wh_regr = np.zeros((2, heatmap_height, heatmap_width), dtype=np.float32)
regrs = boxes[:, 2:] - boxes[:, :2] # width, height
for r, (x1, y1, x2, y2) in zip(regrs, boxes):
w, h = abs(x2 - x1), abs(y2 - y1)
xc, yc = x1 + w // 2, y1 + h // 2
scaled_xc = int(xc * heatmap_width)
scaled_yc = int(yc * heatmap_height)
for i in range(-2, 2 + 1):
for j in range(-2, 2 + 1):
try:
a = max(scaled_xc + i, 0)
b = min(scaled_yc + j, heatmap_height)
wh_regr[:, a, b] = r
except: # noqa: E722
pass
wh_regr[0] = wh_regr[0].T
wh_regr[1] = wh_regr[1].T
return {
"image": image,
"original_size": original_size,
"size": [image.size(1), image.size(2)],
"heatmap": torch.from_numpy(heatmap),
"wh_regr": torch.from_numpy(wh_regr),
"bboxes": boxes,
"labels": labels,
}
@staticmethod
def collate_fn(batch):
keys = list(batch[0].keys())
packed_batch = {k: [] for k in keys}
for element in batch:
for k in keys:
packed_batch[k].append(element[k])
for k in ("image", "heatmap", "wh_regr"):
packed_batch[k] = torch.stack(packed_batch[k], 0)
return packed_batch
| CenterNetDataset |
python | huggingface__transformers | tests/models/sam2/test_modeling_sam2.py | {
"start": 4630,
"end": 10832
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (Sam2VisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = Sam2VisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=Sam2VisionConfig, has_text_modality=False)
def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# Overriding as attention shape depends on window_size
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = sum(self.model_tester.blocks_per_stage)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
window_size = config.backbone_config.window_size_per_stage[0]
out_dim = config.backbone_config.hidden_size
patch_stride = config.backbone_config.patch_stride
num_windows = (
self.model_tester.batch_size * (config.backbone_config.image_size // (window_size * patch_stride)) ** 2
)
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-4:]),
[num_windows, window_size, window_size, out_dim],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-4:]),
[num_windows, window_size, window_size, out_dim],
)
# Overriding as attention shape depends on window_size
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class, image_size):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = sum(self.model_tester.blocks_per_stage) + 1
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-4:]),
[
self.model_tester.batch_size,
self.model_tester.image_size // self.model_tester.patch_stride,
self.model_tester.image_size // self.model_tester.patch_stride,
self.model_tester.hidden_size,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = self.model_tester.image_size
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class, image_size)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class, image_size)
# Override as diffence slightly higher than the threshold
def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
def test_sdpa_can_compile_dynamic(self):
self.skipTest(reason="SAM model can't be compiled dynamic yet")
| Sam2VisionModelTest |
python | getsentry__sentry | src/sentry/sentry_apps/external_requests/issue_link_requester.py | {
"start": 1159,
"end": 1251
} | class ____(StrEnum):
CREATE = "create"
LINK = "link"
@dataclass
| IssueRequestActionType |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 12542,
"end": 12802
} | class ____(sgqlc.types.Enum):
"""The possible sides of a diff.
Enumeration Choices:
* `LEFT`: The left side of the diff.
* `RIGHT`: The right side of the diff.
"""
__schema__ = github_schema
__choices__ = ("LEFT", "RIGHT")
| DiffSide |
python | huggingface__transformers | src/transformers/models/xlm_roberta/modeling_xlm_roberta.py | {
"start": 22270,
"end": 23682
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([XLMRobertaLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| XLMRobertaEncoder |
python | pypa__warehouse | warehouse/cache/interfaces.py | {
"start": 78,
"end": 802
} | class ____(Interface):
"""
A cache for expensive/slow database query results.
Example usage:
>>> some_expensive_query = request.db.query(...)
>>> cache_service = request.find_service(IQueryResultsCache)
>>> cache_service.set("some_key_name", some_expensive_query)
# Later, retrieve the cached results:
>>> results = cache_service.get("some_key_name")
"""
def create_service(context, request):
"""Create the service, bootstrap any configuration needed."""
def get(key: str):
"""Get a cached result by key."""
def set(key: str, value):
"""Set a cached result by key."""
# TODO: do we need a set-with-expiration, a la `setex`?
| IQueryResultsCache |
python | allegroai__clearml | clearml/backend_api/services/v2_20/workers.py | {
"start": 84968,
"end": 85221
} | class ____(Response):
"""
Response of workers.status_report endpoint.
"""
_service = "workers"
_action = "status_report"
_version = "2.20"
_schema = {"definitions": {}, "properties": {}, "type": "object"}
| StatusReportResponse |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.