language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/utils/shapes.py | {
"start": 14452,
"end": 19305
} | class ____(ValueError):
def __init__(
self,
shape_a: tuple[int, ...],
shape_a_idx: int,
shape_b: tuple[int, ...],
shape_b_idx: int,
) -> None:
super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx)
@deprecated("7.0", alternative="np.broadcast_shapes")
def check_broadcast(*shapes: tuple[int, ...]) -> tuple[int, ...]:
"""
Determines whether two or more Numpy arrays can be broadcast with each
other based on their shape tuple alone.
Parameters
----------
*shapes : tuple
All shapes to include in the comparison. If only one shape is given it
is passed through unmodified. If no shapes are given returns an empty
`tuple`.
Returns
-------
broadcast : `tuple`
If all shapes are mutually broadcastable, returns a tuple of the full
broadcast shape.
"""
if len(shapes) == 0:
return ()
elif len(shapes) == 1:
return shapes[0]
reversed_shapes = (reversed(shape) for shape in shapes)
full_shape = []
for dims in zip_longest(*reversed_shapes, fillvalue=1):
max_dim = 1
max_dim_idx = None
for idx, dim in enumerate(dims):
if dim == 1:
continue
if max_dim == 1:
# The first dimension of size greater than 1
max_dim = dim
max_dim_idx = idx
elif dim != max_dim:
raise IncompatibleShapeError(
shapes[max_dim_idx], max_dim_idx, shapes[idx], idx
)
full_shape.append(max_dim)
return tuple(full_shape[::-1])
def unbroadcast(array: NDArray[DT]) -> NDArray[DT]:
"""
Given an array, return a new array that is the smallest subset of the
original array that can be re-broadcasted back to the original array.
See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays
for more details.
"""
if array.ndim == 0:
return array
array = array[
tuple((slice(0, 1) if stride == 0 else slice(None)) for stride in array.strides)
]
# Remove leading ones, which are not needed in numpy broadcasting.
first_not_unity = next(
(i for (i, s) in enumerate(array.shape) if s > 1), array.ndim
)
return array.reshape(array.shape[first_not_unity:])
def simplify_basic_index(
basic_index: int | slice | Sequence[int | slice | EllipsisType | None],
*,
shape: Sequence[int],
) -> tuple[int | slice, ...]:
"""
Given a Numpy basic index, return a tuple of integers and slice objects
with no default values (`None`) if possible.
If one of the dimensions has a slice and the step is negative and the stop
value of the slice was originally `None`, the new stop value of the slice
may still be set to `None`.
For more information on valid basic indices, see
https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing
Parameters
----------
basic_index
A valid Numpy basic index
shape
The shape of the array being indexed
"""
ndim = len(shape)
if not isinstance(basic_index, (tuple, list)): # We just have a single int
basic_index = (basic_index,)
new_index = list(basic_index)
if Ellipsis in new_index:
if new_index.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = new_index.index(Ellipsis)
new_index.remove(Ellipsis)
n_e = ndim - len(new_index)
for i in range(n_e):
ind = e_ind + i
new_index.insert(ind, slice(0, shape[ind], 1))
if len(new_index) > ndim:
raise ValueError(
f"The dimensionality of the basic index {basic_index} can not be greater "
f"than the dimensionality ({ndim}) of the data."
)
for i in range(ndim):
if i < len(new_index):
slc = new_index[i]
if isinstance(slc, slice):
indices = list(slc.indices(shape[i]))
# The following case is the only one where slice(*indices) does
# not give the 'correct' answer because it will set stop to -1
# which means the last element in the array.
if indices[1] == -1:
indices[1] = None
new_index[i] = slice(*indices)
elif isinstance(slc, numbers.Integral):
new_index[i] = normalize_axis_index(int(slc), shape[i])
else:
raise ValueError(f"Unexpected index element in basic index: {slc}")
else:
new_index.append(slice(0, shape[i], 1))
return tuple(new_index)
| IncompatibleShapeError |
python | pytorch__pytorch | torch/_inductor/remote_cache.py | {
"start": 11279,
"end": 11994
} | class ____(RedisRemoteCache):
pass
def create_cache(
key: str,
is_fbcode: bool,
fb_cache_cls: str,
oss_cache_cls: str,
) -> Optional[RemoteCache[JsonDataTy]]:
try:
if is_fbcode:
import torch._inductor.fb.remote_cache
cache_cls = getattr(torch._inductor.fb.remote_cache, fb_cache_cls)
return cache_cls(key)
else:
this_module = sys.modules[__name__]
cache_cls = getattr(this_module, oss_cache_cls)
return cache_cls(key)
except Exception:
log.warning("Unable to create a remote cache", exc_info=True)
return None
# Some simple stat capture
@dataclasses.dataclass
| RemoteDynamoPGOCache |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/errors.py | {
"start": 94,
"end": 196
} | class ____(Exception):
def __init__(self, message: str):
super().__init__(message)
| ExportError |
python | walkccc__LeetCode | solutions/3360. Stone Removal Game/3360.py | {
"start": 0,
"end": 164
} | class ____:
def canAliceWin(self, n: int) -> bool:
for stones in range(10, -1, -1):
if stones > n:
return stones % 2 == 1
n -= stones
| Solution |
python | kamyu104__LeetCode-Solutions | Python/reverse-words-in-a-string-iii.py | {
"start": 29,
"end": 504
} | class ____(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
| Solution |
python | getsentry__sentry | src/sentry/migrations/0954_user_option_json_field.py | {
"start": 244,
"end": 1745
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0953_make_releasefiles_tti"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[mod.to_jsonb("sentry_useroption", "value")],
state_operations=[
migrations.AlterField(
model_name="useroption",
name="value",
field=models.JSONField(null=True),
),
],
)
]
| Migration |
python | django__django | tests/db_typecasts/tests.py | {
"start": 2499,
"end": 2994
} | class ____(unittest.TestCase):
def test_typeCasts(self):
for k, v in TEST_CASES.items():
for inpt, expected in v:
with self.subTest(k=k, inpt=inpt):
got = getattr(typecasts, k)(inpt)
self.assertEqual(
got,
expected,
"In %s: %r doesn't match %r. Got %r instead."
% (k, inpt, expected, got),
)
| DBTypeCasts |
python | apache__airflow | airflow-core/src/airflow/example_dags/plugins/workday.py | {
"start": 1566,
"end": 4177
} | class ____(Timetable):
def get_next_workday(self, d: DateTime, incr=1) -> DateTime:
next_start = d
while True:
if next_start.weekday() not in (5, 6): # not on weekend
if holiday_calendar is None:
holidays = set()
else:
holidays = holiday_calendar.holidays(start=next_start, end=next_start).to_pydatetime()
if next_start not in holidays:
break
next_start = next_start.add(days=incr)
return next_start
# [START howto_timetable_infer_manual_data_interval]
def infer_manual_data_interval(self, run_after: DateTime) -> DataInterval:
start = DateTime.combine((run_after - timedelta(days=1)).date(), Time.min).replace(tzinfo=UTC)
# Skip backwards over weekends and holidays to find last run
start = self.get_next_workday(start, incr=-1)
return DataInterval(start=start, end=(start + timedelta(days=1)))
# [END howto_timetable_infer_manual_data_interval]
# [START howto_timetable_next_dagrun_info]
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if last_automated_data_interval is not None: # There was a previous run on the regular schedule.
last_start = last_automated_data_interval.start
next_start = DateTime.combine((last_start + timedelta(days=1)).date(), Time.min)
# Otherwise this is the first ever run on the regular schedule...
elif (earliest := restriction.earliest) is None:
return None # No start_date. Don't schedule.
elif not restriction.catchup:
# If the DAG has catchup=False, today is the earliest to consider.
next_start = max(earliest, DateTime.combine(Date.today(), Time.min, tzinfo=UTC))
elif earliest.time() != Time.min:
# If earliest does not fall on midnight, skip to the next day.
next_start = DateTime.combine(earliest.date() + timedelta(days=1), Time.min)
else:
next_start = earliest
# Skip weekends and holidays
next_start = self.get_next_workday(next_start.replace(tzinfo=UTC))
if restriction.latest is not None and next_start > restriction.latest:
return None # Over the DAG's scheduled end; don't schedule.
return DagRunInfo.interval(start=next_start, end=(next_start + timedelta(days=1)))
# [END howto_timetable_next_dagrun_info]
| AfterWorkdayTimetable |
python | walkccc__LeetCode | solutions/1235. Maximum Profit in Job Scheduling/1235.py | {
"start": 0,
"end": 641
} | class ____:
def jobScheduling(
self,
startTime: list[int],
endTime: list[int],
profit: list[int],
) -> int:
jobs = sorted([(s, e, p) for s, e, p in zip(startTime, endTime, profit)])
# Will use binary search to find the first available startTime
for i in range(len(startTime)):
startTime[i] = jobs[i][0]
@functools.lru_cache(None)
def dp(i: int) -> int:
"""Returns the maximum profit to schedule jobs[i..n)."""
if i == len(startTime):
return 0
j = bisect.bisect_left(startTime, jobs[i][1])
return max(jobs[i][2] + dp(j), dp(i + 1))
return dp(0)
| Solution |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 13249,
"end": 14492
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.value = 11.111
self.widget = widgets.NumberWidget()
self.widget_coerce_to_string = widgets.NumberWidget(coerce_to_string=True)
def test_is_empty_value_is_none(self):
self.assertTrue(self.widget.is_empty(None))
def test_is_empty_value_is_empty_string(self):
self.assertTrue(self.widget.is_empty(""))
def test_is_empty_value_is_whitespace(self):
self.assertTrue(self.widget.is_empty(" "))
def test_is_empty_value_is_zero(self):
self.assertFalse(self.widget.is_empty(0))
def test_render(self):
self.assertEqual("11.111", self.widget.render(self.value))
def test_render_None_coerce_to_string_False(self):
self.assertEqual("", self.widget.render(None))
def test_render_invalid_type(self):
self.assertEqual(self.widget.render("a"), "")
@override_settings(LANGUAGE_CODE="fr-fr")
def test_locale_render_coerce_to_string_gte4(self):
self.assertEqual("11,111", self.widget_coerce_to_string.render(self.value))
def test_coerce_to_string_value_is_None(self):
self.assertEqual("", self.widget_coerce_to_string.render(None))
| NumberWidgetTest |
python | langchain-ai__langchain | libs/langchain/tests/integration_tests/cache/fake_embeddings.py | {
"start": 1270,
"end": 2462
} | class ____(FakeEmbeddings):
"""Consistent fake embeddings.
Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts.
"""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text)),
]
out_vectors.append(vector)
return out_vectors
@override
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown.
Args:
text: Text to embed.
Returns:
Embedding.
"""
return self.embed_documents([text])[0]
| ConsistentFakeEmbeddings |
python | openai__openai-python | src/openai/types/responses/web_search_tool_param.py | {
"start": 1262,
"end": 1862
} | class ____(TypedDict, total=False):
type: Required[Literal["web_search", "web_search_2025_08_26"]]
"""The type of the web search tool.
One of `web_search` or `web_search_2025_08_26`.
"""
filters: Optional[Filters]
"""Filters for the search."""
search_context_size: Literal["low", "medium", "high"]
"""High level guidance for the amount of context window space to use for the
search.
One of `low`, `medium`, or `high`. `medium` is the default.
"""
user_location: Optional[UserLocation]
"""The approximate location of the user."""
| WebSearchToolParam |
python | sympy__sympy | sympy/categories/baseclasses.py | {
"start": 1101,
"end": 3856
} | class ____(Basic):
"""
The base class for any morphism in an abstract category.
Explanation
===========
In abstract categories, a morphism is an arrow between two
category objects. The object where the arrow starts is called the
domain, while the object where the arrow ends is called the
codomain.
Two morphisms between the same pair of objects are considered to
be the same morphisms. To distinguish between morphisms between
the same objects use :class:`NamedMorphism`.
It is prohibited to instantiate this class. Use one of the
derived classes instead.
See Also
========
IdentityMorphism, NamedMorphism, CompositeMorphism
"""
def __new__(cls, domain, codomain):
raise(NotImplementedError(
"Cannot instantiate Morphism. Use derived classes instead."))
@property
def domain(self):
"""
Returns the domain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.domain
Object("A")
"""
return self.args[0]
@property
def codomain(self):
"""
Returns the codomain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.codomain
Object("B")
"""
return self.args[1]
def compose(self, other):
r"""
Composes self with the supplied morphism.
The order of elements in the composition is the usual order,
i.e., to construct `g\circ f` use ``g.compose(f)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> (g * f).domain
Object("A")
>>> (g * f).codomain
Object("C")
"""
return CompositeMorphism(other, self)
def __mul__(self, other):
r"""
Composes self with the supplied morphism.
The semantics of this operation is given by the following
equation: ``g * f == g.compose(f)`` for composable morphisms
``g`` and ``f``.
See Also
========
compose
"""
return self.compose(other)
| Morphism |
python | facebookresearch__faiss | tests/test_binary_io.py | {
"start": 2333,
"end": 3029
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
d = 32
nt = 200
nb = 1500
nq = 500
(self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq)
def test_read_index_ownership(self):
d = self.xq.shape[1] * 8
index = faiss.IndexBinaryFlat(d)
index.add(self.xb)
fd, tmpnam = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index_binary(index, tmpnam)
index2 = faiss.read_index_binary(tmpnam)
assert index2.thisown
finally:
os.remove(tmpnam)
| TestObjectOwnership |
python | keras-team__keras | keras/src/regularizers/regularizers.py | {
"start": 212,
"end": 5707
} | class ____:
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API
will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
and `Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's
output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers
and the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=L1(0.01),
... activity_regularizer=L2(0.01))
>>> tensor = ops.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size)
>>> # is 5
>>> ops.sum(layer.losses)
5.25
## Available penalties
```python
L1(0.3) # L1 Regularization Penalty
L2(0.1) # L2 Regularization Penalty
L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = L2(2.)
>>> tensor = ops.ones(shape=(5, 5))
>>> regularizer(tensor)
50.0
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> def l1_reg(weight_matrix):
... return 0.01 * ops.sum(ops.absolute(weight_matrix))
...
>>> layer = Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
0.25
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> class L2Regularizer(Regularizer):
... def __init__(self, l2=0.):
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * ops.sum(ops.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
12.5
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for saving and
loading models to HDF5 format, Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this
functionality, you must make sure any python process running your model has
also defined and registered your custom regularizer.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.0
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
An regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(f"{self} does not implement get_config()")
@keras_export(["keras.regularizers.L1L2", "keras.regularizers.l1_l2"])
| Regularizer |
python | ray-project__ray | rllib/connectors/env_to_module/env_to_module_pipeline.py | {
"start": 538,
"end": 1844
} | class ____(ConnectorPipelineV2):
@override(ConnectorPipelineV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Optional[Dict[str, Any]] = None,
episodes: List[EpisodeType],
explore: bool,
shared_data: Optional[dict] = None,
metrics: Optional[MetricsLogger] = None,
**kwargs,
):
# Log the sum of lengths of all episodes incoming.
if metrics:
metrics.log_value(
ENV_TO_MODULE_SUM_EPISODES_LENGTH_IN,
sum(map(len, episodes)),
)
# Make sure user does not necessarily send initial input into this pipeline.
# Might just be empty and to be populated from `episodes`.
ret = super().__call__(
rl_module=rl_module,
batch=batch if batch is not None else {},
episodes=episodes,
explore=explore,
shared_data=shared_data if shared_data is not None else {},
metrics=metrics,
**kwargs,
)
# Log the sum of lengths of all episodes outgoing.
if metrics:
metrics.log_value(
ENV_TO_MODULE_SUM_EPISODES_LENGTH_OUT,
sum(map(len, episodes)),
)
return ret
| EnvToModulePipeline |
python | pytorch__pytorch | test/distributed/tensor/parallel/test_parallelize_api.py | {
"start": 694,
"end": 835
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return x
| DummyModule |
python | great-expectations__great_expectations | tests/datasource/fluent/test_config_str.py | {
"start": 11531,
"end": 12652
} | class ____:
@pytest.mark.parametrize("uri", ["invalid_uri", "http:/example.com"])
def test_invalid_uri(self, uri: str):
with pytest.raises(pydantic.ValidationError):
_ = pydantic.parse_obj_as(ConfigUri, uri)
@pytest.mark.parametrize(
"uri",
[
"${MY_SCHEME}://me:secret@account/db/schema",
"snowflake://me:secret@${MY_ACCOUNT}/db/schema",
"snowflake://me:secret@account/${MY_DB}/schema",
"snowflake://me:secret@account/db/${MY_SCHEMA}",
"snowflake://me:secret@account/db/my_schema?${MY_QUERY_PARAMS}",
"snowflake://me:secret@account/db/my_schema?role=${MY_ROLE}",
],
)
def test_disallowed_substitution(self, uri: str):
with pytest.raises(pydantic.ValidationError):
_ = pydantic.parse_obj_as(ConfigUri, uri)
def test_no_template_str(self):
with pytest.raises(pydantic.ValidationError):
_ = pydantic.parse_obj_as(ConfigUri, "snowflake://me:password@account/db")
if __name__ == "__main__":
pytest.main([__file__, "-vv"])
| TestConfigUriInvalid |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 294105,
"end": 299786
} | class ____(rv_continuous):
r"""An Irwin-Hall (Uniform Sum) continuous random variable.
An `Irwin-Hall <https://en.wikipedia.org/wiki/Irwin-Hall_distribution/>`_
continuous random variable is the sum of :math:`n` independent
standard uniform random variables [1]_ [2]_.
%(before_notes)s
Notes
-----
Applications include `Rao's Spacing Test
<https://jammalam.faculty.pstat.ucsb.edu/html/favorite/test.htm>`_,
a more powerful alternative to the Rayleigh test
when the data are not unimodal, and radar [3]_.
Conveniently, the pdf and cdf are the :math:`n`-fold convolution of
the ones for the standard uniform distribution, which is also the
definition of the cardinal B-splines of degree :math:`n-1`
having knots evenly spaced from :math:`1` to :math:`n` [4]_ [5]_.
The Bates distribution, which represents the *mean* of statistically
independent, uniformly distributed random variables, is simply the
Irwin-Hall distribution scaled by :math:`1/n`. For example, the frozen
distribution ``bates = irwinhall(10, scale=1/10)`` represents the
distribution of the mean of 10 uniformly distributed random variables.
%(after_notes)s
References
----------
.. [1] P. Hall, "The distribution of means for samples of size N drawn
from a population in which the variate takes values between 0 and 1,
all such values being equally probable",
Biometrika, Volume 19, Issue 3-4, December 1927, Pages 240-244,
:doi:`10.1093/biomet/19.3-4.240`.
.. [2] J. O. Irwin, "On the frequency distribution of the means of samples
from a population having any law of frequency with finite moments,
with special reference to Pearson's Type II,
Biometrika, Volume 19, Issue 3-4, December 1927, Pages 225-239,
:doi:`0.1093/biomet/19.3-4.225`.
.. [3] K. Buchanan, T. Adeyemi, C. Flores-Molina, S. Wheeland and D. Overturf,
"Sidelobe behavior and bandwidth characteristics
of distributed antenna arrays,"
2018 United States National Committee of
URSI National Radio Science Meeting (USNC-URSI NRSM),
Boulder, CO, USA, 2018, pp. 1-2.
https://www.usnc-ursi-archive.org/nrsm/2018/papers/B15-9.pdf.
.. [4] Amos Ron, "Lecture 1: Cardinal B-splines and convolution operators", p. 1
https://pages.cs.wisc.edu/~deboor/887/lec1new.pdf.
.. [5] Trefethen, N. (2012, July). B-splines and convolution. Chebfun.
Retrieved April 30, 2024, from http://www.chebfun.org/examples/approx/BSplineConv.html.
%(example)s
""" # noqa: E501
@replace_notes_in_docstring(rv_continuous, notes="""\
Raises a ``NotImplementedError`` for the Irwin-Hall distribution because
the generic `fit` implementation is unreliable and no custom implementation
is available. Consider using `scipy.stats.fit`.\n\n""")
def fit(self, data, *args, **kwds):
fit_notes = ("The generic `fit` implementation is unreliable for this "
"distribution, and no custom implementation is available. "
"Consider using `scipy.stats.fit`.")
raise NotImplementedError(fit_notes)
def _argcheck(self, n):
return (n > 0) & _isintegral(n) & np.isrealobj(n)
def _get_support(self, n):
return 0, n
def _shape_info(self):
return [_ShapeInfo("n", True, (1, np.inf), (True, False))]
def _munp(self, order, n):
# see https://link.springer.com/content/pdf/10.1007/s10959-020-01050-9.pdf
# page 640, with m=n, j=n+order
def vmunp(order, n):
n = np.asarray(n, dtype=np.int64)
return (sc.stirling2(n+order, n, exact=True)
/ sc.comb(n+order, n, exact=True))
# exact rationals, but we convert to float anyway
return np.vectorize(vmunp, otypes=[np.float64])(order, n)
@staticmethod
def _cardbspl(n):
t = np.arange(n+1)
return BSpline.basis_element(t)
def _pdf(self, x, n):
def vpdf(x, n):
return self._cardbspl(n)(x)
return np.vectorize(vpdf, otypes=[np.float64])(x, n)
def _cdf(self, x, n):
def vcdf(x, n):
return self._cardbspl(n).antiderivative()(x)
return np.vectorize(vcdf, otypes=[np.float64])(x, n)
def _sf(self, x, n):
def vsf(x, n):
return self._cardbspl(n).antiderivative()(n-x)
return np.vectorize(vsf, otypes=[np.float64])(x, n)
def _rvs(self, n, size=None, random_state=None, *args):
@_vectorize_rvs_over_shapes
def _rvs1(n, size=None, random_state=None):
n = np.floor(n).astype(int)
usize = (n,) if size is None else (n, *size)
return random_state.uniform(size=usize).sum(axis=0)
return _rvs1(n, size=size, random_state=random_state)
def _stats(self, n):
# mgf = ((exp(t) - 1)/t)**n
# m'th derivative follows from the generalized Leibniz rule
# Moments follow directly from the definition as the sum of n iid unif(0,1)
# and the summation rules for moments of a sum of iid random variables
# E(IH((n))) = n*E(U(0,1)) = n/2
# Var(IH((n))) = n*Var(U(0,1)) = n/12
# Skew(IH((n))) = Skew(U(0,1))/sqrt(n) = 0
# Kurt(IH((n))) = Kurt(U(0,1))/n = -6/(5*n) -- Fisher's excess kurtosis
# See e.g. https://en.wikipedia.org/wiki/Irwin%E2%80%93Hall_distribution
return n/2, n/12, 0, -6/(5*n)
irwinhall = irwinhall_gen(name="irwinhall")
irwinhall._support = (0.0, 'n')
| irwinhall_gen |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/footsies/game/constants.py | {
"start": 81,
"end": 235
} | class ____:
NONE = 0
BACK = 1
FORWARD = 2
ATTACK = 3
BACK_ATTACK = 4
FORWARD_ATTACK = 5
SPECIAL_CHARGE = 6
@dataclass
| EnvActions |
python | pytorch__pytorch | test/inductor/test_remote_cache.py | {
"start": 428,
"end": 577
} | class ____(RemoteCacheBackend):
def _get(self, key):
return None
def _put(self, key, data):
return None
@dataclass
| NoopBackend |
python | scipy__scipy | scipy/special/tests/test_spherical_bessel.py | {
"start": 4700,
"end": 5434
} | class ____:
def test_spherical_jn_yn_cross_product_1(self):
# https://dlmf.nist.gov/10.50.E3
n = np.array([1, 5, 8])
x = np.array([0.1, 1, 10])
left = (spherical_jn(n + 1, x) * spherical_yn(n, x) -
spherical_jn(n, x) * spherical_yn(n + 1, x))
right = 1/x**2
assert_allclose(left, right)
def test_spherical_jn_yn_cross_product_2(self):
# https://dlmf.nist.gov/10.50.E3
n = np.array([1, 5, 8])
x = np.array([0.1, 1, 10])
left = (spherical_jn(n + 2, x) * spherical_yn(n, x) -
spherical_jn(n, x) * spherical_yn(n + 2, x))
right = (2*n + 3)/x**3
assert_allclose(left, right)
| TestSphericalJnYnCrossProduct |
python | google__jax | jax/_src/api_util.py | {
"start": 3989,
"end": 26920
} | class ____:
"""Box object used when comparing static arguments as a jit key.
Requires exact type equality using `is` and value equality."""
__slots__ = ["val"]
def __init__(self, val):
self.val = val
def __hash__(self):
return hash(self.val)
def __eq__(self, other):
return type(self.val) is type(other.val) and self.val == other.val
_POSITIONAL_ARGUMENTS = (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD
)
def _validate_argnums(sig: inspect.Signature, argnums: tuple[int, ...], argnums_name: str) -> None:
"""
Validate that the argnums are sensible for a given function.
For functions that accept a variable number of positions arguments
(`f(..., *args)`) all positive argnums are considered valid.
"""
n_pos_args = 0
for param in sig.parameters.values():
if param.kind in _POSITIONAL_ARGUMENTS:
n_pos_args += 1
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
# We can have any number of positional arguments
return
if argnums and (-min(argnums) > n_pos_args or max(argnums) >= n_pos_args):
raise ValueError(f"Jitted function has {argnums_name}={argnums}, "
f"but only accepts {n_pos_args} positional arguments.")
_INVALID_KEYWORD_ARGUMENTS = (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.VAR_POSITIONAL
)
_KEYWORD_ARGUMENTS = (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY,
)
def _validate_argnames(
sig: inspect.Signature, argnames: tuple[str, ...], argnames_name: str
) -> None:
"""
Validate that the argnames are sensible for a given function.
For functions that accept a variable keyword arguments
(`f(..., **kwargs)`) all argnames are considered valid except those
marked as position-only (`f(pos_only, /, ...)`).
"""
var_kwargs = False
valid_kwargs: set[str] = set()
invalid_kwargs: set[str] = set()
for param_name, param in sig.parameters.items():
if param.kind in _KEYWORD_ARGUMENTS:
valid_kwargs.add(param_name)
elif param.kind is inspect.Parameter.VAR_KEYWORD:
var_kwargs = True
elif param.kind in _INVALID_KEYWORD_ARGUMENTS:
invalid_kwargs.add(param_name)
# Check whether any kwargs are invalid due to position only
if invalid_argnames := (invalid_kwargs & set(argnames)):
raise ValueError(f"Jitted function has invalid argnames {invalid_argnames} "
f"in {argnames_name}. These are positional-only")
# Takes any kwargs
if var_kwargs:
return
# Check that all argnames exist on function
if invalid_argnames := (set(argnames) - valid_kwargs):
raise ValueError(f"Jitted function has invalid argnames {invalid_argnames} "
f"in {argnames_name}. Function does not take these args.")
def argnums_partial(f: lu.WrappedFun, dyn_argnums: int | Sequence[int],
                    args: Sequence, require_static_args_hashable=True):
  """Close the static entries of ``args`` over ``f``; return the dynamic rest.

  ``dyn_argnums`` selects which positions stay dynamic; every other
  positional argument is fixed into the wrapped function.  When
  ``require_static_args_hashable`` is true, each fixed argument must be
  hashable (it participates in the jit cache key) and is wrapped so that
  equality also requires an exact type match.
  """
  dyn_argnums = _ensure_index_tuple(dyn_argnums)
  dyn_argnums = _ensure_inbounds(False, len(args), dyn_argnums)
  dyn_positions = set(dyn_argnums)
  if require_static_args_hashable:
    fixed_args = []
    for pos, arg in enumerate(args):
      if pos in dyn_positions:
        continue
      if not is_hashable(arg):
        raise ValueError(
            "Non-hashable static arguments are not supported, as this can lead "
            f"to unexpected cache-misses. Static argument (index {pos}) of type "
            f"{type(arg)} for function {f.__name__} is non-hashable.")
      fixed_args.append(_HashableWithStrictTypeEquality(arg))
  else:
    # Hashability not required: box each static value so it never gets hashed.
    fixed_args = [Unhashable(arg) for pos, arg in enumerate(args)
                  if pos not in dyn_positions]
  dyn_args = tuple(args[pos] for pos in dyn_argnums)
  return _argnums_partial(f, dyn_argnums, tuple(fixed_args)), dyn_args
def prepend_static_args(f, static_args):
  """Wrap ``f`` so that ``static_args`` are passed ahead of its call arguments."""
  boxed = tuple(Unhashable(arg) for arg in static_args)
  return _prepend_static_args(f, boxed)
@lu.transformation2
def _prepend_static_args(f, static_args, *args, **kwargs):
  """Unbox the static values and splice them in front of the dynamic args."""
  unboxed = tuple(boxed.val for boxed in static_args)
  return f(*unboxed, *args, **kwargs)
def _ensure_inbounds(allow_invalid: bool, num_args: int, argnums: Sequence[int]
) -> tuple[int, ...]:
"""Ensure argnum is within bounds. Also resolves negative argnums."""
result = []
for i in argnums:
if i >= num_args and allow_invalid: continue
if not -num_args <= i < num_args:
raise ValueError(
"Positional argument indices, e.g. for `static_argnums`, must have "
"value greater than or equal to -len(args) and less than len(args), "
f"but got value {i} for len(args) == {num_args}.")
result.append(i % num_args) # Resolve negative
return tuple(result)
def _split_args(static_argnums, args, allow_invalid):
  """Partition ``args`` positions into static indices, dynamic indices, values."""
  static_argnums = _ensure_inbounds(allow_invalid, len(args), static_argnums)
  static_positions = set(static_argnums)
  dyn_argnums = tuple(i for i in range(len(args)) if i not in static_positions)
  dyn_args = tuple(args[i] for i in dyn_argnums)
  return static_argnums, dyn_argnums, dyn_args
def argnums_partial_except(f: lu.WrappedFun, static_argnums: tuple[int, ...],
                           args: tuple[Any, ...], *, allow_invalid: bool):
  "Version of ``argnums_partial`` that checks hashability of static_argnums."
  if not static_argnums:
    return f, args
  static_argnums, dyn_argnums, dyn_args = _split_args(
      static_argnums, args, allow_invalid)
  fixed_args = []
  for idx in sorted(static_argnums):
    # TODO(shoyer): set allow_invalid=True permanently after static_argnames.
    if allow_invalid and idx >= len(args):
      continue
    static_arg = args[idx]
    if is_hashable(static_arg):
      fixed_args.append(_HashableWithStrictTypeEquality(static_arg))
    else:
      raise ValueError(
          "Non-hashable static arguments are not supported, as this can lead "
          f"to unexpected cache-misses. Static argument (index {idx}) of type "
          f"{type(static_arg)} for function {f.__name__} is non-hashable.")
  return _argnums_partial(f, dyn_argnums, tuple(fixed_args)), dyn_args
@lu.transformation2
def _argnums_partial(_fun: Callable,
                     _dyn_argnums: Sequence[int],
                     _fixed_args: Sequence, *dyn_args, **kwargs):
  """Interleave fixed and dynamic positional arguments, then call ``_fun``."""
  total = len(_fixed_args) + len(dyn_args)
  slots: list = [None] * total
  is_dynamic = [False] * total
  # Place each dynamic argument at its original position.
  for pos, value in zip(_dyn_argnums, dyn_args):
    slots[pos] = value
    is_dynamic[pos] = True
  # Fill the remaining positions from the fixed arguments, in order.
  fixed_iter = iter(_fixed_args)
  args = [slot if dyn else next(fixed_iter).val
          for slot, dyn in zip(slots, is_dynamic)]
  leftover = object()
  assert next(fixed_iter, leftover) is leftover  # all fixed args consumed
  return _fun(*args, **kwargs)
def argnames_partial_except(f: lu.WrappedFun, static_argnames: tuple[str, ...],
                            kwargs: dict[str, Any]):
  """Keyword-argument analogue of ``argnums_partial_except``.

  Fixes the entries of ``kwargs`` named in ``static_argnames`` into ``f``
  (each must be hashable, since it becomes part of the jit cache key) and
  returns the wrapped function together with the remaining dynamic kwargs.

  Raises:
    ValueError: if a static keyword argument is not hashable.
  """
  if not static_argnames:
    return f, kwargs
  dyn_kwargs = {k: v for k, v in kwargs.items() if k not in static_argnames}

  fixed_kwargs: dict[str, Any] = {}
  for k, arg in kwargs.items():
    if k in dyn_kwargs:
      continue
    # Use the shared is_hashable helper, consistent with argnums_partial /
    # argnums_partial_except; also avoids chaining a spurious TypeError onto
    # the ValueError raised here.
    if not is_hashable(arg):
      raise ValueError(
          "Non-hashable static arguments are not supported, as this can lead "
          f"to unexpected cache-misses. Static argument (name {k}) of type "
          f"{type(arg)} for function {f.__name__} is non-hashable.")
    fixed_kwargs[k] = Hashable(arg)

  return _argnames_partial(f, WrapKwArgs(fixed_kwargs)), dyn_kwargs
@lu.transformation2
def _argnames_partial(_fun, _fixed_kwargs: WrapKwArgs, *args, **dyn_kwargs):
  """Merge unboxed fixed kwargs with the dynamic ones and call ``_fun``."""
  merged = {name: boxed.val for name, boxed in _fixed_kwargs.val.items()}
  merged.update(dyn_kwargs)  # dynamic values win, as with dict(.., **dyn)
  return _fun(*args, **merged)
@lru_cache(maxsize=4096)
def donation_vector(donate_argnums, donate_argnames, in_tree,
                    kws: bool = True) -> tuple[bool, ...]:
  """Returns one donation flag per flattened leaf of the args and kwargs.

  By the time this runs, `resolve_argnums` has filled in whichever of
  donate_argnums / donate_argnames the user omitted, so both are available
  here.  That is what lets JAX donate kwargs when only donate_argnums was
  specified (and vice versa).  When the user supplied both, only the listed
  args and kwargs are donated.
  """
  flags: list[bool] = []
  if kws:
    args_tree, kwargs_tree = treedef_children(in_tree)
  else:
    args_tree, kwargs_tree = in_tree, None
  # One flag per leaf of each positional argument subtree.
  for idx, subtree in enumerate(args_tree.children()):
    flags.extend([idx in donate_argnums] * subtree.num_leaves)
  if kwargs_tree is not None:
    names = kwargs_tree.node_data()[1]  # type: ignore
    for name, subtree in zip(names, kwargs_tree.children()):
      flags.extend([name in donate_argnames] * subtree.num_leaves)
  return tuple(flags)
def rebase_donate_argnums(donate_argnums, static_argnums) -> tuple[int, ...]:
  """Shifts donate to account for static.

  >>> rebase_donate_argnums((3, 4), (0, 1))
  (1, 2)

  Args:
    donate_argnums: An iterable of ints.
    static_argnums: An iterable of ints.

  Returns:
    A tuple of unique, sorted integer values based on donate_argnums with each
    element offset to account for static_argnums.
  """
  if not (static_argnums or donate_argnums):
    return tuple(sorted(donate_argnums))

  static_argnums = sorted(set(static_argnums))
  donate_argnums = sorted(set(donate_argnums))
  if set(static_argnums) & set(donate_argnums):
    raise ValueError(f"`static_argnums` {static_argnums} and "
                     f"`donate_argnums` {donate_argnums} cannot intersect.")
  # Each donated index shifts left by the number of static indices before it.
  return tuple(d - sum(1 for s in static_argnums if s < d)
               for d in donate_argnums)
def is_hashable(arg):
  """Return True iff ``hash(arg)`` succeeds."""
  try:
    hash(arg)
  except TypeError:
    return False
  return True
SENTINEL = object()
def flatten_axes(name, treedef, axis_tree, *, kws=False, tupled_args=False):
  # Given an axis spec tree axis_tree (a pytree with integers and Nones at the
  # leaves, i.e. the Nones are to be considered leaves) that is a tree prefix of
  # the given treedef, build a complete axis spec tree with the same structure
  # and return the flattened result.  `name` is only used in error messages.
  axis_tree_leaves, axis_treedef = none_leaf_registry.flatten(axis_tree)
  try:
    axes = broadcast_flattened_prefix_with_treedef(
        axis_tree_leaves, axis_treedef, treedef)
  except ValueError:
    if kws:
      # If keyword arguments are included in the tree, we adapt the error
      # message to be only about the positional arguments.
      treedef, _ = treedef_children(treedef)
      axis_tree, _ = axis_tree
    hint = ""
    if tupled_args:
      hint += (f" Note that {name} that are non-trivial pytrees should always be "
               f"wrapped in a tuple representing the argument list.")
      if len(treedef.children()) == 1:
        try:
          # Probe: would the spec have worked wrapped in a singleton tuple?
          flatten_axes(name, treedef, (axis_tree,))
        except ValueError:
          pass  # That's not the issue.
        else:
          hint += (f" In particular, you're passing in a single argument which "
                   f"means that {name} might need to be wrapped in "
                   f"a singleton tuple.")
    # `from None` suppresses the internal broadcast error in the traceback.
    raise ValueError(f"{name} specification must be a tree prefix of the "
                     f"corresponding value, got specification {axis_tree} "
                     f"for value tree {treedef}.{hint}") from None
  assert len(axes) == treedef.num_leaves
  return axes
def flat_out_axes(
    f: lu.WrappedFun, out_spec: Any
) -> tuple[lu.WrappedFun, Callable]:
  """Flatten ``out_spec`` and attach it to ``f`` as a hashable out-axes thunk."""
  spec_leaves, spec_treedef = tree_flatten(out_spec)
  f, out_axes = _flat_out_axes(f, tuple(spec_leaves), spec_treedef)
  return f, HashableFunction(out_axes, closure=(tuple(spec_leaves), spec_treedef))
@lu.transformation_with_aux2
def _flat_out_axes(_fun, _store, _leaves, _treedef, *args, **kwargs):
  """Call ``_fun``, then store the out-axes spec broadcast against its output."""
  ans = _fun(*args, **kwargs)
  spec = tree_unflatten(_treedef, _leaves)
  try:
    # Broadcast the (prefix) spec so there is exactly one entry per output
    # leaf; None entries in the spec are treated as leaves.
    spec_flat = tuple(broadcast_prefix(spec, ans, is_leaf=lambda x: x is None))
  except ValueError:
    e, *_ = prefix_errors(spec, ans)
    # TODO(mattjj): currently hardcoded for pmap; generalize to vmap in followup
    msg, = e('pmap out_axes').args
    msg += ("\n\nThe full pytree is the output of the pmapped function. Ensure "
            "that the `out_axes` argument to `pmap` is a pytree prefix of the "
            "pmapped function's output.")
    # `from None` hides the generic prefix error behind the tailored message.
    raise ValueError(msg) from None
  _store.store(spec_flat)
  return ans
def check_callable(fun):
  """Raise TypeError for values that cannot be traced as plain functions:
  staticmethods, non-callables, and generator functions."""
  # In Python 3.10+, the only thing stopping us from supporting staticmethods
  # is that we can't take weak references to them, which the C++ JIT requires.
  if isinstance(fun, staticmethod):
    raise TypeError(f"staticmethod arguments are not supported, got {fun}")
  if not callable(fun):
    raise TypeError(f"Expected a callable value, got {fun}")
  if inspect.isgeneratorfunction(fun):
    raise TypeError(f"Expected a function, got a generator function: {fun}")
_POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD
def infer_argnums_and_argnames(
    sig: inspect.Signature,
    argnums: int | Iterable[int] | None,
    argnames: str | Iterable[str] | None,
) -> tuple[tuple[int, ...], tuple[str, ...]]:
  """Infer missing argnums and argnames for a function with inspect."""
  if argnums is None and argnames is None:
    return (), ()
  if argnums is not None and argnames is not None:
    # Both given: just normalize to tuples, no inference needed.
    return _ensure_index_tuple(argnums), _ensure_str_tuple(argnames)

  params = list(sig.parameters.items())
  if argnums is None:
    assert argnames is not None
    argnames = _ensure_str_tuple(argnames)
    # Map each requested name to its positional index (POSITIONAL_OR_KEYWORD
    # parameters only — others can't be addressed both ways).
    argnums = tuple(
        idx for idx, (name, param) in enumerate(params)
        if param.kind == _POSITIONAL_OR_KEYWORD and name in argnames)
  else:
    argnums = _ensure_index_tuple(argnums)
    # Map each requested index back to its parameter name, symmetrically.
    argnames = tuple(
        name for idx, (name, param) in enumerate(params)
        if param.kind == _POSITIONAL_OR_KEYWORD and idx in argnums)
  return argnums, argnames
def resolve_argnums(
    fun: Callable,
    signature: inspect.Signature | None,
    donate_argnums: int | Sequence[int] | None,
    donate_argnames: str | Iterable[str] | None,
    static_argnums: int | Sequence[int] | None,
    static_argnames: str | Iterable[str] | None,
) -> tuple[tuple[int, ...], tuple[str, ...], tuple[int, ...], tuple[str, ...]]:
  """Validates and completes the argnum/argname specification for a jit.

  * fills in any missing pieces (e.g., names given numbers, or vice versa),
  * validates the argument names/numbers against the function signature,
  * validates that donated and static arguments don't intersect.
  * rebases the donated arguments so they index into the dynamic arguments,
    (after static arguments have been removed), in the order that parameters
    are passed into the compiled function.

  Returns:
    A tuple (donate_argnums, donate_argnames, static_argnums, static_argnames),
    each normalized to a tuple.
  """
  if signature is None:
    # Some built-in functions don't support signature.
    # See: https://github.com/python/cpython/issues/73485
    # In this case no validation is done
    static_argnums = () if static_argnums is None else _ensure_index_tuple(
        static_argnums)
    static_argnames = () if static_argnames is None else _ensure_str_tuple(
        static_argnames)
    donate_argnums = () if donate_argnums is None else _ensure_index_tuple(
        donate_argnums)
    if donate_argnames is not None:
      # Without a signature we cannot translate names to positions.
      raise ValueError(f"Getting the signature of function {fun} failed. "
                       "Pass donate_argnums instead of donate_argnames.")
    assert donate_argnames is None
    donate_argnames = ()
  else:
    # Infer argnums and argnames according to docstring
    # If nums is None and names is not None, then nums are inferred from the
    # names and vice-versa.
    static_argnums, static_argnames = infer_argnums_and_argnames(
        signature, static_argnums, static_argnames)
    donate_argnums, donate_argnames = infer_argnums_and_argnames(
        signature, donate_argnums, donate_argnames)

    # Validation — note the order determines which error surfaces first.
    _validate_argnums(signature, static_argnums, "static_argnums")
    _validate_argnames(signature, static_argnames, "static_argnames")
    _validate_argnums(signature, donate_argnums, "donate_argnums")
    _validate_argnames(signature, donate_argnames, "donate_argnames")

  # Compensate for static argnums absorbing args
  _assert_no_intersection(static_argnames, donate_argnames)
  donate_argnums = rebase_donate_argnums(donate_argnums, static_argnums)

  return donate_argnums, donate_argnames, static_argnums, static_argnames
def _assert_no_intersection(static_argnames, donate_argnames):
out = set(static_argnames).intersection(set(donate_argnames))
if out:
raise ValueError(
"static_argnames and donate_argnames cannot intersect. Argument names "
f"{out} appear in both static_argnames and donate_argnames")
def resolve_kwargs(fun: Callable, args, kwargs) -> tuple[Any, ...]:
  """Resolve input arguments to positional following a function's signature.

  This will raise a TypeError if any keyword-only arguments were passed by the
  caller.
  """
  if isinstance(fun, partial):
    # functools.partial should have an opaque signature.
    fun = lambda *args, **kwargs: None
  bound = inspect.signature(fun).bind(*args, **kwargs)
  bound.apply_defaults()
  # Anything left in bound.kwargs that the caller passed by keyword could not
  # be turned into a positional argument.
  unresolved = [name for name in bound.kwargs if name in kwargs]
  if unresolved:
    raise TypeError(
        "The following keyword arguments could not be resolved to positions: "
        f"{', '.join(unresolved)}")
  return bound.args
def _dtype(x):
  """Best-effort dtype of ``x``: try result_type on the value itself, then on
  its ``dtype`` attribute."""
  try:
    return dtypes.result_type(x)
  except ValueError:
    # Fall back to the dtype attribute for values result_type rejects.
    # Plain attribute access replaces the needless getattr(x, 'dtype');
    # AttributeError still propagates if x has no dtype.
    return dtypes.result_type(x.dtype)
# This decorator exists to make it easier to monkey-patch APIs in JAX.
# By default it does nothing, but it can be monkey-patched to do other things.
def api_hook(fun, tag: str):
  """Identity hook; ``tag`` labels the API so a patched hook can discriminate."""
  del tag  # Unused by the default implementation.
  return fun
def debug_info(
    traced_for: str,
    fun: Callable,
    args: Sequence[Any],
    kwargs: dict[str, Any],
    *,
    static_argnums: Sequence[int] = (),
    static_argnames: Sequence[str] = (),
    result_paths_thunk: Callable[[], tuple[str, ...]] | core.InitialResultPaths = core.initial_result_paths,
    # TODO(necula): check if we really need this, e.g., to speed up tracing?
    sourceinfo: str | None = None,
    signature: inspect.Signature | None = None,
) -> core.DebugInfo:
  """Build a core.DebugInfo record for ``fun`` from example call arguments.

  ``args``/``kwargs`` are sample positional and keyword arguments; together
  with the signature they determine the human-readable argument names.
  Arguments that are static for tracing purposes must still be included and
  flagged via ``static_argnums`` / ``static_argnames``.

  See the docstring of linear_util.DebugInfo.
  """
  # A wrapper may have pre-computed debug info attached; reuse it verbatim.
  saved = getattr(fun, "__fun_debug_info__", None)
  if saved is not None:
    return saved
  src = fun_sourceinfo(fun) if sourceinfo is None else sourceinfo
  sig = fun_signature(fun) if signature is None else signature
  arg_names = _non_static_arg_names(sig, args, kwargs, static_argnums,
                                    static_argnames)
  return core.DebugInfo(traced_for, src, arg_names, result_paths_thunk)
def fun_signature(fun: Callable) -> inspect.Signature | None:
  """Like ``inspect.signature`` but returns None where it would raise."""
  try:
    sig = inspect.signature(fun)
  except (ValueError, TypeError):
    return None
  return sig
def save_wrapped_fun_debug_info(wrapper: Callable,
                                dbg: core.DebugInfo) -> None:
  """Stash ``dbg`` on ``wrapper`` under the attribute debug_info reads back."""
  wrapper.__fun_debug_info__ = dbg
_fun_name_re = re.compile(r"(?:<built-in function (\S+)>)")
# TODO(mattjj): make this function internal to this module
def fun_sourceinfo(fun: Callable) -> str:
  """Return a stable "name at file:line" description of ``fun``.

  See DebugInfo.fun_src_info.  Falls back to the built-in function's name,
  or "<unknown>", when ``fun`` has no code object.
  """
  while isinstance(fun, partial):
    fun = fun.func
  fun = inspect.unwrap(fun)
  try:
    filename = fun.__code__.co_filename
    lineno = fun.__code__.co_firstlineno
    # Bug fix: `filename` was computed but a literal "(unknown)" was
    # interpolated in its place; report the actual source location.
    return f"{fun.__name__} at {filename}:{lineno}"
  except AttributeError:
    try:
      fun_str = str(fun)
    except Exception:  # str() itself may fail; was a bare `except:`.
      return "<unknown>"
    # By contract, the function name has no spaces; also, we want to avoid
    # fun_sourceinfo of the form "<object Foo at 0x1234>", because it makes
    # lowering non-deterministic.
    if m := _fun_name_re.match(fun_str):
      return m.group(1)
    return "<unknown>"
def _non_static_arg_names(fn_signature: inspect.Signature | None,
                          args: Sequence[Any], kwargs: dict[str, Any],
                          static_argnums: Sequence[int],
                          static_argnames: Sequence[str],
                          ) -> tuple[str, ...]:
  """Returns the names of the non-static arguments.

  If the `fn_signature` is given then we get from it the names of the
  top-level arguments. In other cases, including when the `args` and `kwargs`
  do not match the signature, we use names like `args[0]`, `args[1]`, etc.
  """
  # Use the same argument parsing as jit: positional followed by kwargs
  # sorted by keys.
  static = object()  # sentinel marking static (excluded) leaves
  static_argnums_ = _ensure_inbounds(True, len(args), static_argnums)
  static_argnames_ = set(static_argnames)
  # Replace static entries with the sentinel so they are filtered at the end.
  args_ = [static if i in static_argnums_ else x for i, x in enumerate(args)]
  kwargs_ = {k: static if k in static_argnames_ else x for k, x in kwargs.items()}
  ordered_args: Sequence[tuple[str, Any]] | None = None
  if fn_signature is not None:
    try:
      ba = fn_signature.bind(*args_, **kwargs_)
    except (ValueError, TypeError):
      # Example args don't match the signature; fall back to generic names.
      pass
    else:
      # Do we have a **kwargs parameter? Remember its name if so.
      kwargs_name = next((name for name, p in fn_signature.parameters.items()
                          if p.kind == inspect.Parameter.VAR_KEYWORD), None)
      # Positional argument are those not passed by keyword and not passed
      # by **kwargs.
      positional = [(name, x) for name, x in ba.arguments.items()
                    if name not in kwargs and name != kwargs_name]
      # Keyword arguments are passed sorted by actual kwarg keyword
      sorted_kwargs = sorted(((name, x) for name, x in kwargs_.items()),
                             key=lambda name_x: name_x[0])
      # Names captured by **kwargs are rendered as kwargs_name['key'].
      sorted_kwargs = [(name if name in ba.arguments else f"{kwargs_name}['{name}']",
                        x)
                       for name, x in sorted_kwargs]
      ordered_args = positional + sorted_kwargs
  if ordered_args is None:
    # No usable signature: group everything under generic args/kwargs names.
    positional = [("args", args_)]
    keyword = sorted([(f"kwargs['{name}']", x) for name, x in kwargs_.items() if x is not static],
                     key=lambda name_x: name_x[0])
    ordered_args = positional + keyword
  # One name per pytree leaf, skipping leaves marked static.
  return tuple(f'{name}{lu._clean_keystr_arg_names(path)}'
               for name, x in ordered_args
               for path, l in generate_key_paths(x) if l is not static)
| _HashableWithStrictTypeEquality |
python | weaviate__weaviate-python-client | weaviate/cluster/types.py | {
"start": 544,
"end": 753
} | class ____(TypedDict):
batchStats: BatchStats
gitHash: str
name: str
shards: Optional[List[Shard]]
stats: Stats
status: str
version: str
Verbosity = Literal["minimal", "verbose"]
| Node |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 10980,
"end": 11130
} | class ____(_NumberBoundError):
code = 'number.not_le'
msg_template = 'ensure this value is less than or equal to {limit_value}'
| NumberNotLeError |
python | openai__openai-python | examples/realtime/audio_util.py | {
"start": 926,
"end": 4266
} | class ____:
def __init__(self):
self.queue = []
self.lock = threading.Lock()
self.stream = sd.OutputStream(
callback=self.callback,
samplerate=SAMPLE_RATE,
channels=CHANNELS,
dtype=np.int16,
blocksize=int(CHUNK_LENGTH_S * SAMPLE_RATE),
)
self.playing = False
self._frame_count = 0
def callback(self, outdata, frames, time, status): # noqa
with self.lock:
data = np.empty(0, dtype=np.int16)
# get next item from queue if there is still space in the buffer
while len(data) < frames and len(self.queue) > 0:
item = self.queue.pop(0)
frames_needed = frames - len(data)
data = np.concatenate((data, item[:frames_needed]))
if len(item) > frames_needed:
self.queue.insert(0, item[frames_needed:])
self._frame_count += len(data)
# fill the rest of the frames with zeros if there is no more data
if len(data) < frames:
data = np.concatenate((data, np.zeros(frames - len(data), dtype=np.int16)))
outdata[:] = data.reshape(-1, 1)
def reset_frame_count(self):
self._frame_count = 0
def get_frame_count(self):
return self._frame_count
def add_data(self, data: bytes):
with self.lock:
# bytes is pcm16 single channel audio data, convert to numpy array
np_data = np.frombuffer(data, dtype=np.int16)
self.queue.append(np_data)
if not self.playing:
self.start()
def start(self):
self.playing = True
self.stream.start()
def stop(self):
self.playing = False
self.stream.stop()
with self.lock:
self.queue = []
def terminate(self):
self.stream.close()
async def send_audio_worker_sounddevice(
connection: AsyncRealtimeConnection,
should_send: Callable[[], bool] | None = None,
start_send: Callable[[], Awaitable[None]] | None = None,
):
sent_audio = False
device_info = sd.query_devices()
print(device_info)
read_size = int(SAMPLE_RATE * 0.02)
stream = sd.InputStream(
channels=CHANNELS,
samplerate=SAMPLE_RATE,
dtype="int16",
)
stream.start()
try:
while True:
if stream.read_available < read_size:
await asyncio.sleep(0)
continue
data, _ = stream.read(read_size)
if should_send() if should_send else True:
if not sent_audio and start_send:
await start_send()
await connection.send(
{"type": "input_audio_buffer.append", "audio": base64.b64encode(data).decode("utf-8")}
)
sent_audio = True
elif sent_audio:
print("Done, triggering inference")
await connection.send({"type": "input_audio_buffer.commit"})
await connection.send({"type": "response.create", "response": {}})
sent_audio = False
await asyncio.sleep(0)
except KeyboardInterrupt:
pass
finally:
stream.stop()
stream.close()
| AudioPlayerAsync |
python | numpy__numpy | numpy/random/tests/test_random.py | {
"start": 11866,
"end": 45716
} | class ____:
# Make sure the random distribution returns the correct value for a
# given seed
seed = 1234567890
def test_rand(self):
rng = random.RandomState(self.seed)
actual = rng.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
rng = random.RandomState(self.seed)
actual = rng.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
rng = random.RandomState(self.seed)
actual = rng.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
rng = random.RandomState(self.seed)
with pytest.warns(DeprecationWarning):
actual = rng.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with pytest.warns(DeprecationWarning):
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random(self):
rng = random.RandomState(self.seed)
actual = rng.random((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
rng = random.RandomState(self.seed)
actual = rng.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
rng = random.RandomState(self.seed)
actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
rng = random.RandomState(self.seed)
actual = rng.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
rng = random.RandomState(self.seed)
actual = rng.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
rng = random.RandomState(self.seed)
actual = rng.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = ()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, np.random.choice, a, p=p)
def test_bytes(self):
rng = random.RandomState(self.seed)
actual = rng.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object), ("b", np.int32)])]:
rng = random.RandomState(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
rng.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
@pytest.mark.parametrize("random",
[np.random, np.random.RandomState(), np.random.default_rng()])
def test_shuffle_untyped_warning(self, random):
# Create a dict works like a sequence but isn't one
values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
with pytest.warns(UserWarning,
match="you are shuffling a 'dict' object") as rec:
random.shuffle(values)
assert "test_random" in rec[0].filename
@pytest.mark.parametrize("random",
[np.random, np.random.RandomState(), np.random.default_rng()])
@pytest.mark.parametrize("use_array_like", [True, False])
def test_shuffle_no_object_unpacking(self, random, use_array_like):
class MyArr(np.ndarray):
pass
items = [
None, np.array([3]), np.float64(3), np.array(10), np.float64(7)
]
arr = np.array(items, dtype=object)
item_ids = {id(i) for i in items}
if use_array_like:
arr = arr.view(MyArr)
# The array was created fine, and did not modify any objects:
assert all(id(i) in item_ids for i in arr)
if use_array_like and not isinstance(random, np.random.Generator):
# The old API gives incorrect results, but warns about it.
with pytest.warns(UserWarning,
match="Shuffling a one dimensional array.*"):
random.shuffle(arr)
else:
random.shuffle(arr)
assert all(id(i) in item_ids for i in arr)
def test_shuffle_memoryview(self):
# gh-18273
# allow graceful handling of memoryviews
# (treat the same as arrays)
rng = random.RandomState(self.seed)
a = np.arange(5).data
rng.shuffle(a)
assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
rng = random.RandomState(self.seed)
rng.shuffle(a)
assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
rng = np.random.default_rng(self.seed)
rng.shuffle(a)
assert_equal(np.asarray(a), [4, 1, 0, 3, 2])
def test_shuffle_not_writeable(self):
a = np.zeros(3)
a.flags.writeable = False
with pytest.raises(ValueError, match='read-only'):
np.random.shuffle(a)
def test_beta(self):
rng = random.RandomState(self.seed)
actual = rng.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
rng = random.RandomState(self.seed)
actual = rng.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
rng = random.RandomState(self.seed)
actual = rng.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
rng = random.RandomState(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = rng.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_exponential(self):
rng = random.RandomState(self.seed)
actual = rng.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
rng = random.RandomState(self.seed)
actual = rng.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
rng = random.RandomState(self.seed)
actual = rng.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
rng = random.RandomState(self.seed)
actual = rng.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
rng = random.RandomState(self.seed)
actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
rng = random.RandomState(self.seed)
actual = rng.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = rng.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = rng.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = rng.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = rng.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
rng = random.RandomState(self.seed)
actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
rng = random.RandomState(self.seed)
actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
rng = random.RandomState(self.seed)
actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
rng = random.RandomState(self.seed)
actual = rng.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
rng = random.RandomState(self.seed)
actual = rng.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
rng = random.RandomState(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = rng.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = rng.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(rng.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, rng.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with warnings.catch_warnings():
warnings.simplefilter('error')
rng.multivariate_normal(mean, cov)
def test_negative_binomial(self):
rng = random.RandomState(self.seed)
actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
rng = random.RandomState(self.seed)
actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
rng = random.RandomState(self.seed)
actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
rng = random.RandomState(self.seed)
actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
rng = random.RandomState(self.seed)
actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
rng = random.RandomState(self.seed)
actual = rng.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
rng = random.RandomState(self.seed)
actual = rng.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg] * 10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig] * 10)
def test_power(self):
rng = random.RandomState(self.seed)
actual = rng.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
rng = random.RandomState(self.seed)
actual = rng.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
rng = random.RandomState(self.seed)
actual = rng.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
rng = random.RandomState(self.seed)
actual = rng.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
rng = random.RandomState(self.seed)
actual = rng.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
rng = random.RandomState(self.seed)
actual = rng.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
rng = random.RandomState(self.seed)
actual = rng.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
rng = random.RandomState(self.seed)
actual = rng.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
rng = random.RandomState(self.seed)
actual = rng.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
__index__ = __int__
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
rng = random.RandomState(self.seed)
actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
rng = random.RandomState(self.seed)
actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
rng = random.RandomState(self.seed)
actual = rng.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
np.random.seed(self.seed)
assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
rng = random.RandomState(self.seed)
actual = rng.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
| TestRandomDist |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 43918,
"end": 51927
} | class ____:
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1. / 3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
for out, inp, msg in _gen_alignment_data(dtype=np.float32,
type='unary',
max_size=11):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
for out, inp, msg in _gen_alignment_data(dtype=np.float64,
type='unary',
max_size=7):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
def test_power_complex(self):
x = np.array([1 + 2j, 2 + 3j, 3 + 4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j])
assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3])
assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4])
assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)])
assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2])
assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197,
(-117 - 44j) / 15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j),
ncu.sqrt(3 + 4j)])
norm = 1. / ((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443 + 16124j, 23161315 + 58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex128)
with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z * z)
assert_complex_equal(z**3, z * z * z)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1 + 0j])
cnan = np.array([complex(np.nan, np.nan)])
# FIXME cinf not tested.
#cinf = np.array([complex(np.inf, 0)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
with np.errstate(invalid="ignore"):
assert_complex_equal(np.power(zero, 0 + 1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1 + 0.2j), cnan)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_zero_power_nonzero(self):
# Testing 0^{Non-zero} issue 18378
zero = np.array([0.0 + 0.0j])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# Complex powers with positive real part will not generate a warning
assert_complex_equal(np.power(zero, 1 + 4j), zero)
assert_complex_equal(np.power(zero, 2 - 3j), zero)
# Testing zero values when real part is greater than zero
assert_complex_equal(np.power(zero, 1 + 1j), zero)
assert_complex_equal(np.power(zero, 1 + 0j), zero)
assert_complex_equal(np.power(zero, 1 - 1j), zero)
# Complex powers will negative real part or 0 (provided imaginary
# part is not zero) will generate a NAN and hence a RUNTIME warning
with pytest.warns(expected_warning=RuntimeWarning) as r:
assert_complex_equal(np.power(zero, -1 + 1j), cnan)
assert_complex_equal(np.power(zero, -2 - 3j), cnan)
assert_complex_equal(np.power(zero, -7 + 0j), cnan)
assert_complex_equal(np.power(zero, 0 + 1j), cnan)
assert_complex_equal(np.power(zero, 0 - 1j), cnan)
assert len(r) == 5
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
res = x**2.0
assert_((x**2.00001).dtype is res.dtype)
assert_array_equal(res, [1, 4, 9])
# check the inplace operation on the casted copy doesn't mess with x
assert_(not np.may_share_memory(res, x))
assert_array_equal(x, [1, 2, 3])
# Check that the fast path ignores 1-element not 0-d arrays
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
def test_integer_power(self):
a = np.array([15, 15], 'i8')
b = np.power(a, a)
assert_equal(b, [437893890380859375, 437893890380859375])
def test_integer_power_with_integer_zero_exponent(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
arr = np.arange(-10, 10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
dtypes = np.typecodes['UnsignedInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
def test_integer_power_of_1(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(1, arr), np.ones_like(arr))
def test_integer_power_of_zero(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(1, 10, dtype=dt)
assert_equal(np.power(0, arr), np.zeros_like(arr))
def test_integer_to_negative_power(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
a = np.array([0, 1, 2, 3], dtype=dt)
b = np.array([0, 1, 2, -3], dtype=dt)
one = np.array(1, dtype=dt)
minusone = np.array(-1, dtype=dt)
assert_raises(ValueError, np.power, a, b)
assert_raises(ValueError, np.power, a, minusone)
assert_raises(ValueError, np.power, one, b)
assert_raises(ValueError, np.power, one, minusone)
def test_float_to_inf_power(self):
for dt in [np.float32, np.float64]:
a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt)
b = np.array([np.inf, -np.inf, np.inf, -np.inf,
np.inf, -np.inf, np.inf, -np.inf], dt)
r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt)
assert_equal(np.power(a, b), r)
def test_power_fast_paths(self):
# gh-26055
for dt in [np.float32, np.float64]:
a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt)
expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf])
result = np.power(a, 2.)
assert_array_max_ulp(result, expected.astype(dt), maxulp=1)
a = np.array([0, 1.1, 2, 12e12], dt)
expected = np.sqrt(a).astype(dt)
result = np.power(a, 0.5)
assert_array_max_ulp(result, expected, maxulp=1)
| TestPower |
python | pypa__hatch | tests/cli/test/test_test.py | {
"start": 30560,
"end": 36380
} | class ____:
@pytest.mark.usefixtures("env_run")
@pytest.mark.parametrize("option", ["--include", "--exclude"])
def test_usage_with_all(self, hatch, temp_dir, config_file, helpers, option):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("test", "--all", option, "py=3.10")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
The --all option cannot be used with the --include or --exclude options.
"""
)
def test_include(self, hatch, temp_dir, config_file, helpers, env_run, mocker):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config["tool"]["hatch"]["envs"] = {
"hatch-test": {
"matrix": [{"python": ["3.12", "3.10", "3.8"]}],
"scripts": {
"run": "test {env_name}",
"run-cov": "test with coverage",
"cov-combine": "combine coverage",
"cov-report": "show coverage",
},
}
}
project.save_config(config)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("test", "-i", "py=3.10")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
──────────────────────────────────────── hatch-test.py3.10 ─────────────────────────────────────────
"""
)
assert env_run.call_args_list == [
mocker.call("test hatch-test.py3.10", shell=True),
]
assert not (data_path / ".config" / "coverage").exists()
def test_exclude(self, hatch, temp_dir, config_file, helpers, env_run, mocker):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config["tool"]["hatch"]["envs"] = {
"hatch-test": {
"matrix": [{"python": ["3.12", "3.10", "3.8"]}],
"scripts": {
"run": "test {env_name}",
"run-cov": "test with coverage",
"cov-combine": "combine coverage",
"cov-report": "show coverage",
},
}
}
project.save_config(config)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("test", "-x", "py=3.10")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
──────────────────────────────────────── hatch-test.py3.12 ─────────────────────────────────────────
───────────────────────────────────────── hatch-test.py3.8 ─────────────────────────────────────────
"""
)
assert env_run.call_args_list == [
mocker.call("test hatch-test.py3.12", shell=True),
mocker.call("test hatch-test.py3.8", shell=True),
]
assert not (data_path / ".config" / "coverage").exists()
def test_python(self, hatch, temp_dir, config_file, helpers, env_run, mocker):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config["tool"]["hatch"]["envs"] = {
"hatch-test": {
"matrix": [{"python": ["3.12", "3.10", "3.8"]}],
"scripts": {
"run": "test {env_name}",
"run-cov": "test with coverage",
"cov-combine": "combine coverage",
"cov-report": "show coverage",
},
}
}
project.save_config(config)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("test", "-py", "3.10")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
──────────────────────────────────────── hatch-test.py3.10 ─────────────────────────────────────────
"""
)
assert env_run.call_args_list == [
mocker.call("test hatch-test.py3.10", shell=True),
]
assert not (data_path / ".config" / "coverage").exists()
| TestFilters |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 33933,
"end": 35744
} | class ____(LinkInlineProcessor):
""" Match to a stored reference and return link element. """
NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE)
RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE)
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
"""
Return [`Element`][xml.etree.ElementTree.Element] returned by `makeTag` method or `(None, None, None)`.
"""
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
id, end, handled = self.evalId(data, index, text)
if not handled:
return None, None, None
# Clean up line breaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if id not in self.md.references: # ignore undefined refs
return None, m.start(0), end
href, title = self.md.references[id]
return self.makeTag(href, title, text), m.start(0), end
def evalId(self, data: str, index: int, text: str) -> tuple[str | None, int, bool]:
"""
Evaluate the id portion of `[ref][id]`.
If `[ref][]` use `[ref]`.
"""
m = self.RE_LINK.match(data, pos=index)
if not m:
return None, index, False
else:
id = m.group(1).lower()
end = m.end(0)
if not id:
id = text.lower()
return id, end, True
def makeTag(self, href: str, title: str, text: str) -> etree.Element:
""" Return an `a` [`Element`][xml.etree.ElementTree.Element]. """
el = etree.Element('a')
el.set('href', href)
if title:
el.set('title', title)
el.text = text
return el
| ReferenceInlineProcessor |
python | numpy__numpy | numpy/_core/tests/test_overrides.py | {
"start": 850,
"end": 4435
} | class ____:
def test_ndarray(self):
array = np.array(1)
args = _get_implementing_args([array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, 1])
assert_equal(list(args), [array])
args = _get_implementing_args([1, array])
assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
array = np.array(1).view(np.ndarray)
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
args = _get_implementing_args([array, override_sub])
assert_equal(list(args), [override_sub, array])
args = _get_implementing_args([array, no_override_sub])
assert_equal(list(args), [no_override_sub, array])
args = _get_implementing_args(
[override_sub, no_override_sub])
assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
other = Other()
args = _get_implementing_args([other, array])
assert_equal(list(args), [other, array])
args = _get_implementing_args([array, other])
assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
subarray = np.array(1).view(OverrideSub)
other = Other()
assert_equal(_get_implementing_args([array, subarray, other]),
[subarray, array, other])
assert_equal(_get_implementing_args([array, other, subarray]),
[subarray, array, other])
def test_many_duck_arrays(self):
class A:
__array_function__ = _return_not_implemented
class B(A):
__array_function__ = _return_not_implemented
class C(A):
__array_function__ = _return_not_implemented
class D:
__array_function__ = _return_not_implemented
a = A()
b = B()
c = C()
d = D()
assert_equal(_get_implementing_args([1]), [])
assert_equal(_get_implementing_args([a]), [a])
assert_equal(_get_implementing_args([a, 1]), [a])
assert_equal(_get_implementing_args([a, a, a]), [a])
assert_equal(_get_implementing_args([a, d, a]), [a, d])
assert_equal(_get_implementing_args([a, b]), [b, a])
assert_equal(_get_implementing_args([b, a]), [b, a])
assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
def test_too_many_duck_arrays(self):
namespace = {'__array_function__': _return_not_implemented}
types = [type('A' + str(i), (object,), namespace) for i in range(65)]
relevant_args = [t() for t in types]
actual = _get_implementing_args(relevant_args[:64])
assert_equal(actual, relevant_args[:64])
with assert_raises_regex(TypeError, 'distinct argument types'):
_get_implementing_args(relevant_args)
| TestGetImplementingArgs |
python | openai__gym | gym/envs/classic_control/cartpole.py | {
"start": 368,
"end": 11570
} | class ____(gym.Env[np.ndarray, Union[int, np.ndarray]]):
"""
### Description
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson in
["Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem"](https://ieeexplore.ieee.org/document/6313077).
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track.
The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces
in the left and right direction on the cart.
### Action Space
The action is a `ndarray` with shape `(1,)` which can take values `{0, 1}` indicating the direction
of the fixed force the cart is pushed with.
| Num | Action |
|-----|------------------------|
| 0 | Push cart to the left |
| 1 | Push cart to the right |
**Note**: The velocity that is reduced or increased by the applied force is not fixed and it depends on the angle
the pole is pointing. The center of gravity of the pole varies the amount of energy needed to move the cart underneath it
### Observation Space
The observation is a `ndarray` with shape `(4,)` with the values corresponding to the following positions and velocities:
| Num | Observation | Min | Max |
|-----|-----------------------|---------------------|-------------------|
| 0 | Cart Position | -4.8 | 4.8 |
| 1 | Cart Velocity | -Inf | Inf |
| 2 | Pole Angle | ~ -0.418 rad (-24°) | ~ 0.418 rad (24°) |
| 3 | Pole Angular Velocity | -Inf | Inf |
**Note:** While the ranges above denote the possible values for observation space of each element,
it is not reflective of the allowed values of the state space in an unterminated episode. Particularly:
- The cart x-position (index 0) can be take values between `(-4.8, 4.8)`, but the episode terminates
if the cart leaves the `(-2.4, 2.4)` range.
- The pole angle can be observed between `(-.418, .418)` radians (or **±24°**), but the episode terminates
if the pole angle is not in the range `(-.2095, .2095)` (or **±12°**)
### Rewards
Since the goal is to keep the pole upright for as long as possible, a reward of `+1` for every step taken,
including the termination step, is allotted. The threshold for rewards is 475 for v1.
### Starting State
All observations are assigned a uniformly random value in `(-0.05, 0.05)`
### Episode End
The episode ends if any one of the following occurs:
1. Termination: Pole Angle is greater than ±12°
2. Termination: Cart Position is greater than ±2.4 (center of the cart reaches the edge of the display)
3. Truncation: Episode length is greater than 500 (200 for v0)
### Arguments
```
gym.make('CartPole-v1')
```
No additional arguments are currently supported.
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 50,
}
def __init__(self, render_mode: Optional[str] = None):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = self.masspole + self.masscart
self.length = 0.5 # actually half the pole's length
self.polemass_length = self.masspole * self.length
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = "euler"
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array(
[
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max,
],
dtype=np.float32,
)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.render_mode = render_mode
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.isopen = True
self.state = None
self.steps_beyond_terminated = None
def step(self, action):
err_msg = f"{action!r} ({type(action)}) invalid"
assert self.action_space.contains(action), err_msg
assert self.state is not None, "Call reset before using step method."
x, x_dot, theta, theta_dot = self.state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (
force + self.polemass_length * theta_dot**2 * sintheta
) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length * (4.0 / 3.0 - self.masspole * costheta**2 / self.total_mass)
)
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == "euler":
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x, x_dot, theta, theta_dot)
terminated = bool(
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
if not terminated:
reward = 1.0
elif self.steps_beyond_terminated is None:
# Pole just fell!
self.steps_beyond_terminated = 0
reward = 1.0
else:
if self.steps_beyond_terminated == 0:
logger.warn(
"You are calling 'step()' even though this "
"environment has already returned terminated = True. You "
"should always call 'reset()' once you receive 'terminated = "
"True' -- any further steps are undefined behavior."
)
self.steps_beyond_terminated += 1
reward = 0.0
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), reward, terminated, False, {}
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
):
super().reset(seed=seed)
# Note that if you use custom reset bounds, it may lead to out-of-bound
# state/observations.
low, high = utils.maybe_parse_reset_bounds(
options, -0.05, 0.05 # default low
) # default high
self.state = self.np_random.uniform(low=low, high=high, size=(4,))
self.steps_beyond_terminated = None
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), {}
def render(self):
if self.render_mode is None:
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError:
raise DependencyNotInstalled(
"pygame is not installed, run `pip install gym[classic_control]`"
)
if self.screen is None:
pygame.init()
if self.render_mode == "human":
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height)
)
else: # mode == "rgb_array"
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.x_threshold * 2
scale = self.screen_width / world_width
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.state is None:
return None
x = self.state
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cartx = x[0] * scale + self.screen_width / 2.0 # MIDDLE OF CART
carty = 100 # TOP OF CART
cart_coords = [(l, b), (l, t), (r, t), (r, b)]
cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords]
gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0))
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole_coords = []
for coord in [(l, b), (l, t), (r, t), (r, b)]:
coord = pygame.math.Vector2(coord).rotate_rad(-x[2])
coord = (coord[0] + cartx, coord[1] + carty + axleoffset)
pole_coords.append(coord)
gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.aacircle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.filled_circle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0))
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
if self.render_mode == "human":
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
| CartPoleEnv |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/integration/test_vendor_direct_fulfillment_shipping.py | {
"start": 2136,
"end": 6564
} | class ____:
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().build(),
_vendor_direct_fulfillment_shipping_response().with_record(_shipping_label_record()).build(),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_two_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().build(),
_vendor_direct_fulfillment_shipping_response().with_pagination().with_record(_shipping_label_record()).build(),
)
query_params_with_next_page_token = {
_REPLICATION_START_FIELD: _START_DATE.strftime(TIME_FORMAT),
_REPLICATION_END_FIELD: _END_DATE.strftime(TIME_FORMAT),
"nextToken": NEXT_TOKEN_STRING,
}
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().with_query_params(query_params_with_next_page_token).build(),
_vendor_direct_fulfillment_shipping_response()
.with_record(_shipping_label_record())
.with_record(_shipping_label_record())
.build(),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_two_slices_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
end_date = _START_DATE.add(days=8)
mock_auth(http_mocker)
query_params_first_slice = {
_REPLICATION_START_FIELD: _START_DATE.strftime(TIME_FORMAT),
_REPLICATION_END_FIELD: _START_DATE.add(days=6, hours=23, minutes=59, seconds=59).strftime(TIME_FORMAT),
}
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().with_query_params(query_params_first_slice).build(),
_vendor_direct_fulfillment_shipping_response().with_record(_shipping_label_record()).build(),
)
query_params_second_slice = {
_REPLICATION_START_FIELD: _START_DATE.add(days=7).strftime(TIME_FORMAT),
_REPLICATION_END_FIELD: end_date.strftime(TIME_FORMAT),
}
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().with_query_params(query_params_second_slice).build(),
_vendor_direct_fulfillment_shipping_response().with_record(_shipping_label_record()).build(),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(end_date))
assert len(output.records) == 2
@HttpMocker()
def test_given_http_status_500_then_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().build(),
[
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
_vendor_direct_fulfillment_shipping_response().with_record(_shipping_label_record()).build(),
],
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_on_availability_when_read_then_raise_system_error(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_direct_fulfillment_shipping_request().build(),
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | redis__redis-py | redis/auth/err.py | {
"start": 196,
"end": 522
} | class ____(Exception):
"""
Represents an exception related to invalid token schema.
"""
def __init__(self, missing_fields: Iterable[str] = []):
super().__init__(
"Unexpected token schema. Following fields are missing: "
+ ", ".join(missing_fields)
)
| InvalidTokenSchemaErr |
python | numba__numba | numba/cuda/tests/cudapy/test_multithreads.py | {
"start": 1214,
"end": 2861
} | class ____(CUDATestCase):
@unittest.skipIf(not has_concurrent_futures, "no concurrent.futures")
def test_concurrent_compiling(self):
check_concurrent_compiling()
@unittest.skipIf(not has_mp_get_context, "no multiprocessing.get_context")
def test_spawn_concurrent_compilation(self):
# force CUDA context init
cuda.get_current_device()
# use "spawn" to avoid inheriting the CUDA context
ctx = multiprocessing.get_context('spawn')
q = ctx.Queue()
p = ctx.Process(target=spawn_process_entry, args=(q,))
p.start()
try:
err = q.get()
finally:
p.join()
if err is not None:
raise AssertionError(err)
self.assertEqual(p.exitcode, 0, 'test failed in child process')
def test_invalid_context_error_with_d2h(self):
def d2h(arr, out):
out[:] = arr.copy_to_host()
arr = np.arange(1, 4)
out = np.zeros_like(arr)
darr = cuda.to_device(arr)
th = threading.Thread(target=d2h, args=[darr, out])
th.start()
th.join()
np.testing.assert_equal(arr, out)
def test_invalid_context_error_with_d2d(self):
def d2d(dst, src):
dst.copy_to_device(src)
arr = np.arange(100)
common = cuda.to_device(arr)
darr = cuda.to_device(np.zeros(common.shape, dtype=common.dtype))
th = threading.Thread(target=d2d, args=[darr, common])
th.start()
th.join()
np.testing.assert_equal(darr.copy_to_host(), arr)
if __name__ == '__main__':
unittest.main()
| TestMultiThreadCompiling |
python | sphinx-doc__sphinx | sphinx/util/_files.py | {
"start": 223,
"end": 1912
} | class ____(dict[str, tuple[set[str], str]]): # NoQA: FURB189
"""A dictionary that automatically generates unique names for its keys,
interpreted as filenames, and keeps track of a set of docnames they
appear in. Used for images and downloadable files in the environment.
"""
def __init__(self) -> None:
super().__init__()
self._existing: set[str] = set()
def add_file(self, docname: str, newfile: str | os.PathLike[str]) -> str:
newfile = str(newfile)
if newfile in self:
docnames, unique_name = self[newfile]
docnames.add(docname)
return unique_name
new_file = Path(newfile)
unique_name = new_file.name
base = new_file.stem
ext = new_file.suffix
i = 0
while unique_name in self._existing:
i += 1
unique_name = f'{base}{i}{ext}'
self[newfile] = ({docname}, unique_name)
self._existing.add(unique_name)
return unique_name
def purge_doc(self, docname: str) -> None:
for filename, (docs, unique) in list(self.items()):
docs.discard(docname)
if not docs:
del self[filename]
self._existing.discard(unique)
def merge_other(
self, docnames: Set[str], other: dict[str, tuple[set[str], str]]
) -> None:
for filename, (docs, _unique) in other.items():
for doc in docs & set(docnames):
self.add_file(doc, filename)
def __getstate__(self) -> set[str]:
return self._existing
def __setstate__(self, state: set[str]) -> None:
self._existing = state
| FilenameUniqDict |
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 5716,
"end": 5920
} | class ____(Accessor):
@classmethod
def reader(cls, options):
return Pipeline().append_stage(
ExpandRangeStage(options), only_if=options.get("expand", None)
)
| BaseAccessor |
python | TheAlgorithms__Python | data_structures/hashing/hash_table_with_linked_list.py | {
"start": 67,
"end": 846
} | class ____(HashTable):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _set_value(self, key, data):
self.values[key] = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(data)
self._keys[key] = self.values[key]
def balanced_factor(self):
return (
sum(self.charge_factor - len(slot) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def _collision_resolution(self, key, data=None):
if not (
len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
):
return key
return super()._collision_resolution(key, data)
| HashTableWithLinkedList |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py | {
"start": 261,
"end": 332
} | class ____(Enum):
A = 1.0
B = 2.5
C = 2.5 # PIE796
| FakeEnum4 |
python | sanic-org__sanic | sanic/cli/base.py | {
"start": 145,
"end": 371
} | class ____(ArgumentParser):
def _check_value(self, action: Action, value: Any) -> None:
if isinstance(action, SanicSubParsersAction):
return
super()._check_value(action, value)
| SanicArgumentParser |
python | pytorch__pytorch | test/inductor/test_op_completeness.py | {
"start": 419,
"end": 1590
} | class ____(TestCase):
def verify_ops_handler_completeness(self, handler):
for op in OP_NAMES:
self.assertIsNot(
getattr(handler, op),
getattr(OpsHandler, op),
msg=f"{handler} must implement {op}",
)
extra_ops = list_ops(handler) - OP_NAMES
if extra_ops:
raise AssertionError(
f"{handler} has an extra ops: {extra_ops}, add them to OpHandler class or prefix with `_`"
)
def test_triton_overrides(self):
self.verify_ops_handler_completeness(TritonKernelOverrides)
def test_cpp_overrides(self):
self.verify_ops_handler_completeness(CppOverrides)
def test_cpp_vec_overrides(self):
self.verify_ops_handler_completeness(CppVecOverrides)
def test_halide_overrides(self):
self.verify_ops_handler_completeness(HalideOverrides)
@unittest.skip("MPS backend not yet finished")
def test_metal_overrides(self):
self.verify_ops_handler_completeness(MetalOverrides)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
run_tests()
| TestOpCompleteness |
python | celery__celery | celery/exceptions.py | {
"start": 6466,
"end": 6540
} | class ____(CeleryError):
"""Security related exception."""
| SecurityError |
python | getsentry__sentry | src/sentry/integrations/utils/metrics.py | {
"start": 1576,
"end": 3458
} | class ____(EventLifecycleMetric, ABC):
"""A metric relating to integrations that uses a standard naming structure."""
def get_metrics_domain(self) -> str:
"""Return a constant describing the top-level metrics category.
This defaults to a catch-all value but can optionally be overridden.
"""
return "slo"
@abstractmethod
def get_integration_domain(self) -> IntegrationDomain:
"""Return the domain that the integration belongs to."""
raise NotImplementedError
@abstractmethod
def get_integration_name(self) -> str:
"""Return the name of the integration.
This value generally should match a package name from `sentry.integrations`.
"""
raise NotImplementedError
@abstractmethod
def get_interaction_type(self) -> str:
"""Return a key representing the category of interaction being captured.
Generally, this string value should always come from an instance of an Enum
class. But each subclass can define its own Enum of interaction types and
there is no strict contract that relies on the Enum class.
"""
raise NotImplementedError
def get_metric_key(self, outcome: EventLifecycleOutcome) -> str:
tokens = ("integrations", self.get_metrics_domain(), str(outcome))
return ".".join(tokens)
def get_metric_tags(self) -> Mapping[str, str]:
return {
"integration_domain": str(self.get_integration_domain()),
"integration_name": self.get_integration_name(),
"interaction_type": self.get_interaction_type(),
}
def capture(
self, assume_success: bool = True, sample_log_rate: float = 1.0
) -> "IntegrationEventLifecycle":
return IntegrationEventLifecycle(self, assume_success, sample_log_rate)
| IntegrationEventLifecycleMetric |
python | pytorch__pytorch | torch/ao/nn/intrinsic/quantized/modules/conv_relu.py | {
"start": 5781,
"end": 8513
} | class ____(nnq.Conv3d):
r"""
A ConvReLU3d module is a fused module of Conv3d and ReLU
We adopt the same interface as :class:`torch.ao.nn.quantized.Conv3d`.
Attributes: Same as torch.ao.nn.quantized.Conv3d
"""
_FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU3d # type: ignore[assignment]
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
device=None,
dtype=None,
):
assert padding_mode != "reflect", "Conv3d does not support reflection padding"
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
device=device,
dtype=dtype,
)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
if self.padding_mode != "zeros":
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(
input, _reversed_padding_repeated_twice, mode=self.padding_mode
)
return torch.ops.quantized.conv3d_relu(
input, self._packed_params, self.scale, self.zero_point
)
def _get_name(self):
return "QuantizedConvReLU3d"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
if type(mod) is torch.ao.nn.intrinsic.qat.ConvBnReLU3d:
assert mod.bn.running_var is not None and mod.bn.running_mean is not None
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight,
mod.bias,
mod.bn.running_mean,
mod.bn.running_var,
mod.bn.eps,
mod.bn.weight,
mod.bn.bias,
)
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
assert type(ref_qconv) is not torch.ao.nn.intrinsic.ConvBnReLU3d, (
"BatchNorm3d should be fused into Conv3d before converting to reference module"
)
return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
| ConvReLU3d |
python | langchain-ai__langchain | libs/core/langchain_core/agents.py | {
"start": 1302,
"end": 3294
} | class ____(Serializable):
"""Represents a request to execute an action by an agent.
The action consists of the name of the tool to execute and the input to pass
to the tool. The log is used to pass along extra information about the action.
"""
tool: str
"""The name of the Tool to execute."""
tool_input: str | dict
"""The input to pass in to the Tool."""
log: str
"""Additional information to log about the action.
This log can be used in a few ways. First, it can be used to audit what exactly the
LLM predicted to lead to this `(tool, tool_input)`.
Second, it can be used in future iterations to show the LLMs prior thoughts. This is
useful when `(tool, tool_input)` does not contain full information about the LLM
prediction (for example, any `thought` before the tool/tool_input).
"""
type: Literal["AgentAction"] = "AgentAction"
# Override init to support instantiation by position for backward compat.
def __init__(self, tool: str, tool_input: str | dict, log: str, **kwargs: Any):
"""Create an `AgentAction`.
Args:
tool: The name of the tool to execute.
tool_input: The input to pass in to the `Tool`.
log: Additional information to log about the action.
"""
super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""`AgentAction` is serializable.
Returns:
`True`
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "agent"]`
"""
return ["langchain", "schema", "agent"]
@property
def messages(self) -> Sequence[BaseMessage]:
"""Return the messages that correspond to this action."""
return _convert_agent_action_to_messages(self)
| AgentAction |
python | jazzband__django-polymorphic | src/polymorphic/templatetags/polymorphic_admin_tags.py | {
"start": 87,
"end": 1704
} | class ____(Node):
def __init__(self, base_opts, nodelist):
self.base_opts = base_opts
self.nodelist = nodelist # Note, takes advantage of Node.child_nodelists
@classmethod
def parse(cls, parser, token):
bits = token.split_contents()
if len(bits) == 2:
(tagname, base_opts) = bits
base_opts = parser.compile_filter(base_opts)
nodelist = parser.parse(("endbreadcrumb_scope",))
parser.delete_first_token()
return cls(base_opts=base_opts, nodelist=nodelist)
else:
raise TemplateSyntaxError(f"{token.contents[0]} tag expects 1 argument")
def render(self, context):
# app_label is really hard to overwrite in the standard Django ModelAdmin.
# To insert it in the template, the entire render_change_form() and delete_view() have to copied and adjusted.
# Instead, have an assignment tag that inserts that in the template.
base_opts = self.base_opts.resolve(context)
new_vars = {}
if base_opts and not isinstance(base_opts, str):
new_vars = {
"app_label": base_opts.app_label, # What this is all about
"opts": base_opts,
}
new_scope = context.push()
new_scope.update(new_vars)
html = self.nodelist.render(context)
context.pop()
return html
@register.tag
def breadcrumb_scope(parser, token):
"""
Easily allow the breadcrumb to be generated in the admin change templates.
"""
return BreadcrumbScope.parse(parser, token)
| BreadcrumbScope |
python | joblib__joblib | joblib/compressor.py | {
"start": 7007,
"end": 18241
} | class ____(io.BufferedIOBase):
"""A file object providing transparent zlib (de)compression.
TODO python2_drop: is it still needed since we dropped Python 2 support A
BinaryZlibFile can act as a wrapper for an existing file object, or refer
directly to a named file on disk.
Note that BinaryZlibFile provides only a *binary* file interface: data read
is returned as bytes, and data to be written should be given as bytes.
This object is an adaptation of the BZ2File object and is compatible with
versions of python >= 2.7.
If filename is a str or bytes object, it gives the name
of the file to be opened. Otherwise, it should be a file object,
which will be used to read or write the compressed data.
mode can be 'rb' for reading (default) or 'wb' for (over)writing
If mode is 'wb', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 produces the most compression. 3 is the default.
"""
wbits = zlib.MAX_WBITS
def __init__(self, filename, mode="rb", compresslevel=3):
# This lock must be recursive, so that BufferedIOBase's
# readline(), readlines() and writelines() don't deadlock.
self._lock = RLock()
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._pos = 0
self._size = -1
self.compresslevel = compresslevel
if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
raise ValueError(
"'compresslevel' must be an integer "
"between 1 and 9. You provided 'compresslevel={}'".format(compresslevel)
)
if mode == "rb":
self._mode = _MODE_READ
self._decompressor = zlib.decompressobj(self.wbits)
self._buffer = b""
self._buffer_offset = 0
elif mode == "wb":
self._mode = _MODE_WRITE
self._compressor = zlib.compressobj(
self.compresslevel, zlib.DEFLATED, self.wbits, zlib.DEF_MEM_LEVEL, 0
)
else:
raise ValueError("Invalid mode: %r" % (mode,))
if isinstance(filename, str):
self._fp = io.open(filename, mode)
self._closefp = True
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
else:
raise TypeError("filename must be a str or bytes object, or a file")
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
with self._lock:
if self._mode == _MODE_CLOSED:
return
try:
if self._mode in (_MODE_READ, _MODE_READ_EOF):
self._decompressor = None
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._buffer = b""
self._buffer_offset = 0
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._fp.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode in (_MODE_READ, _MODE_READ_EOF)
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
# Mode-checking helper functions.
def _check_not_closed(self):
if self.closed:
fname = getattr(self._fp, "name", None)
msg = "I/O operation on closed file"
if fname is not None:
msg += " {}".format(fname)
msg += "."
raise ValueError(msg)
def _check_can_read(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation("File not open for reading")
def _check_can_write(self):
if self._mode != _MODE_WRITE:
self._check_not_closed()
raise io.UnsupportedOperation("File not open for writing")
def _check_can_seek(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation(
"Seeking is only supported on files open for reading"
)
if not self._fp.seekable():
raise io.UnsupportedOperation(
"The underlying file object does not support seeking"
)
# Fill the readahead buffer if it is empty. Returns False on EOF.
def _fill_buffer(self):
if self._mode == _MODE_READ_EOF:
return False
# Depending on the input data, our call to the decompressor may not
# return any data. In this case, try again after reading another block.
while self._buffer_offset == len(self._buffer):
try:
rawblock = self._decompressor.unused_data or self._fp.read(_BUFFER_SIZE)
if not rawblock:
raise EOFError
except EOFError:
# End-of-stream marker and end of file. We're good.
self._mode = _MODE_READ_EOF
self._size = self._pos
return False
else:
self._buffer = self._decompressor.decompress(rawblock)
self._buffer_offset = 0
return True
# Read data until EOF.
# If return_data is false, consume the data without returning it.
def _read_all(self, return_data=True):
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset :]
self._buffer_offset = 0
blocks = []
while self._fill_buffer():
if return_data:
blocks.append(self._buffer)
self._pos += len(self._buffer)
self._buffer = b""
if return_data:
return b"".join(blocks)
# Read a block of up to n bytes.
# If return_data is false, consume the data without returning it.
def _read_block(self, n_bytes, return_data=True):
# If we have enough data buffered, return immediately.
end = self._buffer_offset + n_bytes
if end <= len(self._buffer):
data = self._buffer[self._buffer_offset : end]
self._buffer_offset = end
self._pos += len(data)
return data if return_data else None
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset :]
self._buffer_offset = 0
blocks = []
while n_bytes > 0 and self._fill_buffer():
if n_bytes < len(self._buffer):
data = self._buffer[:n_bytes]
self._buffer_offset = n_bytes
else:
data = self._buffer
self._buffer = b""
if return_data:
blocks.append(data)
self._pos += len(data)
n_bytes -= len(data)
if return_data:
return b"".join(blocks)
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b'' if the file is already at EOF.
"""
with self._lock:
self._check_can_read()
if size == 0:
return b""
elif size < 0:
return self._read_all()
else:
return self._read_block(size)
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns the number of bytes read (0 for EOF).
"""
with self._lock:
return io.BufferedIOBase.readinto(self, b)
def write(self, data):
"""Write a byte string to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
with self._lock:
self._check_can_write()
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
# Rewind the file to the beginning of the data stream.
def _rewind(self):
self._fp.seek(0, 0)
self._mode = _MODE_READ
self._pos = 0
self._decompressor = zlib.decompressobj(self.wbits)
self._buffer = b""
self._buffer_offset = 0
def seek(self, offset, whence=0):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Values for whence are:
0: start of stream (default); offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
with self._lock:
self._check_can_seek()
# Recalculate offset as an absolute file position.
if whence == 0:
pass
elif whence == 1:
offset = self._pos + offset
elif whence == 2:
# Seeking relative to EOF - we need to know the file's size.
if self._size < 0:
self._read_all(return_data=False)
offset = self._size + offset
else:
raise ValueError("Invalid value for whence: %s" % (whence,))
# Make it so that offset is the number of bytes to skip forward.
if offset < self._pos:
self._rewind()
else:
offset -= self._pos
# Read and discard data until we reach the desired position.
self._read_block(offset, return_data=False)
return self._pos
    def tell(self):
        """Return the current file position."""
        with self._lock:
            # Raises ValueError if the file has already been closed.
            self._check_not_closed()
            return self._pos
| BinaryZlibFile |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 26694,
"end": 26810
} | class ____(Interface):
def __call__(request):
"""Return a root object based on the request"""
| IRootFactory |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 1609,
"end": 1644
} | class ____(BasicEntity):
pass
| Foo |
python | sphinx-doc__sphinx | tests/roots/test-ext-autosummary/autosummary_dummy_inherited_module.py | {
"start": 43,
"end": 231
} | class ____(Foo):
def __init__(self):
#: other docstring
self.subclassattr = 'subclassattr'
super().__init__()
__all__ = ['InheritedAttrClass']
| InheritedAttrClass |
python | django__django | django/core/cache/backends/redis.py | {
"start": 837,
"end": 5101
} | class ____:
def __init__(
self,
servers,
serializer=None,
pool_class=None,
parser_class=None,
**options,
):
import redis
self._lib = redis
self._servers = servers
self._pools = {}
self._client = self._lib.Redis
if isinstance(pool_class, str):
pool_class = import_string(pool_class)
self._pool_class = pool_class or self._lib.ConnectionPool
if isinstance(serializer, str):
serializer = import_string(serializer)
if callable(serializer):
serializer = serializer()
self._serializer = serializer or RedisSerializer()
if isinstance(parser_class, str):
parser_class = import_string(parser_class)
parser_class = parser_class or self._lib.connection.DefaultParser
self._pool_options = {"parser_class": parser_class, **options}
def _get_connection_pool_index(self, write):
# Write to the first server. Read from other servers if there are more,
# otherwise read from the first server.
if write or len(self._servers) == 1:
return 0
return random.randint(1, len(self._servers) - 1)
def _get_connection_pool(self, write):
index = self._get_connection_pool_index(write)
if index not in self._pools:
self._pools[index] = self._pool_class.from_url(
self._servers[index],
**self._pool_options,
)
return self._pools[index]
def get_client(self, key=None, *, write=False):
# key is used so that the method signature remains the same and custom
# cache client can be implemented which might require the key to select
# the server, e.g. sharding.
pool = self._get_connection_pool(write)
return self._client(connection_pool=pool)
def add(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
if ret := bool(client.set(key, value, nx=True)):
client.delete(key)
return ret
else:
return bool(client.set(key, value, ex=timeout, nx=True))
def get(self, key, default):
client = self.get_client(key)
value = client.get(key)
return default if value is None else self._serializer.loads(value)
def set(self, key, value, timeout):
client = self.get_client(key, write=True)
value = self._serializer.dumps(value)
if timeout == 0:
client.delete(key)
else:
client.set(key, value, ex=timeout)
def touch(self, key, timeout):
client = self.get_client(key, write=True)
if timeout is None:
return bool(client.persist(key))
else:
return bool(client.expire(key, timeout))
def delete(self, key):
client = self.get_client(key, write=True)
return bool(client.delete(key))
def get_many(self, keys):
client = self.get_client(None)
ret = client.mget(keys)
return {
k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None
}
def has_key(self, key):
client = self.get_client(key)
return bool(client.exists(key))
def incr(self, key, delta):
client = self.get_client(key, write=True)
if not client.exists(key):
raise ValueError("Key '%s' not found." % key)
return client.incr(key, delta)
def set_many(self, data, timeout):
client = self.get_client(None, write=True)
pipeline = client.pipeline()
pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()})
if timeout is not None:
# Setting timeout for each key as redis does not support timeout
# with mset().
for key in data:
pipeline.expire(key, timeout)
pipeline.execute()
def delete_many(self, keys):
client = self.get_client(None, write=True)
client.delete(*keys)
def clear(self):
client = self.get_client(None, write=True)
return bool(client.flushdb())
| RedisCacheClient |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/layers.py | {
"start": 5819,
"end": 7640
} | class ____(MemoryModule):
"""
Memory module that implements LSTM.
"""
def __init__(
self,
input_size: int,
memory_size: int,
num_layers: int = 1,
forget_bias: float = 1.0,
kernel_init: Initialization = Initialization.XavierGlorotUniform,
bias_init: Initialization = Initialization.Zero,
):
super().__init__()
# We set hidden size to half of memory_size since the initial memory
# will be divided between the hidden state and initial cell state.
self.hidden_size = memory_size // 2
self.lstm = lstm_layer(
input_size,
self.hidden_size,
num_layers,
True,
forget_bias,
kernel_init,
bias_init,
)
@property
def memory_size(self) -> int:
return 2 * self.hidden_size
def forward(
self, input_tensor: torch.Tensor, memories: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
if exporting_to_onnx.is_exporting():
# This transpose is needed both at input and output of the LSTM when
# exporting because ONNX will expect (sequence_len, batch, memory_size)
# instead of (batch, sequence_len, memory_size)
memories = torch.transpose(memories, 0, 1)
# We don't use torch.split here since it is not supported by Sentis
h0 = memories[:, :, : self.hidden_size].contiguous()
c0 = memories[:, :, self.hidden_size :].contiguous()
hidden = (h0, c0)
lstm_out, hidden_out = self.lstm(input_tensor, hidden)
output_mem = torch.cat(hidden_out, dim=-1)
if exporting_to_onnx.is_exporting():
output_mem = torch.transpose(output_mem, 0, 1)
return lstm_out, output_mem
| LSTM |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_dc_transforms.py | {
"start": 82275,
"end": 93950
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
"""tests related to #12168"""
__dialect__ = "default"
@testing.fixture(params=[True, False])
def dc_decl_base(self, request, metadata):
_md = metadata
udd = request.param
class Base(MappedAsDataclass, DeclarativeBase):
use_descriptor_defaults = udd
if not use_descriptor_defaults:
_sa_disable_descriptor_defaults = True
metadata = _md
type_annotation_map = {
str: String().with_variant(
String(50), "mysql", "mariadb", "oracle"
)
}
yield Base
Base.registry.dispose()
def test_mapped_column_default(self, dc_decl_base):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(default="my_default")
mc = MyClass()
eq_(mc.data, "my_default")
if not MyClass.use_descriptor_defaults:
eq_(mc.__dict__["data"], "my_default")
else:
assert "data" not in mc.__dict__
eq_(MyClass.__table__.c.data.default.arg, "my_default")
def test_mapped_column_default_and_insert_default(self, dc_decl_base):
with expect_raises_message(
exc.ArgumentError,
"The 'default' and 'insert_default' parameters of "
"Column are mutually exclusive",
):
mapped_column(default="x", insert_default="y")
def test_relationship_only_none_default(self):
with expect_raises_message(
exc.ArgumentError,
r"Only 'None' is accepted as dataclass "
r"default for a relationship\(\)",
):
relationship(default="not none")
@testing.variation("uselist_type", ["implicit", "m2o_explicit"])
def test_relationship_only_nouselist_none_default(
self, dc_decl_base, uselist_type
):
with expect_raises_message(
exc.ArgumentError,
rf"On relationship {'A.bs' if uselist_type.implicit else 'B.a'}, "
"the dataclass default for relationship "
"may only be set for a relationship that references a scalar "
"value, i.e. many-to-one or explicitly uselist=False",
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
if uselist_type.implicit:
bs: Mapped[List["B"]] = relationship("B", default=None)
class B(dc_decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
data: Mapped[str]
if uselist_type.m2o_explicit:
a: Mapped[List[A]] = relationship(
"A", uselist=True, default=None
)
dc_decl_base.registry.configure()
def test_constructor_repr(self, dc_decl_base):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
bs: Mapped[List["B"]] = relationship( # noqa: F821
default_factory=list
)
class B(dc_decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
a_id: Mapped[Optional[int]] = mapped_column(
ForeignKey("a.id"), init=False
)
x: Mapped[Optional[int]] = mapped_column(default=None)
A.__qualname__ = "some_module.A"
B.__qualname__ = "some_module.B"
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x", "bs"],
varargs=None,
varkw=None,
defaults=(
(LoaderCallableStatus.DONT_SET, mock.ANY)
if A.use_descriptor_defaults
else (None, mock.ANY)
),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(B.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x"],
varargs=None,
varkw=None,
defaults=(
(LoaderCallableStatus.DONT_SET,)
if B.use_descriptor_defaults
else (None,)
),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
a2 = A("10", x=5, bs=[B("data1"), B("data2", x=12)])
eq_(
repr(a2),
"some_module.A(id=None, data='10', x=5, "
"bs=[some_module.B(id=None, data='data1', a_id=None, x=None), "
"some_module.B(id=None, data='data2', a_id=None, x=12)])",
)
a3 = A("data")
eq_(repr(a3), "some_module.A(id=None, data='data', x=None, bs=[])")
def test_defaults_if_no_init_dc_level(
self, dc_decl_base: Type[MappedAsDataclass]
):
class MyClass(dc_decl_base, init=False):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(default="default_status")
mc = MyClass()
if MyClass.use_descriptor_defaults:
# behavior change of honoring default when dataclass init=False
eq_(mc.data, "default_status")
else:
eq_(mc.data, None) # "default_status")
def test_defaults_w_no_init_attr_level(
self, dc_decl_base: Type[MappedAsDataclass]
):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(
default="default_status", init=False
)
mc = MyClass()
eq_(mc.data, "default_status")
if MyClass.use_descriptor_defaults:
assert "data" not in mc.__dict__
else:
eq_(mc.__dict__["data"], "default_status")
@testing.variation("use_attr_init", [True, False])
def test_fk_set_scenario(self, dc_decl_base, use_attr_init):
if use_attr_init:
attr_init_kw = {}
else:
attr_init_kw = {"init": False}
class Parent(dc_decl_base):
__tablename__ = "parent"
id: Mapped[int] = mapped_column(
primary_key=True, autoincrement=False
)
class Child(dc_decl_base):
__tablename__ = "child"
id: Mapped[int] = mapped_column(primary_key=True)
parent_id: Mapped[Optional[int]] = mapped_column(
ForeignKey("parent.id"), default=None
)
parent: Mapped[Optional[Parent]] = relationship(
default=None, **attr_init_kw
)
dc_decl_base.metadata.create_all(testing.db)
with Session(testing.db) as sess:
p1 = Parent(id=14)
sess.add(p1)
sess.flush()
# parent_id=14, parent=None but fk is kept
c1 = Child(id=7, parent_id=14)
sess.add(c1)
sess.flush()
if Parent.use_descriptor_defaults:
assert c1.parent is p1
else:
assert c1.parent is None
@testing.variation("use_attr_init", [True, False])
def test_merge_scenario(self, dc_decl_base, use_attr_init):
if use_attr_init:
attr_init_kw = {}
else:
attr_init_kw = {"init": False}
class MyClass(dc_decl_base):
__tablename__ = "myclass"
id: Mapped[int] = mapped_column(
primary_key=True, autoincrement=False
)
name: Mapped[str]
status: Mapped[str] = mapped_column(
default="default_status", **attr_init_kw
)
dc_decl_base.metadata.create_all(testing.db)
with Session(testing.db) as sess:
if use_attr_init:
u1 = MyClass(id=1, name="x", status="custom_status")
else:
u1 = MyClass(id=1, name="x")
u1.status = "custom_status"
sess.add(u1)
sess.flush()
u2 = sess.merge(MyClass(id=1, name="y"))
is_(u2, u1)
eq_(u2.name, "y")
if MyClass.use_descriptor_defaults:
eq_(u2.status, "custom_status")
else:
# was overridden by the default in __dict__
eq_(u2.status, "default_status")
if use_attr_init:
u3 = sess.merge(
MyClass(id=1, name="z", status="default_status")
)
else:
mc = MyClass(id=1, name="z")
mc.status = "default_status"
u3 = sess.merge(mc)
is_(u3, u1)
eq_(u3.name, "z")
# field was explicit so is overridden by merge
eq_(u3.status, "default_status")
@testing.variation("use_attr_init", [True, False])
def test_collection_merge_scenario(self, dc_decl_base, use_attr_init):
if use_attr_init:
attr_init_kw = {}
else:
attr_init_kw = {"init": False}
class MyClass(dc_decl_base):
__tablename__ = "myclass"
id: Mapped[int] = mapped_column(
primary_key=True, autoincrement=False
)
name: Mapped[str]
things: Mapped[List["Thing"]] = relationship(
cascade="all, delete-orphan",
default_factory=list,
**attr_init_kw,
)
class Thing(dc_decl_base):
__tablename__ = "thing"
id: Mapped[int] = mapped_column(
primary_key=True, autoincrement=False
)
my_id: Mapped[int] = mapped_column(
ForeignKey("myclass.id"), init=False
)
name: Mapped[str]
dc_decl_base.metadata.create_all(testing.db)
with Session(testing.db) as sess:
if use_attr_init:
u1 = MyClass(id=1, name="x", things=[Thing(id=1, name="t1")])
else:
u1 = MyClass(id=1, name="x")
u1.things = [Thing(id=1, name="t1")]
sess.add(u1)
sess.flush()
u2 = sess.merge(MyClass(id=1, name="y"))
is_(u2, u1)
eq_(u2.name, "y")
if MyClass.use_descriptor_defaults:
tt = Thing(id=1, name="t1")
tt.my_id = 1
eq_(u2.things, [tt])
else:
eq_(u2.things, [])
if use_attr_init:
u3 = sess.merge(MyClass(id=1, name="z", things=[]))
else:
mc = MyClass(id=1, name="z")
mc.things = []
u3 = sess.merge(mc)
is_(u3, u1)
eq_(u3.name, "z")
# field was explicit so is overridden by merge
eq_(u3.things, [])
| UseDescriptorDefaultsTest |
python | qdrant__qdrant-client | qdrant_client/local/distances.py | {
"start": 393,
"end": 513
} | class ____(str, Enum):
BIGGER_IS_BETTER = "bigger_is_better"
SMALLER_IS_BETTER = "smaller_is_better"
| DistanceOrder |
python | google__jax | tests/array_extensibility_test.py | {
"start": 2663,
"end": 18332
} | class ____:
"""Shortcut for specifying ShapeDtypeStruct."""
def __init__(self, dtype):
self.dtype = jax.dtypes.canonicalize_dtype(dtype)
def __getitem__(self, shape) -> jax.ShapeDtypeStruct:
if isinstance(shape, int):
shape = (shape,)
return jax.ShapeDtypeStruct(shape, self.dtype)
Bool = ShapeDtype(bool)
Int = ShapeDtype(int)
UInt = ShapeDtype('uint32')
Uint8 = ShapeDtype('uint8')
Float = ShapeDtype(float)
Complex = ShapeDtype(complex)
# NumPy namespace objects skipped in the enumeration below, mainly because
# they are not functions or do not take arrays as positional arguments.
SKIPPED_APIS = [
'apply_along_axis',
'apply_over_axes',
'arange',
'array_str',
'array_repr',
'astype',
'bartlett',
'bfloat16',
'blackman',
'block',
'bool',
'bool_',
'broadcast_shapes',
'c_',
'can_cast',
'cdouble',
'character',
'complex128',
'complex64',
'complex_',
'complexfloating',
'csingle',
'diag_indices',
'double',
'dtype',
'e',
'einsum',
'einsum_path',
'euler_gamma',
'empty',
'eye',
'finfo',
'flexible',
'float_',
'float16',
'float32',
'float4_e2m1fn',
'float64',
'float8_e3m4',
'float8_e4m3',
'float8_e4m3b11fnuz',
'float8_e4m3fn',
'float8_e4m3fnuz',
'float8_e5m2',
'float8_e5m2fnuz',
'float8_e8m0fnu',
'floating',
'from_dlpack',
'frombuffer',
'fromfile',
'fromfunction',
'fromiter',
'frompyfunc',
'fromstring',
'full',
'generic',
'geomspace',
'get_printoptions',
'gradient',
'hamming',
'hanning',
'identity',
'iinfo',
'index_exp',
'indices',
'inexact',
'inf',
'int16',
'int2',
'int32',
'int4',
'int64',
'int8',
'int_',
'integer',
'isdtype',
'issubdtype'
'iterable'
'kaiser'
'kron'
'ix_',
'linalg',
'linspace',
'load',
'logspace',
'mask_indices',
'mgrid',
'nan',
'ndarray',
'newaxis',
'number',
'object_',
'ogrid',
'ones',
'pi',
'printoptions',
'promote_types'
'r_',
'result_type',
's_',
'save',
'savez',
'set_printoptions',
'signedinteger',
'single',
'tri',
'tril_indices',
'triu_indices',
'ufunc',
'uint',
'uint16',
'uint2',
'uint32',
'uint4',
'uint64',
'uint8',
'unsignedinteger',
'vectorize',
'zeros',
]
# TODO(jakevdp): commented APIs are ones which do not yet support
# __jax_array__ on inputs. We should fix these!
NUMPY_APIS = [
NumPyAPI.sig(jnp.abs, Float[5]),
NumPyAPI.sig(jnp.absolute, Float[5]),
NumPyAPI.sig(jnp.acos, Float[5]),
NumPyAPI.sig(jnp.acosh, Float[5]),
NumPyAPI.sig(jnp.add, Float[5], Float[5]),
NumPyAPI.sig(jnp.all, Bool[5]),
NumPyAPI.sig(jnp.allclose, Float[5], Float[5]),
NumPyAPI.sig(jnp.amax, Float[5]),
NumPyAPI.sig(jnp.amin, Float[5]),
NumPyAPI.sig(jnp.angle, Float[5]),
NumPyAPI.sig(jnp.any, Float[5]),
NumPyAPI.sig(jnp.append, Float[10], Float[()]),
NumPyAPI.sig(jnp.arccos, Float[5]),
NumPyAPI.sig(jnp.arccosh, Float[5]),
NumPyAPI.sig(jnp.arcsin, Float[5]),
NumPyAPI.sig(jnp.arcsinh, Float[5]),
NumPyAPI.sig(jnp.arctan, Float[5]),
NumPyAPI.sig(jnp.arctan2, Float[5], Float[5]),
NumPyAPI.sig(jnp.arctanh, Float[5]),
NumPyAPI.sig(jnp.argmax, Float[10]),
NumPyAPI.sig(jnp.argmin, Float[10]),
NumPyAPI.sig(jnp.argpartition, Float[10], kth=5),
NumPyAPI.sig(jnp.argsort, Float[10]),
NumPyAPI.sig(jnp.argwhere, Float[10]),
NumPyAPI.sig(jnp.around, Float[5]),
NumPyAPI.sig(jnp.array, Float[5]),
NumPyAPI.sig(jnp.array_equal, Float[5], Float[5]),
NumPyAPI.sig(jnp.array_equiv, Float[5], Float[5]),
NumPyAPI.sig(jnp.array_split, Float[9], indices_or_sections=3),
NumPyAPI.sig(jnp.asarray, Float[5]),
NumPyAPI.sig(jnp.asin, Float[5]),
NumPyAPI.sig(jnp.asinh, Float[5]),
NumPyAPI.sig(jnp.atan, Float[5]),
NumPyAPI.sig(jnp.atan2, Float[5], Float[5]),
NumPyAPI.sig(jnp.atanh, Float[5]),
NumPyAPI.sig(jnp.atleast_1d, Float[5]),
NumPyAPI.sig(jnp.atleast_2d, Float[5]),
NumPyAPI.sig(jnp.atleast_3d, Float[5]),
NumPyAPI.sig(jnp.average, Float[10]),
NumPyAPI.sig(jnp.bincount, Int[10]),
NumPyAPI.sig(jnp.bitwise_and, Int[5], Int[5]),
NumPyAPI.sig(jnp.bitwise_count, Int[5]),
NumPyAPI.sig(jnp.bitwise_invert, Int[5]),
NumPyAPI.sig(jnp.bitwise_left_shift, Int[5], Int[5]),
NumPyAPI.sig(jnp.bitwise_not, Int[5]),
NumPyAPI.sig(jnp.bitwise_or, Int[5], Int[5]),
NumPyAPI.sig(jnp.bitwise_right_shift, Int[5], Int[5]),
NumPyAPI.sig(jnp.bitwise_xor, Int[5], Int[5]),
NumPyAPI.sig(jnp.broadcast_arrays, Float[5]),
NumPyAPI.sig(jnp.broadcast_to, Float[()], shape=(10,)),
NumPyAPI.sig(jnp.cbrt, Float[5]),
NumPyAPI.sig(jnp.ceil, Float[5]),
NumPyAPI.sig(jnp.choose, Int[3], [Float[3], Float[3], Float[3]], mode='clip'),
NumPyAPI.sig(jnp.clip, Float[5]),
NumPyAPI.sig(jnp.column_stack, [Float[5], Float[5], Float[5]]),
NumPyAPI.sig(jnp.compress, Float[10], Bool[10]),
NumPyAPI.sig(jnp.concat, [Float[5], Float[5]]),
NumPyAPI.sig(jnp.concatenate, [Float[5], Float[5]]),
NumPyAPI.sig(jnp.conj, Float[5]),
NumPyAPI.sig(jnp.conjugate, Float[5]),
NumPyAPI.sig(jnp.convolve, Float[7], Float[3]),
NumPyAPI.sig(jnp.copy, Float[5]),
NumPyAPI.sig(jnp.copysign, Float[5], Float[5]),
NumPyAPI.sig(jnp.corrcoef, Float[7], Float[7]),
NumPyAPI.sig(jnp.correlate, Float[7], Float[3]),
NumPyAPI.sig(jnp.cos, Float[5]),
NumPyAPI.sig(jnp.cosh, Float[5]),
NumPyAPI.sig(jnp.count_nonzero, Float[10]),
NumPyAPI.sig(jnp.cov, Float[10]),
NumPyAPI.sig(jnp.cross, Float[3], Float[3]),
NumPyAPI.sig(jnp.cumprod, Float[5]),
NumPyAPI.sig(jnp.cumsum, Float[5]),
NumPyAPI.sig(jnp.cumulative_prod, Float[5]),
NumPyAPI.sig(jnp.cumulative_sum, Float[5]),
NumPyAPI.sig(jnp.deg2rad, Float[5]),
NumPyAPI.sig(jnp.degrees, Float[5]),
NumPyAPI.sig(jnp.delete, Float[5], Int[()]),
NumPyAPI.sig(jnp.diag, Float[5]),
NumPyAPI.sig(jnp.diag_indices_from, Float[5, 5]),
NumPyAPI.sig(jnp.diagflat, Float[5]),
NumPyAPI.sig(jnp.diagonal, Float[5, 5]),
NumPyAPI.sig(jnp.diff, Float[5]),
NumPyAPI.sig(jnp.digitize, Float[5], Float[5]),
NumPyAPI.sig(jnp.divide, Float[5], Float[5]),
NumPyAPI.sig(jnp.divmod, Float[5], Float[5]),
NumPyAPI.sig(jnp.dot, Float[5], Float[5]),
NumPyAPI.sig(jnp.dsplit, Float[3, 5, 6], indices_or_sections=2),
NumPyAPI.sig(jnp.dstack, [Float[3, 5, 1], Float[3, 5, 3]]),
NumPyAPI.sig(jnp.ediff1d, Float[5]),
NumPyAPI.sig(jnp.empty_like, Float[5]),
NumPyAPI.sig(jnp.equal, Float[5], Float[5]),
NumPyAPI.sig(jnp.exp, Float[5]),
NumPyAPI.sig(jnp.exp2, Float[5]),
NumPyAPI.sig(jnp.expand_dims, Float[5], axis=0),
NumPyAPI.sig(jnp.expm1, Float[5]),
NumPyAPI.sig(jnp.extract, Bool[5], Float[5]),
NumPyAPI.sig(jnp.fabs, Float[5]),
NumPyAPI.sig(jnp.fft.fft, Float[5]),
NumPyAPI.sig(jnp.fft.fft2, Float[5, 5]),
NumPyAPI.sig(jnp.fft.ifft, Float[5]),
NumPyAPI.sig(jnp.fft.ifft2, Float[5, 5]),
NumPyAPI.sig(jnp.fill_diagonal, Float[5, 5], Float[()], inplace=False),
NumPyAPI.sig(jnp.fix, Float[5]),
NumPyAPI.sig(jnp.flatnonzero, Float[5]),
NumPyAPI.sig(jnp.flip, Float[5]),
NumPyAPI.sig(jnp.fliplr, Float[5, 5]),
NumPyAPI.sig(jnp.flipud, Float[5, 5]),
NumPyAPI.sig(jnp.float_power, Float[5], Float[5]),
NumPyAPI.sig(jnp.floor, Float[5]),
NumPyAPI.sig(jnp.floor_divide, Float[5], Float[5]),
NumPyAPI.sig(jnp.fmax, Float[5], Float[5]),
NumPyAPI.sig(jnp.fmin, Float[5], Float[5]),
NumPyAPI.sig(jnp.fmod, Float[5], Float[5]),
NumPyAPI.sig(jnp.frexp, Float[5]),
NumPyAPI.sig(jnp.full_like, Float[5], Float[()]),
NumPyAPI.sig(jnp.gcd, Int[5], Int[5]),
NumPyAPI.sig(jnp.greater, Float[5], Float[5]),
NumPyAPI.sig(jnp.greater_equal, Float[5], Float[5]),
NumPyAPI.sig(jnp.heaviside, Float[5], Float[5]),
NumPyAPI.sig(jnp.histogram, Float[5]),
NumPyAPI.sig(jnp.histogram2d, Float[5], Float[5]),
NumPyAPI.sig(jnp.histogram_bin_edges, Float[5]),
NumPyAPI.sig(jnp.histogramdd, Float[5, 3]),
NumPyAPI.sig(jnp.hsplit, Float[3, 6], indices_or_sections=2),
NumPyAPI.sig(jnp.hstack, (Float[5], Float[5])),
NumPyAPI.sig(jnp.hypot, Float[5], Float[5]),
NumPyAPI.sig(jnp.i0, Float[5]),
NumPyAPI.sig(jnp.imag, Complex[5]),
NumPyAPI.sig(jnp.inner, Float[5], Float[5]),
NumPyAPI.sig(jnp.insert, Float[5], Int[()], Float[2]),
NumPyAPI.sig(jnp.interp, Float[10], Float[5], Float[5]),
NumPyAPI.sig(jnp.intersect1d, Int[5], Int[5]),
NumPyAPI.sig(jnp.invert, Int[5]),
NumPyAPI.sig(jnp.isclose, Float[5], Float[5]),
NumPyAPI.sig(jnp.iscomplex, Float[5]),
NumPyAPI.sig(jnp.iscomplexobj, Complex[5]),
NumPyAPI.sig(jnp.isfinite, Float[5]),
NumPyAPI.sig(jnp.isin, Int[5], Int[10]),
NumPyAPI.sig(jnp.isinf, Float[5]),
NumPyAPI.sig(jnp.isnan, Float[5]),
NumPyAPI.sig(jnp.isneginf, Float[5]),
NumPyAPI.sig(jnp.isposinf, Float[5]),
NumPyAPI.sig(jnp.isreal, Float[5]),
NumPyAPI.sig(jnp.isrealobj, Float[5]),
NumPyAPI.sig(jnp.isscalar, Float[()]),
NumPyAPI.sig(jnp.lcm, Int[5], Int[5]),
NumPyAPI.sig(jnp.ldexp, Float[5], Int[5]),
NumPyAPI.sig(jnp.left_shift, Int[5], Int[5]),
NumPyAPI.sig(jnp.less, Float[5], Float[5]),
NumPyAPI.sig(jnp.less_equal, Float[5], Float[5]),
NumPyAPI.sig(jnp.lexsort, [Float[5], Float[5]]),
NumPyAPI.sig(jnp.log, Float[5]),
NumPyAPI.sig(jnp.log10, Float[5]),
NumPyAPI.sig(jnp.log1p, Float[5]),
NumPyAPI.sig(jnp.log2, Float[5]),
NumPyAPI.sig(jnp.logaddexp, Float[5], Float[5]),
NumPyAPI.sig(jnp.logaddexp2, Float[5], Float[5]),
NumPyAPI.sig(jnp.logical_and, Int[5], Int[5]),
NumPyAPI.sig(jnp.logical_not, Int[5]),
NumPyAPI.sig(jnp.logical_or, Int[5], Int[5]),
NumPyAPI.sig(jnp.logical_xor, Int[5], Int[5]),
NumPyAPI.sig(jnp.matmul, Float[5, 5], Float[5]),
NumPyAPI.sig(jnp.matrix_transpose, Float[5, 6]),
NumPyAPI.sig(jnp.matvec, Float[5, 5], Float[5]),
NumPyAPI.sig(jnp.max, Float[5]),
NumPyAPI.sig(jnp.maximum, Float[5], Float[5]),
NumPyAPI.sig(jnp.mean, Float[5]),
NumPyAPI.sig(jnp.median, Float[5]),
NumPyAPI.sig(jnp.meshgrid, Float[5], Float[5]),
NumPyAPI.sig(jnp.min, Float[5]),
NumPyAPI.sig(jnp.minimum, Float[5], Float[5]),
NumPyAPI.sig(jnp.mod, Float[5], Float[5]),
NumPyAPI.sig(jnp.modf, Float[5]),
NumPyAPI.sig(jnp.moveaxis, Float[5, 3], source=0, destination=1),
NumPyAPI.sig(jnp.multiply, Float[5], Float[5]),
NumPyAPI.sig(jnp.nan_to_num, Float[5]),
NumPyAPI.sig(jnp.nanargmax, Float[5]),
NumPyAPI.sig(jnp.nanargmin, Float[5]),
NumPyAPI.sig(jnp.nancumprod, Float[5]),
NumPyAPI.sig(jnp.nancumsum, Float[5]),
NumPyAPI.sig(jnp.nanmax, Float[5]),
NumPyAPI.sig(jnp.nanmean, Float[5]),
NumPyAPI.sig(jnp.nanmedian, Float[5]),
NumPyAPI.sig(jnp.nanmin, Float[5]),
NumPyAPI.sig(jnp.nanpercentile, Float[5], q=75),
NumPyAPI.sig(jnp.nanprod, Float[5]),
NumPyAPI.sig(jnp.nanquantile, Float[5], q=0.75),
NumPyAPI.sig(jnp.nanstd, Float[5]),
NumPyAPI.sig(jnp.nansum, Float[5]),
NumPyAPI.sig(jnp.nanvar, Float[5]),
NumPyAPI.sig(jnp.ndim, Float[5]),
NumPyAPI.sig(jnp.negative, Float[5]),
NumPyAPI.sig(jnp.nextafter, Float[5], Float[5]),
NumPyAPI.sig(jnp.nonzero, Float[5]),
NumPyAPI.sig(jnp.not_equal, Float[5], Float[5]),
NumPyAPI.sig(jnp.ones_like, Float[5]),
NumPyAPI.sig(jnp.outer, Float[5], Float[5]),
NumPyAPI.sig(jnp.packbits, Int[5]),
NumPyAPI.sig(jnp.pad, Float[5], pad_width=2),
NumPyAPI.sig(jnp.partition, Float[5], kth=3),
NumPyAPI.sig(jnp.percentile, Float[5], q=75),
NumPyAPI.sig(jnp.permute_dims, Float[3, 5], axes=(1, 0)),
NumPyAPI.sig(jnp.piecewise, Float[5], [Bool[5], Bool[5]], funclist=[jnp.sin, jnp.cos]),
NumPyAPI.sig(jnp.place, Float[5], Bool[5], Float[3], inplace=False),
NumPyAPI.sig(jnp.poly, Float[5]),
NumPyAPI.sig(jnp.polyadd, Float[5], Float[5]),
NumPyAPI.sig(jnp.polyder, Float[5]),
NumPyAPI.sig(jnp.polydiv, Float[5], Float[5]),
NumPyAPI.sig(jnp.polyfit, Float[5], Float[5], deg=2),
NumPyAPI.sig(jnp.polyint, Float[5]),
NumPyAPI.sig(jnp.polymul, Float[5], Float[5]),
NumPyAPI.sig(jnp.polysub, Float[5], Float[5]),
NumPyAPI.sig(jnp.polyval, Float[5], Float[10]),
NumPyAPI.sig(jnp.positive, Float[5]),
NumPyAPI.sig(jnp.pow, Float[5], Float[5]),
NumPyAPI.sig(jnp.power, Float[5], Float[5]),
NumPyAPI.sig(jnp.prod, Float[5]),
NumPyAPI.sig(jnp.ptp, Float[5]),
NumPyAPI.sig(jnp.put, Float[5], Int[()], Float[()], inplace=False),
NumPyAPI.sig(jnp.put_along_axis, Float[5], Int[1], Float[1], axis=0, inplace=False),
NumPyAPI.sig(jnp.quantile, Float[5], q=0.75),
NumPyAPI.sig(jnp.rad2deg, Float[5]),
NumPyAPI.sig(jnp.radians, Float[5]),
NumPyAPI.sig(jnp.ravel, Float[5]),
NumPyAPI.sig(jnp.ravel_multi_index, [Uint8[5], Uint8[5]], dims=(8, 9)),
NumPyAPI.sig(jnp.real, Complex[5]),
NumPyAPI.sig(jnp.reciprocal, Float[5]),
NumPyAPI.sig(jnp.remainder, Float[5], Float[5]),
NumPyAPI.sig(jnp.repeat, Float[5], repeats=np.array([2, 3, 1, 5, 4])),
NumPyAPI.sig(jnp.reshape, Float[6], shape=(2, 3)),
NumPyAPI.sig(jnp.resize, Float[6], new_shape=(2, 3)),
NumPyAPI.sig(jnp.right_shift, Int[5], Int[5]),
NumPyAPI.sig(jnp.rint, Float[5]),
NumPyAPI.sig(jnp.roll, Float[5], Int[1]),
NumPyAPI.sig(jnp.rollaxis, Float[5, 4], axis=1),
NumPyAPI.sig(jnp.roots, Float[5]).with_skip_on_devices(['tpu']),
NumPyAPI.sig(jnp.rot90, Float[5, 3]),
NumPyAPI.sig(jnp.round, Float[5]),
NumPyAPI.sig(jnp.searchsorted, Float[5], Float[5]),
NumPyAPI.sig(jnp.select, [Bool[5], Bool[5]], [Float[5], Float[5]], Float[()]),
NumPyAPI.sig(jnp.setdiff1d, Int[5], Int[5]),
NumPyAPI.sig(jnp.setxor1d, Int[5], Int[5]),
NumPyAPI.sig(jnp.shape, Float[5, 3]),
NumPyAPI.sig(jnp.sign, Float[5]),
NumPyAPI.sig(jnp.signbit, Float[5]),
NumPyAPI.sig(jnp.sin, Float[5]),
NumPyAPI.sig(jnp.sinc, Float[5]),
NumPyAPI.sig(jnp.sinh, Float[5]),
NumPyAPI.sig(jnp.size, Float[5]),
NumPyAPI.sig(jnp.sort, Float[5]),
NumPyAPI.sig(jnp.sort_complex, Complex[5]),
NumPyAPI.sig(jnp.spacing, Float[5]),
NumPyAPI.sig(jnp.split, Float[6], indices_or_sections=2),
NumPyAPI.sig(jnp.sqrt, Float[5]),
NumPyAPI.sig(jnp.square, Float[5]),
NumPyAPI.sig(jnp.squeeze, Float[5]),
NumPyAPI.sig(jnp.stack, [Float[2, 3], Float[2, 3]], axis=1),
NumPyAPI.sig(jnp.std, Float[5]),
NumPyAPI.sig(jnp.subtract, Float[5], Float[5]),
NumPyAPI.sig(jnp.sum, Float[5]),
NumPyAPI.sig(jnp.swapaxes, Float[3, 5], axis1=1, axis2=0),
NumPyAPI.sig(jnp.take, Float[5], Int[2]),
NumPyAPI.sig(jnp.take_along_axis, Float[5], Int[2], axis=0),
NumPyAPI.sig(jnp.tan, Float[5]),
NumPyAPI.sig(jnp.tanh, Float[5]),
NumPyAPI.sig(jnp.tensordot, Float[2, 3, 4], Float[3, 4, 5]),
NumPyAPI.sig(jnp.tile, Float[5], reps=(2,)),
NumPyAPI.sig(jnp.trace, Float[5, 5]),
NumPyAPI.sig(jnp.transpose, Float[5, 6]),
NumPyAPI.sig(jnp.trapezoid, Float[5]),
NumPyAPI.sig(jnp.tril, Float[5, 6]),
NumPyAPI.sig(jnp.tril_indices_from, Float[5, 6]),
NumPyAPI.sig(jnp.trim_zeros, Float[5]),
NumPyAPI.sig(jnp.triu, Float[5, 6]),
NumPyAPI.sig(jnp.triu_indices_from, Float[5, 6]),
NumPyAPI.sig(jnp.true_divide, Float[5], Float[5]),
NumPyAPI.sig(jnp.trunc, Float[5]),
NumPyAPI.sig(jnp.union1d, Int[5], Int[5]),
NumPyAPI.sig(jnp.unique, Int[10]),
NumPyAPI.sig(jnp.unique_all, Int[10]),
NumPyAPI.sig(jnp.unique_counts, Int[10]),
NumPyAPI.sig(jnp.unique_inverse, Int[10]),
NumPyAPI.sig(jnp.unique_values, Int[10]),
NumPyAPI.sig(jnp.unpackbits, Uint8[8]),
NumPyAPI.sig(jnp.unravel_index, Int[5], shape=(2, 3)),
NumPyAPI.sig(jnp.unstack, Float[5]),
NumPyAPI.sig(jnp.unwrap, Float[5]),
NumPyAPI.sig(jnp.vander, Float[5]),
NumPyAPI.sig(jnp.var, Float[5]),
NumPyAPI.sig(jnp.vdot, Float[5], Float[5]),
NumPyAPI.sig(jnp.vecdot, Float[5], Float[5]),
NumPyAPI.sig(jnp.vecmat, Float[5], Float[5, 3]),
NumPyAPI.sig(jnp.vsplit, Float[6], indices_or_sections=2),
NumPyAPI.sig(jnp.vstack, [Float[5], Float[2, 5]]),
NumPyAPI.sig(jnp.where, Bool[5], Float[5], Float[5]),
NumPyAPI.sig(jnp.zeros_like, Float[5]),
]
| ShapeDtype |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 6645,
"end": 7008
} | class ____(BaseModel):
"""
A response from the chat completion API.
Must be compatible with OpenAI's Chat Completion API.
https://platform.openai.com/docs/api-reference/chat
"""
id: str | None = None
object: str = "chat.completion"
created: int
model: str
choices: list[ChatChoice]
usage: ChatUsage
| ChatCompletionResponse |
python | apache__airflow | airflow-core/src/airflow/models/backfill.py | {
"start": 2526,
"end": 2706
} | class ____(AirflowException):
"""
Raised when a backfill cannot be completed because the reprocess behavior is not valid.
:meta private:
"""
| InvalidReprocessBehavior |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_vectors.py | {
"start": 2554,
"end": 9348
} | class ____(ColumnMapExpectation):
"""Expect column values to be vectors."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"mostly_vectors_and_numbers_strings_scalars": [
[1.1, 4, 5],
"[2, 3.4,6]",
[3, 9, 7],
[2, 2, 2],
[6, 7, 9],
6,
[9, 4],
"five",
[0],
None,
],
"all_valid_vectors": [
[2.1, 3, 4],
[9, 5, 4],
"[0, 0, 2]",
[9, 1, 4],
[8, 7, 8],
[2, 6, 0],
[1, 2, 9],
[8, 7, 4],
[2, 3, 6],
[6, 7, 2],
],
},
"tests": [
{
"title": "vectors_and_nonvectors",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "mostly_vectors_and_numbers_strings_scalars",
"mostly": 0.6,
},
"out": {
"success": True,
"unexpected_index_list": [5, 7, 8],
"unexpected_list": [6, "five", [0]],
},
},
{
"title": "valid_vectors",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid_vectors", "mostly": 1},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental", "datatypes", "column map expectation"],
"contributors": ["@manyshapes"],
"requirements": [],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.is_vector"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
# for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see
# https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.html
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToBeVectors().print_diagnostic_checklist()
| ExpectColumnValuesToBeVectors |
python | pandas-dev__pandas | pandas/core/arrays/numeric.py | {
"start": 8304,
"end": 10228
} | class ____(BaseMaskedArray):
"""
Base class for IntegerArray and FloatingArray.
"""
_dtype_cls: type[NumericDtype]
def __init__(
self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
) -> None:
checker = self._dtype_cls._checker
if not (isinstance(values, np.ndarray) and checker(values.dtype)):
descr = (
"floating"
if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap]
else "integer"
)
raise TypeError(
f"values should be {descr} numpy array. Use "
"the 'pd.array' function instead"
)
if values.dtype == np.float16:
# If we don't raise here, then accessing self.dtype would raise
raise TypeError("FloatingArray does not support np.float16 dtype.")
# NB: if is_nan_na() is True
# then caller is responsible for ensuring
# assert mask[np.isnan(values)].all()
super().__init__(values, mask, copy=copy)
@cache_readonly
def dtype(self) -> NumericDtype:
mapping = self._dtype_cls._get_dtype_mapping()
return mapping[self._data.dtype]
@classmethod
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
dtype_cls = cls._dtype_cls
values, mask = _coerce_to_data_and_mask(value, dtype, copy, dtype_cls)
return values, mask
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: ExtensionDtype, copy: bool = False
) -> Self:
from pandas.core.tools.numeric import to_numeric
scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable")
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
| NumericArray |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0052_migrate_null_external_builds_field.py | {
"start": 371,
"end": 593
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0051_project_urlconf_feature"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | huggingface__transformers | src/transformers/models/glm4v/modular_glm4v.py | {
"start": 38055,
"end": 42618
} | class ____(Qwen2_5_VLTextModel):
def __init__(self, config: Glm4vTextConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[Glm4vTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Glm4vTextRotaryEmbedding(config=config)
del self._attn_implementation
del self.has_sliding_layers
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
# torch.jit.trace() doesn't support cache objects in the output
if use_cache and past_key_values is None and not torch.jit.is_tracing():
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
# the hard coded `3` is for temporal, height and width.
if position_ids is None:
position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
elif position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
# NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
# where each dim indicates visual spatial positions for temporal/height/width grids.
# There are two scenarios when FA2-like packed masking might be activated.
# 1. User specifically passed packed `position_ids` and no attention mask.
# In this case we expect the useer to create correct position ids for all 3 grids
# and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
# 2. User runs forward with no attention mask and no position ids. In this case, position ids
# are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
# prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
# text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
if position_ids.ndim == 3 and position_ids.shape[0] == 4:
text_position_ids = position_ids[0]
position_ids = position_ids[1:]
else:
# If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
text_position_ids = None
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": text_position_ids,
}
# Create the masks
causal_mask = create_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
| Glm4vTextModel |
python | pytorch__pytorch | torch/distributed/pipelining/_schedule_visualizer.py | {
"start": 583,
"end": 3823
} | class ____(NamedTuple):
stage_index: int
computation_type: _ComputationType
microbatch_index: int
def get_schedule_ops(
schedule: str | type[_PipelineSchedule],
pp_degree: int,
num_microbatches: int,
num_stages_per_rank: int | None = None,
add_spacing: bool = False,
with_comms: bool = False,
) -> list[list[_Action | None]]:
"""
Get all actions for a given schedule, pp_degree, and num_microbatches. The actions are returned in a list of lists
where each inner list represents a rank and each element in the inner list represents an action.
The schedule can be specified as a string which is passed into get_schedule_class() or a _PipelineSchedule instance.
"""
if add_spacing and with_comms:
raise ValueError("Cannot add spacing and view comms at the same time")
if isinstance(schedule, str):
schedule_class = get_schedule_class(schedule)
elif issubclass(schedule, _PipelineSchedule):
schedule_class = schedule
else:
raise ValueError(f"Invalid schedule: {schedule}")
# Create a mock of the PipelineStage class
mock_pipeline_stage = mock.create_autospec(PipelineStage, instance=True)
# Set the return values for group_rank and group_size methods
mock_pipeline_stage.group_rank = 0
mock_pipeline_stage.group_size = pp_degree
mock_pipeline_stage.submod = None
# Check num_stages_per_rank is valid
if issubclass(schedule_class, PipelineScheduleSingle):
if num_stages_per_rank is None:
num_stages_per_rank = 1
assert num_stages_per_rank == 1
stages = mock_pipeline_stage
stages.num_stages = num_stages_per_rank * pp_degree
elif issubclass(schedule_class, PipelineScheduleMulti):
if num_stages_per_rank is None:
num_stages_per_rank = 2
assert num_stages_per_rank >= 2
stages = [mock_pipeline_stage for _ in range(num_stages_per_rank)]
for stage in stages:
stage.num_stages = num_stages_per_rank * pp_degree
else:
raise ValueError(f"Invalid schedule: {schedule_class}")
# Instantiate the schedule class
# pyrefly: ignore [bad-instantiation, bad-argument-type]
schedule_instance = schedule_class(stages, num_microbatches)
assert schedule_instance.pipeline_order is not None
# Convert to List[List[_Action]]
all_actions: list[list[_Action | None]] = []
if with_comms:
runtime = _PipelineScheduleRuntime(stages, num_microbatches)
runtime._prepare_schedule_with_comms(schedule_instance.pipeline_order)
for rank in range(pp_degree):
all_actions.append(list(runtime.pipeline_order_with_comms[rank]))
else:
for rank in range(pp_degree):
all_actions.append(schedule_instance.pipeline_order[rank])
# Add spacing
if add_spacing:
# remove all Nones, then respace
# TODO: later we can change this at the schedule creation level to not use Nones
all_actions = [
[action for action in rank if action is not None] for rank in all_actions
]
all_actions = add_schedule_op_spacing(all_actions)
# Return the pipeline order
return all_actions
| OpKey |
python | PyCQA__pylint | tests/functional/g/generic_class_syntax.py | {
"start": 262,
"end": 508
} | class ____(Entity[int]):
def __init__(self, data: int) -> None:
super().__init__(data)
def async_update(self) -> None:
self.data = 2
if self.last_update is None:
pass
self.last_update = 2
| Sensor |
python | PrefectHQ__prefect | tests/_internal/test_installation.py | {
"start": 5902,
"end": 11805
} | class ____:
@patch("prefect._internal.installation.importlib.import_module")
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_uv_available(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
mock_uv = Mock()
mock_uv.find_uv_bin.return_value = "/path/to/uv"
mock_import_module.return_value = mock_uv
await ainstall_packages(packages)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
["/path/to/uv", "pip", "install", "pytest", "requests"],
stream_output=False,
)
@patch(
"prefect._internal.installation.importlib.import_module",
side_effect=ImportError("No module named 'uv'"),
)
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_uv_unavailable_import_error(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
await ainstall_packages(packages)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
[sys.executable, "-m", "pip", "install", "pytest", "requests"],
stream_output=False,
)
@patch(
"prefect._internal.installation.importlib.import_module",
side_effect=ModuleNotFoundError("No module named 'uv'"),
)
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_uv_unavailable_module_not_found_error(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
await ainstall_packages(packages)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
[sys.executable, "-m", "pip", "install", "pytest", "requests"],
stream_output=False,
)
@patch("prefect._internal.installation.importlib.import_module")
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_uv_file_not_found_error(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
mock_uv = Mock()
mock_uv.find_uv_bin.side_effect = FileNotFoundError
mock_import_module.return_value = mock_uv
await ainstall_packages(packages)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
[sys.executable, "-m", "pip", "install", "pytest", "requests"],
stream_output=False,
)
@patch("prefect._internal.installation.importlib.import_module")
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_upgrade_flag(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
mock_uv = Mock()
mock_uv.find_uv_bin.return_value = "/path/to/uv"
mock_import_module.return_value = mock_uv
await ainstall_packages(packages, upgrade=True)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
stream_output=False,
)
@patch("prefect._internal.installation.importlib.import_module")
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_stream_output(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
mock_uv = Mock()
mock_uv.find_uv_bin.return_value = "/path/to/uv"
mock_import_module.return_value = mock_uv
await ainstall_packages(packages, stream_output=True)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
["/path/to/uv", "pip", "install", "pytest", "requests"],
stream_output=True,
)
@patch("prefect._internal.installation.importlib.import_module")
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_with_upgrade_and_stream_output(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
mock_uv = Mock()
mock_uv.find_uv_bin.return_value = "/path/to/uv"
mock_import_module.return_value = mock_uv
await ainstall_packages(packages, stream_output=True, upgrade=True)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
["/path/to/uv", "pip", "install", "pytest", "requests", "--upgrade"],
stream_output=True,
)
@patch(
"prefect._internal.installation.importlib.import_module",
side_effect=ImportError("No module named 'uv'"),
)
@patch("prefect.utilities.processutils.run_process", new_callable=AsyncMock)
async def test_ainstall_packages_fallback_with_upgrade_and_stream_output(
self, mock_run_process: AsyncMock, mock_import_module: MagicMock
):
packages = ["pytest", "requests"]
await ainstall_packages(packages, stream_output=True, upgrade=True)
mock_import_module.assert_called_once_with("uv")
mock_run_process.assert_called_once_with(
[sys.executable, "-m", "pip", "install", "pytest", "requests", "--upgrade"],
stream_output=True,
)
| TestAinstallPackages |
python | davidhalter__parso | parso/pgen2/grammar_parser.py | {
"start": 1216,
"end": 5515
} | class ____:
"""
The parser for Python grammar files.
"""
def __init__(self, bnf_grammar: str):
self._bnf_grammar = bnf_grammar
self.generator = tokenize(
bnf_grammar,
version_info=parse_version_string('3.9')
)
self._gettoken() # Initialize lookahead
def parse(self) -> Iterator[Tuple[NFAState, NFAState]]:
# grammar: (NEWLINE | rule)* ENDMARKER
while self.type != PythonTokenTypes.ENDMARKER:
while self.type == PythonTokenTypes.NEWLINE:
self._gettoken()
# rule: NAME ':' rhs NEWLINE
self._current_rule_name = self._expect(PythonTokenTypes.NAME)
self._expect(PythonTokenTypes.OP, ':')
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.NEWLINE)
yield a, z
def _parse_rhs(self):
# rhs: items ('|' items)*
a, z = self._parse_items()
if self.value != "|":
return a, z
else:
aa = NFAState(self._current_rule_name)
zz = NFAState(self._current_rule_name)
while True:
# Add the possibility to go into the state of a and come back
# to finish.
aa.add_arc(a)
z.add_arc(zz)
if self.value != "|":
break
self._gettoken()
a, z = self._parse_items()
return aa, zz
def _parse_items(self):
# items: item+
a, b = self._parse_item()
while self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING) \
or self.value in ('(', '['):
c, d = self._parse_item()
# Need to end on the next item.
b.add_arc(c)
b = d
return a, b
def _parse_item(self):
# item: '[' rhs ']' | atom ['+' | '*']
if self.value == "[":
self._gettoken()
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.OP, ']')
# Make it also possible that there is no token and change the
# state.
a.add_arc(z)
return a, z
else:
a, z = self._parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self._gettoken()
# Make it clear that we can go back to the old state and repeat.
z.add_arc(a)
if value == "+":
return a, z
else:
# The end state is the same as the beginning, nothing must
# change.
return a, a
def _parse_atom(self):
# atom: '(' rhs ')' | NAME | STRING
if self.value == "(":
self._gettoken()
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.OP, ')')
return a, z
elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING):
a = NFAState(self._current_rule_name)
z = NFAState(self._current_rule_name)
# Make it clear that the state transition requires that value.
a.add_arc(z, self.value)
self._gettoken()
return a, z
else:
self._raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def _expect(self, type_, value=None):
if self.type != type_:
self._raise_error("expected %s, got %s [%s]",
type_, self.type, self.value)
if value is not None and self.value != value:
self._raise_error("expected %s, got %s", value, self.value)
value = self.value
self._gettoken()
return value
def _gettoken(self):
tup = next(self.generator)
self.type, self.value, self.begin, prefix = tup
def _raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + list(map(str, args)))
line = self._bnf_grammar.splitlines()[self.begin[0] - 1]
raise SyntaxError(msg, ('<grammar>', self.begin[0],
self.begin[1], line))
| GrammarParser |
python | astropy__astropy | astropy/modeling/tests/test_models.py | {
"start": 40300,
"end": 41294
} | class ____(_ModelMeta):
@classmethod
def __prepare__(cls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
# Nothing further to test, just making the class is good enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
| _ExtendedModelMeta |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 787,
"end": 907
} | class ____(str, Enum):
"""Available renderer prefixes"""
LEGACY = "renderer"
ATOMIC = "atomic"
| RendererPrefix |
python | kamyu104__LeetCode-Solutions | Python/minimize-connected-groups-by-inserting-interval.py | {
"start": 82,
"end": 739
} | class ____(object):
def minConnectedGroups(self, intervals, k):
"""
:type intervals: List[List[int]]
:type k: int
:rtype: int
"""
intervals.sort()
result = 0
prefix = [0]*(len(intervals)+1)
mx = float("-inf")
left = 0
for right in xrange(len(intervals)):
prefix[right+1] = prefix[right]+int(mx < intervals[right][0])
mx = max(mx, intervals[right][1])
while intervals[right][0]-intervals[left][1] > k:
left += 1
result = max(result, prefix[right+1]-prefix[left+1])
return prefix[-1]-result
| Solution |
python | huggingface__transformers | src/transformers/models/longcat_flash/modeling_longcat_flash.py | {
"start": 10540,
"end": 15179
} | class ____(nn.Module):
"""
A mixed expert module containing zero compute (identity) experts.
"""
def __init__(self, config):
super().__init__()
self.intermediate_size = config.expert_ffn_hidden_size
self.config = config
self.experts = LongcatFlashExperts(config)
self.router = LongcatFlashTopkRouter(config)
def forward(self, hidden_states):
orig_shape = hidden_states.shape
topk_weights, topk_indices = self.router(hidden_states)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
return hidden_states
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
r"""
TODO let's just use the original freqcis computation to not have the view
transpose + reshape! This is not optimized!
Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
b, h, s, d = q.shape
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
b, h, s, d = k.shape
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def yarn_get_mscale(scale=1, mscale=1):
if scale <= 1:
return 1.0
return 0.1 * mscale * math.log(scale) + 1.0
| LongcatFlashMoE |
python | apache__airflow | providers/jenkins/src/airflow/providers/jenkins/operators/jenkins_job_trigger.py | {
"start": 2899,
"end": 10397
} | class ____(BaseOperator):
"""
Trigger a Jenkins Job and monitor its execution.
This operator depend on the python-jenkins library version >= 0.4.15 to
communicate with the Jenkins server. You'll also need to configure a Jenkins
connection in the connections screen.
:param jenkins_connection_id: The jenkins connection to use for this job
:param job_name: The name of the job to trigger
:param parameters: The parameters block provided to jenkins for use in
the API call when triggering a build. (templated)
:param sleep_time: How long will the operator sleep between each status
request for the job (min 1, default 10)
:param max_try_before_job_appears: The maximum number of requests to make
while waiting for the job to appears on jenkins server (default 10)
:param allowed_jenkins_states: Iterable of allowed result jenkins states, default is ``['SUCCESS']``
"""
template_fields: Sequence[str] = ("parameters",)
template_ext: Sequence[str] = (".json",)
ui_color = "#f9ec86"
def __init__(
self,
*,
jenkins_connection_id: str,
job_name: str,
parameters: ParamType = None,
sleep_time: int = 10,
max_try_before_job_appears: int = 10,
allowed_jenkins_states: Iterable[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.job_name = job_name
self.parameters = parameters
self.sleep_time = max(sleep_time, 1)
self.jenkins_connection_id = jenkins_connection_id
self.max_try_before_job_appears = max_try_before_job_appears
self.allowed_jenkins_states = list(allowed_jenkins_states) if allowed_jenkins_states else ["SUCCESS"]
def build_job(self, jenkins_server: Jenkins, params: ParamType = None) -> JenkinsRequest:
"""
Trigger a build job.
This returns a dict with 2 keys ``body`` and ``headers``. ``headers``
contains also a dict-like object which can be queried to get the
location to poll in the queue.
:param jenkins_server: The jenkins server where the job should be triggered
:param params: The parameters block to provide to jenkins API call.
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
# Since params can be either JSON string, dictionary, or list,
# check type and pass to build_job_url
if params and isinstance(params, str):
params = ast.literal_eval(params)
request = Request(method="POST", url=jenkins_server.build_job_url(self.job_name, params, None))
return jenkins_request_with_headers(jenkins_server, request)
def poll_job_in_queue(self, location: str, jenkins_server: Jenkins) -> int:
"""
Poll the jenkins queue until the job is executed.
When we trigger a job through an API call, the job is first put in the
queue without having a build number assigned. We have to wait until the
job exits the queue to know its build number.
To do so, we use get_queue_item to get information about a queued item
https://python-jenkins.readthedocs.io/en/latest/api.html#jenkins.Jenkins.get_queue_item
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job
"""
self.log.info("Polling jenkins queue at the url %s", location)
if not (match := re.search(r"/queue/item/(\d+)/?", location)):
raise ValueError(f"Invalid queue location format: {location}")
queue_id = int(match.group(1))
self.log.info("Polling Jenkins queue item with ID %s", queue_id)
for attempt in range(self.max_try_before_job_appears):
# Initialize it to prevent UnboundLocalError in case of exception raised
json_response = None
if attempt:
time.sleep(self.sleep_time)
try:
json_response = jenkins_server.get_queue_item(queue_id)
except (HTTPError, JenkinsException):
self.log.warning("polling failed, retrying", exc_info=True)
if json_response:
# The returned dict will have an "executable" key if the queued item is running on an executor,
# or has completed running.
if (
json_response.get("executable", None) is not None
and "number" in json_response["executable"]
):
build_number = json_response["executable"]["number"]
self.log.info("Job executed on Jenkins side with the build number %s", build_number)
return build_number
self.log.debug("Job not yet started. Queue item: %s", json_response)
raise AirflowException(
f"The job hasn't been executed after polling the queue {self.max_try_before_job_appears} times"
)
@cached_property
def hook(self) -> JenkinsHook:
"""Instantiate the Jenkins hook."""
return JenkinsHook(self.jenkins_connection_id)
def execute(self, context: Mapping[Any, Any]) -> str | None:
self.log.info(
"Triggering the job %s on the jenkins : %s with the parameters : %s",
self.job_name,
self.jenkins_connection_id,
self.parameters,
)
jenkins_server = self.hook.get_jenkins_server()
jenkins_response = self.build_job(jenkins_server, self.parameters)
build_number = self.poll_job_in_queue(jenkins_response["headers"]["Location"], jenkins_server)
time.sleep(self.sleep_time)
keep_polling_job = True
build_info = None
try:
while keep_polling_job:
build_info = jenkins_server.get_build_info(name=self.job_name, number=build_number)
if build_info["result"] is not None:
keep_polling_job = False
# Check if job ended with not allowed state.
if build_info["result"] not in self.allowed_jenkins_states:
raise AirflowException(
f"Jenkins job failed, final state : {build_info['result']}. "
f"Find more information on job url : {build_info['url']}"
)
else:
self.log.info("Waiting for job to complete : %s , build %s", self.job_name, build_number)
time.sleep(self.sleep_time)
except jenkins.NotFoundException as err:
raise AirflowException(f"Jenkins job status check failed. Final error was: {err.resp.status}")
except jenkins.JenkinsException as err:
raise AirflowException(
f"Jenkins call failed with error : {err}, if you have parameters "
"double check them, jenkins sends back "
"this exception for unknown parameters"
"You can also check logs for more details on this exception "
"(jenkins_url/log/rss)"
)
if build_info:
# If we can we return the url of the job
# for later use (like retrieving an artifact)
return build_info["url"]
return None
| JenkinsJobTriggerOperator |
python | celery__celery | celery/local.py | {
"start": 8056,
"end": 12008
} | class ____(Proxy):
"""Proxy that evaluates object once.
:class:`Proxy` will evaluate the object each time, while the
promise will only evaluate it once.
"""
__slots__ = ('__pending__', '__weakref__')
def _get_current_object(self):
try:
return object.__getattribute__(self, '__thing')
except AttributeError:
return self.__evaluate__()
def __then__(self, fun, *args, **kwargs):
if self.__evaluated__():
return fun(*args, **kwargs)
from collections import deque
try:
pending = object.__getattribute__(self, '__pending__')
except AttributeError:
pending = None
if pending is None:
pending = deque()
object.__setattr__(self, '__pending__', pending)
pending.append((fun, args, kwargs))
def __evaluated__(self):
try:
object.__getattribute__(self, '__thing')
except AttributeError:
return False
return True
def __maybe_evaluate__(self):
return self._get_current_object()
def __evaluate__(self,
_clean=('_Proxy__local',
'_Proxy__args',
'_Proxy__kwargs')):
try:
thing = Proxy._get_current_object(self)
except Exception:
raise
else:
object.__setattr__(self, '__thing', thing)
for attr in _clean:
try:
object.__delattr__(self, attr)
except AttributeError: # pragma: no cover
# May mask errors so ignore
pass
try:
pending = object.__getattribute__(self, '__pending__')
except AttributeError:
pass
else:
try:
while pending:
fun, args, kwargs = pending.popleft()
fun(*args, **kwargs)
finally:
try:
object.__delattr__(self, '__pending__')
except AttributeError: # pragma: no cover
pass
return thing
def maybe_evaluate(obj):
"""Attempt to evaluate promise, even if obj is not a promise."""
try:
return obj.__maybe_evaluate__()
except AttributeError:
return obj
# ############# Module Generation ##########################
# Utilities to dynamically
# recreate modules, either for lazy loading or
# to create old modules at runtime instead of
# having them litter the source tree.
# import fails in python 2.5. fallback to reduce in stdlib
MODULE_DEPRECATED = """
The module %s is deprecated and will be removed in a future version.
"""
DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
# im_func is no longer available in Py3.
# instead the unbound method itself can be used.
def fun_of_method(method):
return method
def getappattr(path):
"""Get attribute from current_app recursively.
Example: ``getappattr('amqp.get_task_consumer')``.
"""
from celery import current_app
return current_app._rgetattr(path)
COMPAT_MODULES = {
'celery': {
'execute': {
'send_task': 'send_task',
},
'log': {
'get_default_logger': 'log.get_default_logger',
'setup_logging_subsystem': 'log.setup_logging_subsystem',
'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
},
'messaging': {
'TaskConsumer': 'amqp.TaskConsumer',
'establish_connection': 'connection',
'get_consumer_set': 'amqp.TaskConsumer',
},
'registry': {
'tasks': 'tasks',
},
},
}
#: We exclude these from dir(celery)
DEPRECATED_ATTRS = set(COMPAT_MODULES['celery'].keys()) | {'subtask'}
| PromiseProxy |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 30372,
"end": 31465
} | class ____(Sky2PixProjection, Conic):
r"""
Alber's conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
| Sky2Pix_ConicEqualArea |
python | celery__celery | t/unit/backends/test_arangodb.py | {
"start": 405,
"end": 8246
} | class ____:
def setup_method(self):
self.backend = ArangoDbBackend(app=self.app)
def test_init_no_arangodb(self):
prev, module.py_arango_connection = module.py_arango_connection, None
try:
with pytest.raises(ImproperlyConfigured):
ArangoDbBackend(app=self.app)
finally:
module.py_arango_connection = prev
def test_init_no_settings(self):
self.app.conf.arangodb_backend_settings = []
with pytest.raises(ImproperlyConfigured):
ArangoDbBackend(app=self.app)
def test_init_settings_is_None(self):
self.app.conf.arangodb_backend_settings = None
ArangoDbBackend(app=self.app)
def test_init_url(self):
url = None
expected_database = "celery"
expected_collection = "celery"
backend = ArangoDbBackend(app=self.app, url=url)
assert backend.database == expected_database
assert backend.collection == expected_collection
url = "arangodb://localhost:27017/celery-database/celery-collection"
expected_database = "celery-database"
expected_collection = "celery-collection"
backend = ArangoDbBackend(app=self.app, url=url)
assert backend.database == expected_database
assert backend.collection == expected_collection
def test_get_connection_connection_exists(self):
with patch('pyArango.connection.Connection') as mock_Connection:
self.backend._connection = sentinel.connection
connection = self.backend.connection
assert connection == sentinel.connection
mock_Connection.assert_not_called()
expected_connection = mock_Connection()
mock_Connection.reset_mock() # So the assert_called_once below is accurate.
self.backend._connection = None
connection = self.backend.connection
assert connection == expected_connection
mock_Connection.assert_called_once()
def test_get(self):
self.backend._connection = MagicMock(spec=["__getitem__"])
assert self.backend.get(None) is None
self.backend.db.AQLQuery.assert_not_called()
assert self.backend.get(sentinel.task_id) is None
self.backend.db.AQLQuery.assert_called_once_with(
"RETURN DOCUMENT(@@collection, @key).task",
rawResults=True,
bindVars={
"@collection": self.backend.collection,
"key": sentinel.task_id,
},
)
self.backend.get = Mock(return_value=sentinel.retval)
assert self.backend.get(sentinel.task_id) == sentinel.retval
self.backend.get.assert_called_once_with(sentinel.task_id)
def test_set(self):
self.backend._connection = MagicMock(spec=["__getitem__"])
assert self.backend.set(sentinel.key, sentinel.value) is None
self.backend.db.AQLQuery.assert_called_once_with(
"""
UPSERT {_key: @key}
INSERT {_key: @key, task: @value}
UPDATE {task: @value} IN @@collection
""",
bindVars={
"@collection": self.backend.collection,
"key": sentinel.key,
"value": sentinel.value,
},
)
def test_mget(self):
self.backend._connection = MagicMock(spec=["__getitem__"])
result = list(self.backend.mget(None))
expected_result = []
assert result == expected_result
self.backend.db.AQLQuery.assert_not_called()
Query = MagicMock(spec=pyArango.query.Query)
query = Query()
query.nextBatch = MagicMock(side_effect=StopIteration())
self.backend.db.AQLQuery = Mock(return_value=query)
keys = [sentinel.task_id_0, sentinel.task_id_1]
result = list(self.backend.mget(keys))
expected_result = []
assert result == expected_result
self.backend.db.AQLQuery.assert_called_once_with(
"FOR k IN @keys RETURN DOCUMENT(@@collection, k).task",
rawResults=True,
bindVars={
"@collection": self.backend.collection,
"keys": keys,
},
)
values = [sentinel.value_0, sentinel.value_1]
query.__iter__.return_value = iter([sentinel.value_0, sentinel.value_1])
result = list(self.backend.mget(keys))
expected_result = values
assert result == expected_result
def test_delete(self):
self.backend._connection = MagicMock(spec=["__getitem__"])
assert self.backend.delete(None) is None
self.backend.db.AQLQuery.assert_not_called()
assert self.backend.delete(sentinel.task_id) is None
self.backend.db.AQLQuery.assert_called_once_with(
"REMOVE {_key: @key} IN @@collection",
bindVars={
"@collection": self.backend.collection,
"key": sentinel.task_id,
},
)
def test_config_params(self):
self.app.conf.arangodb_backend_settings = {
'host': 'test.arangodb.com',
'port': '8529',
'username': 'johndoe',
'password': 'mysecret',
'database': 'celery_database',
'collection': 'celery_collection',
'http_protocol': 'https',
'verify': True
}
x = ArangoDbBackend(app=self.app)
assert x.host == 'test.arangodb.com'
assert x.port == 8529
assert x.username == 'johndoe'
assert x.password == 'mysecret'
assert x.database == 'celery_database'
assert x.collection == 'celery_collection'
assert x.http_protocol == 'https'
assert x.arangodb_url == 'https://test.arangodb.com:8529'
assert x.verify is True
def test_backend_by_url(
self, url="arangodb://username:password@host:port/database/collection"
):
from celery.backends.arangodb import ArangoDbBackend
backend, url_ = backends.by_url(url, self.app.loader)
assert backend is ArangoDbBackend
assert url_ == url
def test_backend_params_by_url(self):
url = (
"arangodb://johndoe:mysecret@test.arangodb.com:8529/"
"celery_database/celery_collection"
)
with self.Celery(backend=url) as app:
x = app.backend
assert x.host == 'test.arangodb.com'
assert x.port == 8529
assert x.username == 'johndoe'
assert x.password == 'mysecret'
assert x.database == 'celery_database'
assert x.collection == 'celery_collection'
assert x.http_protocol == 'http'
assert x.arangodb_url == 'http://test.arangodb.com:8529'
assert x.verify is False
def test_backend_cleanup(self):
self.backend._connection = MagicMock(spec=["__getitem__"])
self.backend.expires = None
self.backend.cleanup()
self.backend.db.AQLQuery.assert_not_called()
self.backend.expires = 0
self.backend.cleanup()
self.backend.db.AQLQuery.assert_not_called()
now = datetime.datetime.now(datetime.timezone.utc)
self.backend.app.now = Mock(return_value=now)
self.backend.expires = 86400
expected_checkpoint = (now - self.backend.expires_delta).isoformat()
self.backend.cleanup()
self.backend.db.AQLQuery.assert_called_once_with(
"""
FOR record IN @@collection
FILTER record.task.date_done < @checkpoint
REMOVE record IN @@collection
""",
bindVars={
"@collection": self.backend.collection,
"checkpoint": expected_checkpoint,
},
)
| test_ArangoDbBackend |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py | {
"start": 205,
"end": 339
} | class ____:
def __str__(self):
return False
# TODO: Once Ruff has better type checking
def return_int():
return 3
| Bool |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dags.py | {
"start": 4176,
"end": 4283
} | class ____(StrictBaseModel):
"""Dag Serializer for updatable bodies."""
is_paused: bool
| DAGPatchBody |
python | psf__requests | src/requests/exceptions.py | {
"start": 2090,
"end": 2158
} | class ____(ConnectionError):
"""An SSL error occurred."""
| SSLError |
python | kamyu104__LeetCode-Solutions | Python/profitable-schemes.py | {
"start": 60,
"end": 608
} | class ____(object):
def profitableSchemes(self, G, P, group, profit):
"""
:type G: int
:type P: int
:type group: List[int]
:type profit: List[int]
:rtype: int
"""
dp = [[0 for _ in xrange(G+1)] for _ in xrange(P+1)]
dp[0][0] = 1
for p, g in itertools.izip(profit, group):
for i in reversed(xrange(P+1)):
for j in reversed(xrange(G-g+1)):
dp[min(i+p, P)][j+g] += dp[i][j]
return sum(dp[P]) % (10**9 + 7)
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 37638,
"end": 38252
} | class ____(Node):
"""A node with a physical location."""
def __init__(self, width: float, height: float, depth: float) -> None:
super().__init__()
self.width = width
self.height = height
self.depth = depth
def shrink(self) -> None:
super().shrink()
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def render(self, output: Output, # type: ignore[override]
x1: float, y1: float, x2: float, y2: float) -> None:
pass
| Box |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 1530,
"end": 1633
} | class ____(Sitemap):
changefreq = "never"
priority = 0.5
location = "/location/"
| EmptySitemap |
python | numba__numba | numba/core/config.py | {
"start": 3495,
"end": 23057
} | class ____(object):
def __init__(self):
self.reset()
def reset(self):
self.old_environ = {}
self.update(force=True)
def update(self, force=False):
new_environ = {}
# first check if there's a .numba_config.yaml and use values from that
if os.path.exists(_config_fname) and os.path.isfile(_config_fname):
if not _HAVE_YAML:
msg = ("A Numba config file is found but YAML parsing "
"capabilities appear to be missing. "
"To use this feature please install `pyyaml`. e.g. "
"`conda install pyyaml`.")
warnings.warn(msg)
else:
with open(_config_fname, 'rt') as f:
y_conf = yaml.safe_load(f)
if y_conf is not None:
for k, v in y_conf.items():
new_environ['NUMBA_' + k.upper()] = v
# clobber file based config with any locally defined env vars
for name, value in os.environ.items():
if name.startswith('NUMBA_'):
new_environ[name] = value
# We update the config variables if at least one NUMBA environment
# variable was modified. This lets the user modify values
# directly in the config module without having them when
# reload_config() is called by the compiler.
if force or self.old_environ != new_environ:
self.process_environ(new_environ)
# Store a copy
self.old_environ = dict(new_environ)
self.validate()
def validate(self):
global CUDA_USE_NVIDIA_BINDING
if CUDA_USE_NVIDIA_BINDING: # noqa: F821
try:
import cuda # noqa: F401
except ImportError as ie:
msg = ("CUDA Python bindings requested (the environment "
"variable NUMBA_CUDA_USE_NVIDIA_BINDING is set), "
f"but they are not importable: {ie.msg}.")
warnings.warn(msg)
CUDA_USE_NVIDIA_BINDING = False
if CUDA_PER_THREAD_DEFAULT_STREAM: # noqa: F821
warnings.warn("PTDS support is handled by CUDA Python when "
"using the NVIDIA binding. Please set the "
"environment variable "
"CUDA_PYTHON_CUDA_PER_THREAD_DEFAULT_STREAM to 1 "
"instead.")
def process_environ(self, environ):
def _readenv(name, ctor, default):
value = environ.get(name)
if value is None:
return default() if callable(default) else default
try:
return ctor(value)
except Exception:
warnings.warn(f"Environment variable '{name}' is defined but "
f"its associated value '{value}' could not be "
"parsed.\nThe parse failed with exception:\n"
f"{traceback.format_exc()}",
RuntimeWarning)
return default
def optional_str(x):
return str(x) if x is not None else None
# Type casting rules selection
USE_LEGACY_TYPE_SYSTEM = _readenv(
"NUMBA_USE_LEGACY_TYPE_SYSTEM", int, 1
)
# developer mode produces full tracebacks, disables help instructions
DEVELOPER_MODE = _readenv("NUMBA_DEVELOPER_MODE", int, 0)
# disable performance warnings, will switch of the generation of
# warnings of the class NumbaPerformanceWarning
DISABLE_PERFORMANCE_WARNINGS = _readenv(
"NUMBA_DISABLE_PERFORMANCE_WARNINGS", int, 0)
# Flag to enable full exception reporting
FULL_TRACEBACKS = _readenv(
"NUMBA_FULL_TRACEBACKS", int, DEVELOPER_MODE)
# Show help text when an error occurs
SHOW_HELP = _readenv("NUMBA_SHOW_HELP", int, 0)
# The color scheme to use for error messages, default is no color
# just bold fonts in use.
COLOR_SCHEME = _readenv("NUMBA_COLOR_SCHEME", str, "no_color")
# Whether to globally enable bounds checking. The default None means
# to use the value of the flag to @njit. 0 or 1 overrides the flag
# globally.
BOUNDSCHECK = _readenv("NUMBA_BOUNDSCHECK", int, None)
# Whether to always warn about potential uninitialized variables
# because static controlflow analysis cannot find a definition
# in one or more of the incoming paths.
ALWAYS_WARN_UNINIT_VAR = _readenv(
"NUMBA_ALWAYS_WARN_UNINIT_VAR", int, 0,
)
# Whether to warn about kernel launches where the grid size will
# under utilize the GPU due to low occupancy. On by default.
CUDA_LOW_OCCUPANCY_WARNINGS = _readenv(
"NUMBA_CUDA_LOW_OCCUPANCY_WARNINGS", int, 1)
# Whether to use the official CUDA Python API Bindings
CUDA_USE_NVIDIA_BINDING = _readenv(
"NUMBA_CUDA_USE_NVIDIA_BINDING", int, 0)
# Debug flag to control compiler debug print
DEBUG = _readenv("NUMBA_DEBUG", int, 0)
# DEBUG print IR after pass names
DEBUG_PRINT_AFTER = _readenv("NUMBA_DEBUG_PRINT_AFTER", str, "none")
# DEBUG print IR before pass names
DEBUG_PRINT_BEFORE = _readenv("NUMBA_DEBUG_PRINT_BEFORE", str, "none")
# DEBUG print IR before and after pass names
DEBUG_PRINT_WRAP = _readenv("NUMBA_DEBUG_PRINT_WRAP", str, "none")
# Highlighting in intermediate dumps
HIGHLIGHT_DUMPS = _readenv("NUMBA_HIGHLIGHT_DUMPS", int, 0)
# JIT Debug flag to trigger IR instruction print
DEBUG_JIT = _readenv("NUMBA_DEBUG_JIT", int, 0)
# Enable debugging of front-end operation
# (up to and including IR generation)
DEBUG_FRONTEND = _readenv("NUMBA_DEBUG_FRONTEND", int, 0)
# Enable debug prints in nrtdynmod and use of "safe" API functions
DEBUG_NRT = _readenv("NUMBA_DEBUG_NRT", int, 0)
# Enable NRT statistics counters
NRT_STATS = _readenv("NUMBA_NRT_STATS", int, 0)
# How many recently deserialized functions to retain regardless
# of external references
FUNCTION_CACHE_SIZE = _readenv("NUMBA_FUNCTION_CACHE_SIZE", int, 128)
# Maximum tuple size that parfors will unpack and pass to
# internal gufunc.
PARFOR_MAX_TUPLE_SIZE = _readenv("NUMBA_PARFOR_MAX_TUPLE_SIZE",
int, 100)
# Enable logging of cache operation
DEBUG_CACHE = _readenv("NUMBA_DEBUG_CACHE", int, DEBUG)
# Redirect cache directory
# Contains path to the directory
CACHE_DIR = _readenv("NUMBA_CACHE_DIR", str, "")
# Override default cache locators list including their order
# Comma separated list of locator class names,
# see _locator_classes in caching submodule
CACHE_LOCATOR_CLASSES = _readenv("NUMBA_CACHE_LOCATOR_CLASSES", str, "")
# Enable tracing support
TRACE = _readenv("NUMBA_TRACE", int, 0)
# Enable chrome tracing support
CHROME_TRACE = _readenv("NUMBA_CHROME_TRACE", str, "")
# Enable debugging of type inference
DEBUG_TYPEINFER = _readenv("NUMBA_DEBUG_TYPEINFER", int, 0)
# Disable caching of failed type inferences.
# Use this to isolate problems due to the fail cache.
DISABLE_TYPEINFER_FAIL_CACHE = _readenv(
"NUMBA_DISABLE_TYPEINFER_FAIL_CACHE", int, 0)
# Configure compilation target to use the specified CPU name
# and CPU feature as the host information.
# Note: this overrides "host" option for AOT compilation.
CPU_NAME = _readenv("NUMBA_CPU_NAME", optional_str, None)
CPU_FEATURES = _readenv("NUMBA_CPU_FEATURES", optional_str,
("" if str(CPU_NAME).lower() == 'generic'
else None))
# Optimization level
OPT = _readenv("NUMBA_OPT", _process_opt_level, _OptLevel(3))
# Force dump of Python bytecode
DUMP_BYTECODE = _readenv("NUMBA_DUMP_BYTECODE", int, DEBUG_FRONTEND)
# Force dump of control flow graph
DUMP_CFG = _readenv("NUMBA_DUMP_CFG", int, DEBUG_FRONTEND)
# Force dump of Numba IR
DUMP_IR = _readenv("NUMBA_DUMP_IR", int,
DEBUG_FRONTEND)
# Force dump of Numba IR in SSA form
DUMP_SSA = _readenv("NUMBA_DUMP_SSA", int,
DEBUG_FRONTEND or DEBUG_TYPEINFER)
# print debug info of analysis and optimization on array operations
DEBUG_ARRAY_OPT = _readenv("NUMBA_DEBUG_ARRAY_OPT", int, 0)
# insert debug stmts to print information at runtime
DEBUG_ARRAY_OPT_RUNTIME = _readenv(
"NUMBA_DEBUG_ARRAY_OPT_RUNTIME", int, 0)
# print stats about parallel for-loops
DEBUG_ARRAY_OPT_STATS = _readenv("NUMBA_DEBUG_ARRAY_OPT_STATS", int, 0)
# prints user friendly information about parallel
PARALLEL_DIAGNOSTICS = _readenv("NUMBA_PARALLEL_DIAGNOSTICS", int, 0)
# print debug info of inline closure pass
DEBUG_INLINE_CLOSURE = _readenv("NUMBA_DEBUG_INLINE_CLOSURE", int, 0)
# Force dump of LLVM IR
DUMP_LLVM = _readenv("NUMBA_DUMP_LLVM", int, DEBUG)
# Force dump of Function optimized LLVM IR
DUMP_FUNC_OPT = _readenv("NUMBA_DUMP_FUNC_OPT", int, DEBUG)
# Force dump of Optimized LLVM IR
DUMP_OPTIMIZED = _readenv("NUMBA_DUMP_OPTIMIZED", int, DEBUG)
# Force disable loop vectorize
LOOP_VECTORIZE = _readenv("NUMBA_LOOP_VECTORIZE", int, 1)
# Enable superword-level parallelism vectorization, default is off
# since #8705 (miscompilation).
SLP_VECTORIZE = _readenv("NUMBA_SLP_VECTORIZE", int, 0)
# Force dump of generated assembly
DUMP_ASSEMBLY = _readenv("NUMBA_DUMP_ASSEMBLY", int, DEBUG)
# Force dump of type annotation
ANNOTATE = _readenv("NUMBA_DUMP_ANNOTATION", int, 0)
# Dump IR in such as way as to aid in "diff"ing.
DIFF_IR = _readenv("NUMBA_DIFF_IR", int, 0)
# Dump type annotation in html format
def fmt_html_path(path):
if path is None:
return path
else:
return os.path.abspath(path)
HTML = _readenv("NUMBA_DUMP_HTML", fmt_html_path, None)
# x86-64 specific
# Enable AVX on supported platforms where it won't degrade performance.
def avx_default():
if not _os_supports_avx():
return False
else:
# There are various performance issues with AVX and LLVM
# on some CPUs (list at
# http://llvm.org/bugs/buglist.cgi?quicksearch=avx).
# For now we'd rather disable it, since it can pessimize code
cpu_name = CPU_NAME or ll.get_host_cpu_name()
disabled_cpus = {'corei7-avx', 'core-avx-i',
'sandybridge', 'ivybridge'}
# Disable known baseline CPU names that virtual machines may
# incorrectly report as having AVX support.
# This can cause problems with the SVML-pass's use of AVX512.
# See https://github.com/numba/numba/issues/9582
disabled_cpus |= {'nocona'}
return cpu_name not in disabled_cpus
ENABLE_AVX = _readenv("NUMBA_ENABLE_AVX", int, avx_default)
# if set and SVML is available, it will be disabled
# By default, it's disabled on 32-bit platforms.
DISABLE_INTEL_SVML = _readenv(
"NUMBA_DISABLE_INTEL_SVML", int, IS_32BITS)
# Disable jit for debugging
DISABLE_JIT = _readenv("NUMBA_DISABLE_JIT", int, 0)
# choose parallel backend to use
THREADING_LAYER_PRIORITY = _readenv(
"NUMBA_THREADING_LAYER_PRIORITY",
lambda string: string.split(),
['tbb', 'omp', 'workqueue'],
)
THREADING_LAYER = _readenv("NUMBA_THREADING_LAYER", str, 'default')
# CUDA Configs
# Whether to warn about kernel launches where a host array
# is used as a parameter, forcing a copy to and from the device.
# On by default.
CUDA_WARN_ON_IMPLICIT_COPY = _readenv(
"NUMBA_CUDA_WARN_ON_IMPLICIT_COPY", int, 1)
# Force CUDA compute capability to a specific version
FORCE_CUDA_CC = _readenv("NUMBA_FORCE_CUDA_CC", _parse_cc, None)
# The default compute capability to target when compiling to PTX.
CUDA_DEFAULT_PTX_CC = _readenv("NUMBA_CUDA_DEFAULT_PTX_CC", _parse_cc,
(5, 0))
# Disable CUDA support
DISABLE_CUDA = _readenv("NUMBA_DISABLE_CUDA",
int, int(MACHINE_BITS == 32))
# Enable CUDA simulator
ENABLE_CUDASIM = _readenv("NUMBA_ENABLE_CUDASIM", int, 0)
# CUDA logging level
# Any level name from the *logging* module. Case insensitive.
# Defaults to CRITICAL if not set or invalid.
# Note: This setting only applies when logging is not configured.
# Any existing logging configuration is preserved.
CUDA_LOG_LEVEL = _readenv("NUMBA_CUDA_LOG_LEVEL", str, '')
# Include argument values in the CUDA Driver API logs
CUDA_LOG_API_ARGS = _readenv("NUMBA_CUDA_LOG_API_ARGS", int, 0)
# Maximum number of pending CUDA deallocations (default: 10)
CUDA_DEALLOCS_COUNT = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_COUNT",
int, 10)
# Maximum ratio of pending CUDA deallocations to capacity (default: 0.2)
CUDA_DEALLOCS_RATIO = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_RATIO",
float, 0.2)
CUDA_ARRAY_INTERFACE_SYNC = _readenv("NUMBA_CUDA_ARRAY_INTERFACE_SYNC",
int, 1)
# Path of the directory that the CUDA driver libraries are located
CUDA_DRIVER = _readenv("NUMBA_CUDA_DRIVER", str, '')
# Buffer size for logs produced by CUDA driver operations (e.g.
# linking)
CUDA_LOG_SIZE = _readenv("NUMBA_CUDA_LOG_SIZE", int, 1024)
# Whether to generate verbose log messages when JIT linking
CUDA_VERBOSE_JIT_LOG = _readenv("NUMBA_CUDA_VERBOSE_JIT_LOG", int, 1)
# Whether the default stream is the per-thread default stream
CUDA_PER_THREAD_DEFAULT_STREAM = _readenv(
"NUMBA_CUDA_PER_THREAD_DEFAULT_STREAM", int, 0)
CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY = _readenv(
"NUMBA_CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY", int, 0)
# Location of the CUDA include files
if IS_WIN32:
cuda_path = os.environ.get('CUDA_PATH')
if cuda_path:
default_cuda_include_path = os.path.join(cuda_path, "include")
else:
default_cuda_include_path = "cuda_include_not_found"
else:
default_cuda_include_path = os.path.join(os.sep, 'usr', 'local',
'cuda', 'include')
CUDA_INCLUDE_PATH = _readenv("NUMBA_CUDA_INCLUDE_PATH", str,
default_cuda_include_path)
# Threading settings
# The default number of threads to use.
def num_threads_default():
try:
sched_getaffinity = os.sched_getaffinity
except AttributeError:
pass
else:
return max(1, len(sched_getaffinity(0)))
cpu_count = os.cpu_count()
if cpu_count is not None:
return max(1, cpu_count)
return 1
NUMBA_DEFAULT_NUM_THREADS = num_threads_default()
# Numba thread pool size (defaults to number of CPUs on the system).
_NUMBA_NUM_THREADS = _readenv("NUMBA_NUM_THREADS", int,
NUMBA_DEFAULT_NUM_THREADS)
if ('NUMBA_NUM_THREADS' in globals()
and globals()['NUMBA_NUM_THREADS'] != _NUMBA_NUM_THREADS):
from numba.np.ufunc import parallel
if parallel._is_initialized:
raise RuntimeError("Cannot set NUMBA_NUM_THREADS to a "
"different value once the threads have been "
"launched (currently have %s, "
"trying to set %s)" %
(_NUMBA_NUM_THREADS,
globals()['NUMBA_NUM_THREADS']))
NUMBA_NUM_THREADS = _NUMBA_NUM_THREADS
del _NUMBA_NUM_THREADS
# sys.monitoring support
ENABLE_SYS_MONITORING = _readenv("NUMBA_ENABLE_SYS_MONITORING",
int, 0)
# Profiling support
# Indicates if a profiler detected. Only VTune can be detected for now
RUNNING_UNDER_PROFILER = 'VS_PROFILER' in os.environ
# Enables jit events in LLVM to support profiling of dynamic code
ENABLE_PROFILING = _readenv(
"NUMBA_ENABLE_PROFILING", int, int(RUNNING_UNDER_PROFILER))
# Debug Info
# The default value for the `debug` flag
DEBUGINFO_DEFAULT = _readenv("NUMBA_DEBUGINFO", int, ENABLE_PROFILING)
CUDA_DEBUGINFO_DEFAULT = _readenv("NUMBA_CUDA_DEBUGINFO", int, 0)
EXTEND_VARIABLE_LIFETIMES = _readenv("NUMBA_EXTEND_VARIABLE_LIFETIMES",
int, 0)
# gdb binary location
def which_gdb(path_or_bin):
gdb = shutil.which(path_or_bin)
return gdb if gdb is not None else path_or_bin
GDB_BINARY = _readenv("NUMBA_GDB_BINARY", which_gdb, 'gdb')
# CUDA Memory management
CUDA_MEMORY_MANAGER = _readenv("NUMBA_CUDA_MEMORY_MANAGER", str,
'default')
# Experimental refprune pass
LLVM_REFPRUNE_PASS = _readenv(
"NUMBA_LLVM_REFPRUNE_PASS", int, 1,
)
LLVM_REFPRUNE_FLAGS = _readenv(
"NUMBA_LLVM_REFPRUNE_FLAGS", str,
"all" if LLVM_REFPRUNE_PASS else "",
)
# llvmlite memory manager
USE_LLVMLITE_MEMORY_MANAGER = _readenv(
"NUMBA_USE_LLVMLITE_MEMORY_MANAGER", int, None
)
# Timing support.
# LLVM_PASS_TIMINGS enables LLVM recording of pass timings.
LLVM_PASS_TIMINGS = _readenv(
"NUMBA_LLVM_PASS_TIMINGS", int, 0,
)
# Coverage support.
# JIT_COVERAGE (bool) controls whether the compiler report compiled
# lines to coverage tools. Defaults to off.
JIT_COVERAGE = _readenv(
"NUMBA_JIT_COVERAGE", int, 0,
)
# Inject the configuration values into the module globals
for name, value in locals().copy().items():
if name.isupper():
globals()[name] = value
_env_reloader = _EnvReloader()
def reload_config():
"""
Reload the configuration from environment variables, if necessary.
"""
_env_reloader.update()
| _EnvReloader |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 163917,
"end": 164304
} | class ____(sgqlc.types.Input):
"""A message to include with a new commit"""
__schema__ = github_schema
__field_names__ = ("headline", "body")
headline = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="headline")
"""The headline of the message."""
body = sgqlc.types.Field(String, graphql_name="body")
"""The body of the message."""
| CommitMessage |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 12754,
"end": 16072
} | class ____(fixtures.TestBase):
def test_unloaded_expirable(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id = mapped_column(Integer, Identity(), primary_key=True)
x = mapped_column(
Integer,
)
y = mapped_column(Integer, deferred=True)
decl_base.metadata.create_all(testing.db)
with Session(testing.db) as sess:
obj = A(x=1, y=2)
sess.add(obj)
sess.commit()
with expect_deprecated(
"The InstanceState.unloaded_expirable attribute is deprecated. "
"Please use InstanceState.unloaded."
):
eq_(inspect(obj).unloaded, {"id", "x", "y"})
eq_(inspect(obj).unloaded_expirable, inspect(obj).unloaded)
def test_evaluator_is_private(self):
with expect_deprecated(
"Direct use of 'EvaluatorCompiler' is not supported, and this "
"name will be removed in a future release. "
"'_EvaluatorCompiler' is for internal use only"
):
from sqlalchemy.orm.evaluator import EvaluatorCompiler
from sqlalchemy.orm.evaluator import _EvaluatorCompiler
is_(EvaluatorCompiler, _EvaluatorCompiler)
@testing.combinations(
("init", True),
("kw_only", True),
("default", 5),
("default_factory", lambda: 10),
argnames="paramname, value",
)
def test_column_property_dc_attributes(self, paramname, value):
with expect_deprecated(
rf"The column_property.{paramname} parameter is deprecated "
r"for column_property\(\)",
):
column_property(column("q"), **{paramname: value})
def test_column_property_dc_attributes_still_function(self, dc_decl_base):
with expect_deprecated(
r"The column_property.init parameter is deprecated "
r"for column_property\(\)",
r"The column_property.default parameter is deprecated "
r"for column_property\(\)",
r"The column_property.default_factory parameter is deprecated "
r"for column_property\(\)",
r"The column_property.kw_only parameter is deprecated "
r"for column_property\(\)",
):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column()
const1: Mapped[str] = column_property(
data + "asdf", init=True, default="foobar"
)
const2: Mapped[str] = column_property(
data + "asdf",
init=True,
default_factory=lambda: "factory_foo",
)
const3: Mapped[str] = column_property(
data + "asdf", init=True, kw_only=True
)
m1 = MyClass(data="d1", const3="c3")
eq_(m1.const1, "foobar")
eq_(m1.const2, "factory_foo")
eq_(m1.const3, "c3")
with expect_raises_message(
TypeError, "missing 1 required keyword-only argument: 'const3'"
):
MyClass(data="d1")
| MiscDeprecationsTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1561149,
"end": 1562807
} | class ____(
ColorDef, MarkPropDefGradientstringnull
):
"""
ValueDefWithConditionMarkPropFieldOrDatumDefGradientstringnull schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`ExprRef`, :class:`Gradient`, :class:`LinearGradient`, :class:`RadialGradient`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {
"$ref": "#/definitions/ValueDefWithCondition<MarkPropFieldOrDatumDef,(Gradient|string|null)>"
}
def __init__(
self,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
**kwds,
):
super().__init__(condition=condition, value=value, **kwds)
| ValueDefWithConditionMarkPropFieldOrDatumDefGradientstringnull |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 920631,
"end": 923195
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"associated_pull_requests",
"branch_protection_rule",
"name",
"prefix",
"ref_update_rule",
"repository",
"target",
)
associated_pull_requests = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestConnection),
graphql_name="associatedPullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)),
graphql_name="states",
default=None,
),
),
(
"labels",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="labels",
default=None,
),
),
(
"head_ref_name",
sgqlc.types.Arg(String, graphql_name="headRefName", default=None),
),
(
"base_ref_name",
sgqlc.types.Arg(String, graphql_name="baseRefName", default=None),
),
(
"order_by",
sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
branch_protection_rule = sgqlc.types.Field(
BranchProtectionRule, graphql_name="branchProtectionRule"
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
prefix = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="prefix")
ref_update_rule = sgqlc.types.Field(RefUpdateRule, graphql_name="refUpdateRule")
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
target = sgqlc.types.Field(GitObject, graphql_name="target")
| Ref |
python | doocs__leetcode | lcof2/剑指 Offer II 097. 子序列的数目/Solution2.py | {
"start": 0,
"end": 260
} | class ____:
def numDistinct(self, s: str, t: str) -> int:
n = len(t)
f = [1] + [0] * n
for a in s:
for j in range(n, 0, -1):
if a == t[j - 1]:
f[j] += f[j - 1]
return f[n]
| Solution |
python | google__pytype | pytype/pattern_matching.py | {
"start": 2577,
"end": 4767
} | class ____:
"""Holds a set of options."""
def __init__(self):
# Collection of options, stored as a dict rather than a set so we can find a
# given option efficiently.
self._options: dict[abstract.Class, _Option] = {}
def __iter__(self):
yield from self._options.values()
def __bool__(self):
return not self.is_complete
@property
def is_complete(self) -> bool:
return all(x.is_empty for x in self)
def add_instance(self, val):
"""Add an instance to the match options."""
cls = val.cls
if cls not in self._options:
self._options[cls] = _Option(cls)
if isinstance(val, abstract.ConcreteValue):
self._options[cls].values.add(val)
else:
self.add_type(cls)
def add_type(self, cls):
"""Add an class to the match options."""
if cls not in self._options:
self._options[cls] = _Option(cls)
vals = _get_class_values(cls)
if vals is not None:
self._options[cls].values.update(vals)
else:
self._options[cls].indefinite = True
def cover_instance(self, val) -> list[_Value]:
"""Remove an instance from the match options."""
assert isinstance(val, abstract.Instance)
cls = val.cls
if cls not in self._options:
return []
opt = self._options[cls]
if cls.is_enum:
val = val.name
if val in opt.values:
opt.values.remove(val)
return [val]
else:
if (
not cls.is_enum
and not isinstance(val, abstract.ConcreteValue)
and opt.values
):
# We have passed in an indefinite value to a match var with concrete
# values; we can no longer be sure which values of the type are covered.
opt.indefinite = True
return [val] if opt.indefinite else []
def cover_type(self, val) -> list[_Value]:
"""Remove a class and any associated instances from the match options."""
if val not in self._options:
return []
opt = self._options[val]
vals = list(opt.values)
opt.values = set()
if opt.indefinite:
# opt is now empty; we have covered all potential values
opt.indefinite = False
return [val]
else:
return vals
| _OptionSet |
python | walkccc__LeetCode | solutions/99. Recover Binary Search Tree/99.py | {
"start": 0,
"end": 636
} | class ____:
def recoverTree(self, root: TreeNode | None) -> None:
def swap(x: TreeNode | None, y: TreeNode | None) -> None:
temp = x.val
x.val = y.val
y.val = temp
def inorder(root: TreeNode | None) -> None:
if not root:
return
inorder(root.left)
if self.pred and root.val < self.pred.val:
self.y = root
if not self.x:
self.x = self.pred
else:
return
self.pred = root
inorder(root.right)
inorder(root)
swap(self.x, self.y)
pred = None
x = None # the first wrong node
y = None # the second wrong node
| Solution |
python | sanic-org__sanic | sanic/cli/console.py | {
"start": 1596,
"end": 3022
} | class ____(NamedTuple):
request: Request
response: HTTPResponse
def make_request(
url: str = "/",
headers: Optional[Union[dict[str, Any], Sequence[tuple[str, str]]]] = None,
method: str = "GET",
body: Optional[str] = None,
):
assert repl_app, "No Sanic app has been registered."
headers = headers or {}
protocol = REPLProtocol()
request = Request( # type: ignore
url.encode(),
Header(headers),
"1.1",
method,
protocol,
repl_app,
)
if body is not None:
request.body = body.encode()
request.stream = protocol # type: ignore
request.conn_info = None
return request
async def respond(request) -> HTTPResponse:
assert repl_app, "No Sanic app has been registered."
await repl_app.handle_request(request)
assert repl_response
return repl_response
async def do(
url: str = "/",
headers: Optional[Union[dict[str, Any], Sequence[tuple[str, str]]]] = None,
method: str = "GET",
body: Optional[str] = None,
) -> Result:
request = make_request(url, headers, method, body)
response = await respond(request)
return Result(request, response)
def _variable_description(name: str, desc: str, type_desc: str) -> str:
return (
f" - {Colors.BOLD + Colors.SANIC}{name}{Colors.END}: {desc} - "
f"{Colors.BOLD + Colors.BLUE}{type_desc}{Colors.END}"
)
| Result |
python | django__django | tests/postgres_tests/test_app_installed_check.py | {
"start": 783,
"end": 4945
} | class ____(PostgreSQLTestCase):
def _make_error(self, obj, klass_name):
"""Helper to create postgres.E005 error for specific objects."""
return checks.Error(
"'django.contrib.postgres' must be in INSTALLED_APPS in order to "
f"use {klass_name}.",
obj=obj,
id="postgres.E005",
)
def assert_model_check_errors(self, model_class, expected_errors):
errors = model_class.check(databases=self.databases)
self.assertEqual(errors, [])
with modify_settings(INSTALLED_APPS={"remove": "django.contrib.postgres"}):
errors = model_class.check(databases=self.databases)
self.assertEqual(errors, expected_errors)
def test_indexes(self):
class IndexModel(PostgreSQLModel):
field = models.IntegerField()
class Meta:
indexes = [
PostgresIndex(fields=["id"], name="postgres_index_test"),
GinIndex(fields=["field"], name="gin_index_test"),
]
self.assert_model_check_errors(
IndexModel,
[
self._make_error(IndexModel, "PostgresIndex"),
self._make_error(IndexModel, "GinIndex"),
],
)
def test_exclusion_constraint(self):
class ExclusionModel(PostgreSQLModel):
value = models.IntegerField()
class Meta:
constraints = [
ExclusionConstraint(
name="exclude_equal",
expressions=[("value", RangeOperators.EQUAL)],
)
]
self.assert_model_check_errors(
ExclusionModel, [self._make_error(ExclusionModel, "ExclusionConstraint")]
)
def test_array_field(self):
field = IntegerArrayModel._meta.get_field("field")
self.assert_model_check_errors(
IntegerArrayModel,
[self._make_error(field, "ArrayField")],
)
def test_nested_array_field(self):
"""Inner ArrayField does not cause a postgres.E001 error."""
field = NestedIntegerArrayModel._meta.get_field("field")
self.assert_model_check_errors(
NestedIntegerArrayModel,
[
self._make_error(field, "ArrayField"),
],
)
def test_hstore_field(self):
class HStoreFieldModel(PostgreSQLModel):
field = HStoreField()
field = HStoreFieldModel._meta.get_field("field")
self.assert_model_check_errors(
HStoreFieldModel,
[
self._make_error(field, "HStoreField"),
],
)
def test_range_fields(self):
class RangeFieldsModel(PostgreSQLModel):
int_range = IntegerRangeField()
bigint_range = BigIntegerRangeField()
decimal_range = DecimalRangeField()
datetime_range = DateTimeRangeField()
date_range = DateRangeField()
expected_errors = [
self._make_error(field, field.__class__.__name__)
for field in [
RangeFieldsModel._meta.get_field("int_range"),
RangeFieldsModel._meta.get_field("bigint_range"),
RangeFieldsModel._meta.get_field("decimal_range"),
RangeFieldsModel._meta.get_field("datetime_range"),
RangeFieldsModel._meta.get_field("date_range"),
]
]
self.assert_model_check_errors(RangeFieldsModel, expected_errors)
def test_search_vector_field(self):
class SearchModel(PostgreSQLModel):
search_vector = SearchVectorField()
search_query = SearchQueryField()
vector_field = SearchModel._meta.get_field("search_vector")
query_field = SearchModel._meta.get_field("search_query")
self.assert_model_check_errors(
SearchModel,
[
self._make_error(vector_field, "SearchVectorField"),
self._make_error(query_field, "SearchQueryField"),
],
)
| TestPostgresAppInstalledCheck |
python | getsentry__sentry | src/sentry/snuba/sessions_v2.py | {
"start": 2882,
"end": 4147
} | class ____:
def get_snuba_columns(self, raw_groupby):
if "session.status" in raw_groupby:
return [
"sessions",
"sessions_abnormal",
"sessions_crashed",
"sessions_errored",
"sessions_unhandled",
]
return ["sessions"]
def extract_from_row(self, row, group):
if row is None:
return 0
status = group.get("session.status")
if status is None:
return row["sessions"]
if status == "healthy":
healthy_sessions = row["sessions"] - row["sessions_errored"] - row["sessions_unhandled"]
return max(healthy_sessions, 0)
if status == "abnormal":
return row["sessions_abnormal"]
if status == "unhandled":
return row["sessions_unhandled"]
if status == "crashed":
return row["sessions_crashed"]
if status == "errored":
errored_sessions = (
row["sessions_errored"]
- row["sessions_unhandled"]
- row["sessions_crashed"]
- row["sessions_abnormal"]
)
return max(errored_sessions, 0)
return 0
| SessionsField |
python | getsentry__sentry | src/sentry/runner/commands/tsdb.py | {
"start": 233,
"end": 3224
} | class ____(click.ParamType):
name = "datetime"
def convert(
self,
value: str | datetime | None,
param: click.Parameter | None,
context: click.Context | None,
) -> datetime | None:
if value is None:
return value
elif isinstance(value, datetime):
return value
try:
result = parse(value)
except Exception:
self.fail(f"{value!r} is not a valid datetime", param, context)
if result.tzinfo is None:
# TODO: We should probably warn about this? Also note that this
# doesn't use the Django specified timezone, since settings haven't
# been configured yet.
result = result.replace(tzinfo=timezone.utc)
return result
@click.group()
def tsdb() -> None:
"""Tools for interacting with the time series database."""
@tsdb.group()
def query() -> None:
"""Execute queries against the time series database."""
@query.command()
@click.argument(
"metrics",
nargs=-1,
type=click.Choice(
[
"organization_total_received",
"organization_total_rejected",
"organization_total_blacklisted",
]
),
)
@click.option("--since", callback=DateTimeParamType())
@click.option("--until", callback=DateTimeParamType())
@configuration
def organizations(metrics: tuple[str, ...], since: datetime | None, until: datetime | None) -> None:
"""
Fetch metrics for organizations.
"""
from django.utils import timezone
from sentry import tsdb
from sentry.models.organization import Organization
from sentry.tsdb.base import TSDBModel
stdout = click.get_text_stream("stdout")
stderr = click.get_text_stream("stderr")
def aggregate(series: Iterable[tuple[object, float]]) -> float:
return sum(value for timestamp, value in series)
metrics_dct = {name: getattr(TSDBModel, name) for name in metrics}
if not metrics_dct:
return
if until is None:
until = timezone.now()
if since is None:
since = until - timedelta(minutes=60)
if until < since:
raise click.ClickException(f"invalid time range provided: {since} to {until}")
stderr.write("Dumping {} from {} to {}...\n".format(", ".join(metrics_dct), since, until))
objects = Organization.objects.all()
for chunk in chunked(objects, 100):
instances = {instance.pk: instance for instance in chunk}
results = {}
for metric in metrics_dct.values():
results[metric] = tsdb.backend.get_range(metric, list(instances.keys()), since, until)
for key, instance in instances.items():
values = []
for metric in metrics_dct.values():
values.append(aggregate(results[metric][key]))
stdout.write(
"{} {} {}\n".format(instance.id, instance.slug, " ".join(map(str, values)))
)
| DateTimeParamType |
python | astropy__astropy | astropy/units/tests/test_quantity.py | {
"start": 12172,
"end": 29424
} | class ____:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15.0 * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.0
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiple with a unit.
new_quantity = self.q1 * u.s
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m s")
# Reverse multiple with a unit.
new_quantity = u.s * self.q1
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m s")
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416, decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.0
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.0
assert new_quantity.unit == u.Unit("1/m")
# Divide by a unit.
new_quantity = self.q1 / u.s
assert new_quantity.value == 11.42
assert new_quantity.unit == u.Unit("m/s")
# Divide into a unit.
new_quantity = u.s / self.q1
assert new_quantity.value == 1 / 11.42
assert new_quantity.unit == u.Unit("s/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, "m*s")
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, "m/s")
assert u.s / self.q1 == u.Quantity(1 / 11.42, "s/m")
def test_power(self):
# raise quantity to a power
new_quantity = self.q1**2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1**3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
@pytest.mark.parametrize(
"exponent_type",
[int, float, np.uint64, np.int32, np.float32, u.Quantity, Masked],
)
def test_quantity_as_power(self, exponent_type):
# raise unit to a dimensionless Quantity power
# regression test for https://github.com/astropy/astropy/issues/16260
q = u.m ** exponent_type(2)
assert q == u.m**2
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m**2)
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1) # noqa: B002
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1.0 * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
"""When trying to add or subtract units that aren't compatible, throw an error"""
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(
TypeError, match=r"Unsupported operand type\(s\) for ufunc .*"
):
q1 + {"a": 1}
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3.0 * u.m / u.km
dq1 = dq + 1.0 * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.0
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# this test will check that operations with dimensionless Quantities
# don't work
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
"""Perform a more complicated test"""
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15.0, u.meter)
time = u.Quantity(11.0, u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673e-11, u.m**3 / u.kg / u.s**2)
_ = (1.0 / (4.0 * np.pi * G)).to(u.pc**-3 / u.s**-2 * u.kg)
# Area
side1 = u.Quantity(11.0, u.centimeter)
side2 = u.Quantity(7.0, u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77.0, decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm**-2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when one is a unit, Quantity does not know what to do,
# but unit is fine with it, so it still works
unit = u.cm**3
q = 1.0 * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000.0 * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1.0 * u.cm == 1.0
assert 1.0 * u.cm != 1.0
for quantity in (1.0 * u.cm, 1.0 * u.dimensionless_unscaled):
with pytest.raises(ValueError, match="ambiguous"):
bool(quantity)
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = (
"only dimensionless scalar quantities can be converted to Python scalars"
)
index_err_msg = (
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
# We used to test `q1 * ['a', 'b', 'c'] here, but that that worked
# at all was a really odd confluence of bugs. Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.0
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ["a", "b", "c"] == ["a", "b", "c", "a", "b", "c"]
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1.0, 2.0, 3.0], u.m)
assert np.all(np.array(q) == np.array([1.0, 2.0, 3.0]))
def test_index(self):
val = 123
out = operator.index(u.Quantity(val, u.one, dtype=int))
assert out == val
with pytest.raises(TypeError):
operator.index(u.Quantity(val, u.m, dtype=int))
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_ilshift(): # in-place conversion
q = u.Quantity(10, unit=u.one)
# Incompatible units. This goes through ilshift and hits a
# UnitConversionError first in ilshift, then in the unit's rlshift.
with pytest.raises(u.UnitConversionError):
q <<= u.rad
# unless the equivalency is enabled
with u.add_enabled_equivalencies(u.dimensionless_angles()):
q <<= u.rad
assert np.isclose(q, 10 * u.rad)
def test_quantity_round():
q = u.Quantity(10.1289, unit=u.s)
assert np.isclose(round(q), 10 * u.s)
assert np.isclose(round(q, 2), 10.13 * u.s)
def test_regression_12964():
# This will fail if the fix to
# https://github.com/astropy/astropy/issues/12964 doesn't work.
x = u.Quantity(10, u.km, dtype=int)
x <<= u.pc
# We add a test that this worked.
assert x.unit is u.pc
assert x.dtype == np.float64
def test_quantity_value_views():
q1 = u.Quantity([1.0, 2.0], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.0
assert np.all(q1 == [0.0, 2.0] * u.meter)
v2 = q1.to_value()
v2[1] = 3.0
assert np.all(q1 == [0.0, 3.0] * u.meter)
v3 = q1.to_value("m")
v3[0] = 1.0
assert np.all(q1 == [1.0, 3.0] * u.meter)
q2 = q1.to("m", copy=False)
q2[0] = 2 * u.meter
assert np.all(q1 == [2.0, 3.0] * u.meter)
v4 = q1.to_value("cm")
v4[0] = 0.0
# copy if different unit.
assert np.all(q1 == [2.0, 3.0] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
def test_self_equivalency():
assert u.deg.is_equivalent(1 * u.radian)
def test_si():
q1 = 10.0 * u.m * u.s**2 / (200.0 * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10.0 * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
q = 10.0 / u.m # 10 1 / meters
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10.0 * u.cm * u.s**2 / (200.0 * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
q = 10.0 * u.m # 10 centimeters
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
q = 10.0 / u.cm # 10 1 / centimeters
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10.0 * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
| TestQuantityOperations |
python | aimacode__aima-python | search.py | {
"start": 2595,
"end": 5239
} | class ____:
"""A node in a search tree. Contains a pointer to the parent (the node
that this is a successor of) and to the actual state for this node. Note
that if a state is arrived at by two paths, then there are two nodes with
the same state. Also includes the action that got us to this state, and
the total path_cost (also known as g) to reach the node. Other functions
may add an f and h value; see best_first_graph_search and astar_search for
an explanation of how the f and h values are handled. You will not need to
subclass this class."""
def __init__(self, state, parent=None, action=None, path_cost=0):
"""Create a search tree Node, derived from a parent by an action."""
self.state = state
self.parent = parent
self.action = action
self.path_cost = path_cost
self.depth = 0
if parent:
self.depth = parent.depth + 1
def __repr__(self):
return "<Node {}>".format(self.state)
def __lt__(self, node):
return self.state < node.state
def expand(self, problem):
"""List the nodes reachable in one step from this node."""
return [self.child_node(problem, action)
for action in problem.actions(self.state)]
def child_node(self, problem, action):
"""[Figure 3.10]"""
next_state = problem.result(self.state, action)
next_node = Node(next_state, self, action, problem.path_cost(self.path_cost, self.state, action, next_state))
return next_node
def solution(self):
"""Return the sequence of actions to go from the root to this node."""
return [node.action for node in self.path()[1:]]
def path(self):
"""Return a list of nodes forming the path from the root to this node."""
node, path_back = self, []
while node:
path_back.append(node)
node = node.parent
return list(reversed(path_back))
# We want for a queue of nodes in breadth_first_graph_search or
# astar_search to have no duplicated states, so we treat nodes
# with the same state as equal. [Problem: this may not be what you
# want in other contexts.]
def __eq__(self, other):
return isinstance(other, Node) and self.state == other.state
def __hash__(self):
# We use the hash value of the state
# stored in the node instead of the node
# object itself to quickly search a node
# with the same state in a Hash Table
return hash(self.state)
# ______________________________________________________________________________
| Node |
python | PyCQA__pylint | tests/lint/unittest_lint.py | {
"start": 32140,
"end": 42839
} | class ____(PyLinter):
@staticmethod
def should_analyze_file(modname: str, path: str, is_argument: bool = False) -> bool:
if os.path.basename(path) == "wrong.py":
return False
return super(_CustomPyLinter, _CustomPyLinter).should_analyze_file(
modname, path, is_argument=is_argument
)
@pytest.mark.needs_two_cores
def test_custom_should_analyze_file() -> None:
"""Check that we can write custom should_analyze_file that work
even for arguments.
"""
package_dir = os.path.join(REGRTEST_DATA_DIR, "bad_package")
wrong_file = os.path.join(package_dir, "wrong.py")
for jobs in (1, 2):
reporter = testutils.GenericTestReporter()
linter = _CustomPyLinter()
linter.config.jobs = jobs
linter.config.persistent = 0
linter.open()
linter.set_reporter(reporter)
try:
sys.path.append(os.path.dirname(package_dir))
linter.check([package_dir, wrong_file])
finally:
sys.path.pop()
messages = reporter.messages
assert len(messages) == 1
assert "invalid syntax" in messages[0].msg
# we do the check with jobs=1 as well, so that we are sure that the duplicates
# are created by the multiprocessing problem.
@pytest.mark.needs_two_cores
@pytest.mark.parametrize("jobs", [1, 2])
def test_multiprocessing(jobs: int) -> None:
"""Check that multiprocessing does not create duplicates."""
# For the bug (#3584) to show up we need more than one file with issues
# per process
filenames = [
"special_attr_scope_lookup_crash.py",
"syntax_error.py",
"unused_variable.py",
"wildcard.py",
"wrong_import_position.py",
]
reporter = testutils.GenericTestReporter()
linter = PyLinter()
linter.config.jobs = jobs
linter.config.persistent = 0
linter.open()
linter.set_reporter(reporter)
try:
sys.path.append(os.path.dirname(REGRTEST_DATA_DIR))
linter.check([os.path.join(REGRTEST_DATA_DIR, fname) for fname in filenames])
finally:
sys.path.pop()
messages = reporter.messages
assert len(messages) == len(set(messages))
def test_filename_with__init__(initialized_linter: PyLinter) -> None:
# This tracks a regression where a file whose name ends in __init__.py,
# such as flycheck__init__.py, would accidentally lead to linting the
# entire containing directory.
reporter = testutils.GenericTestReporter()
linter = initialized_linter
linter.open()
linter.set_reporter(reporter)
filepath = join(INPUT_DIR, "not__init__.py")
linter.check([filepath])
messages = reporter.messages
assert len(messages) == 0
def test_by_module_statement_value(initialized_linter: PyLinter) -> None:
"""Test "statement" for each module analyzed of computed correctly."""
linter = initialized_linter
linter.check([os.path.join(os.path.dirname(__file__), "data")])
by_module_stats = linter.stats.by_module
for module, module_stats in by_module_stats.items():
linter2 = initialized_linter
linter2.stats = LinterStats()
if module == "data":
linter2.check([os.path.join(os.path.dirname(__file__), "data/__init__.py")])
else:
linter2.check([os.path.join(os.path.dirname(__file__), module)])
# Check that the by_module "statement" is equal to the global "statement"
# computed for that module
assert module_stats["statement"] == linter2.stats.statement
def test_finds_pyi_file() -> None:
run = Run(
["--prefer-stubs=y", join(REGRTEST_DATA_DIR, "pyi")],
exit=False,
)
assert run.linter.current_file is not None
assert run.linter.current_file.endswith(
"a_module_that_we_definitely_dont_use_in_the_functional_tests.pyi"
)
def test_recursive_finds_pyi_file() -> None:
run = Run(
[
"--recursive",
"y",
"--prefer-stubs",
"y",
join(REGRTEST_DATA_DIR, "pyi"),
],
exit=False,
)
assert run.linter.current_file is not None
assert run.linter.current_file.endswith(
"a_module_that_we_definitely_dont_use_in_the_functional_tests.pyi"
)
def test_no_false_positive_from_pyi_stub() -> None:
run = Run(
[
"--recursive",
"y",
"--prefer-stubs",
"n",
join(REGRTEST_DATA_DIR, "uses_module_with_stub.py"),
],
exit=False,
)
assert not run.linter.stats.by_msg
@pytest.mark.parametrize(
"ignore_parameter,ignore_parameter_value",
[
("--ignore", "failing.py"),
("--ignore", "ignored_subdirectory"),
("--ignore-patterns", "failing.*"),
("--ignore-patterns", "ignored_*"),
("--ignore-paths", ".*directory/ignored.*"),
("--ignore-paths", ".*ignored.*/failing.*"),
],
)
def test_recursive_ignore(ignore_parameter: str, ignore_parameter_value: str) -> None:
run = Run(
[
"--recursive",
"y",
ignore_parameter,
ignore_parameter_value,
join(REGRTEST_DATA_DIR, "directory"),
],
exit=False,
)
linted_files = run.linter._iterate_file_descrs(
tuple(run.linter._discover_files([join(REGRTEST_DATA_DIR, "directory")]))
)
linted_file_paths = [file_item.filepath for file_item in linted_files]
ignored_file = os.path.abspath(
join(REGRTEST_DATA_DIR, "directory", "ignored_subdirectory", "failing.py")
)
assert ignored_file not in linted_file_paths
for regrtest_data_module in (
("directory", "subdirectory", "subsubdirectory", "module.py"),
("directory", "subdirectory", "module.py"),
("directory", "package", "module.py"),
("directory", "package", "subpackage", "module.py"),
):
module = os.path.abspath(join(REGRTEST_DATA_DIR, *regrtest_data_module))
assert module in linted_file_paths
# We lint the modules in `regrtest` in other tests as well. Prevent test pollution by
# explicitly clearing the astroid caches.
astroid.MANAGER.clear_cache()
def test_source_roots_globbing() -> None:
run = Run(
[
"--source-roots",
join(REGRTEST_DATA_DIR, "pep420", "basic", "*"),
join(REGRTEST_DATA_DIR, "pep420", "basic", "project"),
],
exit=False,
)
assert run.linter.config.source_roots == [
join(REGRTEST_DATA_DIR, "pep420", "basic", "project")
]
def test_recursive_implicit_namespace() -> None:
run = Run(
[
"--verbose",
"--recursive",
"y",
"--source-roots",
join(REGRTEST_DATA_DIR, "pep420", "basic", "project"),
join(REGRTEST_DATA_DIR, "pep420", "basic"),
],
exit=False,
)
assert run.linter.file_state.base_name == "namespace.package"
def test_recursive_implicit_namespace_wrapper() -> None:
run = Run(
[
"--recursive",
"y",
"--source-roots",
join(REGRTEST_DATA_DIR, "pep420", "wrapper", "project"),
join(REGRTEST_DATA_DIR, "pep420", "wrapper"),
],
exit=False,
)
run.linter.set_reporter(testutils.GenericTestReporter())
run.linter.check([join(REGRTEST_DATA_DIR, "pep420", "wrapper")])
assert run.linter.reporter.messages == []
def test_globbing() -> None:
run = Run(
[
"--verbose",
"--source-roots",
join(REGRTEST_DATA_DIR, "pep420", "basic", "project"),
join(REGRTEST_DATA_DIR, "pep420", "basic", "project", "**", "__init__.py"),
],
exit=False,
)
assert run.linter.file_state.base_name == "namespace.package.__init__"
def test_relative_imports(initialized_linter: PyLinter) -> None:
"""Regression test for https://github.com/pylint-dev/pylint/issues/3651."""
linter = initialized_linter
with tempdir() as tmpdir:
create_files(["x/y/__init__.py", "x/y/one.py", "x/y/two.py"], tmpdir)
with open("x/y/__init__.py", "w", encoding="utf-8") as f:
f.write(
"""
\"\"\"Module x.y\"\"\"
from .one import ONE
from .two import TWO
"""
)
with open("x/y/one.py", "w", encoding="utf-8") as f:
f.write(
"""
\"\"\"Module x.y.one\"\"\"
ONE = 1
"""
)
with open("x/y/two.py", "w", encoding="utf-8") as f:
f.write(
"""
\"\"\"Module x.y.two\"\"\"
from .one import ONE
TWO = ONE + ONE
"""
)
linter.check(["x/y"])
assert not linter.stats.by_msg
def test_import_sibling_module_from_namespace(initialized_linter: PyLinter) -> None:
"""If the parent directory above `namespace` is on sys.path, ensure that
modules under `namespace` can import each other without raising `import-error`.
"""
linter = initialized_linter
with tempdir() as tmpdir:
create_files(["namespace/submodule1.py", "namespace/submodule2.py"])
second_path = Path("namespace/submodule2.py")
with open(second_path, "w", encoding="utf-8") as f:
f.write(
"""\"\"\"This module imports submodule1.\"\"\"
import submodule1
print(submodule1)
"""
)
os.chdir("namespace")
extra_sys_paths = [expand_modules.discover_package_path(tmpdir, [])]
# Add the parent directory to sys.path
with lint.augmented_sys_path(extra_sys_paths):
linter.check(["submodule2.py"])
assert not linter.stats.by_msg
def test_lint_namespace_package_under_dir(initialized_linter: PyLinter) -> None:
"""Regression test for https://github.com/pylint-dev/pylint/issues/1667."""
linter = initialized_linter
with tempdir():
create_files(["outer/namespace/__init__.py", "outer/namespace/module.py"])
linter.check(["outer.namespace"])
assert not linter.stats.by_msg
def test_lint_namespace_package_under_dir_on_path(initialized_linter: PyLinter) -> None:
"""If the directory above a namespace package is on sys.path,
the namespace module under it is linted.
"""
linter = initialized_linter
with tempdir() as tmpdir:
create_files(["namespace_on_path/submodule1.py"])
os.chdir(tmpdir)
extra_sys_paths = [expand_modules.discover_package_path(tmpdir, [])]
with lint.augmented_sys_path(extra_sys_paths):
linter.check(["namespace_on_path"])
assert linter.file_state.base_name == "namespace_on_path"
| _CustomPyLinter |
python | joblib__joblib | joblib/pool.py | {
"start": 3551,
"end": 6056
} | class ____(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
memory mapped datastructures.
`reducers` is expected to be a dict with key / values being
`(type, callable)` pairs where `callable` is a function that, given an
instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
to rebuild an instance out of the pickled `tuple_of_objects` as would
return a `__reduce__` method.
See the standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == "win32":
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock, self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
| CustomizablePicklingQueue |
python | django__django | django/utils/connection.py | {
"start": 139,
"end": 836
} | class ____:
"""Proxy for accessing a connection object's attributes."""
def __init__(self, connections, alias):
self.__dict__["_connections"] = connections
self.__dict__["_alias"] = alias
def __getattr__(self, item):
return getattr(self._connections[self._alias], item)
def __setattr__(self, name, value):
return setattr(self._connections[self._alias], name, value)
def __delattr__(self, name):
return delattr(self._connections[self._alias], name)
def __contains__(self, key):
return key in self._connections[self._alias]
def __eq__(self, other):
return self._connections[self._alias] == other
| ConnectionProxy |
python | sympy__sympy | sympy/utilities/_compilation/runners.py | {
"start": 9630,
"end": 10237
} | class ____(CompilerRunner):
environ_key_compiler = 'FC'
environ_key_flags = 'FFLAGS'
standards = (None, 'f77', 'f95', 'f2003', 'f2008')
std_formater = {
'gfortran': lambda x: '-std=gnu' if x is None else '-std=legacy' if x == 'f77' else '-std={}'.format(x),
'ifort': lambda x: '-stand f08' if x is None else '-stand f{}'.format(x[-2:]), # f2008 => f08
}
compiler_dict = OrderedDict([
('gnu', 'gfortran'),
('intel', 'ifort'),
])
compiler_name_vendor_mapping = {
'gfortran': 'gnu',
'ifort': 'intel',
}
| FortranCompilerRunner |
python | pydantic__pydantic | pydantic/mypy.py | {
"start": 5666,
"end": 8722
} | class ____:
"""A Pydantic mypy plugin config holder.
Attributes:
init_forbid_extra: Whether to add a `**kwargs` at the end of the generated `__init__` signature.
init_typed: Whether to annotate fields in the generated `__init__`.
warn_required_dynamic_aliases: Whether to raise required dynamic aliases error.
debug_dataclass_transform: Whether to not reset `dataclass_transform_spec` attribute
of `ModelMetaclass` for testing purposes.
"""
__slots__ = (
'init_forbid_extra',
'init_typed',
'warn_required_dynamic_aliases',
'debug_dataclass_transform',
)
init_forbid_extra: bool
init_typed: bool
warn_required_dynamic_aliases: bool
debug_dataclass_transform: bool # undocumented
def __init__(self, options: Options) -> None:
if options.config_file is None: # pragma: no cover
return
toml_config = parse_toml(options.config_file)
if toml_config is not None:
config = toml_config.get('tool', {}).get('pydantic-mypy', {})
for key in self.__slots__:
setting = config.get(key, False)
if not isinstance(setting, bool):
raise ValueError(f'Configuration value must be a boolean for key: {key}')
setattr(self, key, setting)
else:
plugin_config = ConfigParser()
plugin_config.read(options.config_file)
for key in self.__slots__:
setting = plugin_config.getboolean(CONFIGFILE_KEY, key, fallback=False)
setattr(self, key, setting)
def to_data(self) -> dict[str, Any]:
"""Returns a dict of config names to their values."""
return {key: getattr(self, key) for key in self.__slots__}
def from_attributes_callback(ctx: MethodContext) -> Type:
"""Raise an error if from_attributes is not enabled."""
model_type: Instance
ctx_type = ctx.type
if isinstance(ctx_type, TypeType):
ctx_type = ctx_type.item
if isinstance(ctx_type, CallableType) and isinstance(ctx_type.ret_type, Instance):
model_type = ctx_type.ret_type # called on the class
elif isinstance(ctx_type, Instance):
model_type = ctx_type # called on an instance (unusual, but still valid)
else: # pragma: no cover
detail = f'ctx.type: {ctx_type} (of type {ctx_type.__class__.__name__})'
error_unexpected_behavior(detail, ctx.api, ctx.context)
return ctx.default_return_type
pydantic_metadata = model_type.type.metadata.get(METADATA_KEY)
if pydantic_metadata is None:
return ctx.default_return_type
if not model_type.type.has_base(BASEMODEL_FULLNAME):
# not a Pydantic v2 model
return ctx.default_return_type
from_attributes = pydantic_metadata.get('config', {}).get('from_attributes')
if from_attributes is not True:
error_from_attributes(model_type.type.name, ctx.api, ctx.context)
return ctx.default_return_type
| PydanticPluginConfig |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_open_library_id.py | {
"start": 1626,
"end": 3910
} | class ____(ColumnMapExpectation):
"""Expect column values to conform to the valid Open Library ID format."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_open_library_id": [
"OL36858W",
"OL3156833A",
"OL6917238M",
"OL23747519M",
],
"malformed_open_library_id": [
"",
"OL36858",
"AB36858W",
"This is not a valid Open Library ID",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_open_library_id"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_open_library_id"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_open_library_id"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidOpenLibraryId().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidOpenLibraryId |
python | google__jax | tests/python_callback_test.py | {
"start": 33393,
"end": 44086
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu", "gpu", "tpu"]):
self.skipTest(f"Host callback not supported on {jtu.device_under_test()}")
def tearDown(self):
super().tearDown()
dispatch.runtime_tokens.clear()
def test_io_callback_can_mutate_state(self):
x = 0
def cb():
nonlocal x
x += 1
return np.array(x, np.int32)
def f():
return io_callback(cb, jax.ShapeDtypeStruct((), jnp.int32))
f()
jax.effects_barrier()
self.assertEqual(x, 1)
f()
jax.effects_barrier()
self.assertEqual(x, 2)
def test_io_callback_can_be_batched_if_unordered(self):
_mut = 0
def cb(x):
nonlocal _mut
_mut += 1
return x
x = jnp.arange(4)
def f(x):
return io_callback(cb, jax.ShapeDtypeStruct((), x.dtype), x)
jax.vmap(f)(x)
jax.effects_barrier()
self.assertEqual(_mut, 4)
jax.vmap(f)(x)
jax.effects_barrier()
self.assertEqual(_mut, 8)
def test_cannot_call_ordered_io_in_pmap(self):
if config.pmap_shmap_merge.value:
self.skipTest("Test does not raise under pmap_shmap_merge=True")
def f(x):
return io_callback(
lambda x: x, jax.ShapeDtypeStruct((), jnp.int32), x, ordered=True)
with self.assertRaisesRegex(
ValueError, "Ordered effects not supported in `pmap`"):
jax.pmap(f)(jnp.arange(jax.local_device_count()))
def test_cannot_call_ordered_io_in_vmap(self):
def f(x):
return io_callback(
lambda x: x, jax.ShapeDtypeStruct((), jnp.int32), x, ordered=True)
with self.assertRaisesRegex(
ValueError, "Cannot `vmap` ordered IO callback"):
jax.vmap(f)(jnp.arange(4))
def test_cannot_use_io_callback_in_jvp(self):
def f(x):
return io_callback(lambda x: x, jax.ShapeDtypeStruct((), jnp.float32), x)
with self.assertRaisesRegex(
ValueError, "IO callbacks do not support JVP."):
jax.jvp(f, (0.,), (1.,))
def test_cannot_use_io_callback_in_linearize(self):
def f(x):
return io_callback(lambda x: x, jax.ShapeDtypeStruct((), jnp.float32), x)
with self.assertRaisesRegex(
ValueError, "IO callbacks do not support JVP."):
jax.linearize(f, 0.)
def test_cannot_use_io_callback_in_transpose(self):
x = jnp.array(1.)
def f(x):
return io_callback(lambda x: x, jax.ShapeDtypeStruct((), x.dtype), x)
with self.assertRaisesRegex(
ValueError, "IO callbacks do not support transpose."):
jax.linear_transpose(f, x)(x)
def test_cannot_vmap_of_cond_io_callback(self):
def f(pred):
def true_fun():
io_callback(lambda: print("true"), None)
def false_fun():
io_callback(lambda: print("false"), None)
return lax.cond(pred, false_fun, true_fun)
with self.assertRaisesRegex(NotImplementedError,
"IO effect not supported in vmap-of-cond."):
jax.vmap(f)(jnp.array([True, True]))
def test_cannot_vmap_of_while_io_callback(self):
def check(x):
assert np.all(x < 5)
def f(i):
def cond(i):
return i < 5
def body(i):
io_callback(check, None, i)
return i + 1
return lax.while_loop(cond, body, i)
with self.assertRaisesRegex(
Exception, "not supported in while_loop with batched predicate"):
jax.vmap(f)(jnp.array([0, 4]))
def test_cannot_use_io_callback_in_checkpoint(self):
@jax.grad
@jax.checkpoint
def f(x, y):
io_callback(lambda x: x, y, y)
return x
with self.assertRaisesRegex(NotImplementedError,
"Effects not supported in partial-eval of `checkpoint`"):
f(2., 3.)
@parameterized.named_parameters(
dict(
testcase_name=f'{ordered=}_{with_sharding=}',
ordered=ordered,
with_sharding=with_sharding,
)
for ordered in [True, False]
for with_sharding in [True, False]
)
@jtu.ignore_warning(message='.*Please use `jax.jit` instead.*',
category=DeprecationWarning)
def test_can_use_io_callback_in_pjit(
self, *, ordered: bool, with_sharding: bool
):
devices = jax.devices()
mesh = jax.sharding.Mesh(np.array(devices), ['dev'])
_collected: list[int] = []
def _cb(x):
nonlocal _collected
_collected.append(int(x.sum()))
io_callback_kwargs = dict(ordered=ordered)
callback_device = devices[0]
if with_sharding:
callback_device = devices[-1]
io_callback_kwargs['sharding'] = jax.sharding.SingleDeviceSharding(
callback_device
)
def f(x):
io_callback(_cb, None, x, **io_callback_kwargs)
io_callback(_cb, None, x + 1, **io_callback_kwargs)
return x
in_spec = jax.sharding.NamedSharding(
mesh, jax.sharding.PartitionSpec('dev')
)
out_spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
f = pjit.pjit(f, in_shardings=in_spec, out_shardings=out_spec)
expected = []
with mesh:
x = jnp.arange(mesh.size)
f(x)
expected.extend([int(x.sum()), int((x + 1).sum())])
f(x + 5)
expected.extend([int((x + 5).sum()), int((x + 6).sum())])
jax.effects_barrier()
if ordered:
self.assertAllClose(_collected, expected)
else:
self.assertEqual(len(_collected), len(expected))
for v in expected:
self.assertIn(v, _collected)
callback_device_index = in_spec._device_assignment.index(callback_device)
stablehlo_ir = f.lower(x).as_text()
if config.use_shardy_partitioner.value:
self.assertIn(
"sdy.sharding ="
f" #sdy.sharding_per_value<[<@maximal_mesh_{callback_device_index},"
" []>]>",
stablehlo_ir)
self.assertIn(
f"sdy.mesh @maximal_mesh_{callback_device_index} = <[],"
f" device_ids=[{callback_device_index}]>",
stablehlo_ir)
else:
self.assertIn(f"{{maximal device={callback_device_index}}}", stablehlo_ir)
@jtu.ignore_warning(message='.*Please use `jax.jit` instead.*',
category=DeprecationWarning)
def test_sequence_pjit_io_callback_ordered(self):
if jtu.is_device_tpu(7, 'x'):
self.skipTest('TODO(b/453664256): Failing on TPU 7x.')
# A sequence of pairs of calls to pjit(io_callback(ordered=True)) with each
# pair on a different device assignment.
_collected: list[int] = []
def _cb(i, x):
nonlocal _collected
# Sleep different amounts of time, to test the ordering.
time.sleep([0.02, 0.03, 0.04][len(_collected) % 3])
logging.info('Collected iteration %s: %s', i, x)
_collected.append(int(x.sum()))
def f_base(i, x):
io_callback(_cb, None, i, x, ordered=True)
io_callback(_cb, None, i, x + 1, ordered=True)
nr_iterations = 8
# TODO(zce): If I pin to 1 device below (jax.devices()[:1]) then this test
# flakes. It also flakes when pinned to 2 devices. It seems that repeatedly
# dispatching to the same device triggers the problem.
devices = jax.devices()
expected = [] # The expected value for _collected
for i in range(nr_iterations):
if len(devices) > 1:
devices_for_iteration = [
devices[i % len(devices)],
devices[(i + 1) % len(devices)],
]
else:
devices_for_iteration = devices
logging.info(
'Running iteration %d on devices %s', i, devices_for_iteration
)
mesh = jax.sharding.Mesh(np.array(devices_for_iteration), ['dev'])
in_spec = (
jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec()),
jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('dev')),
)
out_spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
f = pjit.pjit(f_base, in_shardings=in_spec, out_shardings=out_spec)
with mesh:
x = jax.device_put(
np.arange(len(devices_for_iteration), dtype=np.int32) + 10 * i,
in_spec[1],
)
f(i, x)
expected.extend([int(x.sum()), int((x + 1).sum())])
f(i, x + 5)
expected.extend([int((x + 5).sum()), int((x + 6).sum())])
jax.effects_barrier()
self.assertEqual(_collected, expected)
@parameterized.named_parameters(
dict(testcase_name='multi_device',
single_device=False),
dict(testcase_name='single_device',
single_device=True)
)
def test_can_shard_io_callback_manually(self, single_device: bool):
devices = jax.devices()
if single_device:
devices = devices[:1]
mesh = Mesh(np.array(devices), axis_names=('x',))
spec = jax.sharding.PartitionSpec('x')
sharding = jax.sharding.NamedSharding(mesh, spec)
_collected = collections.defaultdict(list)
def func(shard_id, x):
nonlocal _collected
_collected[shard_id.item()].append(x)
def f(shard_ids, x):
io_callback(func, None, shard_ids, x, ordered=True)
io_callback(func, None, shard_ids, x + 1, ordered=True)
f = shard_map(f, mesh=mesh, in_specs=spec, out_specs=None)
shard_ids = jnp.arange(mesh.devices.size)
inp = jnp.arange(2 * jax.local_device_count())
jax.jit(f, in_shardings=sharding, out_shardings=None)(shard_ids, inp)
jax.effects_barrier()
self.assertLen(_collected, mesh.devices.size)
# Verify the partial ordering: no specified order across shards, but strict
# ordering between the two calls in each shard.
for shard in _collected.values():
self.assertLen(shard, 2)
np.testing.assert_array_equal(shard[0] + 1, shard[1])
def test_batching_with_side_effects(self):
# https://github.com/jax-ml/jax/issues/20628#issuecomment-2050800195
x_lst = []
def append_x(x):
nonlocal x_lst
x_lst.append(x)
@jax.jit
def f(x):
io_callback(append_x, None, x, ordered=False)
io_callback(append_x, None, 2 * x, ordered=False)
jax.vmap(f)(jnp.arange(3.))
jax.effects_barrier()
self.assertAllClose(x_lst, [0., 1., 2., 0., 2., 4.], check_dtypes=False)
def test_batching_with_side_effects_while_loop(self):
# https://github.com/jax-ml/jax/issues/20628#issuecomment-2050921219
x_lst = []
def append_x(x):
nonlocal x_lst
x_lst.append(x)
@jax.jit
def f(x):
def body(i):
io_callback(append_x, None, x, ordered=False)
io_callback(append_x, None, 2 * x, ordered=False)
return i + 1
jax.lax.while_loop(lambda i: i < 2, body, 0)
jax.vmap(f)(jnp.arange(3.)) # don't crash
jax.effects_barrier()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| IOCallbackTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.