language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/blip_2/test_processing_blip_2.py | {
"start": 878,
"end": 1537
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Blip2Processor
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class.from_pretrained("hf-internal-testing/tiny-random-ViTModel")
@staticmethod
def prepare_processor_dict():
return {"num_query_tokens": 1}
| Blip2ProcessorTest |
python | catalyst-team__catalyst | catalyst/contrib/losses/ce.py | {
"start": 119,
"end": 925
} | class ____(nn.Module):
"""@TODO: Docs. Contribution is welcome."""
def __init__(self, size_average=True):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.size_average = size_average
def forward(self, input_: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Calculates loss between ``input_`` and ``target`` tensors.
Args:
input_: input tensor of shape ...
target: target tensor of shape ...
@TODO: Docs (add shapes). Contribution is welcome.
"""
assert input_.size() == target.size()
input_ = F.log_softmax(input_)
loss = -torch.sum(input_ * target)
loss = loss / input_.size()[0] if self.size_average else loss
return loss
| NaiveCrossEntropyLoss |
python | walkccc__LeetCode | solutions/3367. Maximize Sum of Weights after Edge Removals/3367.py | {
"start": 0,
"end": 887
} | class ____:
def maximizeSumOfWeights(self, edges: list[list[int]], k: int) -> int:
graph = [[] for _ in range(len(edges) + 1)]
for u, v, w in edges:
graph[u].append((v, w))
graph[v].append((u, w))
def dfs(u: int, prev: int) -> tuple[int, int]:
"""
Returns
(the weight sum of the subtree rooted at u with at most k - 1 children,
the weight sum of the subtree rooted at u with at most k children).
"""
weightSum = 0
diffs = []
for v, w in graph[u]:
if v == prev:
continue
subK1, subK = dfs(v, u)
weightSum += subK
# If picking (u, v) makes the sum larger, we should pick it.
diffs.append(max(0, subK1 - subK + w))
return (weightSum + sum(heapq.nlargest(k - 1, diffs)),
weightSum + sum(heapq.nlargest(k, diffs)))
return dfs(0, -1)[1]
| Solution |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 12249,
"end": 12668
} | class ____(ConcreteTemplate):
cases = [signature(types.boolean, types.boolean)]
cases += [signature(types.boolean, op) for op in sorted(types.signed_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.unsigned_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.real_domain)]
cases += [signature(types.boolean, op) for op in sorted(types.complex_domain)]
| UnaryNot |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/automations.py | {
"start": 22498,
"end": 25145
} | class ____(PrefectBaseModel):
"""Represents one instance of a trigger firing"""
id: UUID = Field(default_factory=uuid7)
trigger: ServerTriggerTypes = Field(
default=..., description="The trigger that is firing"
)
trigger_states: Set[TriggerState] = Field(
default=...,
description="The state changes represented by this Firing",
)
triggered: DateTime = Field(
default=...,
description=(
"The time at which this trigger fired, which may differ from the "
"occurred time of the associated event (as events processing may always "
"be slightly delayed)."
),
)
triggering_labels: Dict[str, str] = Field(
default_factory=dict,
description=(
"The labels associated with this Firing, derived from the underlying "
"for_each values of the trigger. Only used in the context "
"of EventTriggers."
),
)
triggering_firings: List[Firing] = Field(
default_factory=list,
description=(
"The firings of the triggers that caused this trigger to fire. Only used "
"in the context of CompoundTriggers."
),
)
triggering_event: Optional[ReceivedEvent] = Field(
default=None,
description=(
"The most recent event associated with this Firing. This may be the "
"event that caused the trigger to fire (for Reactive triggers), or the "
"last event to match the trigger (for Proactive triggers), or the state "
"change event (for a Metric trigger)."
),
)
triggering_value: Optional[Any] = Field(
default=None,
description=(
"A value associated with this firing of a trigger. Maybe used to "
"convey additional information at the point of firing, like the value of "
"the last query for a MetricTrigger"
),
)
@field_validator("trigger_states")
@classmethod
def validate_trigger_states(cls, value: set[TriggerState]) -> set[TriggerState]:
if not value:
raise ValueError("At least one trigger state must be provided")
return value
def all_firings(self) -> Sequence[Firing]:
return [self] + [
f for child in self.triggering_firings for f in child.all_firings()
]
def all_events(self) -> Sequence[ReceivedEvent]:
events = [self.triggering_event] if self.triggering_event else []
return events + [
e for child in self.triggering_firings for e in child.all_events()
]
| Firing |
python | doocs__leetcode | lcof2/剑指 Offer II 038. 每日温度/Solution.py | {
"start": 0,
"end": 346
} | class ____:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
ans = [0] * len(temperatures)
stk = []
for i, t in enumerate(temperatures):
while stk and temperatures[stk[-1]] < t:
j = stk.pop()
ans[j] = i - j
stk.append(i)
return ans
| Solution |
python | astropy__astropy | astropy/table/connect.py | {
"start": 2760,
"end": 4574
} | class ____(registry.UnifiedReadWrite):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
Get help on the available writers for ``Table`` using the``help()`` method::
>>> Table.write.help() # Get help writing Table and list supported formats
>>> Table.write.help('fits') # Get detailed help on Table FITS writer
>>> Table.write.list_formats() # Print list of available formats
The ``serialize_method`` argument is explained in the section on
`Table serialization methods
<https://docs.astropy.org/en/latest/io/unified.html#table-serialization-methods>`_.
See also: https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "write", registry=None)
# uses default global registry
def __call__(self, *args, serialize_method=None, **kwargs):
instance = self._instance
with serialize_method_as(instance, serialize_method):
self.registry.write(instance, *args, **kwargs)
| TableWrite |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 95079,
"end": 102319
} | class ____(StrategyBase):
"""A list of devices with a state & compute distribution policy.
See [the guide](https://www.tensorflow.org/guide/distribute_strategy)
for overview and examples.
Note: Not all `tf.distribute.Strategy` implementations currently support
TensorFlow's partitioned variables (where a single variable is split across
multiple devices) at this time.
"""
def make_dataset_iterator(self, dataset):
"""Makes an iterator for input provided via `dataset`.
DEPRECATED: This method is not available in TF 2.x.
Data from the given dataset will be distributed evenly across all the
compute replicas. We will assume that the input dataset is batched by the
global batch size. With this assumption, we will make a best effort to
divide each batch across all the replicas (one or more workers).
If this effort fails, an error will be thrown, and the user should instead
use `make_input_fn_iterator` which provides more control to the user, and
does not try to divide a batch across replicas.
The user could also use `make_input_fn_iterator` if they want to
customize which input is fed to which replica/worker etc.
Args:
dataset: `tf.data.Dataset` that will be distributed evenly across all
replicas.
Returns:
An `tf.distribute.InputIterator` which returns inputs for each step of the
computation. User should call `initialize` on the returned iterator.
"""
return self._extended._make_dataset_iterator(dataset) # pylint: disable=protected-access
def make_input_fn_iterator(self, # pylint: disable=useless-super-delegation
input_fn,
replication_mode=InputReplicationMode.PER_WORKER):
"""Returns an iterator split across replicas created from an input function.
DEPRECATED: This method is not available in TF 2.x.
The `input_fn` should take an `tf.distribute.InputContext` object where
information about batching and input sharding can be accessed:
```
def input_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
with strategy.scope():
iterator = strategy.make_input_fn_iterator(input_fn)
replica_results = strategy.experimental_run(replica_fn, iterator)
```
The `tf.data.Dataset` returned by `input_fn` should have a per-replica
batch size, which may be computed using
`input_context.get_per_replica_batch_size`.
Args:
input_fn: A function taking a `tf.distribute.InputContext` object and
returning a `tf.data.Dataset`.
replication_mode: an enum value of `tf.distribute.InputReplicationMode`.
Only `PER_WORKER` is supported currently, which means there will be
a single call to `input_fn` per worker. Replicas will dequeue from the
local `tf.data.Dataset` on their worker.
Returns:
An iterator object that should first be `.initialize()`-ed. It may then
either be passed to `strategy.experimental_run()` or you can
`iterator.get_next()` to get the next value to pass to
`strategy.extended.call_for_each_replica()`.
"""
return super(StrategyV1, self).make_input_fn_iterator(
input_fn, replication_mode)
def experimental_make_numpy_dataset(self, numpy_input, session=None):
"""Makes a tf.data.Dataset for input provided via a numpy array.
This avoids adding `numpy_input` as a large constant in the graph,
and copies the data to the machine or machines that will be processing
the input.
Note that you will likely need to use
tf.distribute.Strategy.experimental_distribute_dataset
with the returned dataset to further distribute it with the strategy.
Example:
```
numpy_input = np.ones([10], dtype=np.float32)
dataset = strategy.experimental_make_numpy_dataset(numpy_input)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
```
Args:
numpy_input: A nest of NumPy input arrays that will be converted into a
dataset. Note that lists of Numpy arrays are stacked, as that is normal
`tf.data.Dataset` behavior.
session: (TensorFlow v1.x graph execution only) A session used for
initialization.
Returns:
A `tf.data.Dataset` representing `numpy_input`.
"""
return self.extended.experimental_make_numpy_dataset(
numpy_input, session=session)
@deprecated(
None,
"This method is not available in TF 2.x. Please switch to using `run` instead."
)
def experimental_run(self, fn, input_iterator=None): # pylint: disable=useless-super-delegation
"""Runs ops in `fn` on each replica, with inputs from `input_iterator`.
DEPRECATED: This method is not available in TF 2.x. Please switch
to using `run` instead.
When eager execution is enabled, executes ops specified by `fn` on each
replica. Otherwise, builds a graph to execute the ops on each replica.
Each replica will take a single, different input from the inputs provided by
one `get_next` call on the input iterator.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `replica_id_in_sync_group`.
IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being
used, and whether eager execution is enabled, `fn` may be called one or more
times (once for each replica).
Args:
fn: The function to run. The inputs to the function must match the outputs
of `input_iterator.get_next()`. The output must be a `tf.nest` of
`Tensor`s.
input_iterator: (Optional) input iterator from which the inputs are taken.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be `PerReplica` (if the values are unsynchronized),
`Mirrored` (if the values are kept in sync), or `Tensor` (if running on a
single replica).
"""
return super(StrategyV1, self).experimental_run(
fn, input_iterator)
def reduce(self, reduce_op, value, axis=None):
return super(StrategyV1, self).reduce(reduce_op, value, axis)
reduce.__doc__ = StrategyBase.reduce.__doc__
def update_config_proto(self, config_proto):
"""Returns a copy of `config_proto` modified for use with this strategy.
DEPRECATED: This method is not available in TF 2.x.
The updated config has something needed to run a strategy, e.g.
configuration to run collective ops, or device filters to improve
distributed training performance.
Args:
config_proto: a `tf.ConfigProto` object.
Returns:
The updated copy of the `config_proto`.
"""
return self._extended._update_config_proto(config_proto) # pylint: disable=protected-access
# NOTE(josh11b): For any strategy that needs to support tf.compat.v1,
# instead descend from StrategyExtendedV1.
@tf_export("distribute.StrategyExtended", v1=[])
| StrategyV1 |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 4215,
"end": 4348
} | class ____(ShowFieldTypeAndContent, Model2A):
objects = MyManager()
field4 = models.CharField(max_length=30)
| ModelWithMyManager |
python | explosion__spaCy | spacy/tests/test_errors.py | {
"start": 87,
"end": 333
} | class ____(metaclass=ErrorsWithCodes):
E001 = "error description"
def test_add_codes():
assert Errors.E001 == "[E001] error description"
with pytest.raises(AttributeError):
Errors.E002
assert isclass(Errors.__class__)
| Errors |
python | apache__airflow | providers/standard/src/airflow/providers/standard/hooks/filesystem.py | {
"start": 931,
"end": 2887
} | class ____(BaseHook):
"""
Allows for interaction with an file server.
Connection should have a name and a path specified under extra:
example:
Connection Id: fs_test
Connection Type: File (path)
Host, Schema, Login, Password, Port: empty
Extra: {"path": "/tmp"}
"""
conn_name_attr = "fs_conn_id"
default_conn_name = "fs_default"
conn_type = "fs"
hook_name = "File (path)"
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {"path": StringField(lazy_gettext("Path"), widget=BS3TextFieldWidget())}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["host", "schema", "port", "login", "password", "extra"],
"relabeling": {},
"placeholders": {},
}
def __init__(self, fs_conn_id: str = default_conn_name, **kwargs):
super().__init__(**kwargs)
conn = self.get_connection(fs_conn_id)
self.basepath = conn.extra_dejson.get("path", "")
self.conn = conn
def get_conn(self) -> None:
pass
def get_path(self) -> str:
"""
Get the path to the filesystem location.
:return: the path.
"""
return self.basepath
def test_connection(self):
"""Test File connection."""
try:
p = self.get_path()
if not p:
return False, "File Path is undefined."
if not Path(p).exists():
return False, f"Path {p} does not exist."
return True, f"Path {p} is existing."
except Exception as e:
return False, str(e)
| FSHook |
python | Farama-Foundation__Gymnasium | tests/vector/test_vector_env_info.py | {
"start": 3904,
"end": 5327
} | class ____(gym.Env):
def __init__(self, infos):
self.observation_space = Box(0, 1)
self.action_space = Box(0, 1)
self.infos = infos
def reset(
self,
*,
seed: int | None = None,
options: dict[str, Any] | None = None,
) -> tuple[ObsType, dict[str, Any]]:
return self.observation_space.sample(), self.infos[0]
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
return self.observation_space.sample(), 0, True, False, self.infos[1]
@pytest.mark.parametrize("vectorizer", [AsyncVectorEnv, SyncVectorEnv])
def test_vector_return_info(vectorizer):
vec_env = vectorizer(
[
lambda: ReturnInfoEnv([{"a": 1}, {"c": np.array([1, 2])}]),
lambda: ReturnInfoEnv([{"a": 2, "b": 3}, {"c": np.array([3, 4])}]),
]
)
reset_expected_infos = {
"a": np.array([1, 2]),
"b": np.array([0, 3]),
"_a": np.array([True, True]),
"_b": np.array([False, True]),
}
step_expected_infos = {
"c": np.array([[1, 2], [3, 4]]),
"_c": np.array([True, True]),
}
_, reset_info = vec_env.reset()
assert data_equivalence(reset_info, reset_expected_infos)
_, _, _, _, step_info = vec_env.step(vec_env.action_space.sample())
assert data_equivalence(step_info, step_expected_infos)
| ReturnInfoEnv |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 2651,
"end": 3378
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ApertusRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| ApertusRMSNorm |
python | python-poetry__poetry | src/poetry/inspection/lazy_wheel.py | {
"start": 1001,
"end": 1098
} | class ____(Exception):
"""Raised when a lazy wheel is unsupported."""
| LazyWheelUnsupportedError |
python | pytorch__pytorch | test/inductor/test_layout_optim.py | {
"start": 487,
"end": 1123
} | class ____(nn.Module):
def __init__(self, dim=512, manual_graph_break=False):
super().__init__()
self.conv1 = nn.Conv2d(3, dim, kernel_size=3, stride=2, bias=False)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=2, bias=False)
self.manual_graph_break = manual_graph_break
def forward(self, x):
x = self.conv1(x)
if self.manual_graph_break:
torch._dynamo.graph_break()
x = self.conv2(x)
return x
def get_example_inputs(self):
return (torch.rand(2, 3, 16, 16),)
@skipIfXpu(msg="ccl doesn't currently work on the XPU stack")
| Model2Conv |
python | getsentry__sentry | tests/sentry/auth/providers/fly/test_provider.py | {
"start": 344,
"end": 3026
} | class ____(TestCase):
def setUp(self) -> None:
self.auth_provider = AuthProvider.objects.create(
provider=ChannelName.FLY_IO.value, organization_id=self.organization.id
)
super().setUp()
def test_refresh_identity_without_refresh_token(self) -> None:
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider, user=self.user, data={"access_token": "access_token"}
)
provider = self.auth_provider.get_provider()
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
def test_build_config(self) -> None:
provider = self.auth_provider.get_provider()
resource = {"id": "nathans-org", "role": "member"}
result = provider.build_config(resource=resource)
assert result == {"org": {"id": "nathans-org"}}
assert provider.is_partner == (self.auth_provider.provider == ChannelName.FLY_IO.value)
def test_build_identity(self) -> None:
provider = self.auth_provider.get_provider()
data = {
"access_token": "fo1_6xgeCrB8ew8vFQ86vdaakBSFTVDGCzOUvebUbvgPGhI",
"token_type": "Bearer",
"expires_in": 7200,
"refresh_token": "PmUkAB75UPLKGZplERMq8WwOHnsTllZ5HveY4RvNUTk",
"scope": "read",
"created_at": 1686786353,
}
user_info = {
"resource_owner_id": "k9d01lp82rky6vo2",
"scope": ["read"],
"expires_in": 7200,
"application": {"uid": "elMJpuhA5bXbR59ZaKdXrxXGFVKTypGHuJ4h6Rfw1Qk"},
"created_at": 1686786353,
"user_id": "k9d01lp82rky6vo2",
"user_name": "Nathan",
"email": "k9d01lp82rky6vo2@customer.fly.io",
"organizations": [
{"id": "nathans-org", "role": "member"},
{"id": "0vogzmzoj1k5xp29", "role": "admin"},
],
}
state = {
"state": "9da4041848844e8088864eaea3c3a705",
"data": data,
"user": user_info,
}
expected_user_id = user_info["user_id"]
result = provider.build_identity(state)
assert result == {
"id": expected_user_id,
"email": user_info["email"],
"name": user_info["email"],
"data": provider.get_oauth_data(data),
"email_verified": False,
}
def test_audit_log_data(self) -> None:
audit_log_data = self.auth_provider.get_audit_log_data()
assert audit_log_data["provider"] == "fly"
assert audit_log_data["config"] == {}
@control_silo_test
| FlyOAuth2ProviderTest |
python | sympy__sympy | sympy/tensor/array/dense_ndim_array.py | {
"start": 4719,
"end": 6403
} | class ____(DenseNDimArray, MutableNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
flat_list = flatten(flat_list)
self = object.__new__(cls)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
return self
def __setitem__(self, index, value):
"""Allows to set items to MutableDenseNDimArray.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(2, 2)
>>> a[0,0] = 1
>>> a[1,1] = 1
>>> a
[[1, 0], [0, 1]]
"""
if isinstance(index, tuple) and any(isinstance(i, slice) for i in index):
value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value)
for i in eindices:
other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None]
self._array[self._parse_index(i)] = value[other_i]
else:
index = self._parse_index(index)
self._setter_iterable_check(value)
value = _sympify(value)
self._array[index] = value
def as_immutable(self):
return ImmutableDenseNDimArray(self)
@property
def free_symbols(self):
return {i for j in self._array for i in j.free_symbols}
| MutableDenseNDimArray |
python | scrapy__scrapy | tests/test_squeues_request.py | {
"start": 4568,
"end": 4752
} | class ____(TestRequestQueueBase):
is_fifo = False
@pytest.fixture
def q(self, crawler):
return LifoMemoryQueue.from_crawler(crawler=crawler)
| TestLifoMemoryQueueRequest |
python | wandb__wandb | wandb/vendor/pygments/lexers/jvm.py | {
"start": 18630,
"end": 21751
} | class ____(RegexLexer):
"""
For Gosu source code.
.. versionadded:: 1.5
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property\s+)(get|set)?', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Text, Name.Class)),
(r'(uses)(\s+)([\w.]+\*?)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[.#])([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_$]\w*', Name),
(r'and|or|not|[\\~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Text)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
| GosuLexer |
python | spack__spack | lib/spack/spack/error.py | {
"start": 6513,
"end": 6602
} | class ____(SpecError):
"""Raised when a spec file name is invalid."""
| SpecFilenameError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance19.py | {
"start": 280,
"end": 328
} | class ____(ABC, metaclass=Meta1):
pass
| Parent1 |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 93379,
"end": 114314
} | class ____(Request):
"""
Clone an existing task
:param task: ID of the task
:type task: str
:param new_task_name: The name of the cloned task. If not provided then taken
from the original task
:type new_task_name: str
:param new_task_comment: The comment of the cloned task. If not provided then
taken from the original task
:type new_task_comment: str
:param new_task_tags: The user-defined tags of the cloned task. If not provided
then taken from the original task
:type new_task_tags: Sequence[str]
:param new_task_system_tags: The system tags of the cloned task. If not
provided then empty
:type new_task_system_tags: Sequence[str]
:param new_task_parent: The parent of the cloned task. If not provided then
taken from the original task
:type new_task_parent: str
:param new_task_project: The project of the cloned task. If not provided then
taken from the original task
:type new_task_project: str
:param new_task_hyperparams: The hyper params for the new task. If not provided
then taken from the original task
:type new_task_hyperparams: dict
:param new_task_configuration: The configuration for the new task. If not
provided then taken from the original task
:type new_task_configuration: dict
:param execution_overrides: The execution params for the cloned task. The
params not specified are taken from the original task
:type execution_overrides: Execution
:param validate_references: If set to 'false' then the task fields that are
copied from the original task are not validated. The default is false.
:type validate_references: bool
:param new_project_name: Clone task to a new project by this name (only if
`new_task_project` is not provided). If a project by this name already exists,
task will be cloned to existing project.
:type new_project_name: str
:param new_task_input_models: The list of input models for the cloned task. If
not specifed then copied from the original task
:type new_task_input_models: Sequence[TaskModelItem]
:param new_task_container: The docker container properties for the new task. If
not provided then taken from the original task
:type new_task_container: dict
"""
_service = "tasks"
_action = "clone"
_version = "2.13"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {"description": "System defined type", "type": "string"},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {"description": "The task model name", "type": "string"},
},
"required": ["name", "model"],
"type": "object",
},
},
"properties": {
"execution_overrides": {
"$ref": "#/definitions/execution",
"description": "The execution params for the cloned task. The params not specified are taken from the original task",
},
"new_project_name": {
"description": "Clone task to a new project by this name (only if `new_task_project` is not provided). If a project by this name already exists, task will be cloned to existing project.",
"type": "string",
},
"new_task_comment": {
"description": "The comment of the cloned task. If not provided then taken from the original task",
"type": "string",
},
"new_task_configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "The configuration for the new task. If not provided then taken from the original task",
"type": "object",
},
"new_task_container": {
"additionalProperties": {"type": ["string", "null"]},
"description": "The docker container properties for the new task. If not provided then taken from the original task",
"type": "object",
},
"new_task_hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "The hyper params for the new task. If not provided then taken from the original task",
"type": "object",
},
"new_task_input_models": {
"description": "The list of input models for the cloned task. If not specifed then copied from the original task",
"items": {"$ref": "#/definitions/task_model_item"},
"type": "array",
},
"new_task_name": {
"description": "The name of the cloned task. If not provided then taken from the original task",
"type": "string",
},
"new_task_parent": {
"description": "The parent of the cloned task. If not provided then taken from the original task",
"type": "string",
},
"new_task_project": {
"description": "The project of the cloned task. If not provided then taken from the original task",
"type": "string",
},
"new_task_system_tags": {
"description": "The system tags of the cloned task. If not provided then empty",
"items": {"type": "string"},
"type": "array",
},
"new_task_tags": {
"description": "The user-defined tags of the cloned task. If not provided then taken from the original task",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
"validate_references": {
"description": "If set to 'false' then the task fields that are copied from the original task are not validated. The default is false.",
"type": "boolean",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
new_task_name: Optional[str] = None,
new_task_comment: Optional[str] = None,
new_task_tags: Optional[List[str]] = None,
new_task_system_tags: Optional[List[str]] = None,
new_task_parent: Optional[str] = None,
new_task_project: Optional[str] = None,
new_task_hyperparams: Optional[dict] = None,
new_task_configuration: Optional[dict] = None,
execution_overrides: Any = None,
validate_references: Optional[bool] = None,
new_project_name: Optional[str] = None,
new_task_input_models: Optional[List[Any]] = None,
new_task_container: Optional[dict] = None,
**kwargs: Any
) -> None:
super(CloneRequest, self).__init__(**kwargs)
self.task = task
self.new_task_name = new_task_name
self.new_task_comment = new_task_comment
self.new_task_tags = new_task_tags
self.new_task_system_tags = new_task_system_tags
self.new_task_parent = new_task_parent
self.new_task_project = new_task_project
self.new_task_hyperparams = new_task_hyperparams
self.new_task_configuration = new_task_configuration
self.execution_overrides = execution_overrides
self.validate_references = validate_references
self.new_project_name = new_project_name
self.new_task_input_models = new_task_input_models
self.new_task_container = new_task_container
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("new_task_name")
def new_task_name(self) -> Optional[str]:
return self._property_new_task_name
@new_task_name.setter
def new_task_name(self, value: Optional[str]) -> None:
if value is None:
self._property_new_task_name = None
return
self.assert_isinstance(value, "new_task_name", six.string_types)
self._property_new_task_name = value
@schema_property("new_task_comment")
def new_task_comment(self) -> Optional[str]:
return self._property_new_task_comment
@new_task_comment.setter
def new_task_comment(self, value: Optional[str]) -> None:
if value is None:
self._property_new_task_comment = None
return
self.assert_isinstance(value, "new_task_comment", six.string_types)
self._property_new_task_comment = value
@schema_property("new_task_tags")
def new_task_tags(self) -> Optional[List[str]]:
return self._property_new_task_tags
@new_task_tags.setter
def new_task_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_new_task_tags = None
return
self.assert_isinstance(value, "new_task_tags", (list, tuple))
self.assert_isinstance(value, "new_task_tags", six.string_types, is_array=True)
self._property_new_task_tags = value
@schema_property("new_task_system_tags")
def new_task_system_tags(self) -> Optional[List[str]]:
return self._property_new_task_system_tags
@new_task_system_tags.setter
def new_task_system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_new_task_system_tags = None
return
self.assert_isinstance(value, "new_task_system_tags", (list, tuple))
self.assert_isinstance(value, "new_task_system_tags", six.string_types, is_array=True)
self._property_new_task_system_tags = value
@schema_property("new_task_parent")
def new_task_parent(self) -> Optional[str]:
return self._property_new_task_parent
@new_task_parent.setter
def new_task_parent(self, value: Optional[str]) -> None:
if value is None:
self._property_new_task_parent = None
return
self.assert_isinstance(value, "new_task_parent", six.string_types)
self._property_new_task_parent = value
@schema_property("new_task_project")
def new_task_project(self) -> Optional[str]:
return self._property_new_task_project
@new_task_project.setter
def new_task_project(self, value: Optional[str]) -> None:
if value is None:
self._property_new_task_project = None
return
self.assert_isinstance(value, "new_task_project", six.string_types)
self._property_new_task_project = value
@schema_property("new_task_hyperparams")
def new_task_hyperparams(self) -> Optional[dict]:
return self._property_new_task_hyperparams
@new_task_hyperparams.setter
def new_task_hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_new_task_hyperparams = None
return
self.assert_isinstance(value, "new_task_hyperparams", (dict,))
self._property_new_task_hyperparams = value
@schema_property("new_task_configuration")
def new_task_configuration(self) -> Optional[dict]:
return self._property_new_task_configuration
@new_task_configuration.setter
def new_task_configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_new_task_configuration = None
return
self.assert_isinstance(value, "new_task_configuration", (dict,))
self._property_new_task_configuration = value
@schema_property("execution_overrides")
def execution_overrides(self) -> Any:
return self._property_execution_overrides
@execution_overrides.setter
def execution_overrides(self, value: Any) -> None:
if value is None:
self._property_execution_overrides = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution_overrides", Execution)
self._property_execution_overrides = value
@schema_property("validate_references")
def validate_references(self) -> Optional[bool]:
return self._property_validate_references
@validate_references.setter
def validate_references(self, value: Optional[bool]) -> None:
if value is None:
self._property_validate_references = None
return
self.assert_isinstance(value, "validate_references", (bool,))
self._property_validate_references = value
@schema_property("new_project_name")
def new_project_name(self) -> Optional[str]:
return self._property_new_project_name
@new_project_name.setter
def new_project_name(self, value: Optional[str]) -> None:
if value is None:
self._property_new_project_name = None
return
self.assert_isinstance(value, "new_project_name", six.string_types)
self._property_new_project_name = value
@schema_property("new_task_input_models")
def new_task_input_models(self) -> Optional[List[Any]]:
return self._property_new_task_input_models
@new_task_input_models.setter
def new_task_input_models(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_new_task_input_models = None
return
self.assert_isinstance(value, "new_task_input_models", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [TaskModelItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "new_task_input_models", TaskModelItem, is_array=True)
self._property_new_task_input_models = value
@schema_property("new_task_container")
def new_task_container(self) -> Optional[dict]:
return self._property_new_task_container
@new_task_container.setter
def new_task_container(self, value: Optional[dict]) -> None:
if value is None:
self._property_new_task_container = None
return
self.assert_isinstance(value, "new_task_container", (dict,))
self._property_new_task_container = value
| CloneRequest |
python | kamyu104__LeetCode-Solutions | Python/friend-circles.py | {
"start": 31,
"end": 953
} | class ____(object):
def findCircleNum(self, M):
"""
:type M: List[List[int]]
:rtype: int
"""
class UnionFind(object):
def __init__(self, n):
self.set = range(n)
self.count = n
def find_set(self, x):
if self.set[x] != x:
self.set[x] = self.find_set(self.set[x]) # path compression.
return self.set[x]
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root != y_root:
self.set[min(x_root, y_root)] = max(x_root, y_root)
self.count -= 1
circles = UnionFind(len(M))
for i in xrange(len(M)):
for j in xrange(len(M)):
if M[i][j] and i != j:
circles.union_set(i, j)
return circles.count
| Solution |
python | facebook__pyre-check | documentation/examples/pytorch/sources/_torch/__init__.py | {
"start": 633,
"end": 693
} | class ____(Generic[DType, Shape], torch.Tensor):
pass
| Tensor |
python | ipython__ipython | IPython/core/completer.py | {
"start": 17104,
"end": 17899
} | class ____:
"""Completion item to be included in the dictionary returned by new-style Matcher (API v2).
.. warning::
Provisional
This class is used to describe the currently supported attributes of
simple completion items, and any additional implementation details
should not be relied on. Additional attributes may be included in
future versions, and meaning of text disambiguated from the current
dual meaning of "text to insert" and "text to used as a label".
"""
__slots__ = ["text", "type"]
def __init__(self, text: str, *, type: Optional[str] = None):
self.text = text
self.type = type
def __repr__(self):
return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
| SimpleCompletion |
python | doocs__leetcode | solution/0900-0999/0967.Numbers With Same Consecutive Differences/Solution.py | {
"start": 0,
"end": 482
} | class ____:
def numsSameConsecDiff(self, n: int, k: int) -> List[int]:
def dfs(x: int):
if x >= boundary:
ans.append(x)
return
last = x % 10
if last + k <= 9:
dfs(x * 10 + last + k)
if last - k >= 0 and k != 0:
dfs(x * 10 + last - k)
ans = []
boundary = 10 ** (n - 1)
for i in range(1, 10):
dfs(i)
return ans
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_leap_year.py | {
"start": 634,
"end": 1629
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_leap_year"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_leap_year(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidLeapYear |
python | scrapy__scrapy | tests/spiders.py | {
"start": 16107,
"end": 16306
} | class ____(BytesReceivedCallbackSpider):
def bytes_received(self, data, request, spider):
self.meta["bytes_received"] = data
raise StopDownload(fail=True)
| BytesReceivedErrbackSpider |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 5154,
"end": 6603
} | class ____(StringField):
"""A field that validates input as an URL."""
_URL_REGEX = LazyRegexCompiler(
r"^(?:[a-z0-9\.\-]*)://" # scheme is validated separately
r"(?:(?:[A-Z0-9](?:[A-Z0-9-_]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|" # domain...
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|" # ...or ipv4
r"\[?[A-F0-9]*:[A-F0-9:]+\]?)" # ...or ipv6
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
_URL_SCHEMES = ["http", "https", "ftp", "ftps"]
def __init__(self, url_regex=None, schemes=None, **kwargs):
"""
:param url_regex: (optional) Overwrite the default regex used for validation
:param schemes: (optional) Overwrite the default URL schemes that are allowed
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.url_regex = url_regex or self._URL_REGEX
self.schemes = schemes or self._URL_SCHEMES
super().__init__(**kwargs)
def validate(self, value):
# Check first if the scheme is valid
scheme = value.split("://")[0].lower()
if scheme not in self.schemes:
self.error(f"Invalid scheme {scheme} in URL: {value}")
# Then check full URL
if not self.url_regex.match(value):
self.error(f"Invalid URL: {value}")
| URLField |
python | pandas-dev__pandas | pandas/tests/extension/base/ops.py | {
"start": 4338,
"end": 7694
} | class ____(BaseOpsUtil):
"""
Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc: type[Exception] | None = TypeError
frame_scalar_exc: type[Exception] | None = TypeError
series_array_exc: type[Exception] | None = TypeError
divmod_exc: type[Exception] | None = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):
pytest.skip("Skip testing Python string formatting")
op_name = all_arithmetic_operators
ser = pd.Series(data)
self.check_opname(ser, op_name, ser.iloc[0])
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):
pytest.skip("Skip testing Python string formatting")
op_name = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self.check_opname(df, op_name, data[0])
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
ser = pd.Series(data)
self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser)))
def test_divmod(self, data):
ser = pd.Series(data)
self._check_divmod_op(ser, divmod, 1)
self._check_divmod_op(1, ops.rdivmod, ser)
def test_divmod_series_array(self, data, data_for_twos):
ser = pd.Series(data)
self._check_divmod_op(ser, divmod, data)
other = data_for_twos
self._check_divmod_op(other, ops.rdivmod, ser)
other = pd.Series(other)
self._check_divmod_op(other, ops.rdivmod, ser)
def test_add_series_with_extension_array(self, data):
# Check adding an ExtensionArray to a Series of the same dtype matches
# the behavior of adding the arrays directly and then wrapping in a
# Series.
ser = pd.Series(data)
exc = self._get_expected_exception("__add__", ser, data)
if exc is not None:
with pytest.raises(exc):
ser + data
return
result = ser + data
expected = pd.Series(data + data)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame, pd.Index])
@pytest.mark.parametrize(
"op_name",
[
x
for x in tm.arithmetic_dunder_methods + tm.comparison_dunder_methods
if not x.startswith("__r")
],
)
def test_direct_arith_with_ndframe_returns_not_implemented(
self, data, box, op_name
):
# EAs should return NotImplemented for ops with Series/DataFrame/Index
# Pandas takes care of unboxing the series and calling the EA's op.
other = box(data)
if hasattr(data, op_name):
result = getattr(data, op_name)(other)
assert result is NotImplemented
| BaseArithmeticOpsTests |
python | tqdm__tqdm | tqdm/std.py | {
"start": 1533,
"end": 1638
} | class ____(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
| TqdmDeprecationWarning |
python | getsentry__sentry | src/sentry/users/api/serializers/identity.py | {
"start": 604,
"end": 1025
} | class ____(Serializer):
def serialize(
self,
obj: Identity,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> IdentitySerializerResponse:
return {
"id": str(obj.id),
"identityProvider": serialize(obj.idp),
"externalId": obj.external_id,
"status": obj.status,
}
| IdentitySerializer |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/docstore/types.py | {
"start": 709,
"end": 8404
} | class ____(ABC):
# ===== Save/load =====
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the docstore to a file."""
# ===== Main interface =====
@property
@abstractmethod
def docs(self) -> Dict[str, BaseNode]: ...
@abstractmethod
def add_documents(
self,
docs: Sequence[BaseNode],
allow_update: bool = True,
batch_size: int = DEFAULT_BATCH_SIZE,
store_text: bool = True,
) -> None: ...
@abstractmethod
async def async_add_documents(
self,
docs: Sequence[BaseNode],
allow_update: bool = True,
batch_size: int = DEFAULT_BATCH_SIZE,
store_text: bool = True,
) -> None: ...
@abstractmethod
def get_document(
self, doc_id: str, raise_error: bool = True
) -> Optional[BaseNode]: ...
@abstractmethod
async def aget_document(
self, doc_id: str, raise_error: bool = True
) -> Optional[BaseNode]: ...
@abstractmethod
def delete_document(self, doc_id: str, raise_error: bool = True) -> None:
"""Delete a document from the store."""
...
@abstractmethod
async def adelete_document(self, doc_id: str, raise_error: bool = True) -> None:
"""Delete a document from the store."""
...
@abstractmethod
def document_exists(self, doc_id: str) -> bool: ...
@abstractmethod
async def adocument_exists(self, doc_id: str) -> bool: ...
# ===== Hash =====
@abstractmethod
def set_document_hash(self, doc_id: str, doc_hash: str) -> None: ...
@abstractmethod
async def aset_document_hash(self, doc_id: str, doc_hash: str) -> None: ...
@abstractmethod
def set_document_hashes(self, doc_hashes: Dict[str, str]) -> None: ...
@abstractmethod
async def aset_document_hashes(self, doc_hashes: Dict[str, str]) -> None: ...
@abstractmethod
def get_document_hash(self, doc_id: str) -> Optional[str]: ...
@abstractmethod
async def aget_document_hash(self, doc_id: str) -> Optional[str]: ...
@abstractmethod
def get_all_document_hashes(self) -> Dict[str, str]: ...
@abstractmethod
async def aget_all_document_hashes(self) -> Dict[str, str]: ...
# ==== Ref Docs =====
@abstractmethod
def get_all_ref_doc_info(self) -> Optional[Dict[str, RefDocInfo]]:
"""Get a mapping of ref_doc_id -> RefDocInfo for all ingested documents."""
@abstractmethod
async def aget_all_ref_doc_info(self) -> Optional[Dict[str, RefDocInfo]]:
"""Get a mapping of ref_doc_id -> RefDocInfo for all ingested documents."""
@abstractmethod
def get_ref_doc_info(self, ref_doc_id: str) -> Optional[RefDocInfo]:
"""Get the RefDocInfo for a given ref_doc_id."""
@abstractmethod
async def aget_ref_doc_info(self, ref_doc_id: str) -> Optional[RefDocInfo]:
"""Get the RefDocInfo for a given ref_doc_id."""
@abstractmethod
def delete_ref_doc(self, ref_doc_id: str, raise_error: bool = True) -> None:
"""Delete a ref_doc and all it's associated nodes."""
@abstractmethod
async def adelete_ref_doc(self, ref_doc_id: str, raise_error: bool = True) -> None:
"""Delete a ref_doc and all it's associated nodes."""
# ===== Nodes =====
def get_nodes(
self, node_ids: List[str], raise_error: bool = True
) -> List[BaseNode]:
"""
Get nodes from docstore.
Args:
node_ids (List[str]): node ids
raise_error (bool): raise error if node_id not found
"""
nodes: list[BaseNode] = []
for node_id in node_ids:
# if needed for type checking
if not raise_error:
if node := self.get_node(node_id=node_id, raise_error=False):
nodes.append(node)
continue
nodes.append(self.get_node(node_id=node_id, raise_error=True))
return nodes
async def aget_nodes(
self, node_ids: List[str], raise_error: bool = True
) -> List[BaseNode]:
"""
Get nodes from docstore.
Args:
node_ids (List[str]): node ids
raise_error (bool): raise error if node_id not found
"""
nodes: list[BaseNode] = []
for node_id in node_ids:
# if needed for type checking
if not raise_error:
if node := await self.aget_node(node_id=node_id, raise_error=False):
nodes.append(node)
continue
nodes.append(await self.aget_node(node_id=node_id, raise_error=True))
return nodes
@overload
def get_node(self, node_id: str, raise_error: Literal[True] = True) -> BaseNode: ...
@overload
def get_node(
self, node_id: str, raise_error: Literal[False] = False
) -> Optional[BaseNode]: ...
def get_node(self, node_id: str, raise_error: bool = True) -> Optional[BaseNode]:
"""
Get node from docstore.
Args:
node_id (str): node id
raise_error (bool): raise error if node_id not found
"""
doc = self.get_document(node_id, raise_error=raise_error)
if doc is None:
# The doc store should have raised an error if the node_id is not found, but it didn't
# so we raise an error here
if raise_error:
raise ValueError(f"Node {node_id} not found")
return None
# The document should always be a BaseNode, but we check to be safe
if not isinstance(doc, BaseNode):
raise ValueError(f"Document {node_id} is not a Node.")
return doc
@overload
async def aget_node(
self, node_id: str, raise_error: Literal[True] = True
) -> BaseNode: ...
@overload
async def aget_node(
self, node_id: str, raise_error: Literal[False] = False
) -> Optional[BaseNode]: ...
async def aget_node(
self, node_id: str, raise_error: bool = True
) -> Optional[BaseNode]:
"""
Get node from docstore.
Args:
node_id (str): node id
raise_error (bool): raise error if node_id not found
"""
doc = await self.aget_document(node_id, raise_error=raise_error)
if doc is None:
# The doc store should have raised an error if the node_id is not found, but it didn't
# so we raise an error here
if raise_error:
raise ValueError(f"Node {node_id} not found")
return None
# The document should always be a BaseNode, but we check to be safe
if not isinstance(doc, BaseNode):
raise ValueError(f"Document {node_id} is not a Node.")
return doc
def get_node_dict(self, node_id_dict: Dict[int, str]) -> Dict[int, BaseNode]:
"""
Get node dict from docstore given a mapping of index to node ids.
Args:
node_id_dict (Dict[int, str]): mapping of index to node ids
"""
return {
index: self.get_node(node_id) for index, node_id in node_id_dict.items()
}
async def aget_node_dict(self, node_id_dict: Dict[int, str]) -> Dict[int, BaseNode]:
"""
Get node dict from docstore given a mapping of index to node ids.
Args:
node_id_dict (Dict[int, str]): mapping of index to node ids
"""
return {
index: await self.aget_node(node_id)
for index, node_id in node_id_dict.items()
}
| BaseDocumentStore |
python | ipython__ipython | IPython/core/payload.py | {
"start": 892,
"end": 1763
} | class ____(Configurable):
_payload: List = List([])
def write_payload(self, data, single=True):
"""Include or update the specified `data` payload in the PayloadManager.
If a previous payload with the same source exists and `single` is True,
it will be overwritten with the new one.
"""
if not isinstance(data, dict):
raise TypeError('Each payload write must be a dict, got: %r' % data)
if single and 'source' in data:
source = data['source']
for i, pl in enumerate(self._payload):
if 'source' in pl and pl['source'] == source:
self._payload[i] = data
return
self._payload.append(data)
def read_payload(self):
return self._payload
def clear_payload(self):
self._payload = []
| PayloadManager |
python | anthropics__anthropic-sdk-python | tests/lib/test_bedrock.py | {
"start": 598,
"end": 6141
} | class ____(TypedDict):
# Available regions: https://docs.aws.amazon.com/global-infrastructure/latest/regions/aws-regions.html#available-regions
name: t.Union[t.Literal["default"], str]
region: str
def profile_to_ini(profile: AwsConfigProfile) -> str:
"""
Convert an AWS config profile to an INI format string.
"""
profile_name = profile["name"] if profile["name"] == "default" else f"profile {profile['name']}"
return f"[{profile_name}]\nregion = {profile['region']}\n"
@pytest.fixture
def profiles() -> t.List[AwsConfigProfile]:
return [
{"name": "default", "region": "us-east-2"},
]
@pytest.fixture
def mock_aws_config(
profiles: t.List[AwsConfigProfile],
monkeypatch: t.Any,
) -> t.Iterable[None]:
with tempfile.NamedTemporaryFile(mode="w+", delete=True) as temp_file:
for profile in profiles:
temp_file.write(profile_to_ini(profile))
temp_file.flush()
monkeypatch.setenv("AWS_CONFIG_FILE", str(temp_file.name))
yield
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.respx()
def test_messages_retries(respx_mock: MockRouter) -> None:
respx_mock.post(re.compile(r"https://bedrock-runtime\.us-east-1\.amazonaws\.com/model/.*/invoke")).mock(
side_effect=[
httpx.Response(500, json={"error": "server error"}, headers={"retry-after-ms": "10"}),
httpx.Response(200, json={"foo": "bar"}),
]
)
sync_client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="anthropic.claude-3-5-sonnet-20241022-v2:0",
)
calls = cast("list[MockRequestCall]", respx_mock.calls)
assert len(calls) == 2
assert (
calls[0].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-5-sonnet-20241022-v2:0/invoke"
)
assert (
calls[1].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-5-sonnet-20241022-v2:0/invoke"
)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.respx()
@pytest.mark.asyncio()
async def test_messages_retries_async(respx_mock: MockRouter) -> None:
respx_mock.post(re.compile(r"https://bedrock-runtime\.us-east-1\.amazonaws\.com/model/.*/invoke")).mock(
side_effect=[
httpx.Response(500, json={"error": "server error"}, headers={"retry-after-ms": "10"}),
httpx.Response(200, json={"foo": "bar"}),
]
)
await async_client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="anthropic.claude-3-5-sonnet-20241022-v2:0",
)
calls = cast("list[MockRequestCall]", respx_mock.calls)
assert len(calls) == 2
assert (
calls[0].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-5-sonnet-20241022-v2:0/invoke"
)
assert (
calls[1].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-5-sonnet-20241022-v2:0/invoke"
)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.respx()
def test_application_inference_profile(respx_mock: MockRouter) -> None:
respx_mock.post(re.compile(r"https://bedrock-runtime\.us-east-1\.amazonaws\.com/model/.*/invoke")).mock(
side_effect=[
httpx.Response(500, json={"error": "server error"}, headers={"retry-after-ms": "10"}),
httpx.Response(200, json={"foo": "bar"}),
]
)
sync_client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/jf2sje1c0jnb",
)
calls = cast("list[MockRequestCall]", respx_mock.calls)
assert len(calls) == 2
assert (
calls[0].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/arn:aws:bedrock:us-east-1:123456789012:application-inference-profile%2Fjf2sje1c0jnb/invoke"
)
assert (
calls[1].request.url
== "https://bedrock-runtime.us-east-1.amazonaws.com/model/arn:aws:bedrock:us-east-1:123456789012:application-inference-profile%2Fjf2sje1c0jnb/invoke"
)
def test_region_infer_from_profile(
mock_aws_config: None, # noqa: ARG001
profiles: t.List[AwsConfigProfile],
) -> None:
client = AnthropicBedrock()
assert client.aws_region == profiles[0]["region"]
@pytest.mark.parametrize(
"profiles, aws_profile",
[
pytest.param([{"name": "default", "region": "us-east-2"}], "default", id="default profile"),
pytest.param(
[{"name": "default", "region": "us-east-2"}, {"name": "custom", "region": "us-west-1"}],
"custom",
id="custom profile",
),
],
)
def test_region_infer_from_specified_profile(
mock_aws_config: None, # noqa: ARG001
profiles: t.List[AwsConfigProfile],
aws_profile: str,
monkeypatch: t.Any,
) -> None:
monkeypatch.setenv("AWS_PROFILE", aws_profile)
client = AnthropicBedrock()
assert client.aws_region == next(profile for profile in profiles if profile["name"] == aws_profile)["region"]
| AwsConfigProfile |
python | PyCQA__pydocstyle | src/pydocstyle/config.py | {
"start": 4575,
"end": 33218
} | class ____:
"""Responsible for parsing configuration from files and CLI.
There are 2 types of configurations: Run configurations and Check
configurations.
Run Configurations:
------------------
Responsible for deciding things that are related to the user interface and
configuration discovery, e.g. verbosity, debug options, etc.
All run configurations default to `False` or `None` and are decided only
by CLI.
Check Configurations:
--------------------
Configurations that are related to which files and errors will be checked.
These are configurable in 2 ways: using the CLI, and using configuration
files.
Configuration files are nested within the file system, meaning that the
closer a configuration file is to a checked file, the more relevant it will
be. For instance, imagine this directory structure:
A
+-- tox.ini: sets `select=D100`
+-- B
+-- foo.py
+-- tox.ini: sets `add-ignore=D100`
Then `foo.py` will not be checked for `D100`.
The configuration build algorithm is described in `self._get_config`.
Note: If any of `BASE_ERROR_SELECTION_OPTIONS` was selected in the CLI, all
configuration files will be ignored and each file will be checked for
the error codes supplied in the CLI.
"""
CONFIG_FILE_OPTIONS = (
'convention',
'select',
'ignore',
'add-select',
'add-ignore',
'match',
'match-dir',
'ignore-decorators',
'ignore-self-only-init',
)
BASE_ERROR_SELECTION_OPTIONS = ('ignore', 'select', 'convention')
DEFAULT_MATCH_RE = r'(?!test_).*\.py'
DEFAULT_MATCH_DIR_RE = r'[^\.].*'
DEFAULT_IGNORE_DECORATORS_RE = ''
DEFAULT_PROPERTY_DECORATORS = (
"property,cached_property,functools.cached_property"
)
DEFAULT_CONVENTION = conventions.pep257
DEFAULT_IGNORE_SELF_ONLY_INIT = False
PROJECT_CONFIG_FILES = (
'setup.cfg',
'tox.ini',
'.pydocstyle',
'.pydocstyle.ini',
'.pydocstylerc',
'.pydocstylerc.ini',
'pyproject.toml',
# The following is deprecated, but remains for backwards compatibility.
'.pep257',
)
POSSIBLE_SECTION_NAMES = ('pydocstyle', 'pep257')
def __init__(self):
"""Create a configuration parser."""
self._cache = {}
self._override_by_cli = None
self._options = self._arguments = self._run_conf = None
self._parser = self._create_option_parser()
# ---------------------------- Public Methods -----------------------------
def get_default_run_configuration(self):
"""Return a `RunConfiguration` object set with default values."""
options, _ = self._parse_args([])
return self._create_run_config(options)
def parse(self):
"""Parse the configuration.
If one of `BASE_ERROR_SELECTION_OPTIONS` was selected, overrides all
error codes to check and disregards any error code related
configurations from the configuration files.
"""
self._options, self._arguments = self._parse_args()
self._arguments = self._arguments or ['.']
if not self._validate_options(self._options):
raise IllegalConfiguration()
self._run_conf = self._create_run_config(self._options)
config = self._create_check_config(self._options, use_defaults=False)
self._override_by_cli = config
@check_initialized
def get_user_run_configuration(self):
"""Return the run configuration for the script."""
return self._run_conf
@check_initialized
def get_files_to_check(self):
    """Generate files and error codes to check on each one.

    Walk dir trees under `self._arguments` and yield file names
    that `match` under each directory that `match_dir`.

    The method locates the configuration for each file name and yields a
    tuple of (filename, [error_codes]).

    With every discovery of a new configuration file `IllegalConfiguration`
    might be raised.
    """

    def _get_matches(conf):
        """Return the `match` and `match_dir` functions for `config`."""
        # NOTE(review): `re` here appears to be an alias for a compiled-
        # regex factory (e.g. re.compile) imported at module level —
        # confirm against the file's imports. The '$' anchors force a
        # full-name match rather than a prefix match.
        match_func = re(conf.match + '$').match
        match_dir_func = re(conf.match_dir + '$').match
        return match_func, match_dir_func

    def _get_ignore_decorators(conf):
        """Return the `ignore_decorators` as None or regex."""
        return (
            re(conf.ignore_decorators) if conf.ignore_decorators else None
        )

    def _get_property_decorators(conf):
        """Return the `property_decorators` as None or set."""
        return (
            set(conf.property_decorators.split(","))
            if conf.property_decorators
            else None
        )

    for name in self._arguments:
        if os.path.isdir(name):
            for root, dirs, filenames in os.walk(name):
                # Each directory may carry its own (discovered) config.
                config = self._get_config(os.path.abspath(root))
                match, match_dir = _get_matches(config)
                ignore_decorators = _get_ignore_decorators(config)
                property_decorators = _get_property_decorators(config)

                # Skip any dirs that do not match match_dir.
                # In-place slice assignment prunes os.walk's traversal.
                dirs[:] = [d for d in dirs if match_dir(d)]

                for filename in map(os.path.basename, filenames):
                    if match(filename):
                        full_path = os.path.join(root, filename)
                        yield (
                            full_path,
                            list(config.checked_codes),
                            ignore_decorators,
                            property_decorators,
                            config.ignore_self_only_init,
                        )
        else:
            # A single file argument: resolve config for its directory
            # and apply only the `match` filter (not `match_dir`).
            config = self._get_config(os.path.abspath(name))
            match, _ = _get_matches(config)
            ignore_decorators = _get_ignore_decorators(config)
            property_decorators = _get_property_decorators(config)
            if match(os.path.basename(name)):
                yield (
                    name,
                    list(config.checked_codes),
                    ignore_decorators,
                    property_decorators,
                    config.ignore_self_only_init,
                )
# --------------------------- Private Methods -----------------------------
def _get_config_by_discovery(self, node):
    """Get a configuration for checking `node` by config discovery.

    Config discovery happens when no explicit config file is specified. The
    file system is searched for config files starting from the directory
    containing the file being checked, and up until the root directory of
    the project.

    See `_get_config` for further details.
    """
    path = self._get_node_dir(node)

    if path in self._cache:
        return self._cache[path]

    config_file = self._get_config_file_in_folder(path)

    if config_file is None:
        parent_dir, tail = os.path.split(path)
        if tail:
            # No configuration file, simply take the parent's.
            # (Recurses via `_get_config`, which also caches.)
            config = self._get_config(parent_dir)
        else:
            # There's no configuration file and no parent directory.
            # Use the default configuration or the one given in the CLI.
            config = self._create_check_config(self._options)
    else:
        # There's a config file! Read it and merge if necessary.
        options, inherit = self._read_configuration_file(config_file)

        parent_dir, tail = os.path.split(path)
        if tail and inherit:
            # There is a parent dir and we should try to merge.
            parent_config = self._get_config(parent_dir)
            config = self._merge_configuration(parent_config, options)
        else:
            # No need to merge or parent dir does not exist.
            config = self._create_check_config(options)

    # Caching is performed by the caller (`_get_config`), not here.
    return config
def _get_config(self, node):
    """Get and cache the run configuration for `node`.

    If no configuration exists (not local and not for the parent node),
    returns and caches a default configuration.

    The algorithm:
    -------------
    * If the current directory's configuration exists in
      `self._cache` - return it.
    * If a configuration file does not exist in this directory:
      * If the directory is not a root directory:
        * Cache its configuration as this directory's and return it.
      * Else:
        * Cache a default configuration and return it.
    * Else:
      * Read the configuration file.
      * If a parent directory exists AND the configuration file
        allows inheritance:
        * Read the parent configuration by calling this function with the
          parent directory as `node`.
        * Merge the parent configuration with the current one and
          cache it.
    * If the user has specified one of `BASE_ERROR_SELECTION_OPTIONS` in
      the CLI - return the CLI configuration with the configuration match
      clauses
    * Set the `--add-select` and `--add-ignore` CLI configurations.
    """
    if self._run_conf.config is None:
        log.debug('No config file specified, discovering.')
        config = self._get_config_by_discovery(node)
    else:
        log.debug('Using config file %r', self._run_conf.config)
        if not os.path.exists(self._run_conf.config):
            raise IllegalConfiguration(
                'Configuration file {!r} specified '
                'via --config was not found.'.format(self._run_conf.config)
            )

        # With an explicit --config file the result is path-independent,
        # so a single cache slot (keyed by None) suffices.
        if None in self._cache:
            return self._cache[None]
        options, _ = self._read_configuration_file(self._run_conf.config)

        if options is None:
            log.warning(
                'Configuration file does not contain a '
                'pydocstyle section. Using default configuration.'
            )
            config = self._create_check_config(self._options)
        else:
            config = self._create_check_config(options)

    # Make the CLI always win: any field explicitly set on the command
    # line (stored in `_override_by_cli` by `parse`) replaces the
    # file-derived value.
    final_config = {}
    for attr in CheckConfiguration._fields:
        cli_val = getattr(self._override_by_cli, attr)
        conf_val = getattr(config, attr)
        final_config[attr] = cli_val if cli_val is not None else conf_val

    config = CheckConfiguration(**final_config)

    self._set_add_options(config.checked_codes, self._options)

    # Handle caching
    if self._run_conf.config is not None:
        self._cache[None] = config
    else:
        self._cache[self._get_node_dir(node)] = config
    return config
@staticmethod
def _get_node_dir(node):
"""Return the absolute path of the directory of a filesystem node."""
path = os.path.abspath(node)
return path if os.path.isdir(path) else os.path.dirname(path)
def _read_configuration_file(self, path):
    """Try to read and parse `path` as a configuration file.

    If the configurations were illegal (checked with
    `self._validate_options`), raises `IllegalConfiguration`.

    Returns (options, should_inherit).  `options` is None when the file
    has no recognized pydocstyle section.
    """
    if path.endswith('.toml'):
        parser = TomlParser()
    else:
        parser = RawConfigParser(inline_comment_prefixes=('#', ';'))
    options = None
    should_inherit = True

    # parser.read returns the list of successfully-read files; an empty
    # list (unreadable file) or a missing section leaves options as None.
    if parser.read(path) and self._get_section_name(parser):
        all_options = self._parser.option_list[:]
        for group in self._parser.option_groups:
            all_options.extend(group.option_list)

        # Map each option destination to its type (or action for flags),
        # used below to pick the right parser getter.
        option_list = {o.dest: o.type or o.action for o in all_options}

        # First, read the default values
        new_options, _ = self._parse_args([])

        # Second, parse the configuration
        section_name = self._get_section_name(parser)
        for opt in parser.options(section_name):
            if opt == 'inherit':
                should_inherit = parser.getboolean(section_name, opt)
                continue

            if opt.replace('_', '-') not in self.CONFIG_FILE_OPTIONS:
                log.warning(f"Unknown option '{opt}' ignored")
                continue

            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = parser.getint(section_name, opt)
            elif opt_type == 'string':
                value = parser.get(section_name, opt)
            else:
                # Remaining options are boolean flags.
                assert opt_type in ('store_true', 'store_false')
                value = parser.getboolean(section_name, opt)
            setattr(new_options, normalized_opt, value)

        # Third, fix the set-options
        options = self._fix_set_options(new_options)

    if options is not None:
        if not self._validate_options(options):
            raise IllegalConfiguration(f'in file: {path}')

    return options, should_inherit
def _merge_configuration(self, parent_config, child_options):
    """Merge parent config into the child options.

    The migration process requires an `options` object for the child in
    order to distinguish between mutually exclusive codes, add-select and
    add-ignore error codes.
    """
    # Start from a deep copy of the parent's codes so the parent's own
    # configuration is never mutated.
    merged_codes = copy.deepcopy(parent_config.checked_codes)

    if self._has_exclusive_option(child_options):
        # The child declared select/ignore/convention: its basic list
        # replaces the parent's entirely.
        merged_codes = self._get_exclusive_error_codes(child_options)

    self._set_add_options(merged_codes, child_options)

    kwargs = {'checked_codes': merged_codes}
    for field in (
        'match',
        'match_dir',
        'ignore_decorators',
        'property_decorators',
        'ignore_self_only_init',
    ):
        # Per-field override: the child's value wins when set, otherwise
        # fall back to the parent's.
        own_value = getattr(child_options, field)
        kwargs[field] = (
            own_value
            if own_value is not None
            else getattr(parent_config, field)
        )
    return CheckConfiguration(**kwargs)
def _parse_args(self, args=None, values=None):
    """Parse the options using `self._parser` and reformat the options."""
    parsed_options, positional = self._parser.parse_args(args, values)
    # Normalize the comma-separated select/ignore strings into sets.
    return self._fix_set_options(parsed_options), positional
@staticmethod
def _create_run_config(options):
    """Create a `RunConfiguration` object from `options`."""
    # Pull exactly the fields the namedtuple declares off the options.
    field_values = {
        name: getattr(options, name) for name in RunConfiguration._fields
    }
    return RunConfiguration(**field_values)
@classmethod
def _create_check_config(cls, options, use_defaults=True):
    """Create a `CheckConfiguration` object from `options`.

    If `use_defaults`, any of the match options that are `None` will
    be replaced with their default value and the default convention will be
    set for the checked codes.
    """
    checked_codes = None

    # Only resolve the error-code list when an exclusive option was
    # passed or defaults are requested; otherwise leave it None so a
    # later merge can distinguish "unset" from "explicitly set".
    if cls._has_exclusive_option(options) or use_defaults:
        checked_codes = cls._get_checked_errors(options)

    kwargs = dict(checked_codes=checked_codes)
    # Maps each config field to the suffix of its DEFAULT_* class constant.
    defaults = {
        'match': "MATCH_RE",
        'match_dir': "MATCH_DIR_RE",
        'ignore_decorators': "IGNORE_DECORATORS_RE",
        'property_decorators': "PROPERTY_DECORATORS",
        'ignore_self_only_init': "IGNORE_SELF_ONLY_INIT",
    }
    for key, default in defaults.items():
        # Fall back to the class-level constant only when the option is
        # unset AND defaults were requested.
        kwargs[key] = (
            getattr(cls, f"DEFAULT_{default}")
            if getattr(options, key) is None and use_defaults
            else getattr(options, key)
        )
    return CheckConfiguration(**kwargs)
@classmethod
def _get_section_name(cls, parser):
    """Parse options from relevant section.

    Return the first recognized section name present in `parser`
    ('pydocstyle' takes precedence over the legacy 'pep257'), or None.
    """
    return next(
        (
            name
            for name in cls.POSSIBLE_SECTION_NAMES
            if parser.has_section(name)
        ),
        None,
    )
@classmethod
def _get_config_file_in_folder(cls, path):
    """Look for a configuration file in `path`.

    If exists return its full path, otherwise None (implicitly).
    """
    if os.path.isfile(path):
        path = os.path.dirname(path)

    for fn in cls.PROJECT_CONFIG_FILES:
        # TOML files need the dedicated parser; ini-style files accept
        # inline comments starting with '#' or ';'.
        if fn.endswith('.toml'):
            config = TomlParser()
        else:
            config = RawConfigParser(inline_comment_prefixes=('#', ';'))
        full_path = os.path.join(path, fn)
        # A candidate only counts if it parses AND contains a
        # recognized pydocstyle section.
        if config.read(full_path) and cls._get_section_name(config):
            return full_path
@classmethod
def _get_exclusive_error_codes(cls, options):
    """Extract the error codes from the selected exclusive option.

    Returns None when no exclusive option (ignore/select/convention)
    was given.
    """
    all_codes = set(ErrorRegistry.get_error_codes())
    selected = None

    if options.ignore is not None:
        # Everything except the ignored prefixes.
        selected = all_codes - cls._expand_error_codes(options.ignore)
    elif options.select is not None:
        selected = cls._expand_error_codes(options.select)
    elif options.convention is not None:
        selected = getattr(conventions, options.convention)

    # To not override the conventions nor the options - copy them.
    return copy.deepcopy(selected)
@classmethod
def _set_add_options(cls, checked_codes, options):
    """Set `checked_codes` by the `add_ignore` or `add_select` options.

    Mutates `checked_codes` in place: adds the add-select codes, then
    removes the add-ignore ones.
    """
    checked_codes.update(cls._expand_error_codes(options.add_select))
    checked_codes.difference_update(
        cls._expand_error_codes(options.add_ignore)
    )
@staticmethod
def _expand_error_codes(code_parts):
    """Return an expanded set of error codes to ignore.

    Each entry in `code_parts` is treated as a code prefix (e.g. 'D2')
    and expanded to every known code starting with it.
    """
    known_codes = set(ErrorRegistry.get_error_codes())
    expanded_codes = set()

    try:
        for raw_part in code_parts:
            # Dealing with split-lined configurations; The part might begin
            # with a whitespace due to the newline character.
            prefix = raw_part.strip()
            if not prefix:
                continue

            matches = {
                code for code in known_codes if code.startswith(prefix)
            }
            if not matches:
                log.warning(
                    'Error code passed is not a prefix of any '
                    'known errors: %s',
                    prefix,
                )
            expanded_codes |= matches
    except TypeError as e:
        # e.g. a non-iterable value; surface it as a config error.
        raise IllegalConfiguration(e) from e

    return expanded_codes
@classmethod
def _get_checked_errors(cls, options):
    """Extract the codes needed to be checked from `options`."""
    codes = cls._get_exclusive_error_codes(options)
    if codes is None:
        # No exclusive option given: fall back to the default convention.
        codes = cls.DEFAULT_CONVENTION

    cls._set_add_options(codes, options)
    return codes
@classmethod
def _validate_options(cls, options):
    """Validate the mutually exclusive options.

    Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS`
    was selected, and any given convention name is known.
    """
    # permutations (rather than combinations) means the pair is reported
    # in the order it is first detected.
    for opt1, opt2 in itertools.permutations(
        cls.BASE_ERROR_SELECTION_OPTIONS, 2
    ):
        if getattr(options, opt1) and getattr(options, opt2):
            log.error(
                'Cannot pass both {} and {}. They are '
                'mutually exclusive.'.format(opt1, opt2)
            )
            return False

    if options.convention and options.convention not in conventions:
        log.error(
            "Illegal convention '{}'. Possible conventions: {}".format(
                options.convention, ', '.join(conventions.keys())
            )
        )
        return False
    return True
@classmethod
def _has_exclusive_option(cls, options):
    """Return `True` iff one or more exclusive options were selected."""
    # A generator is sufficient here; any() short-circuits on the first
    # option that was explicitly set.
    return any(
        getattr(options, opt) is not None
        for opt in cls.BASE_ERROR_SELECTION_OPTIONS
    )
@classmethod
def _fix_set_options(cls, options):
    """Alter the set options from None/strings to sets in place.

    Mutates and returns `options`: optional set options stay None when
    unset; mandatory ones default to the empty set.
    """
    optional_set_options = ('ignore', 'select')
    mandatory_set_options = ('add_ignore', 'add_select')

    def _get_set(value_str):
        """Split `value_str` by the delimiter `,` and return a set.

        Removes empty values ('') and strips whitespace.
        Also expands error code prefixes, to avoid doing this for every
        file.
        """
        if isinstance(value_str, str):
            value_str = value_str.split(",")
        return cls._expand_error_codes(
            {x.strip() for x in value_str} - {""}
        )

    for opt in optional_set_options:
        value = getattr(options, opt)
        if value is not None:
            setattr(options, opt, _get_set(value))

    for opt in mandatory_set_options:
        value = getattr(options, opt)
        if value is None:
            value = ''

        if not isinstance(value, Set):
            value = _get_set(value)

        setattr(options, opt, value)

    return options
@classmethod
def _create_option_parser(cls):
    """Return an option parser to parse the command line arguments.

    Builds the full pydocstyle CLI: run options, mutually exclusive
    error-selection options, and match/decorator clauses.
    """
    from optparse import OptionGroup, OptionParser

    parser = OptionParser(
        version=__version__,
        usage='Usage: pydocstyle [options] [<file|dir>...]',
    )

    option = parser.add_option

    # Run configuration options
    option(
        '-e',
        '--explain',
        action='store_true',
        default=False,
        help='show explanation of each error',
    )
    option(
        '-s',
        '--source',
        action='store_true',
        default=False,
        help='show source for each error',
    )
    option(
        '-d',
        '--debug',
        action='store_true',
        default=False,
        help='print debug information',
    )
    option(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help='print status information',
    )
    option(
        '--count',
        action='store_true',
        default=False,
        help='print total number of errors to stdout',
    )
    option(
        '--config',
        metavar='<path>',
        default=None,
        help='use given config file and disable config discovery',
    )

    parser.add_option_group(
        OptionGroup(
            parser,
            'Note',
            'When using --match, --match-dir or --ignore-decorators consider '
            'whether you should use a single quote (\') or a double quote ("), '
            'depending on your OS, Shell, etc.',
        )
    )

    check_group = OptionGroup(
        parser,
        'Error Check Options',
        'Only one of --select, --ignore or --convention can be '
        'specified. If none is specified, defaults to '
        '`--convention=pep257`. These three options select the "basic '
        'list" of error codes to check. If you wish to change that list '
        '(for example, if you selected a known convention but wish to '
        'ignore a specific error from it or add a new one) you can '
        'use `--add-[ignore/select]` in order to do so.',
    )
    add_check = check_group.add_option

    # Error check options
    add_check(
        '--select',
        metavar='<codes>',
        default=None,
        help='choose the basic list of checked errors by '
        'specifying which errors to check for (with a list of '
        'comma-separated error codes or prefixes). '
        'for example: --select=D101,D2',
    )
    add_check(
        '--ignore',
        metavar='<codes>',
        default=None,
        help='choose the basic list of checked errors by '
        'specifying which errors to ignore out of all of the '
        'available error codes (with a list of '
        'comma-separated error codes or prefixes). '
        'for example: --ignore=D101,D2',
    )
    add_check(
        '--convention',
        metavar='<name>',
        default=None,
        help='choose the basic list of checked errors by specifying '
        'an existing convention. Possible conventions: {}.'.format(
            ', '.join(conventions)
        ),
    )
    add_check(
        '--add-select',
        metavar='<codes>',
        default=None,
        help='add extra error codes to check to the basic list of '
        'errors previously set by --select, --ignore or '
        '--convention.',
    )
    add_check(
        '--add-ignore',
        metavar='<codes>',
        default=None,
        help='ignore extra error codes by removing them from the '
        'basic list previously set by --select, --ignore '
        'or --convention.',
    )
    add_check(
        '--ignore-self-only-init',
        default=None,
        action='store_true',
        help='ignore __init__ methods which only have a self param.',
    )
    parser.add_option_group(check_group)

    # Match clauses
    option(
        '--match',
        metavar='<pattern>',
        default=None,
        help=(
            "check only files that exactly match <pattern> regular "
            "expression; default is --match='{}' which matches "
            "files that don't start with 'test_' but end with "
            "'.py'"
        ).format(cls.DEFAULT_MATCH_RE),
    )
    option(
        '--match-dir',
        metavar='<pattern>',
        default=None,
        help=(
            "search only dirs that exactly match <pattern> regular "
            "expression; default is --match-dir='{}', which "
            "matches all dirs that don't start with "
            "a dot"
        ).format(cls.DEFAULT_MATCH_DIR_RE),
    )

    # Decorators
    option(
        '--ignore-decorators',
        metavar='<decorators>',
        default=None,
        help=(
            "ignore any functions or methods that are decorated "
            "by a function with a name fitting the <decorators> "
            "regular expression; default is --ignore-decorators='{}'"
            " which does not ignore any decorated functions.".format(
                cls.DEFAULT_IGNORE_DECORATORS_RE
            )
        ),
    )
    option(
        '--property-decorators',
        metavar='<property-decorators>',
        default=None,
        help=(
            "consider any method decorated with one of these "
            "decorators as a property, and consequently allow "
            "a docstring which is not in imperative mood; default "
            "is --property-decorators='{}'".format(
                cls.DEFAULT_PROPERTY_DECORATORS
            )
        ),
    )

    return parser
# Check configuration - used by the ConfigurationParser class.
# `checked_codes` holds the resolved set of error codes; the remaining
# fields mirror the CLI options of the same names (see
# ConfigurationParser._create_check_config, which populates them).
CheckConfiguration = namedtuple(
    'CheckConfiguration',
    (
        'checked_codes',
        'match',
        'match_dir',
        'ignore_decorators',
        'property_decorators',
        'ignore_self_only_init',
    ),
)
| ConfigurationParser |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar18.py | {
"start": 387,
"end": 479
} | class ____:
def fn(self, returnable: T1) -> T1: ...
T2 = TypeVar("T2", Async, Sync)
| Sync |
python | catalyst-team__catalyst | tests/catalyst/callbacks/test_batch_overfit.py | {
"start": 117,
"end": 1654
} | class ____(dl.Callback):
def __init__(self):
super().__init__(order=dl.CallbackOrder.external)
def on_loader_start(self, runner):
# 320 samples with 32 batch size
# -> 1 batch size = 32
# -> 0.1 portion = 32
assert len(runner.loaders[runner.loader_key]) == 32
def _prepare_experiment():
# data
utils.set_global_seed(42)
num_samples, num_features = int(32e1), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=0)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
return loaders, model, criterion, optimizer, scheduler
def test_batch_overfit():
loaders, model, criterion, optimizer, scheduler = _prepare_experiment()
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logs/batch_overfit",
num_epochs=1,
verbose=False,
callbacks=[dl.BatchOverfitCallback(train=1, valid=0.1)],
)
assert runner.epoch_metrics["train"]["loss"] < 1.4
assert runner.epoch_metrics["valid"]["loss"] < 1.3
| BatchOverfitCallbackCheck |
python | ionelmc__pytest-benchmark | tests/test_with_testcase.py | {
"start": 285,
"end": 575
} | class ____(unittest.TestCase):
@pytest.fixture(autouse=True)
def setupBenchmark(self, benchmark_weave):
self.benchmark_weave = benchmark_weave
def test_foo2(self):
self.benchmark_weave('time.sleep')
time.sleep(0.0000001)
| TerribleTerribleWayToWritePatchTests |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modular_falcon_mamba.py | {
"start": 25958,
"end": 26023
} | class ____(MambaCausalLMOutput):
pass
| FalconMambaCausalLMOutput |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor2.py | {
"start": 649,
"end": 693
} | class ____(Generic[_T1]):
pass
| CaveDweller |
python | modin-project__modin | modin/core/execution/python/implementations/pandas_on_python/partitioning/partition_manager.py | {
"start": 1232,
"end": 1722
} | class ____(PandasDataframePartitionManager):
"""
Class for managing partitions with pandas storage format and Python engine.
Inherits all functionality from ``PandasDataframePartitionManager`` base class.
"""
_partition_class = PandasOnPythonDataframePartition
_column_partitions_class = PandasOnPythonDataframeColumnPartition
_row_partition_class = PandasOnPythonDataframeRowPartition
_execution_wrapper = PythonWrapper
| PandasOnPythonDataframePartitionManager |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_bar18.py | {
"start": 315,
"end": 1830
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar18.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/chartsheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/chartsheets/sheet1.xml": ["<pageSetup", "<pageMargins", "<drawing"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [40294272, 40295808]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.activate()
chartsheet.set_header("Page &P")
chartsheet.set_footer("&A")
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 90450,
"end": 90522
} | class ____(Binop):
operation = operator.lt
_operator_repr = "<"
| LT |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/cnn.py | {
"start": 736,
"end": 1234
} | class ____(nn.Module):
"""A simple CNN model."""
@nn.compact
def __call__(self, x):
x = nn.Conv(features=32, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=256)(x)
x = nn.relu(x)
x = nn.Dense(features=10)(x)
return x
| CNN |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/_extras/_common.py | {
"start": 165,
"end": 348
} | class ____(AnthropicError):
def __init__(self, *, library: str, extra: str) -> None:
super().__init__(INSTRUCTIONS.format(library=library, extra=extra))
| MissingDependencyError |
python | Textualize__textual | docs/examples/guide/layout/utility_containers.py | {
"start": 132,
"end": 636
} | class ____(App):
CSS_PATH = "utility_containers.tcss"
def compose(self) -> ComposeResult:
yield Horizontal(
Vertical(
Static("One"),
Static("Two"),
classes="column",
),
Vertical(
Static("Three"),
Static("Four"),
classes="column",
),
)
if __name__ == "__main__":
app = UtilityContainersExample()
app.run()
| UtilityContainersExample |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 169485,
"end": 173095
} | class ____(int):
def __add__(self, other: Any) -> CompileCounterInt:
return CompileCounterInt(super().__add__(other))
def set_feature_use(feature: str, usage: bool) -> None:
"""
Records whether we are using a feature
Generally a feature is a JK.
"""
# Note that sometimes (tests etc...) we're not in a context which we can record into
if get_metrics_context().in_progress():
get_metrics_context().set_key_value("feature_usage", feature, usage)
_ddp_optimization_mode: tuple[str, ...] = (
"ddp_optimizer",
"python_reducer", # experimental mode
"python_reducer_without_compiled_forward",
"no_optimization",
)
def get_optimize_ddp_mode() -> str:
optimize_ddp = config.optimize_ddp
if isinstance(optimize_ddp, bool):
mode = "ddp_optimizer" if optimize_ddp else "no_optimization"
elif isinstance(optimize_ddp, str):
mode = optimize_ddp
else:
raise ValueError(
f"Invalid dynamo config optimize_ddp type {type(optimize_ddp)=}"
)
assert mode in _ddp_optimization_mode, (
f"Invalid dynamo config optimize_ddp value {mode=}"
)
return mode
@contextmanager
def maybe_disable_inference_mode() -> Generator[None, None, None]:
"""
Disables torch.inference_mode for the compilation (still on at runtime).
This simplifies the compile stack where we can assume that inference_mode
will always be off.
Since inference_mode is equivalent to no_grad + some optimizations (version
counts etc), we turn on no_grad here. The other optimizations are not
relevant to torch.compile.
"""
is_inference_mode_on = (
config.fake_tensor_disable_inference_mode and torch.is_inference_mode_enabled()
)
if is_inference_mode_on:
with (
torch.inference_mode(False),
torch.no_grad(),
):
yield
else:
yield
@contextmanager
def maybe_disable_inference_mode_for_fake_prop() -> Generator[None, None, None]:
"""
Turns off tracking of inference_mode for fake tensor propagation. With this
context manager, when a real tensor is converted to fake tensor, the fake
tensor looses its inference-ness.
"""
if config.fake_tensor_disable_inference_mode:
with torch._subclasses.meta_utils.disable_inference_mode_for_fake_prop():
yield
else:
yield
def is_node_meta_valid(node: Optional[torch.fx.Node]) -> bool:
return node is None or "example_value" in node.meta or "val" in node.meta
# If True, enforce fullgraph=True - raise errors on graph break
_error_on_graph_break = False
def _get_error_on_graph_break() -> bool:
return _error_on_graph_break
def _set_error_on_graph_break(value: bool) -> None:
global _error_on_graph_break
_error_on_graph_break = value
@torch._disable_dynamo
def record_pregraph_bytecode_enter() -> AbstractContextManager[None]:
cm: AbstractContextManager[None] = (
torch._C._profiler._RecordFunctionFast("Pregraph bytecode")
if torch.autograd.profiler._is_profiler_enabled
else contextlib.nullcontext()
)
cm.__enter__()
return cm
@torch._disable_dynamo
def record_pregraph_bytecode_exit(cm: AbstractContextManager[None]) -> None:
cm.__exit__(None, None, None)
# Returns a set of code objects present traced in the current TracingContext, or None
# if there is no current TracingContext.
def get_traced_code() -> Optional[list[CodeType]]:
from torch._guards import TracingContext
return TracingContext.get_traced_code()
| CompileCounterInt |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_interop_test.py | {
"start": 2473,
"end": 12184
} | class ____(tf.test.TestCase):
def setUp(self):
super(InteropTest, self).setUp()
physical_devices = tf.config.list_physical_devices('CPU')
configs = tf.config.get_logical_device_configuration(physical_devices[0])
if configs is None:
logical_devices = [
tf.config.LogicalDeviceConfiguration() for _ in range(3)
]
tf.config.set_logical_device_configuration(physical_devices[0],
logical_devices)
def testGradientTapeInterop(self):
with tf.GradientTape() as t:
x = np.asarray(3.0)
y = np.asarray(2.0)
t.watch([x, y])
xx = 2 * x
yy = 3 * y
dx, dy = t.gradient([xx, yy], [x, y])
self.assertIsInstance(dx, np.ndarray)
self.assertIsInstance(dy, np.ndarray)
self.assertAllClose(dx, 2.0)
self.assertAllClose(dy, 3.0)
def testGradientTapeNoneGradients(self):
y = np.asarray(2.0)
with tf.GradientTape() as t:
x = np.asarray(3.0)
t.watch([x])
z = 2 * x
dz = t.gradient(z, y)
self.assertIsNone(dz)
def testCondInterop(self):
x = np.asarray(3.0)
def fn(x):
x_plus_1 = tf.cond(x > 0, lambda: x + 1, lambda: x + 2)
x_plus_2 = tf.cond(x < 0, lambda: x + 1, lambda: x + 2)
return x_plus_1, x_plus_2
raw_x_plus_1, raw_x_plus_2 = fn(x)
fn_x_plus_1, fn_x_plus_2 = tf.function(fn)(x)
self.assertAllClose(raw_x_plus_1, x + 1)
self.assertAllClose(raw_x_plus_2, x + 2)
self.assertAllClose(fn_x_plus_1, x + 1)
self.assertAllClose(fn_x_plus_2, x + 2)
def testWhileInterop(self):
def fn():
x = np.asarray(0)
c = lambda x: x < 10000
b = lambda x: [x + 1]
return tf.while_loop(c, b, [x], parallel_iterations=20)
self.assertEqual(10000, fn()[0])
self.assertEqual(10000, tf.function(fn)()[0])
def testTensorTFNPArrayInterop(self):
arr = np.asarray(0.)
t = tf.constant(10.)
arr_plus_t = arr + t
t_plus_arr = t + arr
self.assertIsInstance(arr_plus_t, tf.Tensor)
self.assertIsInstance(t_plus_arr, tf.Tensor)
self.assertEqual(10., arr_plus_t.numpy())
self.assertEqual(10., t_plus_arr.numpy())
def testTensorTFNPOp(self):
t = tf.constant(10.)
sq = np.square(t)
self.assertIsInstance(sq, np.ndarray)
self.assertEqual(100., sq)
def testTFNPArrayTFOpInterop(self):
arr = np.asarray(10.)
# TODO(nareshmodi): Test more ops.
sq = tf.square(arr)
self.assertIsInstance(sq, tf.Tensor)
self.assertEqual(100., sq.numpy())
def testTFNPArrayNPOpInterop(self):
arr = np.asarray([10.])
# TODO(nareshmodi): Test more ops.
sq = onp.square(arr)
self.assertIsInstance(sq, onp.ndarray)
self.assertEqual(100., sq[0])
# TODO(b/171313773): why doesn't tensor have __array_module__
def testArrayModule(self):
self.skipTest("Tensor doesn't have __array_module__")
arr = np.asarray([10])
module = arr.__array_module__((tf.Tensor,))
self.assertIs(module, tf.experimental.numpy)
class Dummy:
pass
module = arr.__array_module__((tf.Tensor, Dummy))
self.assertIs(module, NotImplemented)
# TODO(nareshmodi): Fails since the autopacking code doesn't use
# nest.flatten.
# def testAutopacking(self):
# arr1 = np.asarray(1.)
# arr2 = np.asarray(2.)
# arr3 = np.asarray(3.)
# t = ops.convert_to_tensor_v2([arr1, arr2, arr3])
# self.assertEqual(t.numpy(), [1., 2., 3.])
def testDistStratInterop(self):
strategy = tf.distribute.MirroredStrategy(
devices=['CPU:0', 'CPU:1', 'CPU:2'])
multiplier = np.asarray(5.)
@tf.function
def run():
ctx = tf.distribute.get_replica_context()
val = np.asarray(ctx.replica_id_in_sync_group)
return val * multiplier
distributed_values = strategy.run(run)
reduced = strategy.reduce(
tf.distribute.ReduceOp.SUM, distributed_values, axis=None)
values = strategy.experimental_local_results(distributed_values)
# Note that this should match the number of virtual CPUs.
self.assertLen(values, 3)
self.assertIsInstance(values[0], np.ndarray)
self.assertIsInstance(values[1], np.ndarray)
self.assertIsInstance(values[2], np.ndarray)
self.assertAllClose(values[0], 0)
self.assertAllClose(values[1], 5)
self.assertAllClose(values[2], 10)
# "strategy.reduce" doesn't rewrap in ndarray.
# self.assertIsInstance(reduced, np.ndarray)
self.assertAllClose(reduced, 15)
@test_util.disable_tfrt('b/180469928')
def testPyFuncInterop(self):
def py_func_fn(a, b):
return a + b
@tf.function
def fn(a, b):
result = tf.py_function(py_func_fn, [a, b], a.dtype)
return np.asarray(result)
a = np.asarray(1.)
b = np.asarray(2.)
result = fn(a, b)
self.assertIsInstance(result, np.ndarray)
self.assertAllClose(result, 3.)
def testDatasetInterop(self):
values = [1, 2, 3, 4, 5, 6]
values_as_array = np.asarray(values)
# Tensor dataset
dataset = tf.data.Dataset.from_tensors(values_as_array)
for value, value_from_dataset in zip([values_as_array], dataset):
self.assertIsInstance(value_from_dataset, np.ndarray)
self.assertAllEqual(value_from_dataset, value)
# Tensor slice dataset
dataset = tf.data.Dataset.from_tensor_slices(values_as_array)
for value, value_from_dataset in zip(values, dataset):
self.assertIsInstance(value_from_dataset, np.ndarray)
self.assertAllEqual(value_from_dataset, value)
# # TODO(nareshmodi): as_numpy_iterator() doesn't work.
# items = list(dataset.as_numpy_iterator())
# Map over a dataset.
dataset = dataset.map(lambda x: np.add(x, 1))
for value, value_from_dataset in zip(values, dataset):
self.assertIsInstance(value_from_dataset, np.ndarray)
self.assertAllEqual(value_from_dataset, value + 1)
# Batch a dataset.
dataset = tf.data.Dataset.from_tensor_slices(values_as_array).batch(2)
for value, value_from_dataset in zip([[1, 2], [3, 4], [5, 6]], dataset):
self.assertIsInstance(value_from_dataset, np.ndarray)
self.assertAllEqual(value_from_dataset, value)
def testKerasInterop(self):
# Return an ndarray from the model.
inputs = tf.keras.layers.Input(shape=(10,))
output_layer = tf.keras.layers.Lambda(np.square)(inputs)
model = tf.keras.Model([inputs], output_layer)
values = onp.arange(10, dtype=onp.float32).reshape((1, 10))
values_as_array = np.asarray(values)
result = model(values)
self.assertIsInstance(result, np.ndarray)
self.assertAllClose(result, onp.square(values))
result = model(values_as_array)
self.assertIsInstance(result, np.ndarray)
self.assertAllClose(result, onp.square(values))
def testKerasInteropSequential(self):
class ProjectionLayer(tf.keras.layers.Layer):
"""Linear projection layer using TF NumPy."""
def __init__(self, units):
super(ProjectionLayer, self).__init__()
self._units = units
def build(self, input_shape):
stddev = np.sqrt(self._units).astype(np.float32)
initial_value = np.random.randn(input_shape[1], self._units).astype(
np.float32) / stddev
# Note that TF NumPy can interoperate with tf.Variable.
self.w = tf.Variable(initial_value, trainable=True)
def call(self, inputs):
return np.matmul(inputs, self.w)
model = tf.keras.Sequential(
[tf.keras.layers.Dense(100), ProjectionLayer(2)])
output = model.call(np.random.randn(10, 100).astype(np.float32))
self.assertIsInstance(output, np.ndarray)
dense_layer = tf.keras.layers.Dense(100)
output = dense_layer(np.random.randn(10, 100).astype(np.float32))
def testPForInterop(self):
def outer_product(a):
return np.tensordot(a, a, 0)
batch_size = 100
a = np.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
self.assertIsInstance(c, np.ndarray)
self.assertEqual(c.shape, (batch_size, 32, 32, 32, 32))
c = tf.vectorized_map(lambda x: x.T, a)
self.assertIsInstance(c, np.ndarray)
self.assertEqual(c.shape, (batch_size, 32, 32))
def testJacobian(self):
with tf.GradientTape() as g:
x = np.asarray([1., 2.])
y = np.asarray([3., 4.])
g.watch(x)
g.watch(y)
z = x * x * y
jacobian = g.jacobian(z, [x, y])
answer = [tf.linalg.diag(2 * x * y), tf.linalg.diag(x * x)]
self.assertIsInstance(jacobian[0], np.ndarray)
self.assertIsInstance(jacobian[1], np.ndarray)
self.assertAllClose(jacobian, answer)
def testBatchJacobian(self):
with tf.GradientTape() as g:
x = np.asarray([[1., 2.], [3., 4.]])
y = np.asarray([[3., 4.], [5., 6.]])
g.watch(x)
g.watch(y)
z = x * x * y
batch_jacobian = g.batch_jacobian(z, x)
answer = tf.stack(
[tf.linalg.diag(2 * x[0] * y[0]),
tf.linalg.diag(2 * x[1] * y[1])])
self.assertIsInstance(batch_jacobian, np.ndarray)
self.assertAllClose(batch_jacobian, answer)
def testForwardprop(self):
x = np.asarray([1., 2.])
xt = np.asarray([3., 4.])
with tf.autodiff.ForwardAccumulator(x, xt) as acc:
y = x * 2.
yt = acc.jvp(y)
self.assertIsInstance(yt, np.ndarray)
self.assertAllClose([6., 8.], yt)
z = np.asarray([1.])
self.assertIsNone(acc.jvp(z))
def testMapFn(self):
x = np.asarray([1., 2.])
mapped_x = tf.map_fn(lambda x: (x[0]+1, x[1]+1), (x, x))
self.assertIsInstance(mapped_x[0], np.ndarray)
self.assertIsInstance(mapped_x[1], np.ndarray)
self.assertAllClose(mapped_x[0], [2., 3.])
self.assertAllClose(mapped_x[1], [2., 3.])
| InteropTest |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 49621,
"end": 50148
} | class ____(FieldValues):
"""
Valid and invalid values for `DateTimeField` with a custom input format.
"""
valid_inputs = {
'1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=utc),
}
invalid_inputs = {
'2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateTimeField(default_timezone=utc, input_formats=['%I:%M%p, %d %b %Y'])
| TestCustomInputFormatDateTimeField |
python | bokeh__bokeh | src/bokeh/models/nodes.py | {
"start": 4618,
"end": 6412
} | class ____(Coordinate):
""" Represents a symbolic coordinate (by name).
.. note::
This model is experimental and may change at any point.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
target = Required(Either(Instance(Model), Enum(ImplicitTarget)), help="""
The provider of coordinates for this node.
This can be either a concrete model that can provide its coordinates (e.g.
a renderer, a frame or a canvas) or an implicit target defined by the
enum, which is resolved as the nearest parent of the given type. If the
provider cannot be determined or it isn't able to provide coordinates,
then the node resolved to an invalid coordinate (with x and y components
being ``NaN``).
""")
symbol = Required(String, help="""
A symbolic name of a coordinate to provide.
The allowed terms are dependent on the target of this node. For example,
for box-like targets this will comprise of box anchors (e.g. center, top
left) and box edges (e.g. top, left).
""")
offset = Int(default=0, help="""
Optional pixel offset for the computed coordinate.
""")
canvas: ClassVar[BoxNodes] = BoxNodes("canvas")
plot: ClassVar[BoxNodes] = BoxNodes("plot")
frame: ClassVar[BoxNodes] = BoxNodes("frame")
parent: ClassVar[BoxNodes] = BoxNodes("parent")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Node |
python | django__django | tests/i18n/tests.py | {
"start": 78116,
"end": 78583
} | class ____(ResolutionOrderI18NTests):
def test_sparse_territory_catalog(self):
"""
Untranslated strings for territorial language variants use the
translations of the generic language. In this case, the de-de
translation falls back to de.
"""
with translation.override("de-de"):
self.assertGettext("Test 1 (en)", "(de-de)")
self.assertGettext("Test 2 (en)", "(de)")
| TranslationFallbackI18NTests |
python | encode__starlette | starlette/authentication.py | {
"start": 4306,
"end": 4570
} | class ____(BaseUser):
def __init__(self, username: str) -> None:
self.username = username
@property
def is_authenticated(self) -> bool:
return True
@property
def display_name(self) -> str:
return self.username
| SimpleUser |
python | pytorch__pytorch | torch/_subclasses/complex_tensor/_core.py | {
"start": 347,
"end": 4692
} | class ____(Tensor):
"""A class that decomposes all ops on complex Tensors into their real and imaginary parts."""
_re: Tensor
_im: Tensor
def __new__(cls, real: Tensor, imag: Tensor) -> Self:
"""Initialize a ComplexTensor from its real and imaginary parts."""
from ._ops.common import REAL_TO_COMPLEX
shape = real.shape
device = real.device
# TODO (hameerabbasi): `torch.compile` sometimes fails here without making these
# contiguous. Why?
real = real.contiguous()
imag = imag.contiguous()
# TODO (hameerabbasi):
# What should we do with dtype?
# We could convert to the complex type (float32 -> complex64), but we
# can't use that model for say `bfloat16` which does not have a
# corresponding complex dtype.
# If we want to support this complex rep using any float type (see
# https://github.com/pytorch/pytorch/issues/95100)
# We either need to:
# 1) add the complex types for say `complexbf32`, knowing they can't really be used anywhere
# else.
# 2) We use the real float dtype here, and it is up to the user to know
# that dtype=float<size> here really means complex<2xSize> with dtype
# matching that of re/im parts alone
# I'm going with 1 for now, so that I can make gradcheck and some complex
# ops work properly, but might want to discuss this in the RFP.
dtype = REAL_TO_COMPLEX.get(real.dtype)
if dtype is None:
raise TypeError(
"Unsupported dtype for constituent tensors. Supported dtypes are: "
f"{set(REAL_TO_COMPLEX.keys())!r}."
)
storage_offset = real.storage_offset()
strides = real.stride()
layout = real.layout
pin_memory = real.is_pinned()
assert shape == imag.shape, f"Expected imag shape {shape}, got {imag.shape}"
assert device == imag.device, (
f"Expected imag device {device}, got {imag.device}"
)
assert real.dtype == imag.dtype, (
f"Expected imag dtype {real.dtype}, got {imag.dtype}"
)
assert pin_memory == imag.is_pinned(), (
f"Expected imag pinning {pin_memory}, got {imag.is_pinned()}"
)
res = Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
shape,
device=device,
dtype=dtype,
storage_offset=storage_offset,
strides=strides,
pin_memory=pin_memory,
layout=layout,
requires_grad=False,
)
res._re = real.clone().detach()
res._im = imag.clone().detach()
return res
@property
def re(self) -> Tensor:
return self._re
@property
def im(self) -> Tensor:
return self._im
@classmethod
def __torch_dispatch__(
cls,
func: OpOverload,
types: tuple[type, ...],
args: tuple = (),
kwargs: dict | None = None,
):
from ._ops.common import lookup_complex
kwargs = {} if kwargs is None else kwargs
impl = lookup_complex(func, *args, **kwargs)
if impl is None:
return NotImplemented
return impl(*args, **kwargs)
@staticmethod
def from_interleaved(t: Tensor) -> ComplexTensor:
t_real = torch.real(t)
t_imag = torch.imag(t) if t.dtype.is_complex else torch.zeros_like(t_real)
return Complex.apply(t_real, t_imag)
def as_interleaved(self) -> Tensor:
return torch.complex(self.real, self.imag)
@staticmethod
def __tensor_unflatten__(
inner_tensors: dict[str, Tensor],
meta: Any,
outer_size: tuple[int, ...],
outer_stride: tuple[int, ...],
) -> ComplexTensor:
assert meta is None
re, im = inner_tensors["re"], inner_tensors["im"]
return ComplexTensor(re, im)
def __tensor_flatten__(self) -> tuple[list[str], Any]:
return ["re", "im"], None
def __repr__(self, *, tensor_contents=None) -> str:
return f"ComplexTensor(real={self.re!r}, imag={self.im!r})"
def is_pinned(self, device: DeviceLikeType | None = None) -> bool:
return self.re.is_pinned(device)
| ComplexTensor |
python | kamyu104__LeetCode-Solutions | Python/non-overlapping-intervals.py | {
"start": 473,
"end": 993
} | class ____(object):
def eraseOverlapIntervals(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key=lambda interval: interval[0])
result, prev = 0, 0
for i in xrange(1, len(intervals)):
if intervals[i][0] < intervals[prev][1]:
if intervals[i][1] < intervals[prev][1]:
prev = i
result += 1
else:
prev = i
return result
| Solution2 |
python | wandb__wandb | wandb/vendor/promise-2.3.0/tests/test_spec.py | {
"start": 174,
"end": 13441
} | class ____:
"""
A helper class with some side effects
we can test.
"""
def __init__(self):
self.count = 0
def tick(self):
self.count += 1
def value(self):
return self.count
def test_3_2_1():
"""
Test that the arguments to 'then' are optional.
"""
p1 = Promise()
p2 = p1.then()
p3 = Promise()
p4 = p3.then()
p1.do_resolve(5)
p3.do_reject(Exception("How dare you!"))
def test_3_2_1_1():
"""
That that the first argument to 'then' is ignored if it
is not a function.
"""
results = {}
nonFunctions = [None, False, 5, {}, []]
def testNonFunction(nonFunction):
def foo(k, r):
results[k] = r
p1 = Promise.reject(Exception("Error: " + str(nonFunction)))
p2 = p1.then(nonFunction, lambda r: foo(str(nonFunction), r))
p2._wait()
for v in nonFunctions:
testNonFunction(v)
for v in nonFunctions:
assert_exception(results[str(v)], Exception, "Error: " + str(v))
def test_3_2_1_2():
"""
That that the second argument to 'then' is ignored if it
is not a function.
"""
results = {}
nonFunctions = [None, False, 5, {}, []]
def testNonFunction(nonFunction):
def foo(k, r):
results[k] = r
p1 = Promise.resolve("Error: " + str(nonFunction))
p2 = p1.then(lambda r: foo(str(nonFunction), r), nonFunction)
p2._wait()
for v in nonFunctions:
testNonFunction(v)
for v in nonFunctions:
assert "Error: " + str(v) == results[str(v)]
def test_3_2_2_1():
"""
The first argument to 'then' must be called when a promise is
fulfilled.
"""
c = Counter()
def check(v, c):
assert v == 5
c.tick()
p1 = Promise.resolve(5)
p2 = p1.then(lambda v: check(v, c))
p2._wait()
assert 1 == c.value()
def test_3_2_2_2():
"""
Make sure callbacks are never called more than once.
"""
c = Counter()
p1 = Promise.resolve(5)
p2 = p1.then(lambda v: c.tick())
p2._wait()
try:
# I throw an exception
p1.do_resolve(5)
assert False # Should not get here!
except AssertionError:
# This is expected
pass
assert 1 == c.value()
def test_3_2_2_3():
"""
Make sure fulfilled callback never called if promise is rejected
"""
cf = Counter()
cr = Counter()
p1 = Promise.reject(Exception("Error"))
p2 = p1.then(lambda v: cf.tick(), lambda r: cr.tick())
p2._wait()
assert 0 == cf.value()
assert 1 == cr.value()
def test_3_2_3_1():
"""
The second argument to 'then' must be called when a promise is
rejected.
"""
c = Counter()
def check(r, c):
assert_exception(r, Exception, "Error")
c.tick()
p1 = Promise.reject(Exception("Error"))
p2 = p1.then(None, lambda r: check(r, c))
p2._wait()
assert 1 == c.value()
def test_3_2_3_2():
"""
Make sure callbacks are never called more than once.
"""
c = Counter()
p1 = Promise.reject(Exception("Error"))
p2 = p1.then(None, lambda v: c.tick())
p2._wait()
try:
# I throw an exception
p1.do_reject(Exception("Error"))
assert False # Should not get here!
except AssertionError:
# This is expected
pass
assert 1 == c.value()
def test_3_2_3_3():
"""
Make sure rejected callback never called if promise is fulfilled
"""
cf = Counter()
cr = Counter()
p1 = Promise.resolve(5)
p2 = p1.then(lambda v: cf.tick(), lambda r: cr.tick())
p2._wait()
assert 0 == cr.value()
assert 1 == cf.value()
def test_3_2_5_1_when():
"""
Then can be called multiple times on the same promise
and callbacks must be called in the order of the
then calls.
"""
def add(l, v):
l.append(v)
p1 = Promise.resolve(2)
order = []
p2 = p1.then(lambda v: add(order, "p2"))
p3 = p1.then(lambda v: add(order, "p3"))
p2._wait()
p3._wait()
assert 2 == len(order)
assert "p2" == order[0]
assert "p3" == order[1]
def test_3_2_5_1_if():
"""
Then can be called multiple times on the same promise
and callbacks must be called in the order of the
then calls.
"""
def add(l, v):
l.append(v)
p1 = Promise.resolve(2)
order = []
p2 = p1.then(lambda v: add(order, "p2"))
p3 = p1.then(lambda v: add(order, "p3"))
p2._wait()
p3._wait()
assert 2 == len(order)
assert "p2" == order[0]
assert "p3" == order[1]
def test_3_2_5_2_when():
"""
Then can be called multiple times on the same promise
and callbacks must be called in the order of the
then calls.
"""
def add(l, v):
l.append(v)
p1 = Promise.reject(Exception("Error"))
order = []
p2 = p1.then(None, lambda v: add(order, "p2"))
p3 = p1.then(None, lambda v: add(order, "p3"))
p2._wait()
p3._wait()
assert 2 == len(order)
assert "p2" == order[0]
assert "p3" == order[1]
def test_3_2_5_2_if():
"""
Then can be called multiple times on the same promise
and callbacks must be called in the order of the
then calls.
"""
def add(l, v):
l.append(v)
p1 = Promise.reject(Exception("Error"))
order = []
p2 = p1.then(None, lambda v: add(order, "p2"))
p3 = p1.then(None, lambda v: add(order, "p3"))
p2._wait()
p3._wait()
assert 2 == len(order)
assert "p2" == order[0]
assert "p3" == order[1]
def test_3_2_6_1():
"""
Promises returned by then must be fulfilled when the promise
they are chained from is fulfilled IF the fulfillment value
is not a promise.
"""
p1 = Promise.resolve(5)
pf = p1.then(lambda v: v * v)
assert pf.get() == 25
p2 = Promise.reject(Exception("Error"))
pr = p2.then(None, lambda r: 5)
assert 5 == pr.get()
def test_3_2_6_2_when():
"""
Promises returned by then must be rejected when any of their
callbacks throw an exception.
"""
def fail(v):
raise AssertionError("Exception Message")
p1 = Promise.resolve(5)
pf = p1.then(fail)
pf._wait()
assert pf.is_rejected
assert_exception(pf.reason, AssertionError, "Exception Message")
p2 = Promise.reject(Exception("Error"))
pr = p2.then(None, fail)
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, AssertionError, "Exception Message")
def test_3_2_6_2_if():
"""
Promises returned by then must be rejected when any of their
callbacks throw an exception.
"""
def fail(v):
raise AssertionError("Exception Message")
p1 = Promise.resolve(5)
pf = p1.then(fail)
pf._wait()
assert pf.is_rejected
assert_exception(pf.reason, AssertionError, "Exception Message")
p2 = Promise.reject(Exception("Error"))
pr = p2.then(None, fail)
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, AssertionError, "Exception Message")
def test_3_2_6_3_when_fulfilled():
"""
Testing return of pending promises to make
sure they are properly chained.
This covers the case where the root promise
is fulfilled after the chaining is defined.
"""
p1 = Promise()
pending = Promise()
def p1_resolved(v):
return pending
pf = p1.then(p1_resolved)
assert pending.is_pending
assert pf.is_pending
p1.do_resolve(10)
pending.do_resolve(5)
pending._wait()
assert pending.is_fulfilled
assert 5 == pending.get()
pf._wait()
assert pf.is_fulfilled
assert 5 == pf.get()
p2 = Promise()
bad = Promise()
pr = p2.then(lambda r: bad)
assert bad.is_pending
assert pr.is_pending
p2.do_resolve(10)
bad._reject_callback(Exception("Error"))
bad._wait()
assert bad.is_rejected
assert_exception(bad.reason, Exception, "Error")
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, Exception, "Error")
def test_3_2_6_3_if_fulfilled():
"""
Testing return of pending promises to make
sure they are properly chained.
This covers the case where the root promise
is fulfilled before the chaining is defined.
"""
p1 = Promise()
p1.do_resolve(10)
pending = Promise()
pending.do_resolve(5)
pf = p1.then(lambda r: pending)
pending._wait()
assert pending.is_fulfilled
assert 5 == pending.get()
pf._wait()
assert pf.is_fulfilled
assert 5 == pf.get()
p2 = Promise()
p2.do_resolve(10)
bad = Promise()
bad.do_reject(Exception("Error"))
pr = p2.then(lambda r: bad)
bad._wait()
assert_exception(bad.reason, Exception, "Error")
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, Exception, "Error")
def test_3_2_6_3_when_rejected():
"""
Testing return of pending promises to make
sure they are properly chained.
This covers the case where the root promise
is rejected after the chaining is defined.
"""
p1 = Promise()
pending = Promise()
pr = p1.then(None, lambda r: pending)
assert pending.is_pending
assert pr.is_pending
p1.do_reject(Exception("Error"))
pending.do_resolve(10)
pending._wait()
assert pending.is_fulfilled
assert 10 == pending.get()
assert 10 == pr.get()
p2 = Promise()
bad = Promise()
pr = p2.then(None, lambda r: bad)
assert bad.is_pending
assert pr.is_pending
p2.do_reject(Exception("Error"))
bad.do_reject(Exception("Assertion"))
bad._wait()
assert bad.is_rejected
assert_exception(bad.reason, Exception, "Assertion")
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, Exception, "Assertion")
def test_3_2_6_3_if_rejected():
"""
Testing return of pending promises to make
sure they are properly chained.
This covers the case where the root promise
is rejected before the chaining is defined.
"""
p1 = Promise()
p1.do_reject(Exception("Error"))
pending = Promise()
pending.do_resolve(10)
pr = p1.then(None, lambda r: pending)
pending._wait()
assert pending.is_fulfilled
assert 10 == pending.get()
pr._wait()
assert pr.is_fulfilled
assert 10 == pr.get()
p2 = Promise()
p2.do_reject(Exception("Error"))
bad = Promise()
bad.do_reject(Exception("Assertion"))
pr = p2.then(None, lambda r: bad)
bad._wait()
assert bad.is_rejected
assert_exception(bad.reason, Exception, "Assertion")
pr._wait()
assert pr.is_rejected
assert_exception(pr.reason, Exception, "Assertion")
def test_3_2_6_4_pending():
"""
Handles the case where the arguments to then
are not functions or promises.
"""
p1 = Promise()
p2 = p1.then(5)
p1.do_resolve(10)
assert 10 == p1.get()
p2._wait()
assert p2.is_fulfilled
assert 10 == p2.get()
def test_3_2_6_4_fulfilled():
"""
Handles the case where the arguments to then
are values, not functions or promises.
"""
p1 = Promise()
p1.do_resolve(10)
p2 = p1.then(5)
assert 10 == p1.get()
p2._wait()
assert p2.is_fulfilled
assert 10 == p2.get()
def test_3_2_6_5_pending():
"""
Handles the case where the arguments to then
are values, not functions or promises.
"""
p1 = Promise()
p2 = p1.then(None, 5)
p1.do_reject(Exception("Error"))
assert_exception(p1.reason, Exception, "Error")
p2._wait()
assert p2.is_rejected
assert_exception(p2.reason, Exception, "Error")
def test_3_2_6_5_rejected():
"""
Handles the case where the arguments to then
are values, not functions or promises.
"""
p1 = Promise()
p1.do_reject(Exception("Error"))
p2 = p1.then(None, 5)
assert_exception(p1.reason, Exception, "Error")
p2._wait()
assert p2.is_rejected
assert_exception(p2.reason, Exception, "Error")
def test_chained_promises():
"""
Handles the case where the arguments to then
are values, not functions or promises.
"""
p1 = Promise(lambda resolve, reject: resolve(Promise.resolve(True)))
assert p1.get() == True
def test_promise_resolved_after():
"""
The first argument to 'then' must be called when a promise is
fulfilled.
"""
c = Counter()
def check(v, c):
assert v == 5
c.tick()
p1 = Promise()
p2 = p1.then(lambda v: check(v, c))
p1.do_resolve(5)
Promise.wait(p2)
assert 1 == c.value()
def test_promise_follows_indifentely():
a = Promise.resolve(None)
b = a.then(lambda x: Promise.resolve("X"))
e = Event()
def b_then(v):
c = Promise.resolve(None)
d = c.then(lambda v: Promise.resolve("B"))
return d
promise = b.then(b_then)
assert promise.get() == "B"
def test_promise_all_follows_indifentely():
promises = Promise.all(
[
Promise.resolve("A"),
Promise.resolve(None)
.then(Promise.resolve)
.then(lambda v: Promise.resolve(None).then(lambda v: Promise.resolve("B"))),
]
)
assert promises.get() == ["A", "B"]
| Counter |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/Dock.py | {
"start": 8456,
"end": 12456
} | class ____(VerticalLabel):
sigClicked = QtCore.Signal(object, object)
sigCloseClicked = QtCore.Signal()
def __init__(self, text, closable=False, fontSize="12px"):
self.dim = False
self.fixedWidth = False
self.fontSize = fontSize
VerticalLabel.__init__(self, text, orientation='horizontal', forceWidth=False)
self.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop|QtCore.Qt.AlignmentFlag.AlignHCenter)
self.dock = None
self.updateStyle()
self.setAutoFillBackground(False)
self.mouseMoved = False
self.pressPos = QtCore.QPointF(0, 0)
self.closeButton = None
if closable:
self.closeButton = QtWidgets.QToolButton(self)
self.closeButton.clicked.connect(self.sigCloseClicked)
self.closeButton.setIcon(QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_TitleBarCloseButton))
def updateStyle(self):
r = '3px'
if self.dim:
fg = '#aaa'
bg = '#44a'
border = '#339'
else:
fg = '#fff'
bg = '#66c'
border = '#55B'
if self.orientation == 'vertical':
self.vStyle = """DockLabel {
background-color : %s;
color : %s;
border-top-right-radius: 0px;
border-top-left-radius: %s;
border-bottom-right-radius: 0px;
border-bottom-left-radius: %s;
border-width: 0px;
border-right: 2px solid %s;
padding-top: 3px;
padding-bottom: 3px;
font-size: %s;
}""" % (bg, fg, r, r, border, self.fontSize)
self.setStyleSheet(self.vStyle)
else:
self.hStyle = """DockLabel {
background-color : %s;
color : %s;
border-top-right-radius: %s;
border-top-left-radius: %s;
border-bottom-right-radius: 0px;
border-bottom-left-radius: 0px;
border-width: 0px;
border-bottom: 2px solid %s;
padding-left: 3px;
padding-right: 3px;
font-size: %s;
}""" % (bg, fg, r, r, border, self.fontSize)
self.setStyleSheet(self.hStyle)
def setDim(self, d):
if self.dim != d:
self.dim = d
self.updateStyle()
def setOrientation(self, o):
VerticalLabel.setOrientation(self, o)
self.updateStyle()
def isClosable(self):
return self.closeButton is not None
def mousePressEvent(self, ev):
lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
self.pressPos = lpos
self.mouseMoved = False
ev.accept()
def mouseMoveEvent(self, ev):
if not self.mouseMoved:
lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
self.mouseMoved = (lpos - self.pressPos).manhattanLength() > QtWidgets.QApplication.startDragDistance()
if self.mouseMoved and ev.buttons() == QtCore.Qt.MouseButton.LeftButton:
self.dock.startDrag()
ev.accept()
def mouseReleaseEvent(self, ev):
ev.accept()
if not self.mouseMoved:
self.sigClicked.emit(self, ev)
def mouseDoubleClickEvent(self, ev):
if ev.button() == QtCore.Qt.MouseButton.LeftButton:
self.dock.float()
def resizeEvent (self, ev):
if self.closeButton:
if self.orientation == 'vertical':
size = ev.size().width()
pos = QtCore.QPoint(0, 0)
else:
size = ev.size().height()
pos = QtCore.QPoint(ev.size().width() - size, 0)
self.closeButton.setFixedSize(QtCore.QSize(size, size))
self.closeButton.move(pos)
super(DockLabel,self).resizeEvent(ev)
| DockLabel |
python | huggingface__transformers | tests/models/cohere/test_modeling_cohere.py | {
"start": 1328,
"end": 5841
} | class ____:
config_class = CohereConfig
if is_torch_available():
model_class = CohereModel
for_causal_lm_class = CohereForCausalLM
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
# Ignore copy
def get_config(self):
return self.config_class(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = self.model_class(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| CohereModelTester |
python | google__pytype | pytype/pytd/booleq.py | {
"start": 174,
"end": 1558
} | class ____:
"""Base class for boolean terms."""
__slots__ = ()
def simplify(self, assignments):
"""Simplify this term, given a list of possible values for each variable.
Args:
assignments: A list of possible values for each variable. A dictionary
mapping strings (variable name) to sets of strings (value names).
Returns:
A new BooleanTerm, potentially simplified.
"""
raise NotImplementedError()
def extract_pivots(self, assignments):
"""Find values for every variable that appears in this term.
This finds all variables that appear in this term and limits them to the
values they appear together with. For example, consider the equation
t = v1 | (t = v2 & (t = v2 | t = v3))
Here, t can be limited to [v1, v2]. (v3 is impossible.)
Args:
assignments: The current "upper bound", i.e. all values that are still
possible for variables. Used for extracting pivots out of Eq(var, var).
Returns:
A dictionary mapping strings (variable names) to sets of strings (value
or variable names).
"""
raise NotImplementedError()
def extract_equalities(self):
"""Find all equalities that appear in this term.
Returns:
A sequence of tuples of a string (variable name) and a string (value or
variable name).
"""
raise NotImplementedError()
| BooleanTerm |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 3202,
"end": 3455
} | class ____(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return gen_array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops.batch_to_space(*args, **kwargs)
| CppOpImpl |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 48429,
"end": 61562
} | class ____(NonStrictDataModel):
"""
:param id: Dataset ID
:type id: str
:param name: Dataset name
:type name: str
:param user: Associated user ID
:type user: str
:param company: Company ID
:type company: str
:param created: Dataset creation time (UTC)
:type created: datetime.datetime
:param last_update: Time of last update (UTC). Updated on dataset update; on
any version operation: when version is created, modified, committed, published
or deleted; and on any frame operation: when frames are added, modified or
deleted.
:type last_update: datetime.datetime
:param comment:
:type comment: str
:param tags: List of user-defined tags
:type tags: Sequence[str]
:param system_tags: List of system tags. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param terms_of_use: Terms of use string
:type terms_of_use: str
:param metadata: User-provided metadata
:type metadata: dict
:param project: Associated project ID
:type project: str
:param display_stats: Calculated statistics for the latest committed or
published version
:type display_stats: Statistics
:param display_version_name: The name of the version from which statistics are
taken
:type display_version_name: str
:param version_count: Amount of versions in dataset. Only supported by
datasets.get_all.
:type version_count: int
:param head_version: The most recent version for write operations. Calculated
as the non-published version with the longest path to the root.
:type head_version: Version
:param paradigm: 'single_version' for datasets whose version tree has only one
path, 'general' otherwise
:type paradigm: VersionParadigmEnum
"""
_schema = {
"properties": {
"comment": {"description": "", "type": ["string", "null"]},
"company": {"description": "Company ID", "type": ["string", "null"]},
"created": {
"description": "Dataset creation time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"display_stats": {
"description": "Calculated statistics for the latest committed or published version",
"oneOf": [{"$ref": "#/definitions/statistics"}, {"type": "null"}],
},
"display_version_name": {
"description": "The name of the version from which statistics are taken",
"type": ["string", "null"],
},
"head_version": {
"description": (
"The most recent version for write operations. Calculated as the non-published version with the"
" longest path to the root."
),
"oneOf": [{"$ref": "#/definitions/version"}, {"type": "null"}],
},
"id": {"description": "Dataset ID", "type": ["string", "null"]},
"last_update": {
"description": (
"Time of last update (UTC). Updated on dataset update; on any version operation:\nwhen version is"
" created, modified, committed, published or deleted; and on any frame operation: when frames are"
" added,\nmodified or deleted."
),
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": True,
"description": "User-provided metadata",
"type": ["object", "null"],
},
"name": {"description": "Dataset name", "type": ["string", "null"]},
"paradigm": {
"description": (
"'single_version' for datasets whose version tree has only one path, 'general' otherwise"
),
"oneOf": [
{"$ref": "#/definitions/version_paradigm_enum"},
{"type": "null"},
],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"system_tags": {
"description": "List of system tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of user-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"terms_of_use": {
"description": "Terms of use string",
"type": ["string", "null"],
},
"user": {"description": "Associated user ID", "type": ["string", "null"]},
"version_count": {
"description": "Amount of versions in dataset. Only supported by datasets.get_all.",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id=None,
name=None,
user=None,
company=None,
created=None,
last_update=None,
comment=None,
tags=None,
system_tags=None,
terms_of_use=None,
metadata=None,
project=None,
display_stats=None,
display_version_name=None,
version_count=None,
head_version=None,
paradigm=None,
**kwargs
):
super(Dataset, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.last_update = last_update
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.terms_of_use = terms_of_use
self.metadata = metadata
self.project = project
self.display_stats = display_stats
self.display_version_name = display_version_name
self.version_count = version_count
self.head_version = head_version
self.paradigm = paradigm
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self):
return self._property_user
@user.setter
def user(self, value):
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self):
return self._property_company
@company.setter
def company(self, value):
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self):
return self._property_created
@created.setter
def created(self, value):
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("last_update")
def last_update(self):
return self._property_last_update
@last_update.setter
def last_update(self, value):
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
@schema_property("comment")
def comment(self):
return self._property_comment
@comment.setter
def comment(self, value):
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("terms_of_use")
def terms_of_use(self):
return self._property_terms_of_use
@terms_of_use.setter
def terms_of_use(self, value):
if value is None:
self._property_terms_of_use = None
return
self.assert_isinstance(value, "terms_of_use", six.string_types)
self._property_terms_of_use = value
@schema_property("metadata")
def metadata(self):
return self._property_metadata
@metadata.setter
def metadata(self, value):
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("display_stats")
def display_stats(self):
return self._property_display_stats
@display_stats.setter
def display_stats(self, value):
if value is None:
self._property_display_stats = None
return
if isinstance(value, dict):
value = Statistics.from_dict(value)
else:
self.assert_isinstance(value, "display_stats", Statistics)
self._property_display_stats = value
@schema_property("display_version_name")
def display_version_name(self):
return self._property_display_version_name
@display_version_name.setter
def display_version_name(self, value):
if value is None:
self._property_display_version_name = None
return
self.assert_isinstance(value, "display_version_name", six.string_types)
self._property_display_version_name = value
@schema_property("version_count")
def version_count(self):
return self._property_version_count
@version_count.setter
def version_count(self, value):
if value is None:
self._property_version_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "version_count", six.integer_types)
self._property_version_count = value
@schema_property("head_version")
def head_version(self):
return self._property_head_version
@head_version.setter
def head_version(self, value):
if value is None:
self._property_head_version = None
return
if isinstance(value, dict):
value = Version.from_dict(value)
else:
self.assert_isinstance(value, "head_version", Version)
self._property_head_version = value
@schema_property("paradigm")
def paradigm(self):
return self._property_paradigm
@paradigm.setter
def paradigm(self, value):
if value is None:
self._property_paradigm = None
return
if isinstance(value, six.string_types):
try:
value = VersionParadigmEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "paradigm", enum.Enum)
self._property_paradigm = value
| Dataset |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 27616,
"end": 28397
} | class ____(Operation):
def __init__(self, dtype, *, name=None):
super().__init__(name=name)
self.dtype = backend.standardize_dtype(dtype)
def call(self, x):
return backend.core.cast(x, self.dtype)
def compute_output_spec(self, x):
return backend.KerasTensor(shape=x.shape, dtype=self.dtype)
@keras_export("keras.ops.cast")
def cast(x, dtype):
"""Cast a tensor to the desired dtype.
Args:
x: A tensor or variable.
dtype: The target type.
Returns:
A tensor of the specified `dtype`.
Example:
>>> x = keras.ops.arange(4)
>>> x = keras.ops.cast(x, dtype="float16")
"""
if any_symbolic_tensors((x,)):
return Cast(dtype=dtype)(x)
return backend.core.cast(x, dtype)
| Cast |
python | streamlit__streamlit | lib/tests/streamlit/runtime/context_test.py | {
"start": 7255,
"end": 8572
} | class ____(unittest.TestCase):
"""Test StreamlitCookies class methods."""
def test_cookies_getitem(self):
"""Test that __getitem__ returns cookie value."""
cookies = StreamlitCookies({"session_id": "abc123", "user_id": "456"})
assert cookies["session_id"] == "abc123"
assert cookies["user_id"] == "456"
def test_cookies_len(self):
"""Test that __len__ returns number of cookies."""
cookies = StreamlitCookies({"session_id": "abc123", "user_id": "456"})
assert len(cookies) == 2
def test_cookies_iter(self):
"""Test that __iter__ returns cookie keys."""
cookies = StreamlitCookies({"session_id": "abc123", "user_id": "456"})
cookie_keys = list(cookies)
assert sorted(cookie_keys) == ["session_id", "user_id"]
def test_cookies_from_tornado(self):
"""Test creating StreamlitCookies from Tornado cookies."""
morsel1 = Morsel()
morsel1.set("session_id", "abc123", "abc123")
morsel2 = Morsel()
morsel2.set("user_id", "456", "456")
tornado_cookies = {"session_id": morsel1, "user_id": morsel2}
cookies = StreamlitCookies.from_tornado_cookies(tornado_cookies)
assert cookies.to_dict() == {"session_id": "abc123", "user_id": "456"}
| StreamlitCookiesTest |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 24157,
"end": 24811
} | class ____(PointEvent):
''' Announce the start of a rotate event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'rotatestart'
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
| RotateStart |
python | doocs__leetcode | solution/1800-1899/1806.Minimum Number of Operations to Reinitialize a Permutation/Solution.py | {
"start": 0,
"end": 293
} | class ____:
def reinitializePermutation(self, n: int) -> int:
ans, i = 0, 1
while 1:
ans += 1
if i < n >> 1:
i <<= 1
else:
i = (i - (n >> 1)) << 1 | 1
if i == 1:
return ans
| Solution |
python | geekcomputers__Python | thread_signal.py | {
"start": 95,
"end": 738
} | class ____(threading.Thread):
def __init__(self, event):
threading.Thread.__init__(self)
self.event = event
def run(self):
while self.event.is_set():
print("sub thread")
sleep(2)
else:
print("sub thread end")
exit()
def handler_thread(event):
print("main thread end")
event.clear()
def handler(signum, frame):
handler_thread(frame.f_globals["event"])
signal.signal(signal.SIGINT, handler)
print("main thread")
event = threading.Event()
event.set()
p = producer(event)
p.start()
p.join()
sleep(100) # 一定要使主线程处于活动状态,否则信号处理对子线程不起作用
| producer |
python | numba__numba | numba/core/typing/collections.py | {
"start": 596,
"end": 839
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Container)):
return signature(types.intp, val)
@infer_global(operator.truth)
| ContainerLen |
python | pyca__cryptography | src/cryptography/fernet.py | {
"start": 661,
"end": 5225
} | class ____:
def __init__(
self,
key: bytes | str,
backend: typing.Any = None,
) -> None:
try:
key = base64.urlsafe_b64decode(key)
except binascii.Error as exc:
raise ValueError(
"Fernet key must be 32 url-safe base64-encoded bytes."
) from exc
if len(key) != 32:
raise ValueError(
"Fernet key must be 32 url-safe base64-encoded bytes."
)
self._signing_key = key[:16]
self._encryption_key = key[16:]
@classmethod
def generate_key(cls) -> bytes:
return base64.urlsafe_b64encode(os.urandom(32))
def encrypt(self, data: bytes) -> bytes:
return self.encrypt_at_time(data, int(time.time()))
def encrypt_at_time(self, data: bytes, current_time: int) -> bytes:
iv = os.urandom(16)
return self._encrypt_from_parts(data, current_time, iv)
def _encrypt_from_parts(
self, data: bytes, current_time: int, iv: bytes
) -> bytes:
utils._check_bytes("data", data)
padder = padding.PKCS7(algorithms.AES.block_size).padder()
padded_data = padder.update(data) + padder.finalize()
encryptor = Cipher(
algorithms.AES(self._encryption_key),
modes.CBC(iv),
).encryptor()
ciphertext = encryptor.update(padded_data) + encryptor.finalize()
basic_parts = (
b"\x80"
+ current_time.to_bytes(length=8, byteorder="big")
+ iv
+ ciphertext
)
h = HMAC(self._signing_key, hashes.SHA256())
h.update(basic_parts)
hmac = h.finalize()
return base64.urlsafe_b64encode(basic_parts + hmac)
def decrypt(self, token: bytes | str, ttl: int | None = None) -> bytes:
timestamp, data = Fernet._get_unverified_token_data(token)
if ttl is None:
time_info = None
else:
time_info = (ttl, int(time.time()))
return self._decrypt_data(data, timestamp, time_info)
def decrypt_at_time(
self, token: bytes | str, ttl: int, current_time: int
) -> bytes:
if ttl is None:
raise ValueError(
"decrypt_at_time() can only be used with a non-None ttl"
)
timestamp, data = Fernet._get_unverified_token_data(token)
return self._decrypt_data(data, timestamp, (ttl, current_time))
def extract_timestamp(self, token: bytes | str) -> int:
timestamp, data = Fernet._get_unverified_token_data(token)
# Verify the token was not tampered with.
self._verify_signature(data)
return timestamp
@staticmethod
def _get_unverified_token_data(token: bytes | str) -> tuple[int, bytes]:
if not isinstance(token, (str, bytes)):
raise TypeError("token must be bytes or str")
try:
data = base64.urlsafe_b64decode(token)
except (TypeError, binascii.Error):
raise InvalidToken
if not data or data[0] != 0x80:
raise InvalidToken
if len(data) < 9:
raise InvalidToken
timestamp = int.from_bytes(data[1:9], byteorder="big")
return timestamp, data
def _verify_signature(self, data: bytes) -> None:
h = HMAC(self._signing_key, hashes.SHA256())
h.update(data[:-32])
try:
h.verify(data[-32:])
except InvalidSignature:
raise InvalidToken
def _decrypt_data(
self,
data: bytes,
timestamp: int,
time_info: tuple[int, int] | None,
) -> bytes:
if time_info is not None:
ttl, current_time = time_info
if timestamp + ttl < current_time:
raise InvalidToken
if current_time + _MAX_CLOCK_SKEW < timestamp:
raise InvalidToken
self._verify_signature(data)
iv = data[9:25]
ciphertext = data[25:-32]
decryptor = Cipher(
algorithms.AES(self._encryption_key), modes.CBC(iv)
).decryptor()
plaintext_padded = decryptor.update(ciphertext)
try:
plaintext_padded += decryptor.finalize()
except ValueError:
raise InvalidToken
unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
unpadded = unpadder.update(plaintext_padded)
try:
unpadded += unpadder.finalize()
except ValueError:
raise InvalidToken
return unpadded
| Fernet |
python | ApeWorX__ape | tests/integration/cli/utils.py | {
"start": 462,
"end": 899
} | class ____:
"""
A class that extracts a callable test's name and module name.
"""
def __init__(self, test_method: Callable):
self.module_full_name = test_method.__module__
self.name = test_method.__name__
@property
def module_name(self) -> str:
return self.module_full_name.split(".")[-1]
@property
def node_id(self) -> str:
return f"{self.module_name}.{self.name}"
| NodeId |
python | ray-project__ray | python/ray/util/client/dataclient.py | {
"start": 7522,
"end": 22951
} | class ____:
def __init__(self, client_worker: "Worker", client_id: str, metadata: list):
"""Initializes a thread-safe datapath over a Ray Client gRPC channel.
Args:
client_worker: The Ray Client worker that manages this client
client_id: the generated ID representing this client
metadata: metadata to pass to gRPC requests
"""
self.client_worker = client_worker
self._client_id = client_id
self._metadata = metadata
self.data_thread = self._start_datathread()
# Track outstanding requests to resend in case of disconnection
self.outstanding_requests: Dict[int, Any] = OrderedDict()
# Serialize access to all mutable internal states: self.request_queue,
# self.ready_data, self.asyncio_waiting_data,
# self._in_shutdown, self._req_id, self.outstanding_requests and
# calling self._next_id()
self.lock = threading.Lock()
# Waiting for response or shutdown.
self.cv = threading.Condition(lock=self.lock)
self.request_queue = self._create_queue()
self.ready_data: Dict[int, Any] = {}
# NOTE: Dictionary insertion is guaranteed to complete before lookup
# and/or removal because of synchronization via the request_queue.
self.asyncio_waiting_data: Dict[int, ResponseCallable] = {}
self._in_shutdown = False
self._req_id = 0
self._last_exception = None
self._acknowledge_counter = 0
self.data_thread.start()
# Must hold self.lock when calling this function.
def _next_id(self) -> int:
assert self.lock.locked()
self._req_id += 1
if self._req_id > INT32_MAX:
self._req_id = 1
# Responses that aren't tracked (like opportunistic releases)
# have req_id=0, so make sure we never mint such an id.
assert self._req_id != 0
return self._req_id
def _start_datathread(self) -> threading.Thread:
return threading.Thread(
target=self._data_main,
name="ray_client_streaming_rpc",
args=(),
daemon=True,
)
# A helper that takes requests from queue. If the request wraps a PutRequest,
# lazily chunks and yields the request. Otherwise, yields the request directly.
def _requests(self):
while True:
req = self.request_queue.get()
if req is None:
# Stop when client signals shutdown.
return
req_type = req.WhichOneof("type")
if req_type == "put":
yield from chunk_put(req)
elif req_type == "task":
yield from chunk_task(req)
else:
yield req
def _data_main(self) -> None:
reconnecting = False
try:
while not self.client_worker._in_shutdown:
stub = ray_client_pb2_grpc.RayletDataStreamerStub(
self.client_worker.channel
)
metadata = self._metadata + [("reconnecting", str(reconnecting))]
resp_stream = stub.Datapath(
self._requests(),
metadata=metadata,
wait_for_ready=True,
)
try:
for response in resp_stream:
self._process_response(response)
return
except grpc.RpcError as e:
reconnecting = self._can_reconnect(e)
if not reconnecting:
self._last_exception = e
return
self._reconnect_channel()
except Exception as e:
self._last_exception = e
finally:
logger.debug("Shutting down data channel.")
self._shutdown()
def _process_response(self, response: Any) -> None:
"""
Process responses from the data servicer.
"""
if response.req_id == 0:
# This is not being waited for.
logger.debug(f"Got unawaited response {response}")
return
if response.req_id in self.asyncio_waiting_data:
can_remove = True
try:
callback = self.asyncio_waiting_data[response.req_id]
if isinstance(callback, ChunkCollector):
can_remove = callback(response)
elif callback:
callback(response)
if can_remove:
# NOTE: calling del self.asyncio_waiting_data results
# in the destructor of ClientObjectRef running, which
# calls ReleaseObject(). So self.asyncio_waiting_data
# is accessed without holding self.lock. Holding the
# lock shouldn't be necessary either.
del self.asyncio_waiting_data[response.req_id]
except Exception:
logger.exception("Callback error:")
with self.lock:
# Update outstanding requests
if response.req_id in self.outstanding_requests and can_remove:
del self.outstanding_requests[response.req_id]
# Acknowledge response
self._acknowledge(response.req_id)
else:
with self.lock:
self.ready_data[response.req_id] = response
self.cv.notify_all()
def _can_reconnect(self, e: grpc.RpcError) -> bool:
"""
Processes RPC errors that occur while reading from data stream.
Returns True if the error can be recovered from, False otherwise.
"""
if not self.client_worker._can_reconnect(e):
logger.error("Unrecoverable error in data channel.")
logger.debug(e)
return False
logger.debug("Recoverable error in data channel.")
logger.debug(e)
return True
def _shutdown(self) -> None:
"""
Shutdown the data channel
"""
with self.lock:
self._in_shutdown = True
self.cv.notify_all()
callbacks = self.asyncio_waiting_data.values()
self.asyncio_waiting_data = {}
if self._last_exception:
# Abort async requests with the error.
err = ConnectionError(
"Failed during this or a previous request. Exception that "
f"broke the connection: {self._last_exception}"
)
else:
err = ConnectionError(
"Request cannot be fulfilled because the data client has "
"disconnected."
)
for callback in callbacks:
if callback:
callback(err)
# Since self._in_shutdown is set to True, no new item
# will be added to self.asyncio_waiting_data
def _acknowledge(self, req_id: int) -> None:
"""
Puts an acknowledge request on the request queue periodically.
Lock should be held before calling this. Used when an async or
blocking response is received.
"""
if not self.client_worker._reconnect_enabled:
# Skip ACKs if reconnect isn't enabled
return
assert self.lock.locked()
self._acknowledge_counter += 1
if self._acknowledge_counter % ACKNOWLEDGE_BATCH_SIZE == 0:
self.request_queue.put(
ray_client_pb2.DataRequest(
acknowledge=ray_client_pb2.AcknowledgeRequest(req_id=req_id)
)
)
def _reconnect_channel(self) -> None:
"""
Attempts to reconnect the gRPC channel and resend outstanding
requests. First, the server is pinged to see if the current channel
still works. If the ping fails, then the current channel is closed
and replaced with a new one.
Once a working channel is available, a new request queue is made
and filled with any outstanding requests to be resent to the server.
"""
try:
# Ping the server to see if the current channel is reuseable, for
# example if gRPC reconnected the channel on its own or if the
# RPC error was transient and the channel is still open
ping_succeeded = self.client_worker.ping_server(timeout=5)
except grpc.RpcError:
ping_succeeded = False
if not ping_succeeded:
# Ping failed, try refreshing the data channel
logger.warning(
"Encountered connection issues in the data channel. "
"Attempting to reconnect."
)
try:
self.client_worker._connect_channel(reconnecting=True)
except ConnectionError:
logger.warning("Failed to reconnect the data channel")
raise
logger.debug("Reconnection succeeded!")
# Recreate the request queue, and resend outstanding requests
with self.lock:
self.request_queue = self._create_queue()
for request in self.outstanding_requests.values():
# Resend outstanding requests
self.request_queue.put(request)
# Use SimpleQueue to avoid deadlocks when appending to queue from __del__()
@staticmethod
def _create_queue():
return queue.SimpleQueue()
def close(self) -> None:
thread = None
with self.lock:
self._in_shutdown = True
# Notify blocking operations to fail.
self.cv.notify_all()
# Add sentinel to terminate streaming RPC.
if self.request_queue is not None:
# Intentional shutdown, tell server it can clean up the
# connection immediately and ignore the reconnect grace period.
cleanup_request = ray_client_pb2.DataRequest(
connection_cleanup=ray_client_pb2.ConnectionCleanupRequest()
)
self.request_queue.put(cleanup_request)
self.request_queue.put(None)
if self.data_thread is not None:
thread = self.data_thread
# Wait until streaming RPCs are done.
if thread is not None:
thread.join()
def _blocking_send(
self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
with self.lock:
self._check_shutdown()
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
self.outstanding_requests[req_id] = req
self.cv.wait_for(lambda: req_id in self.ready_data or self._in_shutdown)
self._check_shutdown()
data = self.ready_data[req_id]
del self.ready_data[req_id]
del self.outstanding_requests[req_id]
self._acknowledge(req_id)
return data
def _async_send(
self,
req: ray_client_pb2.DataRequest,
callback: Optional[ResponseCallable] = None,
) -> None:
with self.lock:
self._check_shutdown()
req_id = self._next_id()
req.req_id = req_id
self.asyncio_waiting_data[req_id] = callback
self.outstanding_requests[req_id] = req
self.request_queue.put(req)
# Must hold self.lock when calling this function.
def _check_shutdown(self):
assert self.lock.locked()
if not self._in_shutdown:
return
self.lock.release()
# Do not try disconnect() or throw exceptions in self.data_thread.
# Otherwise deadlock can occur.
if threading.current_thread().ident == self.data_thread.ident:
return
from ray.util import disconnect
disconnect()
self.lock.acquire()
if self._last_exception is not None:
msg = (
"Request can't be sent because the Ray client has already "
"been disconnected due to an error. Last exception: "
f"{self._last_exception}"
)
else:
msg = (
"Request can't be sent because the Ray client has already "
"been disconnected."
)
raise ConnectionError(msg)
def Init(
self, request: ray_client_pb2.InitRequest, context=None
) -> ray_client_pb2.InitResponse:
datareq = ray_client_pb2.DataRequest(
init=request,
)
resp = self._blocking_send(datareq)
return resp.init
def PrepRuntimeEnv(
self, request: ray_client_pb2.PrepRuntimeEnvRequest, context=None
) -> ray_client_pb2.PrepRuntimeEnvResponse:
datareq = ray_client_pb2.DataRequest(
prep_runtime_env=request,
)
resp = self._blocking_send(datareq)
return resp.prep_runtime_env
def ConnectionInfo(self, context=None) -> ray_client_pb2.ConnectionInfoResponse:
datareq = ray_client_pb2.DataRequest(
connection_info=ray_client_pb2.ConnectionInfoRequest()
)
resp = self._blocking_send(datareq)
return resp.connection_info
def GetObject(
self, request: ray_client_pb2.GetRequest, context=None
) -> ray_client_pb2.GetResponse:
datareq = ray_client_pb2.DataRequest(
get=request,
)
resp = self._blocking_send(datareq)
return resp.get
def RegisterGetCallback(
self, request: ray_client_pb2.GetRequest, callback: ResponseCallable
) -> None:
if len(request.ids) != 1:
raise ValueError(
"RegisterGetCallback() must have exactly 1 Object ID. "
f"Actual: {request}"
)
datareq = ray_client_pb2.DataRequest(
get=request,
)
collector = ChunkCollector(callback=callback, request=datareq)
self._async_send(datareq, collector)
# TODO: convert PutObject to async
def PutObject(
self, request: ray_client_pb2.PutRequest, context=None
) -> ray_client_pb2.PutResponse:
datareq = ray_client_pb2.DataRequest(
put=request,
)
resp = self._blocking_send(datareq)
return resp.put
def ReleaseObject(
self, request: ray_client_pb2.ReleaseRequest, context=None
) -> None:
datareq = ray_client_pb2.DataRequest(
release=request,
)
self._async_send(datareq)
def Schedule(self, request: ray_client_pb2.ClientTask, callback: ResponseCallable):
datareq = ray_client_pb2.DataRequest(task=request)
self._async_send(datareq, callback)
def Terminate(
self, request: ray_client_pb2.TerminateRequest
) -> ray_client_pb2.TerminateResponse:
req = ray_client_pb2.DataRequest(
terminate=request,
)
resp = self._blocking_send(req)
return resp.terminate
def ListNamedActors(
self, request: ray_client_pb2.ClientListNamedActorsRequest
) -> ray_client_pb2.ClientListNamedActorsResponse:
req = ray_client_pb2.DataRequest(
list_named_actors=request,
)
resp = self._blocking_send(req)
return resp.list_named_actors
| DataClient |
python | huggingface__transformers | src/transformers/models/m2m_100/modeling_m2m_100.py | {
"start": 40429,
"end": 45875
} | class ____(M2M100PreTrainedModel):
_tied_weights_keys = {
"decoder.embed_tokens.weight": "shared.weight",
"encoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: M2M100Config):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.shared = M2M100ScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)
self.encoder = M2M100Encoder(config)
self.decoder = M2M100Decoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The M2M100 Model with a language modeling head. Can be used for summarization.
"""
)
| M2M100Model |
python | conda__conda | conda/models/prefix_graph.py | {
"start": 14427,
"end": 16633
} | class ____(PrefixGraph):
"""
Compared with PrefixGraph, this class takes in more than one record of a given name,
and operates on that graph from the higher view across any matching dependencies. It is
not a Prefix thing, but more like a "graph of all possible candidates" thing, and is used
for unsatisfiability analysis
"""
def __init__(self, records, specs=()):
records = tuple(records)
super().__init__(records, specs)
self.specs_by_name = defaultdict(dict)
for node in records:
parent_dict = self.specs_by_name.get(node.name, {})
for dep in tuple(MatchSpec(d) for d in node.depends):
deps = parent_dict.get(dep.name, set())
deps.add(dep)
parent_dict[dep.name] = deps
self.specs_by_name[node.name] = parent_dict
consolidated_graph = {}
# graph is toposorted, so looping over it is in dependency order
for node, parent_nodes in reversed(list(self.graph.items())):
cg = consolidated_graph.get(node.name, set())
cg.update(_.name for _ in parent_nodes)
consolidated_graph[node.name] = cg
self.graph_by_name = consolidated_graph
def breadth_first_search_by_name(self, root_spec, target_spec):
"""Return shorted path from root_spec to spec_name"""
queue = []
queue.append([root_spec])
visited = []
while queue:
path = queue.pop(0)
node = path[-1]
if node in visited:
continue
visited.append(node)
if node == target_spec:
return path
children = []
specs = self.specs_by_name.get(node.name)
if specs is None:
continue
for _, deps in specs.items():
children.extend(list(deps))
for adj in children:
if adj.name == target_spec.name and adj.version != target_spec.version:
pass
else:
new_path = list(path)
new_path.append(adj)
queue.append(new_path)
| GeneralGraph |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 8509,
"end": 9667
} | class ____(LogCaptureAPITestCase):
endpoint = "concurrent-ratelimit-endpoint"
def test_concurrent_request_finishes(self) -> None:
self._caplog.set_level(logging.INFO, logger="sentry")
for i in range(10):
self.get_success_response()
# these requests were done in succession, so we should not have any
# rate limiting
self.assert_access_log_recorded()
for i in range(10):
assert not hasattr(self.captured_logs[i], "token_type")
assert self.captured_logs[0].group == RateLimitedEndpoint.rate_limits.group
assert self.captured_logs[i].concurrent_requests == "1"
assert self.captured_logs[i].concurrent_limit == "1"
assert self.captured_logs[i].rate_limit_type == "not_limited"
assert self.captured_logs[i].limit == "20"
# we cannot assert on the exact amount of remaining requests because
# we may be crossing a second boundary during our test. That would make things
# flaky.
assert int(self.captured_logs[i].remaining) < 20
@all_silo_test
| TestAccessLogConcurrentRateLimited |
python | pyinstaller__pyinstaller | PyInstaller/depend/analysis.py | {
"start": 3064,
"end": 51122
} | class ____(ModuleGraph):
"""
Directed graph whose nodes represent modules and edges represent dependencies between these modules.
This high-level subclass wraps the lower-level `ModuleGraph` class with support for graph and runtime hooks.
While each instance of `ModuleGraph` represents a set of disconnected trees, each instance of this class *only*
represents a single connected tree whose root node is the Python script originally passed by the user on the
command line. For that reason, while there may (and typically do) exist more than one `ModuleGraph` instance,
there typically exists only a singleton instance of this class.
Attributes
----------
_hooks : ModuleHookCache
Dictionary mapping the fully-qualified names of all modules with normal (post-graph) hooks to the absolute paths
of such hooks. See the the `_find_module_path()` method for details.
_hooks_pre_find_module_path : ModuleHookCache
Dictionary mapping the fully-qualified names of all modules with pre-find module path hooks to the absolute
paths of such hooks. See the the `_find_module_path()` method for details.
_hooks_pre_safe_import_module : ModuleHookCache
Dictionary mapping the fully-qualified names of all modules with pre-safe import module hooks to the absolute
paths of such hooks. See the `_safe_import_module()` method for details.
_user_hook_dirs : list
List of the absolute paths of all directories containing user-defined hooks for the current application.
_excludes : list
List of module names to be excluded when searching for dependencies.
_additional_files_cache : AdditionalFilesCache
Cache of all external dependencies (e.g., binaries, datas) listed in hook scripts for imported modules.
_module_collection_mode : dict
A dictionary of module/package collection mode settings set by hook scripts for their modules.
_bindepend_symlink_suppression : set
A set of paths or path patterns corresponding to shared libraries for which binary dependency analysis should
not create symbolic links into top-level application directory.
_base_modules: list
Dependencies for `base_library.zip` (which remain the same for every executable).
"""
# Note: these levels are completely arbitrary and may be adjusted if needed.
LOG_LEVEL_MAPPING = {0: INFO, 1: DEBUG, 2: TRACE, 3: TRACE, 4: TRACE}
def __init__(self, pyi_homepath, user_hook_dirs=(), excludes=(), **kwargs):
super().__init__(excludes=excludes, **kwargs)
# Homepath to the place where is PyInstaller located.
self._homepath = pyi_homepath
# modulegraph Node for the main python script that is analyzed by PyInstaller.
self._top_script_node = None
# Absolute paths of all user-defined hook directories.
self._excludes = excludes
self._reset(user_hook_dirs)
self._analyze_base_modules()
def _reset(self, user_hook_dirs):
"""
Reset for another set of scripts. This is primary required for running the test-suite.
"""
self._top_script_node = None
self._additional_files_cache = AdditionalFilesCache()
self._module_collection_mode = dict()
self._bindepend_symlink_suppression = set()
# Hook sources: user-supplied (command-line / spec file), entry-point (upstream hooks, contributed hooks), and
# built-in hooks. The order does not really matter anymore, because each entry is now a (location, priority)
# tuple, and order is determined from assigned priority (which may also be overridden by hooks themselves).
self._user_hook_dirs = [
*user_hook_dirs,
(os.path.join(PACKAGEPATH, 'hooks'), HOOK_PRIORITY_BUILTIN_HOOKS),
]
# Hook-specific lookup tables. These need to reset when reusing cached PyiModuleGraph to avoid hooks to refer to
# files or data from another test-case.
logger.info('Initializing module graph hook caches...')
self._hooks = self._cache_hooks("")
self._hooks_pre_safe_import_module = self._cache_hooks('pre_safe_import_module')
self._hooks_pre_find_module_path = self._cache_hooks('pre_find_module_path')
# Search for run-time hooks in all hook directories.
self._available_rthooks = defaultdict(list)
for uhd, _ in self._user_hook_dirs:
uhd_path = os.path.abspath(os.path.join(uhd, 'rthooks.dat'))
try:
with open(uhd_path, 'r', encoding='utf-8') as f:
rthooks = ast.literal_eval(f.read())
except FileNotFoundError:
# Ignore if this hook path doesn't have run-time hooks.
continue
except Exception as e:
logger.error('Unable to read run-time hooks from %r: %s' % (uhd_path, e))
continue
self._merge_rthooks(rthooks, uhd, uhd_path)
# Convert back to a standard dict.
self._available_rthooks = dict(self._available_rthooks)
def _merge_rthooks(self, rthooks, uhd, uhd_path):
"""
The expected data structure for a run-time hook file is a Python dictionary of type ``Dict[str, List[str]]``,
where the dictionary keys are module names and the sequence strings are Python file names.
Check then merge this data structure, updating the file names to be absolute.
"""
# Check that the root element is a dict.
assert isinstance(rthooks, dict), 'The root element in %s must be a dict.' % uhd_path
for module_name, python_file_name_list in rthooks.items():
# Ensure the key is a string.
assert isinstance(module_name, str), \
'%s must be a dict whose keys are strings; %s is not a string.' % (uhd_path, module_name)
# Ensure the value is a list.
assert isinstance(python_file_name_list, list), \
'The value of %s key %s must be a list.' % (uhd_path, module_name)
if module_name in self._available_rthooks:
logger.warning(
'Runtime hooks for %s have already been defined. Skipping the runtime hooks for %s that are '
'defined in %s.', module_name, module_name, os.path.join(uhd, 'rthooks')
)
# Skip this module
continue
# Merge this with existing run-time hooks.
for python_file_name in python_file_name_list:
# Ensure each item in the list is a string.
assert isinstance(python_file_name, str), \
'%s key %s, item %r must be a string.' % (uhd_path, module_name, python_file_name)
# Transform it into an absolute path.
abs_path = os.path.join(uhd, 'rthooks', python_file_name)
# Make sure this file exists.
assert os.path.exists(abs_path), \
'In %s, key %s, the file %r expected to be located at %r does not exist.' % \
(uhd_path, module_name, python_file_name, abs_path)
# Merge it.
self._available_rthooks[module_name].append(abs_path)
@staticmethod
def _findCaller(*args, **kwargs):
# Used to add an additional stack-frame above logger.findCaller. findCaller expects the caller to be three
# stack-frames above itself.
return logger.findCaller(*args, **kwargs)
def msg(self, level, s, *args):
"""
Print a debug message with the given level.
1. Map the msg log level to a logger log level.
2. Generate the message format (the same format as ModuleGraph)
3. Find the caller, which findCaller expects three stack-frames above itself:
[3] caller -> [2] msg (here) -> [1] _findCaller -> [0] logger.findCaller
4. Create a logRecord with the caller's information.
5. Handle the logRecord.
"""
try:
level = self.LOG_LEVEL_MAPPING[level]
except KeyError:
return
if not logger.isEnabledFor(level):
return
msg = "%s %s" % (s, ' '.join(map(repr, args)))
try:
fn, lno, func, sinfo = self._findCaller()
except ValueError: # pragma: no cover
fn, lno, func, sinfo = "(unknown file)", 0, "(unknown function)", None
record = logger.makeRecord(logger.name, level, fn, lno, msg, [], None, func, None, sinfo)
logger.handle(record)
# Set logging methods so that the stack is correctly detected.
msgin = msg
msgout = msg
def _cache_hooks(self, hook_type):
"""
Create a cache of all hooks of the specified type.
The cache will include all official hooks defined by the PyInstaller codebase _and_ all unofficial hooks
defined for the current application.
Parameters
----------
hook_type : str
Type of hooks to be cached, equivalent to the basename of the subpackage of the `PyInstaller.hooks`
package containing such hooks (e.g., empty string for standard hooks, `pre_safe_import_module` for
pre-safe-import-module hooks, `pre_find_module_path` for pre-find-module-path hooks).
"""
# Cache of this type of hooks.
hook_dirs = []
for user_hook_dir, priority in self._user_hook_dirs:
# Absolute path of the user-defined subdirectory of this hook type. If this directory exists, add it to the
# list to be cached.
user_hook_type_dir = os.path.join(user_hook_dir, hook_type)
if os.path.isdir(user_hook_type_dir):
hook_dirs.append((user_hook_type_dir, priority))
return ModuleHookCache(self, hook_dirs)
def _analyze_base_modules(self):
"""
Analyze dependencies of the the modules in base_library.zip.
"""
logger.info('Analyzing modules for base_library.zip ...')
required_mods = []
# Collect submodules from required modules in base_library.zip.
for m in PY3_BASE_MODULES:
if is_package(m):
required_mods += collect_submodules(m)
else:
required_mods.append(m)
# Initialize ModuleGraph.
self._base_modules = [mod for req in required_mods for mod in self.import_hook(req)]
def add_script(self, pathname, caller=None):
"""
Wrap the parent's 'run_script' method and create graph from the first script in the analysis, and save its
node to use as the "caller" node for all others. This gives a connected graph rather than a collection of
unrelated trees.
"""
if self._top_script_node is None:
# Remember the node for the first script.
try:
self._top_script_node = super().add_script(pathname)
except SyntaxError:
print("\nSyntax error in", pathname, file=sys.stderr)
formatted_lines = traceback.format_exc().splitlines(True)
print(*formatted_lines[-4:], file=sys.stderr)
sys.exit(1)
# Create references from the top script to the base_modules in graph.
for node in self._base_modules:
self.add_edge(self._top_script_node, node)
# Return top-level script node.
return self._top_script_node
else:
if not caller:
# Defaults to as any additional script is called from the top-level script.
caller = self._top_script_node
return super().add_script(pathname, caller=caller)
def process_post_graph_hooks(self, analysis):
"""
For each imported module, run this module's post-graph hooks if any.
Parameters
----------
analysis: build_main.Analysis
The Analysis that calls the hooks
"""
# For each iteration of the infinite "while" loop below:
#
# 1. All hook() functions defined in cached hooks for imported modules are called. This may result in new
# modules being imported (e.g., as hidden imports) that were ignored earlier in the current iteration: if
# this is the case, all hook() functions defined in cached hooks for these modules will be called by the next
# iteration.
# 2. All cached hooks whose hook() functions were called are removed from this cache. If this cache is empty, no
# hook() functions will be called by the next iteration and this loop will be terminated.
# 3. If no hook() functions were called, this loop is terminated.
logger.info('Processing module hooks (post-graph stage)...')
while True:
# Set of the names of all imported modules whose post-graph hooks are run by this iteration, preventing the
# next iteration from re- running these hooks. If still empty at the end of this iteration, no post-graph
# hooks were run; thus, this loop will be terminated.
hooked_module_names = set()
# For each remaining hookable module and corresponding hooks...
for module_name, module_hook in self._hooks.items():
# Graph node for this module if imported or "None" otherwise.
module_node = self.find_node(module_name, create_nspkg=False)
# If this module has not been imported, temporarily ignore it. This module is retained in the cache, as
# a subsequently run post-graph hook could import this module as a hidden import.
if module_node is None:
continue
# If this module is unimportable, permanently ignore it.
if type(module_node).__name__ not in VALID_MODULE_TYPES:
hooked_module_names.add(module_name)
continue
# Run this script's post-graph hook.
module_hook.post_graph(analysis)
# Cache all external dependencies listed by this script after running this hook, which could add
# dependencies.
self._additional_files_cache.add(module_name, module_hook.binaries, module_hook.datas)
# Update package collection mode settings.
self._module_collection_mode.update(module_hook.module_collection_mode)
# Update symbolic link suppression patterns for binary dependency analysis.
self._bindepend_symlink_suppression.update(module_hook.bindepend_symlink_suppression)
# Prevent this module's hooks from being run again.
hooked_module_names.add(module_name)
# Prevent all post-graph hooks run above from being run again by the next iteration.
self._hooks.remove_modules(*hooked_module_names)
# If no post-graph hooks were run, terminate iteration.
if not hooked_module_names:
break
def _find_all_excluded_imports(self, module_name):
"""
Collect excludedimports from the hooks of the specified module and all its parents.
"""
excluded_imports = set()
while module_name:
# Gather excluded imports from hook belonging to the module.
module_hook = self._hooks.get(module_name, None)
if module_hook:
excluded_imports.update(module_hook.excludedimports)
# Change module name to the module's parent name
module_name = module_name.rpartition('.')[0]
return excluded_imports
def _safe_import_hook(
self, target_module_partname, source_module, target_attr_names, level=DEFAULT_IMPORT_LEVEL, edge_attr=None
):
if source_module is not None:
# Gather all excluded imports for the referring modules, as well as its parents.
# For example, we want the excluded imports specified by hook for PIL to be also applied when the referring
# module is its submodule, PIL.Image.
excluded_imports = self._find_all_excluded_imports(source_module.identifier)
# Apply extra processing only if we have any excluded-imports rules
if excluded_imports:
# Resolve the base module name. Level can be ABSOLUTE_IMPORT_LEVEL (= 0) for absolute imports, or an
# integer indicating the relative level. We do not use equality comparison just in case we ever happen
# to get ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL (-1), which is a remnant of python2 days.
if level > ABSOLUTE_IMPORT_LEVEL:
if isinstance(source_module, Package):
# Package
base_module_name = source_module.identifier
else:
# Module in a package; base name must be the parent package name!
base_module_name = '.'.join(source_module.identifier.split('.')[:-1])
# Adjust the base module name based on level
if level > 1:
base_module_name = '.'.join(base_module_name.split('.')[:-(level - 1)])
if target_module_partname:
base_module_name += '.' + target_module_partname
else:
base_module_name = target_module_partname
def _exclude_module(module_name, excluded_imports, referrer_name):
"""
Helper for checking whether given module should be excluded.
Returns the name of exclusion rule if module should be excluded, None otherwise.
"""
module_name_parts = module_name.split('.')
for excluded_import in excluded_imports:
excluded_import_parts = excluded_import.split('.')
match = module_name_parts[:len(excluded_import_parts)] == excluded_import_parts
if match:
# Check if the referrer is (was!) subject to the same rule. Because if it was and was
# analyzed anyway, some other import chain must have overrode the exclusion, and we should
# waive it here. A package hook might exclude a part (a subpackage) of the said package to
# prevent its collection when there are no external references; but when they are (for
# example, user explicitly imports the said subpackage in their program), we must let the
# subpackage import its submodules.
referrer_name_parts = referrer_name.split('.')
referrer_match = referrer_name_parts[:len(excluded_import_parts)] == excluded_import_parts
if referrer_match:
logger.debug(
"Deactivating suppression rule %r for module %r because it also applies to the "
"referrer (%r)...", excluded_import, module_name, referrer_name
)
continue
return excluded_import
return None
# First, check if base module name is to be excluded.
# This covers both basic `import a` and `import a.b.c`, as well as `from d import e, f` where base
# module `d` is excluded.
excluded_import_rule = _exclude_module(
base_module_name,
excluded_imports,
source_module.identifier,
)
if excluded_import_rule:
logger.debug(
"Suppressing import of %r from module %r due to excluded import %r specified in a hook for %r "
"(or its parent package(s)).", base_module_name, source_module.identifier, excluded_import_rule,
source_module.identifier
)
return []
# If we have target attribute names, check each of them, and remove excluded ones from the
# `target_attr_names` list.
if target_attr_names:
filtered_target_attr_names = []
for target_attr_name in target_attr_names:
submodule_name = base_module_name + '.' + target_attr_name
excluded_import_rule = _exclude_module(
submodule_name,
excluded_imports,
source_module.identifier,
)
if excluded_import_rule:
logger.debug(
"Suppressing import of %r from module %r due to excluded import %r specified in a hook "
"for %r (or its parent package(s)).", submodule_name, source_module.identifier,
excluded_import_rule, source_module.identifier
)
else:
filtered_target_attr_names.append(target_attr_name)
# Swap with filtered target attribute names list; if no elements remain after the filtering, pass
# None...
target_attr_names = filtered_target_attr_names or None
ret_modules = super()._safe_import_hook(
target_module_partname, source_module, target_attr_names, level, edge_attr
)
# Ensure that hooks are pre-loaded for returned module(s), in an attempt to ensure that hooks are called in the
# order of imports. The hooks are cached, so there should be no downsides to pre-loading hooks early (as opposed
# to loading them in post-graph analysis). When modules are imported from other modules, the hooks for those
# referring (source) modules and their parent package(s) are loaded by the exclusion mechanism that takes place
# before the above `super()._safe_import_hook` call. The code below attempts to complement that, but for the
# referred (target) modules and their parent package(s).
for ret_module in ret_modules:
if type(ret_module).__name__ not in VALID_MODULE_TYPES:
continue
# (Ab)use the `_find_all_excluded_imports` helper to load all hooks for the given module and its parent
# package(s).
self._find_all_excluded_imports(ret_module.identifier)
return ret_modules
def _safe_import_module(self, module_basename, module_name, parent_package):
"""
Create a new graph node for the module with the passed name under the parent package signified by the passed
graph node.
This method wraps the superclass method with support for pre-import module hooks. If such a hook exists for
this module (e.g., a script `PyInstaller.hooks.hook-{module_name}` containing a function
`pre_safe_import_module()`), that hook will be run _before_ the superclass method is called.
Pre-Safe-Import-Hooks are performed just *prior* to importing the module. When running the hook, the modules
parent package has already been imported and ti's `__path__` is set up. But the module is just about to be
imported.
See the superclass method for description of parameters and return value.
"""
# If this module has a pre-safe import module hook, run it. Make sure to remove it first, to prevent subsequent
# calls from running it again.
hook = self._hooks_pre_safe_import_module.pop(module_name, None)
if hook is not None:
# Dynamically import this hook as a fabricated module.
hook_path, hook_basename = os.path.split(hook.hook_filename)
logger.info('Processing pre-safe-import-module hook %r from %r', hook_basename, hook_path)
hook_module_name = 'PyInstaller_hooks_pre_safe_import_module_' + module_name.replace('.', '_')
hook_module = importlib_load_source(hook_module_name, hook.hook_filename)
# Object communicating changes made by this hook back to us.
hook_api = PreSafeImportModuleAPI(
module_graph=self,
module_basename=module_basename,
module_name=module_name,
parent_package=parent_package,
)
# Run this hook, passed this object.
if not hasattr(hook_module, 'pre_safe_import_module'):
raise NameError('pre_safe_import_module() function not defined by hook %r.' % hook_module)
hook_module.pre_safe_import_module(hook_api)
# Respect method call changes requested by this hook.
module_basename = hook_api.module_basename
module_name = hook_api.module_name
# Call the superclass method.
return super()._safe_import_module(module_basename, module_name, parent_package)
def _find_module_path(self, fullname, module_name, search_dirs):
"""
Get a 3-tuple detailing the physical location of the module with the passed name if that module exists _or_
raise `ImportError` otherwise.
This method wraps the superclass method with support for pre-find module path hooks. If such a hook exists
for this module (e.g., a script `PyInstaller.hooks.hook-{module_name}` containing a function
`pre_find_module_path()`), that hook will be run _before_ the superclass method is called.
See superclass method for parameter and return value descriptions.
"""
# If this module has a pre-find module path hook, run it. Make sure to remove it first, to prevent subsequent
# calls from running it again.
hook = self._hooks_pre_find_module_path.pop(fullname, None)
if hook is not None:
# Dynamically import this hook as a fabricated module.
hook_path, hook_basename = os.path.split(hook.hook_filename)
logger.info('Processing pre-find-module-path hook %r from %r', hook_basename, hook_path)
hook_fullname = 'PyInstaller_hooks_pre_find_module_path_' + fullname.replace('.', '_')
hook_module = importlib_load_source(hook_fullname, hook.hook_filename)
# Object communicating changes made by this hook back to us.
hook_api = PreFindModulePathAPI(
module_graph=self,
module_name=fullname,
search_dirs=search_dirs,
)
# Run this hook, passed this object.
if not hasattr(hook_module, 'pre_find_module_path'):
raise NameError('pre_find_module_path() function not defined by hook %r.' % hook_module)
hook_module.pre_find_module_path(hook_api)
# Respect search-directory changes requested by this hook.
search_dirs = hook_api.search_dirs
# Call the superclass method.
return super()._find_module_path(fullname, module_name, search_dirs)
def get_code_objects(self):
"""
Get code objects from ModuleGraph for pure Python modules. This allows to avoid writing .pyc/pyo files to hdd
at later stage.
:return: Dict with module name and code object.
"""
code_dict = {}
mod_types = PURE_PYTHON_MODULE_TYPES
for node in self.iter_graph(start=self._top_script_node):
# TODO This is terrible. To allow subclassing, types should never be directly compared. Use isinstance()
# instead, which is safer, simpler, and accepts sets. Most other calls to type() in the codebase should also
# be refactored to call isinstance() instead.
# get node type e.g. Script
mg_type = type(node).__name__
if mg_type in mod_types:
if node.code:
code_dict[node.identifier] = node.code
return code_dict
def _make_toc(self, typecode=None):
"""
Return the name, path and type of selected nodes as a TOC. The selection is determined by the given list
of PyInstaller TOC typecodes. If that list is empty we return the complete flattened graph as a TOC with the
ModuleGraph note types in place of typecodes -- meant for debugging only. Normally we return ModuleGraph
nodes whose types map to the requested PyInstaller typecode(s) as indicated in the MODULE_TYPES_TO_TOC_DICT.
We use the ModuleGraph (really, ObjectGraph) flatten() method to scan all the nodes. This is patterned after
ModuleGraph.report().
"""
toc = list()
for node in self.iter_graph(start=self._top_script_node):
entry = self._node_to_toc(node, typecode)
# Append the entry. We do not check for duplicates here; the TOC normalization is left to caller.
# However, as entries are obtained from modulegraph, there should not be any duplicates at this stage.
if entry is not None:
toc.append(entry)
return toc
def make_pure_toc(self):
"""
Return all pure Python modules formatted as TOC.
"""
# PyInstaller should handle special module types without code object.
return self._make_toc(PURE_PYTHON_MODULE_TYPES)
def make_binaries_toc(self):
"""
Return all binary Python modules formatted as TOC.
"""
return self._make_toc(BINARY_MODULE_TYPES)
def make_missing_toc(self):
"""
Return all MISSING Python modules formatted as TOC.
"""
return self._make_toc(BAD_MODULE_TYPES)
@staticmethod
def _node_to_toc(node, typecode=None):
# TODO This is terrible. Everything in Python has a type. It is nonsensical to even speak of "nodes [that] are
# not typed." How would that even occur? After all, even "None" has a type! (It is "NoneType", for the curious.)
# Remove this, please.
# Get node type, e.g., Script
mg_type = type(node).__name__
assert mg_type is not None
if typecode and mg_type not in typecode:
# Type is not a to be selected one, skip this one
return None
# Extract the identifier and a path if any.
if mg_type == 'Script':
# for Script nodes only, identifier is a whole path
(name, ext) = os.path.splitext(node.filename)
name = os.path.basename(name)
elif mg_type == 'ExtensionPackage':
# Package with __init__ module being an extension module. This needs to end up as e.g. 'mypkg/__init__.so'.
# Convert the packages name ('mypkg') into the module name ('mypkg.__init__') *here* to keep special cases
# away elsewhere (where the module name is converted to a filename).
name = node.identifier + ".__init__"
else:
name = node.identifier
path = node.filename if node.filename is not None else ''
# Ensure name is really 'str'. Module graph might return object type 'modulegraph.Alias' which inherits fromm
# 'str'. But 'marshal.dumps()' function is able to marshal only 'str'. Otherwise on Windows PyInstaller might
# fail with message like:
# ValueError: unmarshallable object
name = str(name)
# Translate to the corresponding TOC typecode.
toc_type = MODULE_TYPES_TO_TOC_DICT[mg_type]
return name, path, toc_type
def nodes_to_toc(self, nodes):
"""
Given a list of nodes, create a TOC representing those nodes. This is mainly used to initialize a TOC of
scripts with the ones that are runtime hooks. The process is almost the same as _make_toc(), but the caller
guarantees the nodes are valid, so minimal checking.
"""
return [self._node_to_toc(node) for node in nodes]
# Return true if the named item is in the graph as a BuiltinModule node. The passed name is a basename.
def is_a_builtin(self, name):
node = self.find_node(name)
if node is None:
return False
return type(node).__name__ == 'BuiltinModule'
def get_importers(self, name):
"""
List all modules importing the module with the passed name.
Returns a list of (identifier, DependencyIinfo)-tuples. If the names module has not yet been imported, this
method returns an empty list.
Parameters
----------
name : str
Fully-qualified name of the module to be examined.
Returns
----------
list
List of (fully-qualified names, DependencyIinfo)-tuples of all modules importing the module with the passed
fully-qualified name.
"""
def get_importer_edge_data(importer):
edge = self.graph.edge_by_node(importer, name)
# edge might be None in case an AliasModule was added.
if edge is not None:
return self.graph.edge_data(edge)
node = self.find_node(name)
if node is None:
return []
_, importers = self.get_edges(node)
importers = (importer.identifier for importer in importers if importer is not None)
return [(importer, get_importer_edge_data(importer)) for importer in importers]
# TODO: create a class from this function.
def analyze_runtime_hooks(self, custom_runhooks):
"""
Analyze custom run-time hooks and run-time hooks implied by found modules.
:return : list of Graph nodes.
"""
rthooks_nodes = []
logger.info('Analyzing run-time hooks ...')
# Process custom runtime hooks (from --runtime-hook options). The runtime hooks are order dependent. First hooks
# in the list are executed first. Put their graph nodes at the head of the priority_scripts list Pyinstaller
# defined rthooks and thus they are executed first.
if custom_runhooks:
for hook_file in custom_runhooks:
logger.info("Including custom run-time hook %r", hook_file)
hook_file = os.path.abspath(hook_file)
# Not using "try" here because the path is supposed to exist, if it does not, the raised error will
# explain.
rthooks_nodes.append(self.add_script(hook_file))
# Find runtime hooks that are implied by packages already imported. Get a temporary TOC listing all the scripts
# and packages graphed so far. Assuming that runtime hooks apply only to modules and packages.
temp_toc = self._make_toc(VALID_MODULE_TYPES)
for (mod_name, path, typecode) in temp_toc:
# Look if there is any run-time hook for given module.
if mod_name in self._available_rthooks:
# There could be several run-time hooks for a module.
for abs_path in self._available_rthooks[mod_name]:
hook_path, hook_basename = os.path.split(abs_path)
logger.info("Including run-time hook %r from %r", hook_basename, hook_path)
rthooks_nodes.append(self.add_script(abs_path))
return rthooks_nodes
def add_hiddenimports(self, module_list):
"""
Add hidden imports that are either supplied as CLI option --hidden-import=MODULENAME or as dependencies from
some PyInstaller features when enabled (e.g., crypto feature).
"""
assert self._top_script_node is not None
# Analyze the script's hidden imports (named on the command line).
for modnm in module_list:
node = self.find_node(modnm)
if node is not None:
logger.debug('Hidden import %r already found', modnm)
else:
logger.info("Analyzing hidden import %r", modnm)
# ModuleGraph throws ImportError if import not found.
try:
nodes = self.import_hook(modnm)
assert len(nodes) == 1
node = nodes[0]
except ImportError:
logger.error("Hidden import %r not found", modnm)
continue
# Create references from the top script to the hidden import, even if found otherwise. Do not waste time
# checking whether it is actually added by this (test-) script.
self.add_edge(self._top_script_node, node)
def get_code_using(self, module: str) -> dict:
"""
Find modules that import a given **module**.
"""
co_dict = {}
pure_python_module_types = PURE_PYTHON_MODULE_TYPES | {
'Script',
}
node = self.find_node(module)
if node:
referrers = self.incoming(node)
for r in referrers:
# Under python 3.7 and earlier, if `module` is added to hidden imports, one of referrers ends up being
# None, causing #3825. Work around it.
if r is None:
continue
# Ensure that modulegraph objects have 'code' attribute.
if type(r).__name__ not in pure_python_module_types:
continue
identifier = r.identifier
if identifier == module or identifier.startswith(module + '.'):
# Skip self references or references from `modules`'s own submodules.
continue
# The code object may be None if referrer ends up shadowed by eponymous directory that ends up treated
# as a namespace package. See #6873 for an example.
if r.code is None:
continue
co_dict[r.identifier] = r.code
return co_dict
def metadata_required(self) -> set:
"""
Collect metadata for all packages that appear to need it.
"""
# List every function that we can think of which is known to require metadata.
out = set()
out |= self._metadata_from(
"pkg_resources",
["get_distribution"], # Requires metadata for one distribution.
["require"], # Requires metadata for all dependencies.
)
# importlib.metadata is often `import ... as` aliased to importlib_metadata for compatibility with < py38.
# Assume both are valid.
for importlib_metadata in ["importlib.metadata", "importlib_metadata"]:
out |= self._metadata_from(
importlib_metadata,
["metadata", "distribution", "version", "files", "requires"],
[],
)
return out
def _metadata_from(self, package, methods=(), recursive_methods=()) -> set:
"""
Collect metadata whose requirements are implied by given function names.
Args:
package:
The module name that must be imported in a source file to trigger the search.
methods:
Function names from **package** which take a distribution name as an argument and imply that metadata
is required for that distribution.
recursive_methods:
Like **methods** but also implies that a distribution's dependencies' metadata must be collected too.
Returns:
Required metadata in hook data ``(source, dest)`` format as returned by
:func:`PyInstaller.utils.hooks.copy_metadata()`.
Scan all source code to be included for usage of particular *key* functions which imply that that code will
require metadata for some distribution (which may not be its own) at runtime. In the case of a match,
collect the required metadata.
"""
from PyInstaller.utils.hooks import copy_metadata
from PyInstaller.compat import importlib_metadata
# Generate sets of possible function names to search for.
need_metadata = set()
need_recursive_metadata = set()
for method in methods:
need_metadata.update(bytecode.any_alias(package + "." + method))
for method in recursive_methods:
need_recursive_metadata.update(bytecode.any_alias(package + "." + method))
out = set()
for name, code in self.get_code_using(package).items():
for calls in bytecode.recursive_function_calls(code).values():
for function_name, args in calls:
# Only consider function calls taking one argument.
if len(args) != 1:
continue
package = args[0]
try:
if function_name in need_metadata:
out.update(copy_metadata(package))
elif function_name in need_recursive_metadata:
out.update(copy_metadata(package, recursive=True))
except importlib_metadata.PackageNotFoundError:
# Currently, we opt to silently skip over missing metadata.
continue
return out
def get_collected_packages(self) -> list:
"""
Return the list of collected python packages.
"""
# `node.identifier` might be an instance of `modulegraph.Alias`, hence explicit conversion to `str`.
return [
str(node.identifier) for node in self.iter_graph(start=self._top_script_node)
if type(node).__name__ == 'Package'
]
def make_hook_binaries_toc(self) -> list:
"""
Return the TOC list of binaries collected by hooks."
"""
toc = []
for node in self.iter_graph(start=self._top_script_node):
module_name = str(node.identifier)
for dest_name, src_name in self._additional_files_cache.binaries(module_name):
toc.append((dest_name, src_name, 'BINARY'))
return toc
def make_hook_datas_toc(self) -> list:
"""
Return the TOC list of data files collected by hooks."
"""
toc = []
for node in self.iter_graph(start=self._top_script_node):
module_name = str(node.identifier)
for dest_name, src_name in self._additional_files_cache.datas(module_name):
toc.append((dest_name, src_name, 'DATA'))
return toc
_cached_module_graph_ = None
def initialize_modgraph(excludes=(), user_hook_dirs=()):
"""
Create the cached module graph.
This function might appear weird but is necessary for speeding up test runtime because it allows caching basic
ModuleGraph object that gets created for 'base_library.zip'.
Parameters
----------
excludes : list
List of the fully-qualified names of all modules to be "excluded" and hence _not_ frozen into the executable.
user_hook_dirs : list
List of the absolute paths of all directories containing user-defined hooks for the current application or
`None` if no such directories were specified.
Returns
----------
PyiModuleGraph
Module graph with core dependencies.
"""
# Normalize parameters to ensure tuples and make comparison work.
user_hook_dirs = user_hook_dirs or ()
excludes = excludes or ()
# Ensure that __main__ is always excluded from the modulegraph, to prevent accidentally pulling PyInstaller itself
# into the modulegraph. This seems to happen on Windows, because modulegraph is able to resolve `__main__` as
# `.../PyInstaller.exe/__main__.py` and analyze it. The `__main__` has a different meaning during analysis compared
# to the program run-time, when it refers to the program's entry-point (which would always be part of the
# modulegraph anyway, by virtue of being the starting point of the analysis).
if "__main__" not in excludes:
excludes += ("__main__",)
# If there is a graph cached with the same excludes, reuse it. See ``PyiModulegraph._reset()`` for what is
# reset. This cache is used primarily to speed up the test-suite. Fixture `pyi_modgraph` calls this function with
# empty excludes, creating a graph suitable for the huge majority of tests.
global _cached_module_graph_
if _cached_module_graph_ and _cached_module_graph_._excludes == excludes:
logger.info('Reusing cached module dependency graph...')
graph = deepcopy(_cached_module_graph_)
graph._reset(user_hook_dirs)
return graph
logger.info('Initializing module dependency graph...')
# Construct the initial module graph by analyzing all import statements.
graph = PyiModuleGraph(
HOMEPATH,
excludes=excludes,
# get_implies() are hidden imports known by modulgraph.
implies=get_implies(),
user_hook_dirs=user_hook_dirs,
)
if not _cached_module_graph_:
# Only cache the first graph, see above for explanation.
logger.info('Caching module dependency graph...')
# cache a deep copy of the graph
_cached_module_graph_ = deepcopy(graph)
# Clear data which does not need to be copied from the cached graph since it will be reset by
# ``PyiModulegraph._reset()`` anyway.
_cached_module_graph_._hooks = None
_cached_module_graph_._hooks_pre_safe_import_module = None
_cached_module_graph_._hooks_pre_find_module_path = None
return graph
def get_bootstrap_modules():
"""
Get TOC with the bootstrapping modules and their dependencies.
:return: TOC with modules
"""
# Import 'struct' modules to get real paths to module file names.
mod_struct = __import__('struct')
# Basic modules necessary for the bootstrap process.
loader_mods = list()
loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
# On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically)
# and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from
# executable. 'zlib' is required to decompress this bytecode.
for mod_name in ['_struct', 'zlib']:
mod = __import__(mod_name) # C extension.
if hasattr(mod, '__file__'):
mod_file = os.path.abspath(mod.__file__)
# Resolve full destination name for extension, diverting it into python3.x/lib-dynload directory if
# necessary (to match behavior for extension collection introduced in #5604).
mod_dest = destination_name_for_extension(mod_name, mod_file, 'EXTENSION')
loader_mods.append((mod_dest, mod_file, 'EXTENSION'))
loader_mods.append(('struct', os.path.abspath(mod_struct.__file__), 'PYMODULE'))
# Loader/bootstrap modules.
# NOTE: These modules should be kept simple without any complicated dependencies.
loader_mods += [
('pyimod01_archive', os.path.join(loaderpath, 'pyimod01_archive.py'), 'PYMODULE'),
('pyimod02_importers', os.path.join(loaderpath, 'pyimod02_importers.py'), 'PYMODULE'),
('pyimod03_ctypes', os.path.join(loaderpath, 'pyimod03_ctypes.py'), 'PYMODULE'),
]
if is_win:
loader_mods.append(('pyimod04_pywin32', os.path.join(loaderpath, 'pyimod04_pywin32.py'), 'PYMODULE'))
# The bootstrap script
loader_mods.append(('pyiboot01_bootstrap', os.path.join(loaderpath, 'pyiboot01_bootstrap.py'), 'PYSOURCE'))
return loader_mods
| PyiModuleGraph |
python | h5py__h5py | h5py/tests/test_file.py | {
"start": 677,
"end": 4585
} | class ____(TestCase):
"""
Feature: Opening files with Python-style modes.
"""
def test_default(self):
""" Default semantics in the presence or absence of a file """
fname = self.mktemp()
# No existing file; error
with pytest.raises(FileNotFoundError):
with File(fname):
pass
# Existing readonly file; open read-only
with File(fname, 'w'):
pass
os.chmod(fname, stat.S_IREAD)
try:
with File(fname) as f:
self.assertTrue(f)
self.assertEqual(f.mode, 'r')
finally:
os.chmod(fname, stat.S_IWRITE)
# File exists but is not HDF5; raise OSError
with open(fname, 'wb') as f:
f.write(b'\x00')
with self.assertRaises(OSError):
File(fname)
def test_create(self):
""" Mode 'w' opens file in overwrite mode """
fname = self.mktemp()
fid = File(fname, 'w')
self.assertTrue(fid)
fid.create_group('foo')
fid.close()
fid = File(fname, 'w')
self.assertNotIn('foo', fid)
fid.close()
def test_create_exclusive(self):
""" Mode 'w-' opens file in exclusive mode """
fname = self.mktemp()
fid = File(fname, 'w-')
self.assertTrue(fid)
fid.close()
with self.assertRaises(FileExistsError):
File(fname, 'w-')
def test_append(self):
""" Mode 'a' opens file in append/readwrite mode, creating if necessary """
fname = self.mktemp()
fid = File(fname, 'a')
try:
self.assertTrue(fid)
fid.create_group('foo')
assert 'foo' in fid
finally:
fid.close()
fid = File(fname, 'a')
try:
assert 'foo' in fid
fid.create_group('bar')
assert 'bar' in fid
finally:
fid.close()
# Observed on cibuildwheel v2.19.1
# https://github.com/pypa/cibuildwheel/issues/1882
@pytest.mark.skipif(
os.getenv("CIBUILDWHEEL") == "1" and sys.platform == "linux",
reason="Linux docker cibuildwheel environment permissions issue",
)
def test_append_permissions(self):
""" Mode 'a' fails when file is read-only """
fname = self.mktemp()
with File(fname, 'a') as fid:
fid.create_group('foo')
os.chmod(fname, stat.S_IREAD) # Make file read-only
try:
with pytest.raises(PermissionError):
File(fname, 'a')
finally:
# Make it writable again so it can be deleted on Windows
os.chmod(fname, stat.S_IREAD | stat.S_IWRITE)
def test_readonly(self):
""" Mode 'r' opens file in readonly mode """
fname = self.mktemp()
fid = File(fname, 'w')
fid.close()
self.assertFalse(fid)
fid = File(fname, 'r')
self.assertTrue(fid)
with self.assertRaises(ValueError):
fid.create_group('foo')
fid.close()
def test_readwrite(self):
""" Mode 'r+' opens existing file in readwrite mode """
fname = self.mktemp()
fid = File(fname, 'w')
fid.create_group('foo')
fid.close()
fid = File(fname, 'r+')
assert 'foo' in fid
fid.create_group('bar')
assert 'bar' in fid
fid.close()
def test_nonexistent_file(self):
""" Modes 'r' and 'r+' do not create files """
fname = self.mktemp()
with self.assertRaises(FileNotFoundError):
File(fname, 'r')
with self.assertRaises(FileNotFoundError):
File(fname, 'r+')
def test_invalid_mode(self):
""" Invalid modes raise ValueError """
with self.assertRaises(ValueError):
File(self.mktemp(), 'mongoose')
| TestFileOpen |
python | python__mypy | mypyc/test/test_rarray.py | {
"start": 268,
"end": 1488
} | class ____(unittest.TestCase):
def test_basics(self) -> None:
a = RArray(int_rprimitive, 10)
assert a.item_type == int_rprimitive
assert a.length == 10
def test_str_conversion(self) -> None:
a = RArray(int_rprimitive, 10)
assert str(a) == "int[10]"
assert repr(a) == "<RArray <RPrimitive builtins.int>[10]>"
def test_eq(self) -> None:
a = RArray(int_rprimitive, 10)
assert a == RArray(int_rprimitive, 10)
assert a != RArray(bool_rprimitive, 10)
assert a != RArray(int_rprimitive, 9)
def test_hash(self) -> None:
assert hash(RArray(int_rprimitive, 10)) == hash(RArray(int_rprimitive, 10))
assert hash(RArray(bool_rprimitive, 5)) == hash(RArray(bool_rprimitive, 5))
def test_alignment(self) -> None:
a = RArray(int_rprimitive, 10)
assert compute_rtype_alignment(a) == PLATFORM_SIZE
b = RArray(bool_rprimitive, 55)
assert compute_rtype_alignment(b) == 1
def test_size(self) -> None:
a = RArray(int_rprimitive, 9)
assert compute_rtype_size(a) == 9 * PLATFORM_SIZE
b = RArray(bool_rprimitive, 3)
assert compute_rtype_size(b) == 3
| TestRArray |
python | Netflix__metaflow | metaflow/sidecar/sidecar_subprocess.py | {
"start": 1028,
"end": 9708
} | class ____(object):
def __init__(self, worker_type):
# type: (str, dict) -> None
self._worker_type = worker_type
# Sub-process launched and poller used
self._process = None
self._poller = None
# Retry counts when needing to send a MUST_SEND message
self._send_mustsend_remaining_tries = 0
# Keep track of the `mustsend` across restarts
self._cached_mustsend = None
# Tracks if a previous message had an error
self._prev_message_error = False
self.start()
def start(self):
if (
self._worker_type is not None
and self._worker_type.startswith(NULL_SIDECAR_PREFIX)
) or (platform.system() == "Darwin" and sys.version_info < (3, 0)):
# If on darwin and running python 2 disable sidecars
# there is a bug with importing poll from select in some cases
#
# TODO: Python 2 shipped by Anaconda allows for
# `from select import poll`. We can consider enabling sidecars
# for that distribution if needed at a later date.
self._poller = NullPoller()
self._process = None
self._logger("No sidecar started")
else:
self._starting = True
from select import poll
python_version = sys.executable
cmdline = [
python_version,
"-u",
os.path.dirname(__file__) + "/sidecar_worker.py",
self._worker_type,
]
self._logger("Starting sidecar")
debug.sidecar_exec(cmdline)
self._process = self._start_subprocess(cmdline)
if self._process is not None:
fcntl.fcntl(self._process.stdin, F_SETFL, O_NONBLOCK)
self._poller = poll()
self._poller.register(self._process.stdin.fileno(), select.POLLOUT)
else:
# unable to start subprocess, fallback to Null sidecar
self._logger("Unable to start subprocess")
self._poller = NullPoller()
def kill(self):
try:
msg = Message(MessageTypes.SHUTDOWN, None)
self._emit_msg(msg)
except:
pass
def send(self, msg, retries=3, thread_safe_send=False):
if msg.msg_type == MessageTypes.MUST_SEND:
# If this is a must-send message, we treat it a bit differently. A must-send
# message has to be properly sent before any of the other best effort messages.
self._cached_mustsend = msg.payload
self._send_mustsend_remaining_tries = MUST_SEND_RETRY_TIMES
self._send_mustsend(retries, thread_safe_send)
else:
# Ignore return code for send.
self._send_internal(msg, retries=retries, thread_safe_send=thread_safe_send)
def _start_subprocess(self, cmdline):
for _ in range(3):
try:
env = os.environ.copy()
inject_tracing_vars(env)
# Set stdout=sys.stdout & stderr=sys.stderr
# to print to console the output of sidecars.
return subprocess.Popen(
cmdline,
stdin=subprocess.PIPE,
env=env,
stdout=sys.stdout if debug.sidecar else subprocess.DEVNULL,
stderr=sys.stderr if debug.sidecar else subprocess.DEVNULL,
bufsize=0,
)
except blockingError as be:
self._logger("Sidecar popen failed: %s" % repr(be))
except Exception as e:
self._logger("Unknown popen error: %s" % repr(e))
break
def _send_internal(self, msg, retries=3, thread_safe_send=False):
if self._process is None:
return False
try:
if msg.msg_type == MessageTypes.BEST_EFFORT:
# If we have a mustsend to send, we need to send it first prior to
# sending a best-effort message
if self._send_mustsend_remaining_tries == -1:
# We could not send the "mustsend" so we don't try to send this out;
# restart sidecar so use the PipeUnavailableError caught below
raise PipeUnavailableError()
elif self._send_mustsend_remaining_tries > 0:
self._send_mustsend(thread_safe_send=thread_safe_send)
if self._send_mustsend_remaining_tries == 0:
self._emit_msg(msg, thread_safe_send)
self._prev_message_error = False
return True
else:
self._emit_msg(msg, thread_safe_send)
self._prev_message_error = False
return True
return False
except MsgTimeoutError:
# drop message, do not retry on timeout
self._logger("Unable to send message due to timeout")
self._prev_message_error = True
except Exception as ex:
if isinstance(ex, (PipeUnavailableError, BrokenPipeError)):
self._logger("Restarting sidecar due to broken/unavailable pipe")
self.start()
if self._cached_mustsend is not None:
self._send_mustsend_remaining_tries = MUST_SEND_RETRY_TIMES
# We don't send the "must send" here, letting it send "lazily" on the
# next message. The reason for this is to simplify the interactions
# with the retry logic.
else:
self._prev_message_error = True
if retries > 0:
self._logger("Retrying msg send to sidecar (due to %s)" % repr(ex))
return self._send_internal(msg, retries - 1, thread_safe_send)
else:
self._logger(
"Error sending log message (exhausted retries): %s" % repr(ex)
)
return False
def _send_mustsend(self, retries=3, thread_safe_send=False):
if (
self._cached_mustsend is not None
and self._send_mustsend_remaining_tries > 0
):
# If we don't succeed in sending the must-send, we will try again
# next time.
if self._send_internal(
Message(MessageTypes.MUST_SEND, self._cached_mustsend),
retries,
thread_safe_send,
):
self._cached_mustsend = None
self._send_mustsend_remaining_tries = 0
return True
else:
self._send_mustsend_remaining_tries -= 1
if self._send_mustsend_remaining_tries == 0:
# Mark as "failed after try"
self._send_mustsend_remaining_tries = -1
return False
def _write_bytes(self, msg_ser):
written_bytes = 0
while written_bytes < len(msg_ser):
# self._logger("Sent %d out of %d bytes" % (written_bytes, len(msg_ser)))
try:
fds = self._poller.poll(MESSAGE_WRITE_TIMEOUT_IN_MS)
if fds is None or len(fds) == 0:
raise MsgTimeoutError("Poller timed out")
for fd, event in fds:
if event & select.POLLERR:
raise PipeUnavailableError("Pipe unavailable")
f = os.write(fd, msg_ser[written_bytes:])
written_bytes += f
except NullSidecarError:
# sidecar is disabled, ignore all messages
break
def _emit_msg(self, msg, thread_safe_send=False):
# If the previous message had an error, we want to prepend a "\n" to this message
# to maximize the chance of this message being valid (for example, if the
# previous message only partially sent for whatever reason, we want to "clear" it)
msg = msg.serialize()
if self._prev_message_error:
msg = "\n" + msg
msg_ser = msg.encode("utf-8")
# If threadsafe send is enabled, we will use a lock to ensure that only one thread
# can send a message at a time. This is to avoid interleaving of messages.
if thread_safe_send:
with lock:
self._write_bytes(msg_ser)
else:
self._write_bytes(msg_ser)
def _logger(self, msg):
if debug.sidecar:
print("[sidecar:%s] %s" % (self._worker_type, msg), file=sys.stderr)
| SidecarSubProcess |
python | pytorch__pytorch | torch/ao/nn/quantized/reference/modules/rnn.py | {
"start": 10422,
"end": 12539
} | class ____(RNNCellBase):
"""
We'll store weight_qparams for all the weights (weight_ih and weight_hh),
we need to pass in a `weight_qparams_dict` that maps from weight name,
e.g. weight_ih, to the weight_qparams for that weight
"""
def __init__(
self,
input_size: int,
hidden_size: int,
bias: bool = True,
device=None,
dtype=None,
weight_qparams_dict: dict[str, Any] | None = None,
) -> None:
factory_kwargs = {
"device": device,
"dtype": dtype,
"weight_qparams_dict": weight_qparams_dict,
}
super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)
def _get_name(self):
return "QuantizedGRUCell(Reference)"
def forward(self, input: Tensor, hx: Tensor | None = None) -> Tensor:
assert input.dim() in (
1,
2,
), (
f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
)
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(
input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
ret = _VF.gru_cell(
input,
hx,
self.get_weight_ih(),
self.get_weight_hh(),
self.bias_ih,
self.bias_hh,
)
if not is_batched:
ret = ret.squeeze(0)
return ret
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.bias,
mod.weight_ih.device,
mod.weight_ih.dtype,
weight_qparams_dict,
)
ref_mod.weight_ih = mod.weight_ih
ref_mod.weight_hh = mod.weight_hh
ref_mod.bias_ih = mod.bias_ih
ref_mod.bias_hh = mod.bias_hh
return ref_mod
| GRUCell |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1048012,
"end": 1048422
} | class ____(VegaLiteSchema):
"""
RowColboolean schema wrapper.
Parameters
----------
column : bool
row : bool
"""
_schema = {"$ref": "#/definitions/RowCol<boolean>"}
def __init__(
self,
column: Optional[bool] = Undefined,
row: Optional[bool] = Undefined,
**kwds,
):
super().__init__(column=column, row=row, **kwds)
| RowColboolean |
python | django__django | django/db/models/lookups.py | {
"start": 22713,
"end": 22801
} | class ____(StartsWith):
lookup_name = "istartswith"
@Field.register_lookup
| IStartsWith |
python | MongoEngine__mongoengine | tests/fields/test_enum_field.py | {
"start": 4831,
"end": 4913
} | class ____(Document):
color = EnumField(Color, default=Color.RED)
| ModelWithColor |
python | neetcode-gh__leetcode | python/0463-island-perimeter.py | {
"start": 0,
"end": 635
} | class ____:
def islandPerimeter(self, grid: List[List[int]]) -> int:
visit = set()
def dfs(i, j):
if i >= len(grid) or j >= len(grid[0]) or i < 0 or j < 0 or grid[i][j] == 0:
return 1
if (i, j) in visit:
return 0
visit.add((i, j))
perim = dfs(i, j + 1)
perim += dfs(i + 1, j)
perim += dfs(i, j - 1)
perim += dfs(i - 1, j)
return perim
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j]:
return dfs(i, j)
| Solution |
python | numba__numba | numba/core/ir.py | {
"start": 40097,
"end": 40405
} | class ____(SlotEqualityCheckMixin):
"""Describes a loop-block
"""
__slots__ = "entry", "exit"
def __init__(self, entry, exit):
self.entry = entry
self.exit = exit
def __repr__(self):
args = self.entry, self.exit
return "Loop(entry=%s, exit=%s)" % args
| Loop |
python | MongoEngine__mongoengine | mongoengine/base/common.py | {
"start": 367,
"end": 2796
} | class ____:
"""Wrapper for the document registry (providing a singleton pattern).
This is part of MongoEngine's internals, not meant to be used directly by end-users
"""
@staticmethod
def get(name):
doc = _document_registry.get(name, None)
if not doc:
# Possible old style name
single_end = name.split(".")[-1]
compound_end = ".%s" % single_end
possible_match = [
k
for k in _document_registry
if k.endswith(compound_end) or k == single_end
]
if len(possible_match) == 1:
doc = _document_registry.get(possible_match.pop(), None)
if not doc:
raise NotRegistered(
"""
`%s` has not been registered in the document registry.
Importing the document class automatically registers it, has it
been imported?
""".strip()
% name
)
return doc
@staticmethod
def register(DocCls):
ExistingDocCls = _document_registry.get(DocCls._class_name)
if (
ExistingDocCls is not None
and ExistingDocCls.__module__ != DocCls.__module__
):
# A sign that a codebase may have named two different classes with the same name accidentally,
# this could cause issues with dereferencing because MongoEngine makes the assumption that a Document
# class name is unique.
warnings.warn(
f"Multiple Document classes named `{DocCls._class_name}` were registered, "
f"first from: `{ExistingDocCls.__module__}`, then from: `{DocCls.__module__}`. "
"this may lead to unexpected behavior during dereferencing.",
stacklevel=4,
)
_document_registry[DocCls._class_name] = DocCls
@staticmethod
def unregister(doc_cls_name):
_document_registry.pop(doc_cls_name)
def _get_documents_by_db(connection_alias, default_connection_alias):
"""Get all registered Documents class attached to a given database"""
def get_doc_alias(doc_cls):
return doc_cls._meta.get("db_alias", default_connection_alias)
return [
doc_cls
for doc_cls in _document_registry.values()
if get_doc_alias(doc_cls) == connection_alias
]
| _DocumentRegistry |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_build.py | {
"start": 24161,
"end": 25394
} | class ____(GoogleBaseHook):
"""Asynchronous Hook for the Google Cloud Build Service."""
@GoogleBaseHook.fallback_to_default_project_id
async def get_cloud_build(
self,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: AsyncRetry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""Retrieve a Cloud Build with a specified id."""
if not id_:
raise AirflowException("Google Cloud Build id is required.")
client_options = None
if location != "global":
client_options = ClientOptions(api_endpoint=f"{location}-cloudbuild.googleapis.com:443")
client = CloudBuildAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
request = GetBuildRequest(
project_id=project_id,
id=id_,
)
build_instance = await client.get_build(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return build_instance
| CloudBuildAsyncHook |
python | pytorch__pytorch | test/distributed/nn/jit/test_instantiator.py | {
"start": 647,
"end": 1823
} | class ____(TestCase):
def test_get_arg_return_types_from_interface(self):
(
args_str,
arg_types_str,
return_type_str,
) = instantiator.get_arg_return_types_from_interface(MyModuleInterface)
self.assertEqual(args_str, "tensor, number, word")
self.assertEqual(arg_types_str, "tensor: Tensor, number: int, word: str")
self.assertEqual(return_type_str, "Tuple[Tensor, int, str]")
def test_instantiate_scripted_remote_module_template(self):
generated_module = instantiator.instantiate_scriptable_remote_module_template(
MyModuleInterface
)
self.assertTrue(hasattr(generated_module, "_remote_forward"))
self.assertTrue(hasattr(generated_module, "_generated_methods"))
def test_instantiate_non_scripted_remote_module_template(self):
generated_module = (
instantiator.instantiate_non_scriptable_remote_module_template()
)
self.assertTrue(hasattr(generated_module, "_remote_forward"))
self.assertTrue(hasattr(generated_module, "_generated_methods"))
if __name__ == "__main__":
run_tests()
| TestInstantiator |
python | huggingface__transformers | tests/quantization/fp_quant_integration/test_fp_quant.py | {
"start": 5889,
"end": 6090
} | class ____(FPQuantBaseTest):
@classmethod
def getQuantizationConfig(cls):
return FPQuantConfig(forward_dtype="nvfp4", pseudoquantization=True)
@require_qutlass
| FPQuantNVFP4PseudoquantTest |
python | getsentry__sentry | src/sentry/models/avatars/organization_avatar.py | {
"start": 247,
"end": 1001
} | class ____(AvatarBase):
"""
An OrganizationAvatar associates an Organization with their avatar photo File
and contains their preferences for avatar type.
"""
AVATAR_TYPES = ((0, "letter_avatar"), (1, "upload"))
FILE_TYPE = "avatar.file"
file_id = BoundedBigIntegerField(unique=True, null=True)
organization = FlexibleForeignKey("sentry.Organization", unique=True, related_name="avatar")
avatar_type = models.PositiveSmallIntegerField(default=0, choices=AVATAR_TYPES)
url_path = "organization-avatar"
class Meta:
app_label = "sentry"
db_table = "sentry_organizationavatar"
def get_cache_key(self, size) -> str:
return f"org_avatar:{self.organization_id}:{size}"
| OrganizationAvatar |
python | davidhalter__parso | parso/pgen2/generator.py | {
"start": 4577,
"end": 14580
} | class ____:
"""
Most grammars will have certain keywords and operators that are mentioned
in the grammar as strings (e.g. "if") and not token types (e.g. NUMBER).
This class basically is the former.
"""
def __init__(self, value: str):
self.value = value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.value)
def _simplify_dfas(dfas):
"""
This is not theoretically optimal, but works well enough.
Algorithm: repeatedly look for two states that have the same
set of arcs (same labels pointing to the same nodes) and
unify them, until things stop changing.
dfas is a list of DFAState instances
"""
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfas):
for j in range(i + 1, len(dfas)):
state_j = dfas[j]
if state_i == state_j:
del dfas[j]
for state in dfas:
state.unifystate(state_j, state_i)
changes = True
break
def _make_dfas(start, finish):
"""
Uses the powerset construction algorithm to create DFA states from sets of
NFA states.
Also does state reduction if some states are not needed.
"""
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def addclosure(nfa_state, base_nfa_set):
assert isinstance(nfa_state, NFAState)
if nfa_state in base_nfa_set:
return
base_nfa_set.add(nfa_state)
for nfa_arc in nfa_state.arcs:
if nfa_arc.nonterminal_or_string is None:
addclosure(nfa_arc.next, base_nfa_set)
base_nfa_set = set()
addclosure(start, base_nfa_set)
states = [DFAState(start.from_rule, base_nfa_set, finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
# Find state transitions and store them in arcs.
for nfa_state in state.nfa_set:
for nfa_arc in nfa_state.arcs:
if nfa_arc.nonterminal_or_string is not None:
nfa_set = arcs.setdefault(nfa_arc.nonterminal_or_string, set())
addclosure(nfa_arc.next, nfa_set)
# Now create the dfa's with no None's in arcs anymore. All Nones have
# been eliminated and state transitions (arcs) are properly defined, we
# just need to create the dfa's.
for nonterminal_or_string, nfa_set in arcs.items():
for nested_state in states:
if nested_state.nfa_set == nfa_set:
# The DFA state already exists for this rule.
break
else:
nested_state = DFAState(start.from_rule, nfa_set, finish)
states.append(nested_state)
state.add_arc(nested_state, nonterminal_or_string)
return states # List of DFAState instances; first one is start
def _dump_nfa(start, finish):
print("Dump of NFA for", start.from_rule)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for arc in state.arcs:
label, next_ = arc.nonterminal_or_string, arc.next
if next_ in todo:
j = todo.index(next_)
else:
j = len(todo)
todo.append(next_)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def _dump_dfas(dfas):
print("Dump of DFA for", dfas[0].from_rule)
for i, state in enumerate(dfas):
print(" State", i, state.is_final and "(final)" or "")
for nonterminal, next_ in state.arcs.items():
print(" %s -> %d" % (nonterminal, dfas.index(next_)))
def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
"""
``bnf_text`` is a grammar in extended BNF (using * for repetition, + for
at-least-once repetition, [] for optional parts, | for alternatives and ()
for grouping).
It's not EBNF according to ISO/IEC 14977. It's a dialect Python uses in its
own parser.
"""
rule_to_dfas = {}
start_nonterminal = None
for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
# _dump_nfa(nfa_a, nfa_z)
dfas = _make_dfas(nfa_a, nfa_z)
# _dump_dfas(dfas)
# oldlen = len(dfas)
_simplify_dfas(dfas)
# newlen = len(dfas)
rule_to_dfas[nfa_a.from_rule] = dfas
# print(nfa_a.from_rule, oldlen, newlen)
if start_nonterminal is None:
start_nonterminal = nfa_a.from_rule
reserved_strings: Mapping[str, ReservedString] = {}
for nonterminal, dfas in rule_to_dfas.items():
for dfa_state in dfas:
for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items():
if terminal_or_nonterminal in rule_to_dfas:
dfa_state.nonterminal_arcs[terminal_or_nonterminal] = next_dfa
else:
transition = _make_transition(
token_namespace,
reserved_strings,
terminal_or_nonterminal
)
dfa_state.transitions[transition] = DFAPlan(next_dfa)
_calculate_tree_traversal(rule_to_dfas)
return Grammar(start_nonterminal, rule_to_dfas, reserved_strings) # type: ignore[arg-type]
def _make_transition(token_namespace, reserved_syntax_strings, label):
"""
Creates a reserved string ("if", "for", "*", ...) or returns the token type
(NUMBER, STRING, ...) for a given grammar terminal.
"""
if label[0].isalpha():
# A named token (e.g. NAME, NUMBER, STRING)
return getattr(token_namespace, label)
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
assert not label.startswith('"""') and not label.startswith("'''")
value = literal_eval(label)
try:
return reserved_syntax_strings[value]
except KeyError:
r = reserved_syntax_strings[value] = ReservedString(value)
return r
def _calculate_tree_traversal(nonterminal_to_dfas):
"""
By this point we know how dfas can move around within a stack node, but we
don't know how we can add a new stack node (nonterminal transitions).
"""
# Map from grammar rule (nonterminal) name to a set of tokens.
first_plans = {}
nonterminals = list(nonterminal_to_dfas.keys())
nonterminals.sort()
for nonterminal in nonterminals:
if nonterminal not in first_plans:
_calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal)
# Now that we have calculated the first terminals, we are sure that
# there is no left recursion.
for dfas in nonterminal_to_dfas.values():
for dfa_state in dfas:
transitions = dfa_state.transitions
for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items():
for transition, pushes in first_plans[nonterminal].items():
if transition in transitions:
prev_plan = transitions[transition]
# Make sure these are sorted so that error messages are
# at least deterministic
choices = sorted([
(
prev_plan.dfa_pushes[0].from_rule
if prev_plan.dfa_pushes
else prev_plan.next_dfa.from_rule
),
(
pushes[0].from_rule
if pushes else next_dfa.from_rule
),
])
raise ValueError(
"Rule %s is ambiguous; given a %s token, we "
"can't determine if we should evaluate %s or %s."
% (
(
dfa_state.from_rule,
transition,
) + tuple(choices)
)
)
transitions[transition] = DFAPlan(next_dfa, pushes)
def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):
"""
Calculates the first plan in the first_plans dictionary for every given
nonterminal. This is going to be used to know when to create stack nodes.
"""
dfas = nonterminal_to_dfas[nonterminal]
new_first_plans = {}
first_plans[nonterminal] = None # dummy to detect left recursion
# We only need to check the first dfa. All the following ones are not
# interesting to find first terminals.
state = dfas[0]
for transition, next_ in state.transitions.items():
# It's a string. We have finally found a possible first token.
new_first_plans[transition] = [next_.next_dfa]
for nonterminal2, next_ in state.nonterminal_arcs.items():
# It's a nonterminal and we have either a left recursion issue
# in the grammar or we have to recurse.
try:
first_plans2 = first_plans[nonterminal2]
except KeyError:
first_plans2 = _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal2)
else:
if first_plans2 is None:
raise ValueError("left recursion for rule %r" % nonterminal)
for t, pushes in first_plans2.items():
new_first_plans[t] = [next_] + pushes
first_plans[nonterminal] = new_first_plans
return new_first_plans
| ReservedString |
python | doocs__leetcode | solution/2500-2599/2546.Apply Bitwise Operations to Make Strings Equal/Solution.py | {
"start": 0,
"end": 122
} | class ____:
def makeStringsEqual(self, s: str, target: str) -> bool:
return ("1" in s) == ("1" in target)
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/base_class/test_reshape.py | {
"start": 153,
"end": 3138
} | class ____:
def test_repeat(self):
repeats = 2
index = Index([1, 2, 3])
expected = Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(["b", "c", "d"])
# test 0th element
tm.assert_index_equal(Index(["a", "b", "c", "d"]), result.insert(0, "a"))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(["b", "c", "e", "d"]), result.insert(-1, "e"))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, "z"), result.insert(-2, "z"))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(["a"]), null_index.insert(0, "a"))
def test_insert_missing(self, nulls_fixture, using_infer_string):
# GH#22295
# test there is no mangling of NA values
expected = Index(["a", nulls_fixture, "b", "c"], dtype=object)
result = Index(list("abc"), dtype=object).insert(
1, Index([nulls_fixture], dtype=object)
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"val", [(1, 2), np.datetime64("2019-12-31"), np.timedelta64(1, "D")]
)
@pytest.mark.parametrize("loc", [-1, 2])
def test_insert_datetime_into_object(self, loc, val):
# GH#44509
idx = Index(["1", "2", "3"])
result = idx.insert(loc, val)
expected = Index(["1", "2", val, "3"])
tm.assert_index_equal(result, expected)
assert type(expected[2]) is type(val)
def test_insert_none_into_string_numpy(self, string_dtype_no_object):
# GH#55365
index = Index(["a", "b", "c"], dtype=string_dtype_no_object)
result = index.insert(-1, None)
expected = Index(["a", "b", None, "c"], dtype=string_dtype_no_object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"pos,expected",
[
(0, Index(["b", "c", "d"], name="index")),
(-1, Index(["a", "b", "c"], name="index")),
],
)
def test_delete(self, pos, expected):
index = Index(["a", "b", "c", "d"], name="index")
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(["a", "b", "c", "d"], name="index")
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(IndexError, match=msg):
index.delete(5)
def test_append_multiple(self):
index = Index(["a", "b", "c", "d", "e", "f"])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
| TestReshape |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 5493,
"end": 5900
} | class ____(PrivateViewMixin, OrganizationOwnerView, DeleteViewWithMessage):
success_message = _("Owner removed")
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
if self._is_last_user():
return HttpResponseBadRequest(_("User is the last owner, can't be removed"))
return super().post(request, *args, **kwargs)
# Team views
| DeleteOrganizationOwner |
python | chardet__chardet | chardet/euckrprober.py | {
"start": 1295,
"end": 1687
} | class ____(MultiByteCharSetProber):
def __init__(self) -> None:
super().__init__()
self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
self.distribution_analyzer = EUCKRDistributionAnalysis()
self.reset()
@property
def charset_name(self) -> str:
return "EUC-KR"
@property
def language(self) -> str:
return "Korean"
| EUCKRProber |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 43502,
"end": 48002
} | class ____:
@pytest.fixture(autouse=True)
def setUp(self):
x = np.arange(start=0, stop=10, step=2)
y = np.arange(start=9, stop=-7, step=-3)
xy = np.dstack(np.meshgrid(x, y))
distance = np.linalg.norm(xy, axis=2)
self.darray = DataArray(distance, list(zip(("y", "x"), (y, x), strict=True)))
self.data_min = distance.min()
self.data_max = distance.max()
yield
# Remove all matplotlib figures
plt.close("all")
@pytest.mark.slow
def test_recover_from_seaborn_jet_exception(self) -> None:
pal = _color_palette("jet", 4)
assert type(pal) is np.ndarray
assert len(pal) == 4
@pytest.mark.slow
def test_build_discrete_cmap(self) -> None:
for cmap, levels, extend, filled in [
("jet", [0, 1], "both", False),
("hot", [-4, 4], "max", True),
]:
ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
assert ncmap.N == len(levels) - 1
assert len(ncmap.colors) == len(levels) - 1
assert cnorm.N == len(levels)
assert_array_equal(cnorm.boundaries, levels)
assert max(levels) == cnorm.vmax
assert min(levels) == cnorm.vmin
if filled:
assert ncmap.colorbar_extend == extend
else:
assert ncmap.colorbar_extend == "max"
@pytest.mark.slow
def test_discrete_colormap_list_of_levels(self) -> None:
for extend, levels in [
("max", [-1, 2, 4, 8, 10]),
("both", [2, 5, 10, 11]),
("neither", [0, 5, 10, 15]),
("min", [2, 5, 10, 15]),
]:
for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
primitive = getattr(self.darray.plot, kind)(levels=levels)
assert_array_equal(levels, primitive.norm.boundaries)
assert max(levels) == primitive.norm.vmax
assert min(levels) == primitive.norm.vmin
if kind != "contour":
assert extend == primitive.cmap.colorbar_extend
else:
assert "max" == primitive.cmap.colorbar_extend
assert len(levels) - 1 == len(primitive.cmap.colors)
@pytest.mark.slow
def test_discrete_colormap_int_levels(self) -> None:
for extend, levels, vmin, vmax, cmap in [
("neither", 7, None, None, None),
("neither", 7, None, 20, mpl.colormaps["RdBu"]),
("both", 7, 4, 8, None),
("min", 10, 4, 15, None),
]:
for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
primitive = getattr(self.darray.plot, kind)(
levels=levels, vmin=vmin, vmax=vmax, cmap=cmap
)
assert levels >= len(primitive.norm.boundaries) - 1
if vmax is None:
assert primitive.norm.vmax >= self.data_max
else:
assert primitive.norm.vmax >= vmax
if vmin is None:
assert primitive.norm.vmin <= self.data_min
else:
assert primitive.norm.vmin <= vmin
if kind != "contour":
assert extend == primitive.cmap.colorbar_extend
else:
assert "max" == primitive.cmap.colorbar_extend
assert levels >= len(primitive.cmap.colors)
def test_discrete_colormap_list_levels_and_vmin_or_vmax(self) -> None:
levels = [0, 5, 10, 15]
primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20) # type: ignore[call-arg]
assert primitive.norm.vmax == max(levels)
assert primitive.norm.vmin == min(levels)
def test_discrete_colormap_provided_boundary_norm(self) -> None:
norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4)
primitive = self.darray.plot.contourf(norm=norm)
np.testing.assert_allclose(list(primitive.levels), norm.boundaries)
def test_discrete_colormap_provided_boundary_norm_matching_cmap_levels(
self,
) -> None:
norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4)
primitive = self.darray.plot.contourf(norm=norm)
cbar = primitive.colorbar
assert cbar is not None
assert cbar.norm.Ncmap == cbar.norm.N # type: ignore[attr-defined] # Exists, debatable if public though.
| TestDiscreteColorMap |
python | jschneier__django-storages | tests/test_s3.py | {
"start": 37053,
"end": 37316
} | class ____(TestCase):
def setUp(self):
self.storage = s3.S3StaticStorage()
self.storage._connections.connection = mock.MagicMock()
def test_querystring_auth(self):
self.assertFalse(self.storage.querystring_auth)
| S3StaticStorageTests |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 42029,
"end": 42884
} | class ____(Event):
"""
An event triggered by a draw operation on the canvas.
In most backends, callbacks subscribed to this event will be fired after
the rendering is complete but before the screen is updated. Any extra
artists drawn to the canvas's renderer will be reflected without an
explicit call to ``blit``.
.. warning::
Calling ``canvas.draw`` and ``canvas.blit`` in these callbacks may
not be safe with all backends and may cause infinite recursion.
A DrawEvent has a number of special attributes in addition to those defined
by the parent `Event` class.
Attributes
----------
renderer : `RendererBase`
The renderer for the draw event.
"""
def __init__(self, name, canvas, renderer):
super().__init__(name, canvas)
self.renderer = renderer
| DrawEvent |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_format17.py | {
"start": 350,
"end": 1336
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("format17.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a pattern only."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
pattern = workbook.add_format({"pattern": 2, "fg_color": "red"})
worksheet.write("A1", "", pattern)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_color(self):
"""Test the creation of a simple XlsxWriter file with a pattern only."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
pattern = workbook.add_format({"pattern": 2, "fg_color": Color("red")})
worksheet.write("A1", "", pattern)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Q.py | {
"start": 2547,
"end": 3808
} | class ____(Benchmark):
r"""
Quintic objective function.
This class defines the Quintic [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Quintic}}(x) = \sum_{i=1}^{n} \left|{x_{i}^{5} - 3 x_{i}^{4}
+ 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} -4}\right|
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = -1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [[-1.0 for _ in range(self.N)]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return sum(abs(x ** 5 - 3 * x ** 4 + 4 * x ** 3 + 2 * x ** 2
- 10 * x - 4))
| Quintic |
python | astropy__astropy | astropy/utils/misc.py | {
"start": 11798,
"end": 18090
} | class ____(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj: object) -> object:
from astropy import units as u
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
if obj == u.dimensionless_unscaled:
obj = "dimensionless_unit"
else:
return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s: str) -> str:
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
"""
return "".join(
c for c in unicodedata.normalize("NFD", s) if unicodedata.category(c) != "Mn"
)
def did_you_mean(
s: str,
candidates: Iterable[str],
n: int = 3,
cutoff: float = 0.8,
fix: Callable[[str], list[str]] | None = None,
) -> str:
"""
When a string isn't found in a set of candidates, we can be nice
to provide a list of alternatives in the exception. This
convenience function helps to format that part of the exception.
Parameters
----------
s : str
candidates : iterable of str
Note that str itself does not cause an error, but the output
might not be what was expected.
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a list of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
"""
s_lower = strip_accents(s).lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = defaultdict(list)
for candidate in candidates:
candidates_lower[candidate.lower()].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
matches: Iterable[str] = (
[s_lower[:-1]]
if s_lower.endswith("s") and s_lower[:-1] in candidates_lower
else difflib.get_close_matches(s_lower, candidates_lower, n=n, cutoff=cutoff)
)
if not matches:
return ""
matches = chain.from_iterable(candidates_lower[match] for match in matches)
if fix is not None:
matches = chain.from_iterable(fix(match) for match in matches)
*first_matches, suggestion = sorted(set(matches))
if first_matches:
suggestion = ", ".join(first_matches) + " or " + suggestion
return f"Did you mean {suggestion}?"
LOCALE_LOCK: Final = threading.Lock()
@contextmanager
def _set_locale(name: str) -> Generator[None, None, None]:
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
Note that one cannot nest multiple _set_locale() context manager
statements as this causes a threading lock.
This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
Parameters
----------
name : str
Locale name, e.g. "C" or "fr_FR".
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
def dtype_bytes_or_chars(dtype: np.dtype) -> int | None:
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
Bits (for numeric types) or characters (for string types)
"""
match = re.search(r"(\d+)$", dtype.str)
out = int(match.group(1)) if match else None
return out
def _hungry_for(option): # pragma: no cover
"""
Open browser loaded with ``option`` options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open(f"https://www.google.com/search?q={option}+near+me")
def pizza(): # pragma: no cover
"""``/pizza``."""
_hungry_for("pizza")
def coffee(is_adam=False, is_brigitta=False): # pragma: no cover
"""``/coffee``."""
if is_adam and is_brigitta:
raise ValueError("There can be only one!")
if is_adam:
option = "fresh+third+wave+coffee"
elif is_brigitta:
option = "decent+espresso"
else:
option = "coffee"
_hungry_for(option)
| JsonCustomEncoder |
python | huggingface__transformers | tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py | {
"start": 16706,
"end": 17754
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(
text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="pt"
)
outputs = model(**inputs)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape,
(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
)
expected_logits = torch.tensor([[1.2284727, 0.3104122]])
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
| VisionTextDualEncoderIntegrationTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.