language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_hint_returned.py | {
"start": 843,
"end": 1001
} | class ____:
""" __length_hint__ returns non-int """
def __length_hint__(self): # [invalid-length-hint-returned]
return 3.0
| SecondBadLengthHint |
python | pola-rs__polars | py-polars/tests/unit/utils/pycapsule_utils.py | {
"start": 25,
"end": 711
} | class ____:
"""
Hold the Arrow C Stream pycapsule.
A class that exposes the Arrow C Stream interface via Arrow PyCapsules. This
ensures that the consumer is seeing _only_ the `__arrow_c_stream__` dunder, and that
nothing else (e.g. the dataframe or array interface) is actually being used.
"""
arrow_obj: Any
def __init__(self, arrow_obj: object) -> None:
self.arrow_obj = arrow_obj
def __arrow_c_stream__(self, requested_schema: object = None) -> object:
return self.arrow_obj.__arrow_c_stream__(requested_schema)
def __iter__(self) -> None:
return
def __next__(self) -> None:
return
| PyCapsuleStreamHolder |
python | scrapy__scrapy | tests/test_logformatter.py | {
"start": 9796,
"end": 11124
} | class ____:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.base_settings = {
"LOG_LEVEL": "DEBUG",
"ITEM_PIPELINES": {
DropSomeItemsPipeline: 300,
},
}
@inlineCallbacks
def test_show_messages(self):
crawler = get_crawler(ItemSpider, self.base_settings)
with LogCapture() as lc:
yield crawler.crawl(mockserver=self.mockserver)
assert "Scraped from <200 http://127.0.0.1:" in str(lc)
assert "Crawled (200) <GET http://127.0.0.1:" in str(lc)
assert "Dropped: Ignoring item" in str(lc)
@inlineCallbacks
def test_skip_messages(self):
settings = self.base_settings.copy()
settings["LOG_FORMATTER"] = SkipMessagesLogFormatter
crawler = get_crawler(ItemSpider, settings)
with LogCapture() as lc:
yield crawler.crawl(mockserver=self.mockserver)
assert "Scraped from <200 http://127.0.0.1:" not in str(lc)
assert "Crawled (200) <GET http://127.0.0.1:" not in str(lc)
assert "Dropped: Ignoring item" not in str(lc)
| TestShowOrSkipMessages |
python | PyCQA__pylint | tests/functional/m/match_class_pattern.py | {
"start": 283,
"end": 360
} | class ____:
__match_args__ = ["x", "y"] # [invalid-match-args-definition]
| C |
python | fluentpython__example-code-2e | 24-class-metaprog/checked/initsub/checked_demo.py | {
"start": 56,
"end": 560
} | class ____(Checked):
title: str
year: int
box_office: float
if __name__ == '__main__':
movie = Movie(title='The Godfather', year=1972, box_office=137)
print(movie.title)
print(movie)
try:
# remove the "type: ignore" comment to see Mypy error
movie.year = 'MCMLXXII' # type: ignore
except TypeError as e:
print(e)
try:
blockbuster = Movie(title='Avatar', year=2009, box_office='billions')
except TypeError as e:
print(e)
| Movie |
python | keon__algorithms | tests/test_strings.py | {
"start": 15076,
"end": 15358
} | class ____(unittest.TestCase):
def test_count_binary_substring(self):
self.assertEqual(6, count_binary_substring("00110011"))
self.assertEqual(4, count_binary_substring("10101"))
self.assertEqual(3, count_binary_substring("00110"))
| TestCountBinarySubstring |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1477232,
"end": 1477682
} | class ____(sgqlc.types.Type, HovercardContext):
"""A hovercard context with a message describing the current code
review state of the pull request.
"""
__schema__ = github_schema
__field_names__ = ("review_decision",)
review_decision = sgqlc.types.Field(PullRequestReviewDecision, graphql_name="reviewDecision")
"""The current status of the pull request with respect to code
review.
"""
| ReviewStatusHovercardContext |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_overridden_method.py | {
"start": 1066,
"end": 1276
} | class ____(Property):
@property
def close(self):
pass
@close.setter
def close(self, attr):
return attr
@close.deleter
def close(self):
return None
| PropertySetter |
python | kamyu104__LeetCode-Solutions | Python/form-largest-integer-with-digits-that-add-up-to-target.py | {
"start": 29,
"end": 760
} | class ____(object):
def largestNumber(self, cost, target):
"""
:type cost: List[int]
:type target: int
:rtype: str
"""
dp = [0]
for t in xrange(1, target+1):
dp.append(-1)
for i, c in enumerate(cost):
if t-c < 0 or dp[t-c] < 0:
continue
dp[t] = max(dp[t], dp[t-c]+1)
if dp[target] < 0:
return "0"
result = []
for i in reversed(xrange(9)):
while target >= cost[i] and dp[target] == dp[target-cost[i]]+1:
target -= cost[i]
result.append(i+1)
return "".join(map(str, result))
# Time: O(t)
# Space: O(t)
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/keywords_test.py | {
"start": 443,
"end": 2797
} | class ____(unittest.TestCase):
def setUp(self):
super(KeywordsConflictTest, self).setUp()
self.pool = descriptor_pool.Default()
def testMessage(self):
message = getattr(more_messages_pb2, 'class')()
message.int_field = 123
self.assertEqual(message.int_field, 123)
des = self.pool.FindMessageTypeByName('google.protobuf.internal.class')
self.assertEqual(des.name, 'class')
def testNestedMessage(self):
message = getattr(more_messages_pb2, 'class')()
message.nested_message.field = 234
self.assertEqual(message.nested_message.field, 234)
des = self.pool.FindMessageTypeByName('google.protobuf.internal.class.try')
self.assertEqual(des.name, 'try')
def testField(self):
message = getattr(more_messages_pb2, 'class')()
setattr(message, 'if', 123)
setattr(message, 'as', 1)
self.assertEqual(getattr(message, 'if'), 123)
self.assertEqual(getattr(message, 'as'), 1)
def testEnum(self):
class_ = getattr(more_messages_pb2, 'class')
message = class_()
# Normal enum value.
message.enum_field = more_messages_pb2.default
self.assertEqual(message.enum_field, more_messages_pb2.default)
# Top level enum value.
message.enum_field = getattr(more_messages_pb2, 'else')
self.assertEqual(message.enum_field, 1)
# Nested enum value
message.nested_enum_field = getattr(class_, 'True')
self.assertEqual(message.nested_enum_field, 1)
def testExtension(self):
message = getattr(more_messages_pb2, 'class')()
# Top level extension
extension1 = getattr(more_messages_pb2, 'continue')
message.Extensions[extension1] = 456
self.assertEqual(message.Extensions[extension1], 456)
# None top level extension
extension2 = getattr(more_messages_pb2.ExtendClass, 'return')
message.Extensions[extension2] = 789
self.assertEqual(message.Extensions[extension2], 789)
def testExtensionForNestedMessage(self):
message = getattr(more_messages_pb2, 'class')()
extension = getattr(more_messages_pb2, 'with')
message.nested_message.Extensions[extension] = 999
self.assertEqual(message.nested_message.Extensions[extension], 999)
def TestFullKeywordUsed(self):
message = more_messages_pb2.TestFullKeyword()
message.field2.int_field = 123
if __name__ == '__main__':
unittest.main()
| KeywordsConflictTest |
python | wandb__wandb | wandb/sdk/lib/asyncio_compat.py | {
"start": 4320,
"end": 9327
} | class ____:
"""Object that `open_task_group()` yields."""
def __init__(self) -> None:
self._tasks: list[asyncio.Task[None]] = []
def start_soon(self, coro: Coroutine[Any, Any, Any]) -> None:
"""Schedule a task in the group.
Args:
coro: The return value of the `async` function defining the task.
"""
self._tasks.append(asyncio.create_task(coro))
async def _wait_all(self, *, race: bool, timeout: float | None) -> None:
"""Block until tasks complete.
Args:
race: If true, blocks until the first task completes and then
cancels the rest. Otherwise, waits for all tasks or until
the first exception.
timeout: How long to wait.
Raises:
TimeoutError: If the timeout expires.
Exception: If one or more tasks raises an exception, one of these
is raised arbitrarily.
"""
if not self._tasks:
return
if race:
return_when = asyncio.FIRST_COMPLETED
else:
return_when = asyncio.FIRST_EXCEPTION
done, pending = await asyncio.wait(
self._tasks,
timeout=timeout,
return_when=return_when,
)
if not done:
raise TimeoutError(f"Timed out after {timeout} seconds.")
# If any of the finished tasks raised an exception, pick the first one.
for task in done:
if exc := task.exception():
raise exc
# Wait for remaining tasks to clean up, then re-raise any exceptions
# that arise. Note that pending is only non-empty when race=True.
for task in pending:
task.cancel()
await asyncio.gather(*pending, return_exceptions=True)
for task in pending:
if task.cancelled():
continue
if exc := task.exception():
raise exc
async def _cancel_all(self) -> None:
"""Cancel all tasks.
Blocks until cancelled tasks complete to allow them to clean up.
Ignores exceptions.
"""
for task in self._tasks:
# NOTE: It is safe to cancel tasks that have already completed.
task.cancel()
await asyncio.gather(*self._tasks, return_exceptions=True)
@contextlib.asynccontextmanager
async def open_task_group(
*,
exit_timeout: float | None = None,
race: bool = False,
) -> AsyncIterator[TaskGroup]:
"""Create a task group.
`asyncio` gained task groups in Python 3.11.
This is an async context manager, meant to be used with `async with`.
On exit, it blocks until all subtasks complete. If any subtask fails, or if
the current task is cancelled, it cancels all subtasks in the group and
raises the subtask's exception. If multiple subtasks fail simultaneously,
one of their exceptions is chosen arbitrarily.
NOTE: Subtask exceptions do not propagate until the context manager exits.
This means that the task group cannot cancel code running inside the
`async with` block .
Args:
exit_timeout: An optional timeout in seconds. When exiting the
context manager, if tasks don't complete in this time,
they are cancelled and a TimeoutError is raised.
race: If true, all pending tasks are cancelled once any task
in the group completes. Prefer to use the race() function instead.
Raises:
TimeoutError: if exit_timeout is specified and tasks don't finish
in time.
"""
task_group = TaskGroup()
try:
yield task_group
await task_group._wait_all(race=race, timeout=exit_timeout)
finally:
await task_group._cancel_all()
@contextlib.asynccontextmanager
async def cancel_on_exit(coro: Coroutine[Any, Any, Any]) -> AsyncIterator[None]:
"""Schedule a task, cancelling it when exiting the context manager.
If the context manager exits successfully but the given coroutine raises
an exception, that exception is reraised. The exception is suppressed
if the context manager raises an exception.
"""
async def stop_immediately():
pass
async with open_task_group(race=True) as group:
group.start_soon(stop_immediately())
group.start_soon(coro)
yield
async def race(*coros: Coroutine[Any, Any, Any]) -> None:
"""Wait until the first completed task.
After any coroutine completes, all others are cancelled.
If the current task is cancelled, all coroutines are cancelled too.
If coroutines complete simultaneously and any one of them raises
an exception, an arbitrary one is propagated. Similarly, if any coroutines
raise exceptions during cancellation, one of them propagates.
Args:
coros: Coroutines to race.
"""
async with open_task_group(race=True) as tg:
for coro in coros:
tg.start_soon(coro)
| TaskGroup |
python | scipy__scipy | scipy/optimize/tests/test_nonlin.py | {
"start": 11551,
"end": 14187
} | class ____:
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
rng = np.random.default_rng(123)
A = rng.standard_normal((N, N))
if complex:
A = A + 1j*rng.standard_normal((N, N))
b = rng.standard_normal(N)
if complex:
b = b + 1j*rng.standard_normal(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
def _check_autojac(self, A, b):
def func(x):
return A.dot(x) - b
def jac(v):
return A
sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), jac, maxiter=2,
f_tol=1e-6, line_search=None, verbose=0)
np.testing.assert_allclose(A @ sol, b, atol=1e-6)
# test jac input as array -- not a function
sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), A, maxiter=2,
f_tol=1e-6, line_search=None, verbose=0)
np.testing.assert_allclose(A @ sol, b, atol=1e-6)
def test_jac_sparse(self):
A = csr_array([[1, 2], [2, 1]])
b = np.array([1, -1])
self._check_autojac(A, b)
self._check_autojac((1 + 2j) * A, (2 + 2j) * b)
def test_jac_ndarray(self):
A = np.array([[1, 2], [2, 1]])
b = np.array([1, -1])
self._check_autojac(A, b)
self._check_autojac((1 + 2j) * A, (2 + 2j) * b)
| TestLinear |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 4568,
"end": 4716
} | class ____(FrozenModel):
model_config = ConfigDict(frozen=False)
inheriting2 = InheritingModel2(x=1, y='c')
inheriting2.y = 'd'
| InheritingModel2 |
python | ray-project__ray | rllib/callbacks/callbacks.py | {
"start": 1128,
"end": 27935
} | class ____(metaclass=_CallbackMeta):
"""Abstract base class for RLlib callbacks (similar to Keras callbacks).
These callbacks can be used for custom metrics and custom postprocessing.
By default, all of these callbacks are no-ops. To configure custom training
callbacks, subclass RLlibCallback and then set
{"callbacks": YourCallbacksClass} in the algo config.
"""
@OverrideToImplementCustomLogic
def on_algorithm_init(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
**kwargs,
) -> None:
"""Callback run when a new Algorithm instance has finished setup.
This method gets called at the end of Algorithm.setup() after all
the initialization is done, and before actually training starts.
Args:
algorithm: Reference to the Algorithm instance.
metrics_logger: The MetricsLogger object inside the `Algorithm`. Can be
used to log custom metrics after algo initialization.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_train_result(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
result: dict,
**kwargs,
) -> None:
"""Called at the end of Algorithm.train().
Args:
algorithm: Current Algorithm instance.
metrics_logger: The MetricsLogger object inside the Algorithm. Can be
used to log custom metrics after traing results are available.
result: Dict of results returned from Algorithm.train() call.
You can mutate this object to add additional metrics.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_evaluate_start(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
**kwargs,
) -> None:
"""Callback before evaluation starts.
This method gets called at the beginning of Algorithm.evaluate().
Args:
algorithm: Reference to the algorithm instance.
metrics_logger: The MetricsLogger object inside the `Algorithm`. Can be
used to log custom metrics before running the next round of evaluation.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_evaluate_offline_start(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
**kwargs,
) -> None:
"""Callback before offline evaluation starts.
This method gets called at the beginning of Algorithm.evaluate_offline().
Args:
algorithm: Reference to the algorithm instance.
metrics_logger: The MetricsLogger object inside the `Algorithm`. Can be
used to log custom metrics before running the next round of offline
evaluation.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_evaluate_end(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
evaluation_metrics: dict,
**kwargs,
) -> None:
"""Runs when the evaluation is done.
Runs at the end of Algorithm.evaluate().
Args:
algorithm: Reference to the algorithm instance.
metrics_logger: The MetricsLogger object inside the `Algorithm`. Can be
used to log custom metrics after the most recent evaluation round.
evaluation_metrics: Results dict to be returned from algorithm.evaluate().
You can mutate this object to add additional metrics.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_evaluate_offline_end(
self,
*,
algorithm: "Algorithm",
metrics_logger: Optional[MetricsLogger] = None,
evaluation_metrics: dict,
**kwargs,
) -> None:
"""Runs when the offline evaluation is done.
Runs at the end of Algorithm.evaluate_offline().
Args:
algorithm: Reference to the algorithm instance.
metrics_logger: The MetricsLogger object inside the `Algorithm`. Can be
used to log custom metrics after the most recent offline evaluation
round.
evaluation_metrics: Results dict to be returned from
Algorithm.evaluate_offline(). You can mutate this object to add
additional metrics.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_env_runners_recreated(
self,
*,
algorithm: "Algorithm",
env_runner_group: "EnvRunnerGroup",
env_runner_indices: List[int],
is_evaluation: bool,
**kwargs,
) -> None:
"""Callback run after one or more EnvRunner actors have been recreated.
You can access and change the EnvRunners in question through the following code
snippet inside your custom override of this method:
.. testcode::
from ray.rllib.callbacks.callbacks import RLlibCallback
class MyCallbacks(RLlibCallback):
def on_env_runners_recreated(
self,
*,
algorithm,
env_runner_group,
env_runner_indices,
is_evaluation,
**kwargs,
):
# Define what you would like to do on the recreated EnvRunner:
def func(env_runner):
# Here, we just set some arbitrary property to 1.
if is_evaluation:
env_runner._custom_property_for_evaluation = 1
else:
env_runner._custom_property_for_training = 1
# Use the `foreach_env_runner` method of the worker set and
# only loop through those worker IDs that have been restarted.
# Note that we set `local_worker=False` to NOT include it (local
# workers are never recreated; if they fail, the entire Algorithm
# fails).
env_runner_group.foreach_env_runner(
func,
remote_worker_ids=env_runner_indices,
local_env_runner=False,
)
Args:
algorithm: Reference to the Algorithm instance.
env_runner_group: The EnvRunnerGroup object in which the workers in question
reside. You can use a `env_runner_group.foreach_env_runner(
remote_worker_ids=..., local_env_runner=False)` method call to execute
custom code on the recreated (remote) workers. Note that the local
worker is never recreated as a failure of this would also crash the
Algorithm.
env_runner_indices: The list of (remote) worker IDs that have been
recreated.
is_evaluation: Whether `worker_set` is the evaluation EnvRunnerGroup
(located in `Algorithm.eval_env_runner_group`) or not.
"""
pass
@OverrideToImplementCustomLogic
def on_offline_eval_runners_recreated(
self,
*,
algorithm: "Algorithm",
offline_eval_runner_group: "OfflineEvaluationRunnerGroup",
offline_eval_runner_indices: List[int],
**kwargs,
) -> None:
"""Callback run after one or more OfflineEvaluationRunner actors have been recreated.
You can access and change the OfflineEvaluationRunners in question through the following code
snippet inside your custom override of this method:
.. testcode::
from ray.rllib.callbacks.callbacks import RLlibCallback
class MyCallbacks(RLlibCallback):
def on_offline_eval_runners_recreated(
self,
*,
algorithm,
offline_eval_runner_group,
offline_eval_runner_indices,
**kwargs,
):
# Define what you would like to do on the recreated EnvRunner:
def func(offline_eval_runner):
# Here, we just set some arbitrary property to 1.
if is_evaluation:
offline_eval_runner._custom_property_for_evaluation = 1
else:
offline_eval_runner._custom_property_for_training = 1
# Use the `foreach_runner` method of the worker set and
# only loop through those worker IDs that have been restarted.
# Note that `local_runner=False` as long as there are remote
# runners.
offline_eval_runner_group.foreach_runner(
func,
remote_runner_ids=offline_eval_runner_indices,
local_runner=False,
)
Args:
algorithm: Reference to the Algorithm instance.
offline_eval_runner_group: The OfflineEvaluationRunnerGroup object in which
the workers in question reside. You can use a `runner_group.foreach_runner(
remote_worker_ids=..., local_runner=False)` method call to execute
custom code on the recreated (remote) workers.
offline_eval_runner_indices: The list of (remote) worker IDs that have been
recreated.
"""
pass
@OverrideToImplementCustomLogic
def on_checkpoint_loaded(
self,
*,
algorithm: "Algorithm",
**kwargs,
) -> None:
"""Callback run when an Algorithm has loaded a new state from a checkpoint.
This method gets called at the end of `Algorithm.load_checkpoint()`.
Args:
algorithm: Reference to the Algorithm instance.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_environment_created(
self,
*,
env_runner: "EnvRunner",
metrics_logger: Optional[MetricsLogger] = None,
env: gym.Env,
env_context: EnvContext,
**kwargs,
) -> None:
"""Callback run when a new environment object has been created.
Note: This only applies to the new API stack. The env used is usually a
gym.Env (or more specifically a gym.vector.Env).
Args:
env_runner: Reference to the current EnvRunner instance.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics after environment creation.
env: The environment object that has been created on `env_runner`. This is
usually a gym.Env (or a gym.vector.Env) object.
env_context: The `EnvContext` object that has been passed to the
`gym.make()` call as kwargs (and to the gym.Env as `config`). It should
have all the config key/value pairs in it as well as the
EnvContext-typical properties: `worker_index`, `num_workers`, and
`remote`.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_episode_created(
self,
*,
# TODO (sven): Deprecate Episode/EpisodeV2 with new API stack.
episode: Union[EpisodeType, EpisodeV2],
# TODO (sven): Deprecate this arg new API stack (in favor of `env_runner`).
worker: Optional["EnvRunner"] = None,
env_runner: Optional["EnvRunner"] = None,
metrics_logger: Optional[MetricsLogger] = None,
# TODO (sven): Deprecate this arg new API stack (in favor of `env`).
base_env: Optional[BaseEnv] = None,
env: Optional[gym.Env] = None,
# TODO (sven): Deprecate this arg new API stack (in favor of `rl_module`).
policies: Optional[Dict[PolicyID, Policy]] = None,
rl_module: Optional[RLModule] = None,
env_index: int,
**kwargs,
) -> None:
"""Callback run when a new episode is created (but has not started yet!).
This method gets called after a new SingleAgentEpisode or MultiAgentEpisode
instance has been created. This happens before the respective sub-environment's
`reset()` is called by RLlib.
1) SingleAgentEpisode/MultiAgentEpisode created: This callback is called.
2) Respective sub-environment (gym.Env) is `reset()`.
3) Callback `on_episode_start` is called.
4) Stepping through sub-environment/episode commences.
Args:
episode: The newly created SingleAgentEpisode or MultiAgentEpisode.
This is the episode that is about to be started with an upcoming
`env.reset()`. Only after this reset call, the `on_episode_start`
callback will be called.
env_runner: Reference to the current EnvRunner.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics after Episode creation.
env: The gym.Env running the episode.
rl_module: The RLModule used to compute actions for stepping the env. In
single-agent mode, this is a simple RLModule, in multi-agent mode, this
is a MultiRLModule.
env_index: The index of the sub-environment that is about to be reset.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_episode_start(
self,
*,
episode: Union[EpisodeType, EpisodeV2],
env_runner: Optional["EnvRunner"] = None,
metrics_logger: Optional[MetricsLogger] = None,
env: Optional[gym.Env] = None,
env_index: int,
rl_module: Optional[RLModule] = None,
# TODO (sven): Deprecate these args.
worker: Optional["EnvRunner"] = None,
base_env: Optional[BaseEnv] = None,
policies: Optional[Dict[PolicyID, Policy]] = None,
**kwargs,
) -> None:
"""Callback run right after an Episode has been started.
This method gets called after a SingleAgentEpisode or MultiAgentEpisode instance
has been reset with a call to `env.reset()` by the EnvRunner.
1) Single-/MultiAgentEpisode created: `on_episode_created()` is called.
2) Respective sub-environment (gym.Env) is `reset()`.
3) Single-/MultiAgentEpisode starts: This callback is called.
4) Stepping through sub-environment/episode commences.
Args:
episode: The just started (after `env.reset()`) SingleAgentEpisode or
MultiAgentEpisode object.
env_runner: Reference to the EnvRunner running the env and episode.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics during env/episode stepping.
env: The gym.Env or gym.vector.Env object running the started episode.
env_index: The index of the sub-environment that is about to be reset
(within the vector of sub-environments of the BaseEnv).
rl_module: The RLModule used to compute actions for stepping the env. In
single-agent mode, this is a simple RLModule, in multi-agent mode, this
is a MultiRLModule.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_episode_step(
self,
*,
episode: Union[EpisodeType, EpisodeV2],
env_runner: Optional["EnvRunner"] = None,
metrics_logger: Optional[MetricsLogger] = None,
env: Optional[gym.Env] = None,
env_index: int,
rl_module: Optional[RLModule] = None,
# TODO (sven): Deprecate these args.
worker: Optional["EnvRunner"] = None,
base_env: Optional[BaseEnv] = None,
policies: Optional[Dict[PolicyID, Policy]] = None,
**kwargs,
) -> None:
"""Called on each episode step (after the action(s) has/have been logged).
Note that on the new API stack, this callback is also called after the final
step of an episode, meaning when terminated/truncated are returned as True
from the `env.step()` call, but is still provided with the non-numpy'ized
episode object (meaning the data has NOT been converted to numpy arrays yet).
The exact time of the call of this callback is after `env.step([action])` and
also after the results of this step (observation, reward, terminated, truncated,
infos) have been logged to the given `episode` object.
Args:
episode: The just stepped SingleAgentEpisode or MultiAgentEpisode object
(after `env.step()` and after returned obs, rewards, etc.. have been
logged to the episode object).
env_runner: Reference to the EnvRunner running the env and episode.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics during env/episode stepping.
env: The gym.Env or gym.vector.Env object running the started episode.
env_index: The index of the sub-environment that has just been stepped.
rl_module: The RLModule used to compute actions for stepping the env. In
single-agent mode, this is a simple RLModule, in multi-agent mode, this
is a MultiRLModule.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_episode_end(
self,
*,
episode: Union[EpisodeType, EpisodeV2],
prev_episode_chunks: Optional[List[EpisodeType]] = None,
env_runner: Optional["EnvRunner"] = None,
metrics_logger: Optional[MetricsLogger] = None,
env: Optional[gym.Env] = None,
env_index: int,
rl_module: Optional[RLModule] = None,
# TODO (sven): Deprecate these args.
worker: Optional["EnvRunner"] = None,
base_env: Optional[BaseEnv] = None,
policies: Optional[Dict[PolicyID, Policy]] = None,
**kwargs,
) -> None:
"""Called when an episode is done (after terminated/truncated have been logged).
The exact time of the call of this callback is after `env.step([action])` and
also after the results of this step (observation, reward, terminated, truncated,
infos) have been logged to the given `episode` object, where either terminated
or truncated were True:
- The env is stepped: `final_obs, rewards, ... = env.step([action])`
- The step results are logged `episode.add_env_step(final_obs, rewards)`
- Callback `on_episode_step` is fired.
- Another env-to-module connector call is made (even though we won't need any
RLModule forward pass anymore). We make this additional call to ensure that in
case users use the connector pipeline to process observations (and write them
back into the episode), the episode object has all observations - even the
terminal one - properly processed.
- ---> This callback `on_episode_end()` is fired. <---
- The episode is numpy'ized (i.e. lists of obs/rewards/actions/etc.. are
converted into numpy arrays).
Args:
episode: The terminated/truncated SingleAgent- or MultiAgentEpisode object
(after `env.step()` that returned terminated=True OR truncated=True and
after the returned obs, rewards, etc.. have been logged to the episode
object). Note that this method is still called before(!) the episode
object is numpy'ized, meaning all its timestep data is still present in
lists of individual timestep data.
prev_episode_chunks: A complete list of all previous episode chunks
with the same ID as `episode` that have been sampled on this EnvRunner.
In order to compile metrics across the complete episode, users should
loop through the list: `[episode] + previous_episode_chunks` and
accumulate the required information.
env_runner: Reference to the EnvRunner running the env and episode.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics during env/episode stepping.
env: The gym.Env or gym.vector.Env object running the started episode.
env_index: The index of the sub-environment that has just been terminated
or truncated.
rl_module: The RLModule used to compute actions for stepping the env. In
single-agent mode, this is a simple RLModule, in multi-agent mode, this
is a MultiRLModule.
kwargs: Forward compatibility placeholder.
"""
pass
@OverrideToImplementCustomLogic
def on_sample_end(
self,
*,
env_runner: Optional["EnvRunner"] = None,
metrics_logger: Optional[MetricsLogger] = None,
samples: Union[SampleBatch, List[EpisodeType]],
# TODO (sven): Deprecate these args.
worker: Optional["EnvRunner"] = None,
**kwargs,
) -> None:
"""Called at the end of `EnvRunner.sample()`.
Args:
env_runner: Reference to the current EnvRunner object.
metrics_logger: The MetricsLogger object inside the `env_runner`. Can be
used to log custom metrics during env/episode stepping.
samples: Lists of SingleAgentEpisode or MultiAgentEpisode instances to be
returned. You can mutate the episodes to modify the returned training
data.
kwargs: Forward compatibility placeholder.
"""
pass
    @OldAPIStack
    def on_sub_environment_created(
        self,
        *,
        worker: "EnvRunner",
        sub_environment: EnvType,
        env_context: EnvContext,
        env_index: Optional[int] = None,
        **kwargs,
    ) -> None:
        """Callback run when a new sub-environment has been created.

        This method gets called after each sub-environment (usually a
        gym.Env) has been created, validated (RLlib built-in validation
        + possible custom validation function implemented by overriding
        `Algorithm.validate_env()`), wrapped (e.g. video-wrapper), and seeded.

        Args:
            worker: Reference to the current EnvRunner.
            sub_environment: The sub-environment instance that has been
                created. This is usually a gym.Env object.
            env_context: The `EnvContext` object that has been passed to
                the env's constructor.
            env_index: The index of the sub-environment that has been created
                (within the vector of sub-environments of the gym.vector.Env).
            kwargs: Forward compatibility placeholder.
        """
        # Intentionally empty default implementation (hook for subclasses).
        pass
    @OldAPIStack
    def on_postprocess_trajectory(
        self,
        *,
        worker: "EnvRunner",
        episode,
        agent_id: AgentID,
        policy_id: PolicyID,
        policies: Dict[PolicyID, Policy],
        postprocessed_batch: SampleBatch,
        original_batches: Dict[AgentID, Tuple[Policy, SampleBatch]],
        **kwargs,
    ) -> None:
        """Called immediately after a policy's postprocess_fn is called.

        You can use this callback to do additional postprocessing for a policy,
        including looking at the trajectory data of other agents in multi-agent
        settings.

        Args:
            worker: Reference to the current rollout worker.
            episode: Episode object.
            agent_id: Id of the current agent.
            policy_id: Id of the current policy for the agent.
            policies: Dict mapping policy IDs to policy objects. In single
                agent mode there will only be a single "default_policy".
            postprocessed_batch: The postprocessed sample batch
                for this agent. You can mutate this object to apply your own
                trajectory postprocessing.
            original_batches: Dict mapping agent IDs to their unpostprocessed
                trajectory data. You should not mutate this object.
            kwargs: Forward compatibility placeholder.
        """
        # Intentionally empty default implementation (hook for subclasses).
        pass
    @OldAPIStack
    def on_create_policy(self, *, policy_id: PolicyID, policy: Policy) -> None:
        """Callback run whenever a new policy is added to an algorithm.

        Args:
            policy_id: ID of the newly created policy.
            policy: The policy just created.
        """
        # Intentionally empty default implementation (hook for subclasses).
        pass
    @OldAPIStack
    def on_learn_on_batch(
        self, *, policy: Policy, train_batch: SampleBatch, result: dict, **kwargs
    ) -> None:
        """Called at the beginning of Policy.learn_on_batch().

        Note: This is called before 0-padding via
        `pad_batch_to_sequences_of_same_size`.

        Also note, SampleBatch.INFOS column will not be available on
        train_batch within this callback if framework is tf1, due to
        the fact that tf1 static graph would mistake it as part of the
        input dict if present.
        It is available though, for tf2 and torch frameworks.

        Args:
            policy: Reference to the current Policy object.
            train_batch: SampleBatch to be trained on. You can
                mutate this object to modify the samples generated.
            result: A results dict to add custom metrics to.
            kwargs: Forward compatibility placeholder.
        """
        # Intentionally empty default implementation (hook for subclasses).
        pass
    # Deprecated, use `on_env_runners_recreated`, instead.
    def on_workers_recreated(
        self,
        *,
        algorithm,
        worker_set,
        worker_ids,
        is_evaluation,
        **kwargs,
    ) -> None:
        """Deprecated hook; use `on_env_runners_recreated` instead.

        Default implementation is a no-op. Kept so existing subclasses that
        still override it continue to load; no arguments are documented here
        on purpose — new code should not use this entry point.
        """
        pass
| RLlibCallback |
python | networkx__networkx | networkx/algorithms/tree/tests/test_mst.py | {
"start": 28157,
"end": 32372
} | class ____:
@classmethod
def setup_class(cls):
global np
np = pytest.importorskip("numpy")
sp = pytest.importorskip("scipy")
def test_nst_disconnected(self):
G = nx.empty_graph(2)
assert np.isclose(nx.number_of_spanning_trees(G), 0)
def test_nst_no_nodes(self):
G = nx.Graph()
with pytest.raises(nx.NetworkXPointlessConcept):
nx.number_of_spanning_trees(G)
def test_nst_weight(self):
G = nx.Graph()
G.add_edge(1, 2, weight=1)
G.add_edge(1, 3, weight=1)
G.add_edge(2, 3, weight=2)
# weights are ignored
assert np.isclose(nx.number_of_spanning_trees(G), 3)
# including weight
assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), 5)
def test_nst_negative_weight(self):
G = nx.Graph()
G.add_edge(1, 2, weight=1)
G.add_edge(1, 3, weight=-1)
G.add_edge(2, 3, weight=-2)
# weights are ignored
assert np.isclose(nx.number_of_spanning_trees(G), 3)
# including weight
assert np.isclose(nx.number_of_spanning_trees(G, weight="weight"), -1)
def test_nst_selfloop(self):
# self-loops are ignored
G = nx.complete_graph(3)
G.add_edge(1, 1)
assert np.isclose(nx.number_of_spanning_trees(G), 3)
def test_nst_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2)
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(2, 3)
assert np.isclose(nx.number_of_spanning_trees(G), 5)
def test_nst_complete_graph(self):
# this is known as Cayley's formula
N = 5
G = nx.complete_graph(N)
assert np.isclose(nx.number_of_spanning_trees(G), N ** (N - 2))
def test_nst_path_graph(self):
G = nx.path_graph(5)
assert np.isclose(nx.number_of_spanning_trees(G), 1)
def test_nst_cycle_graph(self):
G = nx.cycle_graph(5)
assert np.isclose(nx.number_of_spanning_trees(G), 5)
def test_nst_directed_noroot(self):
G = nx.empty_graph(3, create_using=nx.MultiDiGraph)
with pytest.raises(nx.NetworkXError):
nx.number_of_spanning_trees(G)
def test_nst_directed_root_not_exist(self):
G = nx.empty_graph(3, create_using=nx.MultiDiGraph)
with pytest.raises(nx.NetworkXError):
nx.number_of_spanning_trees(G, root=42)
def test_nst_directed_not_weak_connected(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(3, 4)
assert np.isclose(nx.number_of_spanning_trees(G, root=1), 0)
def test_nst_directed_cycle_graph(self):
G = nx.DiGraph()
G = nx.cycle_graph(7, G)
assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1)
def test_nst_directed_complete_graph(self):
G = nx.DiGraph()
G = nx.complete_graph(7, G)
assert np.isclose(nx.number_of_spanning_trees(G, root=0), 7**5)
def test_nst_directed_multi(self):
G = nx.MultiDiGraph()
G = nx.cycle_graph(3, G)
G.add_edge(1, 2)
assert np.isclose(nx.number_of_spanning_trees(G, root=0), 2)
def test_nst_directed_selfloop(self):
G = nx.MultiDiGraph()
G = nx.cycle_graph(3, G)
G.add_edge(1, 1)
assert np.isclose(nx.number_of_spanning_trees(G, root=0), 1)
def test_nst_directed_weak_connected(self):
G = nx.MultiDiGraph()
G = nx.cycle_graph(3, G)
G.remove_edge(1, 2)
assert np.isclose(nx.number_of_spanning_trees(G, root=0), 0)
def test_nst_directed_weighted(self):
# from root=1:
# arborescence 1: 1->2, 1->3, weight=2*1
# arborescence 2: 1->2, 2->3, weight=2*3
G = nx.DiGraph()
G.add_edge(1, 2, weight=2)
G.add_edge(1, 3, weight=1)
G.add_edge(2, 3, weight=3)
Nst = nx.number_of_spanning_trees(G, root=1, weight="weight")
assert np.isclose(Nst, 8)
Nst = nx.number_of_spanning_trees(G, root=2, weight="weight")
assert np.isclose(Nst, 0)
Nst = nx.number_of_spanning_trees(G, root=3, weight="weight")
assert np.isclose(Nst, 0)
| TestNumberSpanningTrees |
python | ipython__ipython | IPython/core/debugger.py | {
"start": 45197,
"end": 46460
} | class ____(Pdb):
"""Version of debugger where KeyboardInterrupt exits the debugger altogether."""
    def cmdloop(self, intro=None):
        """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
        try:
            return OldPdb.cmdloop(self, intro=intro)
        except KeyboardInterrupt:
            # Stop the debugger from pausing in any further frames.
            self.stop_here = lambda frame: False  # type: ignore[method-assign]
            self.do_quit("")
            # Remove the global trace function so execution resumes untraced.
            sys.settrace(None)
            # NOTE(review): `quitting` is reset before re-raising — presumably
            # so the Bdb machinery doesn't treat this as a normal quit; confirm.
            self.quitting = False
            raise
    def _cmdloop(self):
        """Run `cmdloop`, propagating KeyboardInterrupt to the caller."""
        while True:
            try:
                # keyboard interrupts allow for an easy way to cancel
                # the current command, so allow them during interactive input
                self.allow_kbdint = True
                self.cmdloop()
                self.allow_kbdint = False
                break
            except KeyboardInterrupt:
                self.message("--KeyboardInterrupt--")
                # NOTE(review): the re-raise means the loop never retries after
                # an interrupt — the `while True` is effectively one-shot here.
                # Confirm this is intended (cmdloop above handles the teardown).
                raise
def set_trace(frame=None, header=None):
    """Start an interactive `Pdb` session.

    Debugging begins at *frame*; when *frame* is omitted, the caller's
    frame is used instead. An optional *header* line is printed first.
    """
    debugger = Pdb()
    if header is not None:
        debugger.message(header)
    target_frame = frame or sys._getframe().f_back
    debugger.set_trace(target_frame)
| InterruptiblePdb |
python | pytorch__pytorch | test/ao/sparsity/test_activation_sparsifier.py | {
"start": 1164,
"end": 15177
} | class ____(TestCase):
    def _check_constructor(self, activation_sparsifier, model, defaults, sparse_config):
        """Helper function to check if the model, defaults and sparse_config are loaded correctly
        in the activation sparsifier
        """
        # NOTE(review): `model` is currently unused in this helper — confirm
        # whether a model-identity check was intended here.
        sparsifier_defaults = activation_sparsifier.defaults
        combined_defaults = {**defaults, "sparse_config": sparse_config}
        # more keys are populated in activation sparsifier (even though they may be None)
        assert len(combined_defaults) <= len(activation_sparsifier.defaults)
        for key, config in sparsifier_defaults.items():
            # all the keys in combined_defaults should be present in sparsifier defaults
            # (keys only present in the sparsifier must map to None, since
            # `combined_defaults.get(key)` returns None for them)
            assert config == combined_defaults.get(key)
def _check_register_layer(
self, activation_sparsifier, defaults, sparse_config, layer_args_list
):
"""Checks if layers in the model are correctly mapped to it's arguments.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
defaults (Dict)
all default config (except sparse_config)
sparse_config (Dict)
default sparse config passed to the sparsifier
layer_args_list (list of tuples)
Each entry in the list corresponds to the layer arguments.
First entry in the tuple corresponds to all the arguments other than sparse_config
Second entry in the tuple corresponds to sparse_config
"""
# check args
data_groups = activation_sparsifier.data_groups
assert len(data_groups) == len(layer_args_list)
for layer_args in layer_args_list:
layer_arg, sparse_config_layer = layer_args
# check sparse config
sparse_config_actual = copy.deepcopy(sparse_config)
sparse_config_actual.update(sparse_config_layer)
name = module_to_fqn(activation_sparsifier.model, layer_arg["layer"])
assert data_groups[name]["sparse_config"] == sparse_config_actual
# assert the rest
other_config_actual = copy.deepcopy(defaults)
other_config_actual.update(layer_arg)
other_config_actual.pop("layer")
for key, value in other_config_actual.items():
assert key in data_groups[name]
assert value == data_groups[name][key]
# get_mask should raise error
with self.assertRaises(ValueError):
activation_sparsifier.get_mask(name=name)
def _check_pre_forward_hook(self, activation_sparsifier, data_list):
"""Registering a layer attaches a pre-forward hook to that layer. This function
checks if the pre-forward hook works as expected. Specifically, checks if the
input is aggregated correctly.
Basically, asserts that the aggregate of input activations is the same as what was
computed in the sparsifier.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data_list (list of torch tensors)
data input to the model attached to the sparsifier
"""
# can only check for the first layer
data_agg_actual = data_list[0]
model = activation_sparsifier.model
layer_name = module_to_fqn(model, model.conv1)
agg_fn = activation_sparsifier.data_groups[layer_name]["aggregate_fn"]
for i in range(1, len(data_list)):
data_agg_actual = agg_fn(data_agg_actual, data_list[i])
assert "data" in activation_sparsifier.data_groups[layer_name]
assert torch.all(
activation_sparsifier.data_groups[layer_name]["data"] == data_agg_actual
)
return data_agg_actual
def _check_step(self, activation_sparsifier, data_agg_actual):
"""Checks if .step() works as expected. Specifically, checks if the mask is computed correctly.
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data_agg_actual (torch tensor)
aggregated torch tensor
"""
model = activation_sparsifier.model
layer_name = module_to_fqn(model, model.conv1)
assert layer_name is not None
reduce_fn = activation_sparsifier.data_groups[layer_name]["reduce_fn"]
data_reduce_actual = reduce_fn(data_agg_actual)
mask_fn = activation_sparsifier.data_groups[layer_name]["mask_fn"]
sparse_config = activation_sparsifier.data_groups[layer_name]["sparse_config"]
mask_actual = mask_fn(data_reduce_actual, **sparse_config)
mask_model = activation_sparsifier.get_mask(layer_name)
assert torch.all(mask_model == mask_actual)
for config in activation_sparsifier.data_groups.values():
assert "data" not in config
def _check_squash_mask(self, activation_sparsifier, data):
"""Makes sure that squash_mask() works as usual. Specifically, checks
if the sparsifier hook is attached correctly.
This is achieved by only looking at the identity layers and making sure that
the output == layer(input * mask).
Args:
activation_sparsifier (sparsifier object)
activation sparsifier object that is being tested.
data (torch tensor)
dummy batched data
"""
# create a forward hook for checking output == layer(input * mask)
def check_output(name):
mask = activation_sparsifier.get_mask(name)
features = activation_sparsifier.data_groups[name].get("features")
feature_dim = activation_sparsifier.data_groups[name].get("feature_dim")
def hook(module, input, output):
input_data = input[0]
if features is None:
assert torch.all(mask * input_data == output)
else:
for feature_idx in range(len(features)):
feature = torch.Tensor(
[features[feature_idx]], device=input_data.device
).long()
inp_data_feature = torch.index_select(
input_data, feature_dim, feature
)
out_data_feature = torch.index_select(
output, feature_dim, feature
)
assert torch.all(
mask[feature_idx] * inp_data_feature == out_data_feature
)
return hook
for name, config in activation_sparsifier.data_groups.items():
if "identity" in name:
config["layer"].register_forward_hook(check_output(name))
activation_sparsifier.model(data)
def _check_state_dict(self, sparsifier1):
"""Checks if loading and restoring of state_dict() works as expected.
Basically, dumps the state of the sparsifier and loads it in the other sparsifier
and checks if all the configuration are in line.
This function is called at various times in the workflow to makes sure that the sparsifier
can be dumped and restored at any point in time.
"""
state_dict = sparsifier1.state_dict()
new_model = Model()
# create an empty new sparsifier
sparsifier2 = ActivationSparsifier(new_model)
assert sparsifier2.defaults != sparsifier1.defaults
assert len(sparsifier2.data_groups) != len(sparsifier1.data_groups)
sparsifier2.load_state_dict(state_dict)
assert sparsifier2.defaults == sparsifier1.defaults
for name, state in sparsifier2.state.items():
assert name in sparsifier1.state
mask1 = sparsifier1.state[name]["mask"]
mask2 = state["mask"]
if mask1 is None:
assert mask2 is None
else:
assert type(mask1) is type(mask2)
if isinstance(mask1, list):
assert len(mask1) == len(mask2)
for idx in range(len(mask1)):
assert torch.all(mask1[idx] == mask2[idx])
else:
assert torch.all(mask1 == mask2)
# make sure that the state dict is stored as torch sparse
for state in state_dict["state"].values():
mask = state["mask"]
if mask is not None:
if isinstance(mask, list):
for idx in range(len(mask)):
assert mask[idx].is_sparse
else:
assert mask.is_sparse
dg1, dg2 = sparsifier1.data_groups, sparsifier2.data_groups
for layer_name, config in dg1.items():
assert layer_name in dg2
# exclude hook and layer
config1 = {
key: value
for key, value in config.items()
if key not in ["hook", "layer"]
}
config2 = {
key: value
for key, value in dg2[layer_name].items()
if key not in ["hook", "layer"]
}
assert config1 == config2
@skipIfTorchDynamo("TorchDynamo fails with unknown reason")
def test_activation_sparsifier(self):
"""Simulates the workflow of the activation sparsifier, starting from object creation
till squash_mask().
The idea is to check that everything works as expected while in the workflow.
"""
# defining aggregate, reduce and mask functions
def agg_fn(x, y):
return x + y
def reduce_fn(x):
return torch.mean(x, dim=0)
def _vanilla_norm_sparsifier(data, sparsity_level):
r"""Similar to data norm sparsifier but block_shape = (1,1).
Simply, flatten the data, sort it and mask out the values less than threshold
"""
data_norm = torch.abs(data).flatten()
_, sorted_idx = torch.sort(data_norm)
threshold_idx = round(sparsity_level * len(sorted_idx))
sorted_idx = sorted_idx[:threshold_idx]
mask = torch.ones_like(data_norm)
mask.scatter_(dim=0, index=sorted_idx, value=0)
mask = mask.reshape(data.shape)
return mask
# Creating default function and sparse configs
# default sparse_config
sparse_config = {"sparsity_level": 0.5}
defaults = {"aggregate_fn": agg_fn, "reduce_fn": reduce_fn}
# simulate the workflow
# STEP 1: make data and activation sparsifier object
model = Model() # create model
activation_sparsifier = ActivationSparsifier(model, **defaults, **sparse_config)
# Test Constructor
self._check_constructor(activation_sparsifier, model, defaults, sparse_config)
# STEP 2: Register some layers
register_layer1_args = {
"layer": model.conv1,
"mask_fn": _vanilla_norm_sparsifier,
}
sparse_config_layer1 = {"sparsity_level": 0.3}
register_layer2_args = {
"layer": model.linear1,
"features": [0, 10, 234],
"feature_dim": 1,
"mask_fn": _vanilla_norm_sparsifier,
}
sparse_config_layer2 = {"sparsity_level": 0.1}
register_layer3_args = {
"layer": model.identity1,
"mask_fn": _vanilla_norm_sparsifier,
}
sparse_config_layer3 = {"sparsity_level": 0.3}
register_layer4_args = {
"layer": model.identity2,
"features": [0, 10, 20],
"feature_dim": 1,
"mask_fn": _vanilla_norm_sparsifier,
}
sparse_config_layer4 = {"sparsity_level": 0.1}
layer_args_list = [
(register_layer1_args, sparse_config_layer1),
(register_layer2_args, sparse_config_layer2),
]
layer_args_list += [
(register_layer3_args, sparse_config_layer3),
(register_layer4_args, sparse_config_layer4),
]
# Registering..
for layer_args in layer_args_list:
layer_arg, sparse_config_layer = layer_args
activation_sparsifier.register_layer(**layer_arg, **sparse_config_layer)
# check if things are registered correctly
self._check_register_layer(
activation_sparsifier, defaults, sparse_config, layer_args_list
)
# check state_dict after registering and before model forward
self._check_state_dict(activation_sparsifier)
# check if forward pre hooks actually work
# some dummy data
data_list = []
num_data_points = 5
for _ in range(num_data_points):
rand_data = torch.randn(16, 1, 28, 28)
activation_sparsifier.model(rand_data)
data_list.append(rand_data)
data_agg_actual = self._check_pre_forward_hook(activation_sparsifier, data_list)
# check state_dict() before step()
self._check_state_dict(activation_sparsifier)
# STEP 3: sparsifier step
activation_sparsifier.step()
# check state_dict() after step() and before squash_mask()
self._check_state_dict(activation_sparsifier)
# self.check_step()
self._check_step(activation_sparsifier, data_agg_actual)
# STEP 4: squash mask
activation_sparsifier.squash_mask()
self._check_squash_mask(activation_sparsifier, data_list[0])
# check state_dict() after squash_mask()
self._check_state_dict(activation_sparsifier)
if __name__ == "__main__":
raise_on_run_directly("test/test_ao_sparsity.py")
| TestActivationSparsifier |
python | doocs__leetcode | solution/2700-2799/2716.Minimize String Length/Solution.py | {
"start": 0,
"end": 95
} | class ____:
def minimizedStringLength(self, s: str) -> int:
return len(set(s))
| Solution |
python | TheAlgorithms__Python | web_programming/covid_stats_via_xpath.py | {
"start": 400,
"end": 1605
} | class ____(NamedTuple):
cases: str
deaths: str
recovered: str
def covid_stats(
    url: str = (
        "https://web.archive.org/web/20250825095350/"
        "https://www.worldometers.info/coronavirus/"
    ),
) -> CovidData:
    """Scrape worldwide COVID-19 totals (cases, deaths, recovered) from *url*.

    Returns a 3-tuple of display strings; on timeout, HTTP error, or an
    unexpected page layout it returns ("N/A", "N/A", "N/A") instead of raising.
    """
    # The three headline counters on the page all match this XPath.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    try:
        # Relies on httpx's `raise_for_status()` returning the response,
        # so the call can be chained.
        response = httpx.get(url, timeout=10).raise_for_status()
    except httpx.TimeoutException:
        print(
            "Request timed out. Please check your network connection "
            "or try again later."
        )
        return CovidData("N/A", "N/A", "N/A")
    except httpx.HTTPStatusError as e:
        print(f"HTTP error occurred: {e}")
        return CovidData("N/A", "N/A", "N/A")
    data = html.fromstring(response.content).xpath(xpath_str)
    # Exactly three counters are expected (cases, deaths, recovered);
    # anything else means the page structure changed.
    if len(data) != 3:
        print("Unexpected data format. The page structure may have changed.")
        data = "N/A", "N/A", "N/A"
    return CovidData(*data)
if __name__ == "__main__":
fmt = (
"Total COVID-19 cases in the world: {}\n"
"Total deaths due to COVID-19 in the world: {}\n"
"Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| CovidData |
python | apache__airflow | airflow-core/tests/unit/jobs/test_triggerer_job.py | {
"start": 31993,
"end": 34820
} | class ____(BaseTrigger):
def __init__(self, trigger_dag_id, run_ids, states, logical_dates):
self.trigger_dag_id = trigger_dag_id
self.run_ids = run_ids
self.states = states
self.logical_dates = logical_dates
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
f"{type(self).__module__}.{type(self).__qualname__}",
{
"trigger_dag_id": self.trigger_dag_id,
"run_ids": self.run_ids,
"states": self.states,
"logical_dates": self.logical_dates,
},
)
    async def run(self, **args) -> AsyncIterator[TriggerEvent]:
        """Query run count and state for the watched DAG and emit one event."""
        # Imported lazily inside the coroutine; the execution-time task_runner
        # module is only needed while the trigger actually runs.
        from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance

        # Both helpers are synchronous, so wrap them with sync_to_async to
        # avoid blocking the triggerer's event loop.
        dag_run_states_count = await sync_to_async(RuntimeTaskInstance.get_dr_count)(
            dag_id=self.trigger_dag_id,
            run_ids=self.run_ids,
            states=self.states,
            logical_dates=self.logical_dates,
        )
        # State is only fetched for the first configured run id.
        dag_run_state = await sync_to_async(RuntimeTaskInstance.get_dagrun_state)(
            dag_id=self.trigger_dag_id,
            run_id=self.run_ids[0],
        )
        # Single-shot trigger: yield one event carrying both results.
        yield TriggerEvent({"count": dag_run_states_count, "dag_run_state": dag_run_state})
@pytest.mark.asyncio
@pytest.mark.execution_timeout(20)
async def test_trigger_can_fetch_trigger_dag_run_count_and_state_in_deferrable(session, dag_maker):
"""Checks that the trigger will successfully fetch the count of trigger DAG runs."""
# Create the test DAG and task
with dag_maker(dag_id="trigger_can_fetch_trigger_dag_run_count_and_state_in_deferrable", session=session):
EmptyOperator(task_id="dummy1")
dr = dag_maker.create_dagrun()
task_instance = dr.task_instances[0]
task_instance.state = TaskInstanceState.DEFERRED
# Use the same dag run with states deferred to fetch the count
trigger = CustomTriggerDagRun(
trigger_dag_id=dr.dag_id, run_ids=[dr.run_id], states=[dr.state], logical_dates=[dr.logical_date]
)
trigger_orm = Trigger(
classpath=trigger.serialize()[0],
kwargs={
"trigger_dag_id": dr.dag_id,
"run_ids": [dr.run_id],
"states": [dr.state],
"logical_dates": [dr.logical_date],
},
)
session.add(trigger_orm)
session.commit()
task_instance.trigger_id = trigger_orm.id
job = Job()
session.add(job)
session.commit()
supervisor = DummyTriggerRunnerSupervisor.start(job=job, capacity=1, logger=None)
supervisor.run()
task_instance.refresh_from_db()
assert task_instance.state == TaskInstanceState.SCHEDULED
assert task_instance.next_method != "__fail__"
assert task_instance.next_kwargs == {"event": {"count": 1, "dag_run_state": "running"}}
| CustomTriggerDagRun |
python | jina-ai__jina | tests/integration/docarray_v2/test_issues.py | {
"start": 347,
"end": 457
} | class ____(BaseDoc):
nested: Optional[Nested1Doc] = None
num: Optional[int] = None
text: str
| RootDoc |
python | graphql-python__graphene | graphene/validation/tests/test_depth_limit_validator.py | {
"start": 246,
"end": 345
} | class ____(Interface):
name = String(required=True)
class meta:
name = "Pet"
| PetType |
python | Textualize__textual | docs/examples/widgets/sparkline_colors.py | {
"start": 105,
"end": 979
} | class ____(App[None]):
CSS_PATH = "sparkline_colors.tcss"
def compose(self) -> ComposeResult:
nums = [abs(sin(x / 3.14)) for x in range(0, 360 * 6, 20)]
yield Sparkline(nums, summary_function=max, id="fst")
yield Sparkline(nums, summary_function=max, id="snd")
yield Sparkline(nums, summary_function=max, id="trd")
yield Sparkline(nums, summary_function=max, id="frt")
yield Sparkline(nums, summary_function=max, id="fft")
yield Sparkline(nums, summary_function=max, id="sxt")
yield Sparkline(nums, summary_function=max, id="svt")
yield Sparkline(nums, summary_function=max, id="egt")
yield Sparkline(nums, summary_function=max, id="nnt")
yield Sparkline(nums, summary_function=max, id="tnt")
app = SparklineColorsApp()
if __name__ == "__main__":
app.run()
| SparklineColorsApp |
python | python-markdown__markdown | markdown/extensions/md_in_html.py | {
"start": 1085,
"end": 13789
} | class ____(HTMLExtractor):
"""
Override `HTMLExtractor` and create `etree` `Elements` for any elements which should have content parsed as
Markdown.
"""
def __init__(self, md: Markdown, *args, **kwargs):
# All block-level tags.
self.block_level_tags = set(md.block_level_elements.copy())
# Block-level tags in which the content only gets span level parsing
self.span_tags = set(
['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
)
# Block-level tags which never get their content parsed.
self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])
super().__init__(md, *args, **kwargs)
# Block-level tags in which the content gets parsed as blocks
self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
self.span_and_blocks_tags = self.block_tags | self.span_tags
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.mdstack: list[str] = [] # When markdown=1, stack contains a list of tags
self.treebuilder = etree.TreeBuilder()
self.mdstate: list[Literal['block', 'span', 'off', None]] = []
self.mdstarted: list[bool] = []
super().reset()
def close(self):
"""Handle any buffered data."""
super().close()
# Handle any unclosed tags.
if self.mdstack:
# Close the outermost parent. `handle_endtag` will close all unclosed children.
self.handle_endtag(self.mdstack[0])
def get_element(self) -> etree.Element:
""" Return element from `treebuilder` and reset `treebuilder` for later use. """
element = self.treebuilder.close()
self.treebuilder = etree.TreeBuilder()
return element
    def get_state(self, tag, attrs: Mapping[str, str]) -> Literal['block', 'span', 'off', None]:
        """ Return state from tag and `markdown` attribute. One of 'block', 'span', or 'off'. """
        md_attr = attrs.get('markdown', '0')
        if md_attr == 'markdown':
            # `<tag markdown>` is the same as `<tag markdown='1'>`.
            md_attr = '1'
        # State of the innermost open "markdown" ancestor, if any.
        parent_state = self.mdstate[-1] if self.mdstate else None
        if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
            # Only use the parent state if it is more restrictive than the markdown attribute.
            md_attr = parent_state
        # markdown='1' resolves by tag category; 'block'/'span' are explicit
        # requests, honored for any tag that supports that kind of parsing.
        if ((md_attr == '1' and tag in self.block_tags) or
                (md_attr == 'block' and tag in self.span_and_blocks_tags)):
            return 'block'
        elif ((md_attr == '1' and tag in self.span_tags) or
                (md_attr == 'span' and tag in self.span_and_blocks_tags)):
            return 'span'
        elif tag in self.block_level_tags:
            # Block-level tag without (effective) markdown parsing enabled.
            return 'off'
        else:  # pragma: no cover
            return None
def handle_starttag(self, tag, attrs):
# Handle tags that should always be empty and do not specify a closing tag
if tag in self.empty_tags and (self.at_line_start() or self.intail):
attrs = {key: value if value is not None else key for key, value in attrs}
if "markdown" in attrs:
attrs.pop('markdown')
element = etree.Element(tag, attrs)
data = etree.tostring(element, encoding='unicode', method='html')
else:
data = self.get_starttag_text()
self.handle_empty_tag(data, True)
return
if (
tag in self.block_level_tags and
(self.at_line_start() or self.intail or self.mdstarted and self.mdstarted[-1])
):
# Valueless attribute (ex: `<tag checked>`) results in `[('checked', None)]`.
# Convert to `{'checked': 'checked'}`.
attrs = {key: value if value is not None else key for key, value in attrs}
state = self.get_state(tag, attrs)
if self.inraw or (state in [None, 'off'] and not self.mdstack):
# fall back to default behavior
attrs.pop('markdown', None)
super().handle_starttag(tag, attrs)
else:
if 'p' in self.mdstack and tag in self.block_level_tags:
# Close unclosed 'p' tag
self.handle_endtag('p')
self.mdstate.append(state)
self.mdstack.append(tag)
self.mdstarted.append(True)
attrs['markdown'] = state
self.treebuilder.start(tag, attrs)
else:
# Span level tag
if self.inraw:
super().handle_starttag(tag, attrs)
else:
text = self.get_starttag_text()
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
if tag in self.CDATA_CONTENT_ELEMENTS:
# This is presumably a standalone tag in a code span (see #1036).
self.clear_cdata_mode()
def handle_endtag(self, tag):
if tag in self.block_level_tags:
if self.inraw:
super().handle_endtag(tag)
elif tag in self.mdstack:
# Close element and any unclosed children
while self.mdstack:
item = self.mdstack.pop()
self.mdstate.pop()
self.mdstarted.pop()
self.treebuilder.end(item)
if item == tag:
break
if not self.mdstack:
# Last item in stack is closed. Stash it
element = self.get_element()
# Get last entry to see if it ends in newlines
# If it is an element, assume there is no newlines
item = self.cleandoc[-1] if self.cleandoc else ''
# If we only have one newline before block element, add another
if not item.endswith('\n\n') and item.endswith('\n'):
self.cleandoc.append('\n')
# Flatten the HTML structure of "markdown" blocks such that when they
# get parsed, content will be parsed similar inside the blocks as it
# does outside the block. Having real HTML elements in the tree before
# the content adjacent content is processed can cause unpredictable
# issues for extensions.
current = element
last = []
while current is not None:
for child in list(current):
current.remove(child)
text = current.text if current.text is not None else ''
tail = child.tail if child.tail is not None else ''
child.tail = None
state = child.attrib.get('markdown', 'off')
# Add a newline to tail if it is not just a trailing newline
if tail != '\n':
tail = '\n' + tail.rstrip('\n')
# Ensure there is an empty new line between blocks
if not text.endswith('\n\n'):
text = text.rstrip('\n') + '\n\n'
# Process the block nested under the span appropriately
if state in ('span', 'block'):
current.text = f'{text}{self.md.htmlStash.store(child)}{tail}'
last.append(child)
else:
# Non-Markdown HTML will not be recursively parsed for Markdown,
# so we can just remove markers and leave them unflattened.
# Additionally, we don't need to append to our list for further
# processing.
child.attrib.pop('markdown')
[c.attrib.pop('markdown', None) for c in child.iter()]
current.text = f'{text}{self.md.htmlStash.store(child)}{tail}'
# Target the child elements that have been expanded.
current = last.pop(0) if last else None
self.cleandoc.append(self.md.htmlStash.store(element))
self.cleandoc.append('\n\n')
self.state = []
# Check if element has a tail
if not blank_line_re.match(
self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
# More content exists after `endtag`.
self.intail = True
else:
# Treat orphan closing tag as a span level tag.
text = self.get_endtag_text(tag)
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
else:
# Span level tag
if self.inraw:
super().handle_endtag(tag)
else:
text = self.get_endtag_text(tag)
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
def handle_startendtag(self, tag, attrs):
if tag in self.empty_tags:
attrs = {key: value if value is not None else key for key, value in attrs}
if "markdown" in attrs:
attrs.pop('markdown')
element = etree.Element(tag, attrs)
data = etree.tostring(element, encoding='unicode', method='html')
else:
data = self.get_starttag_text()
else:
data = self.get_starttag_text()
self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))
    def handle_data(self, data):
        """Route character data to either the raw-HTML path or the tree builder."""
        # Any newline in the data ends "tail" mode (content after an end tag).
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw or not self.mdstack:
            # Not inside a markdown="..." element: default raw handling.
            super().handle_data(data)
        else:
            # Data means the innermost markdown element is past its start tag.
            self.mdstarted[-1] = False
            self.treebuilder.data(data)
def handle_empty_tag(self, data, is_block):
if self.inraw or not self.mdstack:
super().handle_empty_tag(data, is_block)
else:
if self.at_line_start() and is_block:
self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
elif self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(data))
else:
self.handle_data(data)
def parse_pi(self, i: int) -> int:
if self.at_line_start() or self.intail or self.mdstack:
# The same override exists in `HTMLExtractor` without the check
# for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
return super(HTMLExtractor, self).parse_pi(i)
# This is not the beginning of a raw block so treat as plain data
# and avoid consuming any tags which may follow (see #1066).
self.handle_data('<?')
return i + 2
def parse_html_declaration(self, i: int) -> int:
if self.at_line_start() or self.intail or self.mdstack:
if self.rawdata[i:i+3] == '<![' and not self.rawdata[i:i+9] == '<![CDATA[':
# We have encountered the bug in #1534 (Python bug `gh-77057`).
# Provide an override until we drop support for Python < 3.13.
result = self.parse_bogus_comment(i)
if result == -1:
self.handle_data(self.rawdata[i:i + 1])
return i + 1
return result
# The same override exists in `HTMLExtractor` without the check
# for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
return super(HTMLExtractor, self).parse_html_declaration(i)
# This is not the beginning of a raw block so treat as plain data
# and avoid consuming any tags which may follow (see #1066).
self.handle_data('<!')
return i + 2
| HTMLExtractorExtra |
python | getsentry__sentry | tests/sentry/api/helpers/test_group_index.py | {
"start": 20994,
"end": 22008
} | class ____(TestCase):
def setUp(self) -> None:
self.group = self.create_group()
self.group_list = [self.group]
self.project_lookup = {self.group.project_id: self.group.project}
def test_is_subscribed(self) -> None:
resp = handle_is_subscribed(True, self.group_list, self.project_lookup, self.user)
assert GroupSubscription.objects.filter(group=self.group, user_id=self.user.id).exists()
assert resp["reason"] == "unknown"
def test_is_subscribed_updates(self) -> None:
GroupSubscription.objects.create(
group=self.group, project=self.group.project, user_id=self.user.id, is_active=False
)
resp = handle_is_subscribed(True, self.group_list, self.project_lookup, self.user)
subscription = GroupSubscription.objects.filter(group=self.group, user_id=self.user.id)
assert subscription.exists()
assert subscription.first().is_active
assert resp["reason"] == "unknown"
| TestHandleIsSubscribed |
python | networkx__networkx | networkx/algorithms/tests/test_covering.py | {
"start": 2118,
"end": 2718
} | class ____:
"""Tests for :func:`networkx.algorithms.is_edge_cover`"""
def test_empty_graph(self):
G = nx.Graph()
assert nx.is_edge_cover(G, set())
def test_graph_with_loop(self):
G = nx.Graph()
G.add_edge(1, 1)
assert nx.is_edge_cover(G, {(1, 1)})
def test_graph_single_edge(self):
G = nx.Graph()
G.add_edge(0, 1)
assert nx.is_edge_cover(G, {(0, 0), (1, 1)})
assert nx.is_edge_cover(G, {(0, 1), (1, 0)})
assert nx.is_edge_cover(G, {(0, 1)})
assert not nx.is_edge_cover(G, {(0, 0)})
| TestIsEdgeCover |
python | Textualize__textual | tests/test_animation.py | {
"start": 5249,
"end": 5317
} | class ____(Static):
counter: var[float] = var(23)
| CancelAnimWidget |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 196941,
"end": 197924
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str, base_id: str, tables: list[str]):
"""Airbyte Source for Airtable.
Documentation can be found at https://docs.airbyte.com/integrations/sources/airtable
Args:
name (str): The name of the destination.
api_key (str): The API Key for the Airtable account. See the Support Guide for more information on how to obtain this key.
base_id (str): The Base ID to integrate the data from. You can find the Base ID following the link Airtable API, log in to your account, select the base you need and find Base ID in the docs.
tables (List[str]): The list of Tables to integrate.
"""
self.api_key = check.str_param(api_key, "api_key")
self.base_id = check.str_param(base_id, "base_id")
self.tables = check.list_param(tables, "tables", str)
super().__init__("Airtable", name)
| AirtableSource |
python | getsentry__sentry | src/sentry/backup/exports.py | {
"start": 1331,
"end": 3254
} | class ____(ABC):
"""
For very large exports, the exporting environment may fall over half-way through the process:
the thread running it may hit some timeout, or it may OOM, or fail for some other ephemeral
reason. To help in such situations, we'd like an API for saving "checkpoints" during the export.
This class provides per-model checkpointing support for exports. Since there is a topologically
sorted order of models being exported, as we move through this list, we can save the exported
JSON for each kind of model in order to some stable media (disk, GCP, etc). If there is a
failure late in the export process, when it is retried, the exporter can check if that
particular model already exists in the checkpointer's cache, thereby avoiding redoing the work
of pulling the models from the database, processing them, etc. This ensures that in most retry
situations, we can quickly "re-ingest" already-exported models in memory and pick up where we
left off.
"""
def _parse_cached_json(self, json_data: bytes) -> RpcExportOk | None:
max_pk = 0
pk_map = PrimaryKeyMap()
models = orjson.loads(json_data)
for model in models:
model_name = model.get("model", None)
pk = model.get("pk", None)
if model_name is None or pk is None:
raise ExportCheckpointerError("Improperly formatted entry")
pk_map.insert(model_name, pk, pk, ImportKind.Inserted)
if pk > max_pk:
max_pk = pk
return RpcExportOk(
mapped_pks=RpcPrimaryKeyMap.into_rpc(pk_map), max_pk=max_pk, json_data=json_data
)
@abstractmethod
def get(self, model_name: NormalizedModelName) -> RpcExportOk | None:
pass
@abstractmethod
def add(self, model_name: NormalizedModelName, json_data: str) -> None:
pass
| ExportCheckpointer |
python | keras-team__keras | keras/src/layers/preprocessing/rescaling_test.py | {
"start": 177,
"end": 4267
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_rescaling_basics(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_dtypes(self):
# int scale
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 2, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int offset
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0, "offset": 2},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int inputs
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
input_dtype="int16",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_rescaling_correctness(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
out = layer(x)
self.assertAllClose(out, x / 255 + 0.5)
def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
next(iter(ds)).numpy()
def test_grain_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = grain.MapDataset.source(x).to_iter_dataset().batch(3).map(layer)
output = next(iter(ds))
self.assertTrue(backend.is_tensor(output))
# Ensure the device of the data is on CPU.
if backend.backend() == "tensorflow":
self.assertIn("CPU", str(output.device))
elif backend.backend() == "jax":
self.assertIn("CPU", str(output.device))
elif backend.backend() == "torch":
self.assertEqual("cpu", str(output.device))
def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
backend.set_image_data_format("channels_first")
layer = layers.Rescaling(
scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
)
x = np.random.random((2, 3, 10, 10)) * 255
layer(x)
backend.set_image_data_format(config)
@pytest.mark.requires_trainable_backend
def test_numpy_args(self):
# https://github.com/keras-team/keras/issues/20072
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": np.array(1.0 / 255.0),
"offset": np.array(0.5),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
| RescalingTest |
python | django__django | tests/m2m_through/models.py | {
"start": 2772,
"end": 3023
} | class ____(models.Model):
title = models.CharField(max_length=50)
invitees = models.ManyToManyField(
to=Person,
through="Invitation",
through_fields=["event", "invitee"],
related_name="events_invited",
)
| Event |
python | PyCQA__pylint | pylint/checkers/base/basic_checker.py | {
"start": 802,
"end": 3530
} | class ____(BaseChecker):
"""Permits separating multiple checks with the same checker name into
classes/file.
"""
name = "basic"
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join(["builtins", x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: f"{x}()"
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
def report_by_type_stats(
sect: reporter_nodes.Section,
stats: LinterStats,
old_stats: LinterStats | None,
) -> None:
"""Make a report of.
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats: dict[str, dict[str, str]] = {}
for node_type in ("module", "class", "method", "function"):
total = stats.get_node_count(node_type)
nice_stats[node_type] = {}
if total != 0:
undocumented_node = stats.get_undocumented(node_type)
documented = total - undocumented_node
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = f"{percent:.2f}"
badname_node = stats.get_bad_names(node_type)
percent = (badname_node * 100.0) / total
nice_stats[node_type]["percent_badname"] = f"{percent:.2f}"
lines = ["type", "number", "old number", "difference", "%documented", "%badname"]
for node_type in ("module", "class", "method", "function"):
node_type = cast(Literal["function", "class", "method", "module"], node_type)
new = stats.get_node_count(node_type)
old = old_stats.get_node_count(node_type) if old_stats else None
diff_str = lint_utils.diff_string(old, new) if old else None
lines += [
node_type,
str(new),
str(old) if old else "NC",
diff_str if diff_str else "NC",
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
]
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
# pylint: disable-next = too-many-public-methods
| _BasicChecker |
python | doocs__leetcode | solution/2000-2099/2037.Minimum Number of Moves to Seat Everyone/Solution.py | {
"start": 0,
"end": 201
} | class ____:
def minMovesToSeat(self, seats: List[int], students: List[int]) -> int:
seats.sort()
students.sort()
return sum(abs(a - b) for a, b in zip(seats, students))
| Solution |
python | django__django | tests/admin_inlines/models.py | {
"start": 3780,
"end": 4085
} | class ____(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(("1", "One"), ("2", "Two")), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
# Models for #12749
| Inner5Tabular |
python | wandb__wandb | wandb/automations/scopes.py | {
"start": 683,
"end": 769
} | class ____(GQLBase):
scope_type: Annotated[ScopeType, Field(frozen=True)]
| _BaseScope |
python | sympy__sympy | sympy/plotting/intervalmath/interval_arithmetic.py | {
"start": 1560,
"end": 15570
} | class ____:
""" Represents an interval containing floating points as start and
end of the interval
The is_valid variable tracks whether the interval obtained as the
result of the function is in the domain and is continuous.
- True: Represents the interval result of a function is continuous and
in the domain of the function.
- False: The interval argument of the function was not in the domain of
the function, hence the is_valid of the result interval is False
- None: The function was not continuous over the interval or
the function's argument interval is partly in the domain of the
function
A comparison between an interval and a real number, or a
comparison between two intervals may return ``intervalMembership``
of two 3-valued logic values.
"""
def __init__(self, *args, is_valid=True, **kwargs):
self.is_valid = is_valid
if len(args) == 1:
if isinstance(args[0], interval):
self.start, self.end = args[0].start, args[0].end
else:
self.start = float(args[0])
self.end = float(args[0])
elif len(args) == 2:
if args[0] < args[1]:
self.start = float(args[0])
self.end = float(args[1])
else:
self.start = float(args[1])
self.end = float(args[0])
else:
raise ValueError("interval takes a maximum of two float values "
"as arguments")
@property
def mid(self):
return (self.start + self.end) / 2.0
@property
def width(self):
return self.end - self.start
def __repr__(self):
return "interval(%f, %f)" % (self.start, self.end)
def __str__(self):
return "[%f, %f]" % (self.start, self.end)
def __lt__(self, other):
if isinstance(other, (int, float)):
if self.end < other:
return intervalMembership(True, self.is_valid)
elif self.start > other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.end < other. start:
return intervalMembership(True, valid)
if self.start > other.end:
return intervalMembership(False, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, (int, float)):
if self.start > other:
return intervalMembership(True, self.is_valid)
elif self.end < other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
return other.__lt__(self)
else:
return NotImplemented
def __eq__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return intervalMembership(True, self.is_valid)
if other in self:
return intervalMembership(None, self.is_valid)
else:
return intervalMembership(False, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.start == other.start and self.end == other.end:
return intervalMembership(True, valid)
elif self.__lt__(other)[0] is not None:
return intervalMembership(False, valid)
else:
return intervalMembership(None, valid)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return intervalMembership(False, self.is_valid)
if other in self:
return intervalMembership(None, self.is_valid)
else:
return intervalMembership(True, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.start == other.start and self.end == other.end:
return intervalMembership(False, valid)
if not self.__lt__(other)[0] is None:
return intervalMembership(True, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, (int, float)):
if self.end <= other:
return intervalMembership(True, self.is_valid)
if self.start > other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
if isinstance(other, interval):
valid = fuzzy_and([self.is_valid, other.is_valid])
if self.end <= other.start:
return intervalMembership(True, valid)
if self.start > other.end:
return intervalMembership(False, valid)
return intervalMembership(None, valid)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, (int, float)):
if self.start >= other:
return intervalMembership(True, self.is_valid)
elif self.end < other:
return intervalMembership(False, self.is_valid)
else:
return intervalMembership(None, self.is_valid)
elif isinstance(other, interval):
return other.__le__(self)
def __add__(self, other):
if isinstance(other, (int, float)):
if self.is_valid:
return interval(self.start + other, self.end + other)
else:
start = self.start + other
end = self.end + other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start + other.start
end = self.end + other.end
valid = fuzzy_and([self.is_valid, other.is_valid])
return interval(start, end, is_valid=valid)
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, (int, float)):
start = self.start - other
end = self.end - other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start - other.end
end = self.end - other.start
valid = fuzzy_and([self.is_valid, other.is_valid])
return interval(start, end, is_valid=valid)
else:
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (int, float)):
start = other - self.end
end = other - self.start
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
return other.__sub__(self)
else:
return NotImplemented
def __neg__(self):
if self.is_valid:
return interval(-self.end, -self.start)
else:
return interval(-self.end, -self.start, is_valid=self.is_valid)
def __mul__(self, other):
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif self.is_valid is None or other.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
inters = []
inters.append(self.start * other.start)
inters.append(self.end * other.start)
inters.append(self.start * other.end)
inters.append(self.end * other.end)
start = min(inters)
end = max(inters)
return interval(start, end)
elif isinstance(other, (int, float)):
return interval(self.start*other, self.end*other, is_valid=self.is_valid)
else:
return NotImplemented
__rmul__ = __mul__
def __contains__(self, other):
if isinstance(other, (int, float)):
return self.start <= other and self.end >= other
else:
return self.start <= other.start and other.end <= self.end
def __rtruediv__(self, other):
if isinstance(other, (int, float)):
other = interval(other)
return other.__truediv__(self)
elif isinstance(other, interval):
return other.__truediv__(self)
else:
return NotImplemented
def __truediv__(self, other):
# Both None and False are handled
if not self.is_valid:
# Don't divide as the value is not valid
return interval(-float('inf'), float('inf'), is_valid=self.is_valid)
if isinstance(other, (int, float)):
if other == 0:
# Divide by zero encountered. valid nowhere
return interval(-float('inf'), float('inf'), is_valid=False)
else:
return interval(self.start / other, self.end / other)
elif isinstance(other, interval):
if other.is_valid is False or self.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif other.is_valid is None or self.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
# denominator contains both signs, i.e. being divided by zero
# return the whole real line with is_valid = None
if 0 in other:
return interval(-float('inf'), float('inf'), is_valid=None)
# denominator negative
this = self
if other.end < 0:
this = -this
other = -other
# denominator positive
inters = []
inters.append(this.start / other.start)
inters.append(this.end / other.start)
inters.append(this.start / other.end)
inters.append(this.end / other.end)
start = max(inters)
end = min(inters)
return interval(start, end)
else:
return NotImplemented
def __pow__(self, other):
# Implements only power to an integer.
from .lib_interval import exp, log
if not self.is_valid:
return self
if isinstance(other, interval):
return exp(other * log(self))
elif isinstance(other, (float, int)):
if other < 0:
return 1 / self.__pow__(abs(other))
else:
if int_valued(other):
return _pow_int(self, other)
else:
return _pow_float(self, other)
else:
return NotImplemented
def __rpow__(self, other):
if isinstance(other, (float, int)):
if not self.is_valid:
#Don't do anything
return self
elif other < 0:
if self.width > 0:
return interval(-float('inf'), float('inf'), is_valid=False)
else:
power_rational = nsimplify(self.start)
num, denom = power_rational.as_numer_denom()
if denom % 2 == 0:
return interval(-float('inf'), float('inf'),
is_valid=False)
else:
start = -abs(other)**self.start
end = start
return interval(start, end)
else:
return interval(other**self.start, other**self.end)
elif isinstance(other, interval):
return other.__pow__(self)
else:
return NotImplemented
def __hash__(self):
return hash((self.is_valid, self.start, self.end))
def _pow_float(inter, power):
"""Evaluates an interval raised to a floating point."""
power_rational = nsimplify(power)
num, denom = power_rational.as_numer_denom()
if num % 2 == 0:
start = abs(inter.start)**power
end = abs(inter.end)**power
if start < 0:
ret = interval(0, max(start, end))
else:
ret = interval(start, end)
return ret
elif denom % 2 == 0:
if inter.end < 0:
return interval(-float('inf'), float('inf'), is_valid=False)
elif inter.start < 0:
return interval(0, inter.end**power, is_valid=None)
else:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0:
start = -abs(inter.start)**power
else:
start = inter.start**power
if inter.end < 0:
end = -abs(inter.end)**power
else:
end = inter.end**power
return interval(start, end, is_valid=inter.is_valid)
def _pow_int(inter, power):
"""Evaluates an interval raised to an integer power"""
power = int(power)
if power & 1:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0 and inter.end > 0:
start = 0
end = max(inter.start**power, inter.end**power)
return interval(start, end)
else:
return interval(inter.start**power, inter.end**power)
| interval |
python | wandb__wandb | tests/system_tests/test_functional/dspy/dspy_callback.py | {
"start": 95,
"end": 2564
} | class ____(dspy.Module):
"""Minimal DSPy module exposing a `Predict` param for signature extraction.
Examples:
>>> mod = MinimalProgram()
"""
def __init__(self) -> None:
super().__init__()
self.predict = dspy.Predict("question: str -> answer: str")
def _build_results_stub():
"""Construct a small set of results for `_log_predictions_table`.
Returns:
list: A list of tuples `(example, prediction, is_correct)`.
Examples:
>>> rows = _build_results_stub()
>>> len(rows) >= 1
True
"""
ex1 = dspy.Example(question="What is 2+2?", answer="4")
pred1 = dspy.Prediction(answer="4")
ex2 = dspy.Example(question="What is 3+3?", answer="6")
pred2 = dspy.Prediction(answer="6")
return [
(ex1, pred1, True),
(ex2, pred2, True),
]
def main() -> None:
"""Run a minimal end-to-end example invoking `WandbDSPyCallback`.
The flow:
- Install a fake `dspy` to avoid external dependencies.
- Initialize a W&B run.
- Instantiate and exercise the callback by simulating evaluate start/end.
- Log a model via `log_best_model` in multiple modes.
Examples:
>>> if __name__ == "__main__":
... main()
"""
from wandb.integration.dspy import WandbDSPyCallback
# Init W&B
with wandb.init(project="dspy-system-test") as run:
# Build callback
cb = WandbDSPyCallback(log_results=True, run=run)
# Simulate dspy.Evaluate instance and lifecycle
class FakeEvaluate:
def __init__(self) -> None:
self.devset = [1, 2, 3] # should be excluded from config
self.num_threads = 2
self.auto = "light"
program = MinimalProgram()
cb.on_evaluate_start(
call_id="c1", instance=FakeEvaluate(), inputs={"program": program}
)
# Emit an evaluation result with prediction rows
results = _build_results_stub()
out = EvaluationResult(score=0.8, results=results)
cb.on_evaluate_end(call_id="c1", outputs=out, exception=None)
# Exercise model artifact saving in different modes using the real Module API
cb.log_best_model(program, save_program=True)
cb.log_best_model(program, save_program=False, filetype="json")
cb.log_best_model(program, save_program=False, filetype="pkl")
if __name__ == "__main__":
main()
| MinimalProgram |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydocstyle/D208.py | {
"start": 418,
"end": 653
} | class ____:
"""All lines are over indented including the last
Args:
Returns"""
# OK: This doesn't get flagged because it is accepted when the closing quotes are on a separate line (see next test). Raises D209
| Platform |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 90110,
"end": 91998
} | class ____(IR):
"""Add new columns to a dataframe."""
__slots__ = ("columns", "should_broadcast")
_non_child = ("schema", "columns", "should_broadcast")
should_broadcast: bool
"""Should the resulting evaluated columns be broadcast to the same length."""
def __init__(
self,
schema: Schema,
columns: Sequence[expr.NamedExpr],
should_broadcast: bool, # noqa: FBT001
df: IR,
):
self.schema = schema
self.columns = tuple(columns)
self.should_broadcast = should_broadcast
self._non_child_args = (self.columns, self.should_broadcast)
self.children = (df,)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="HStack")
def do_evaluate(
cls,
exprs: Sequence[expr.NamedExpr],
should_broadcast: bool, # noqa: FBT001
df: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
columns = [c.evaluate(df) for c in exprs]
if should_broadcast:
columns = broadcast(
*columns,
target_length=df.num_rows if df.num_columns != 0 else None,
stream=df.stream,
)
else:
# Polars ensures this is true, but let's make sure nothing
# went wrong. In this case, the parent node is a
# guaranteed to be a Select which will take care of making
# sure that everything is the same length. The result
# table that might have mismatching column lengths will
# never be turned into a pylibcudf Table with all columns
# by the Select, which is why this is safe.
assert all(e.name.startswith("__POLARS_CSER_0x") for e in exprs)
return df.with_columns(columns, stream=df.stream)
| HStack |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 20136,
"end": 21836
} | class ____(Flag, metaclass=OptionType):
"""``method`` flag to polynomial manipulation functions. """
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.keys():
try:
if Options.__options__[arg].is_Flag and arg not in flags:
raise FlagError(
"'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
Options._init_dependencies_order()
| Method |
python | ray-project__ray | python/ray/llm/_internal/batch/processor/base.py | {
"start": 16346,
"end": 19309
} | class ____:
"""Build a processor based on the configuration."""
_registry: Dict[str, Callable] = {}
@classmethod
def register(cls, config_type: Type[ProcessorConfig], builder: Callable) -> None:
"""A decorator to associate a particular pipeline config
with its build function.
"""
type_name = config_type.__name__
if type_name in cls._registry:
raise ValueError(f"Processor config type {type_name} already registered.")
cls._registry[type_name] = builder
@classmethod
def clear_registry(cls) -> None:
"""Clear the processor builder registry."""
cls._registry.clear()
@classmethod
def validate_builder_kwargs(cls, builder_kwargs: Optional[Dict[str, Any]]) -> None:
"""Validate builder kwargs for conflicts with reserved keys.
Args:
builder_kwargs: Optional additional kwargs to pass to the processor builder
function.
Raises:
ValueError: If builder_kwargs contains reserved keys that conflict with
explicit arguments.
"""
if builder_kwargs is not None:
# Check for conflicts with explicitly passed arguments
reserved_keys = {
"preprocess",
"postprocess",
"preprocess_map_kwargs",
"postprocess_map_kwargs",
}
conflicting_keys = reserved_keys & builder_kwargs.keys()
if conflicting_keys:
raise ValueError(
f"builder_kwargs cannot contain {conflicting_keys} as these are "
"passed as explicit arguments to build_llm_processor. "
"Please pass these directly instead of in builder_kwargs."
)
@classmethod
def build(
cls,
config: ProcessorConfig,
override_stage_config_fn: Optional[Callable] = None,
**kwargs,
) -> Processor:
"""Build a processor.
Args:
config: The processor config.
override_stage_config_fn: Custom stages configurations.
**kwargs: Additional keyword arguments to pass through to the
registered builder function. The builder function must accept
these kwargs in its signature, otherwise a TypeError will be raised.
Returns:
The built processor.
"""
type_name = type(config).__name__
if type_name not in cls._registry:
raise ValueError(
f"Processor config type {type_name} not registered. "
f"Available types: {cls._registry.keys()}"
)
processor = cls._registry[type_name](config, **kwargs)
if override_stage_config_fn is not None:
for name, stage in processor.stages.items():
override_stage_config_fn(name, stage)
return processor
| ProcessorBuilder |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial002_py310.py | {
"start": 86,
"end": 392
} | class ____(BaseModel):
model_config = {"extra": "forbid"}
host: str
save_data: bool
if_modified_since: str | None = None
traceparent: str | None = None
x_tag: list[str] = []
@app.get("/items/")
async def read_items(headers: CommonHeaders = Header()):
return headers
| CommonHeaders |
python | streamlit__streamlit | lib/streamlit/elements/widgets/selectbox.py | {
"start": 2110,
"end": 4908
} | class ____(Generic[T]):
options: Sequence[T]
formatted_options: list[str]
formatted_option_to_option_index: dict[str, int]
default_option_index: int | None
def __init__(
self,
options: Sequence[T],
*,
formatted_options: list[str],
formatted_option_to_option_index: dict[str, int],
default_option_index: int | None = None,
) -> None:
"""Initialize the SelectboxSerde.
We do not store an option_to_formatted_option mapping because the generic
options might not be hashable, which would raise a RuntimeError. So we do
two lookups: option -> index -> formatted_option[index].
Parameters
----------
options : Sequence[T]
The sequence of selectable options.
formatted_options : list[str]
The string representations of each option. The formatted_options correspond
to the options sequence by index.
formatted_option_to_option_index : dict[str, int]
A mapping from formatted option strings to their corresponding indices in
the options sequence.
default_option_index : int or None, optional
The index of the default option to use when no selection is made.
If None, no default option is selected.
"""
self.options = options
self.formatted_options = formatted_options
self.formatted_option_to_option_index = formatted_option_to_option_index
self.default_option_index = default_option_index
def serialize(self, v: T | str | None) -> str | None:
if v is None:
return None
if len(self.options) == 0:
return ""
# we don't check for isinstance(v, str) because this could lead to wrong
# results if v is a string that is part of the options itself as it would
# skip formatting in that case
try:
option_index = index_(self.options, v)
return self.formatted_options[option_index]
except ValueError:
# we know that v is a string, otherwise it would have been found in the
# options
return cast("str", v)
def deserialize(self, ui_value: str | None) -> T | str | None:
# check if the option is pointing to a generic option type T,
# otherwise return the option itself
if ui_value is None:
return (
self.options[self.default_option_index]
if self.default_option_index is not None and len(self.options) > 0
else None
)
option_index = self.formatted_option_to_option_index.get(ui_value)
return self.options[option_index] if option_index is not None else ui_value
| SelectboxSerde |
python | docker__docker-py | tests/integration/models_containers_test.py | {
"start": 11572,
"end": 21741
} | class ____(BaseIntegrationTest):
def test_commit(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test'",
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
image = container.commit()
assert client.containers.run(
image.id, "cat /test", remove=True
) == b"hello\n"
def test_diff(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "touch /test", detach=True)
self.tmp_containers.append(container.id)
container.wait()
assert container.diff() == [{'Path': '/test', 'Kind': 1}]
def test_exec_run_success(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
)
self.tmp_containers.append(container.id)
exec_output = container.exec_run("cat /test")
assert exec_output[0] == 0
assert exec_output[1] == b"hello\n"
def test_exec_run_error_code_from_exec(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'sleep 20'", detach=True
)
self.tmp_containers.append(container.id)
exec_output = container.exec_run("sh -c 'exit 42'")
assert exec_output[0] == 42
def test_exec_run_failed(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'sleep 60'", detach=True
)
self.tmp_containers.append(container.id)
exec_output = container.exec_run("non-existent")
# older versions of docker return `126` in the case that an exec cannot
# be started due to a missing executable. We're fixing this for the
# future, so accept both for now.
assert exec_output[0] == 127 or exec_output[0] == 126
def test_kill(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
while container.status != 'running':
container.reload()
assert container.status == 'running'
container.kill()
container.reload()
assert container.status == 'exited'
def test_logs(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello world",
detach=True)
self.tmp_containers.append(container.id)
container.wait()
assert container.logs() == b"hello world\n"
def test_pause(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
container.pause()
container.reload()
assert container.status == "paused"
container.unpause()
container.reload()
assert container.status == "running"
def test_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", detach=True)
self.tmp_containers.append(container.id)
assert container.id in [c.id for c in client.containers.list(all=True)]
container.wait()
container.remove()
containers = client.containers.list(all=True)
assert container.id not in [c.id for c in containers]
def test_rename(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", name="test1",
detach=True)
self.tmp_containers.append(container.id)
assert container.name == "test1"
container.rename("test2")
container.reload()
assert container.name == "test2"
def test_restart(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
container.restart()
container.reload()
second_started_at = container.attrs['State']['StartedAt']
assert first_started_at != second_started_at
def test_start(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.create("alpine", "sleep 50", detach=True)
self.tmp_containers.append(container.id)
assert container.status == "created"
container.start()
container.reload()
assert container.status == "running"
def test_stats(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
stats = container.stats(stream=False)
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
assert key in stats
def test_ports_target_none(self):
client = docker.from_env(version=TEST_API_VERSION)
ports = None
target_ports = {'2222/tcp': ports}
container = client.containers.run(
"alpine", "sleep 100", detach=True,
ports=target_ports
)
self.tmp_containers.append(container.id)
container.reload() # required to get auto-assigned ports
actual_ports = container.ports
assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
for target_client, target_host in target_ports.items():
for actual_port in actual_ports[target_client]:
actual_keys = sorted(actual_port.keys())
assert sorted(['HostIp', 'HostPort']) == actual_keys
assert target_host is ports
assert int(actual_port['HostPort']) > 0
client.close()
def test_ports_target_tuple(self):
client = docker.from_env(version=TEST_API_VERSION)
ports = ('127.0.0.1', 1111)
target_ports = {'2222/tcp': ports}
container = client.containers.run(
"alpine", "sleep 100", detach=True,
ports=target_ports
)
self.tmp_containers.append(container.id)
container.reload() # required to get auto-assigned ports
actual_ports = container.ports
assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
for target_client, target_host in target_ports.items():
for actual_port in actual_ports[target_client]:
actual_keys = sorted(actual_port.keys())
assert sorted(['HostIp', 'HostPort']) == actual_keys
assert target_host == ports
assert int(actual_port['HostPort']) > 0
client.close()
def test_ports_target_list(self):
client = docker.from_env(version=TEST_API_VERSION)
ports = [1234, 4567]
target_ports = {'2222/tcp': ports}
container = client.containers.run(
"alpine", "sleep 100", detach=True,
ports=target_ports
)
self.tmp_containers.append(container.id)
container.reload() # required to get auto-assigned ports
actual_ports = container.ports
assert sorted(target_ports.keys()) == sorted(actual_ports.keys())
for target_client, target_host in target_ports.items():
for actual_port in actual_ports[target_client]:
actual_keys = sorted(actual_port.keys())
assert sorted(['HostIp', 'HostPort']) == actual_keys
assert target_host == ports
assert int(actual_port['HostPort']) > 0
client.close()
def test_stop(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "top", detach=True)
self.tmp_containers.append(container.id)
assert container.status in ("running", "created")
container.stop(timeout=2)
container.reload()
assert container.status == "exited"
def test_top(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True)
self.tmp_containers.append(container.id)
top = container.top()
assert len(top['Processes']) == 1
assert 'sleep 60' in top['Processes'][0]
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True,
cpu_shares=2)
self.tmp_containers.append(container.id)
assert container.attrs['HostConfig']['CpuShares'] == 2
container.update(cpu_shares=3)
container.reload()
assert container.attrs['HostConfig']['CpuShares'] == 3
def test_wait(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sh -c 'exit 0'",
detach=True)
self.tmp_containers.append(container.id)
assert container.wait()['StatusCode'] == 0
container = client.containers.run("alpine", "sh -c 'exit 1'",
detach=True)
self.tmp_containers.append(container.id)
assert container.wait()['StatusCode'] == 1
def test_create_with_volume_driver(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.create(
'alpine',
'sleep 300',
volume_driver='foo'
)
self.tmp_containers.append(container.id)
assert container.attrs['HostConfig']['VolumeDriver'] == 'foo'
| ContainerTest |
python | jupyterlab__jupyterlab | packages/extensionmanager-extension/examples/listings/main.py | {
"start": 555,
"end": 1130
} | class ____(LabApp):
base_url = "/"
default_url = Unicode("/lab", help="The default URL to redirect to from `/`")
def init_webapp(self):
"""initialize tornado webapp and httpserver."""
super().init_webapp()
default_handlers = [
(
ujoin(self.base_url, r"/listings/(.*)"),
FileFindHandler,
{"path": os.path.join(HERE, "list")},
)
]
self.web_app.add_handlers(".*$", default_handlers)
if __name__ == "__main__":
ListingsApp.launch_instance()
| ListingsApp |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_dynamodb.py | {
"start": 1175,
"end": 5612
} | class ____:
@mock_aws
def test_get_conn_returns_a_boto3_connection(self):
hook = DynamoDBHook(aws_conn_id="aws_default")
conn = hook.get_conn()
assert conn is not None
assert conn.__class__.__name__ == "dynamodb.ServiceResource"
@mock_aws
def test_get_client_from_dynamodb_ressource(self):
hook = DynamoDBHook(aws_conn_id="aws_default")
client = hook.client
assert client.__class__.__name__ == "DynamoDB"
@mock_aws
def test_insert_batch_items_dynamodb_table(self):
hook = DynamoDBHook(
aws_conn_id="aws_default", table_name="test_airflow", table_keys=["id"], region_name="us-east-1"
)
# this table needs to be created in production
hook.get_conn().create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
table = hook.get_conn().Table("test_airflow")
items = [{"id": str(uuid.uuid4()), "name": "airflow"} for _ in range(10)]
hook.write_batch_data(items)
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 10
@mock.patch("pathlib.Path.exists", return_value=True)
def test_waiter_path_generated_from_resource_type(self, _):
hook = DynamoDBHook(aws_conn_id="aws_default")
path = hook.waiter_path
assert path.as_uri().endswith("/airflow/providers/amazon/aws/waiters/dynamodb.json")
@pytest.mark.parametrize(
("response", "status", "error"),
[
pytest.param(
{"ImportTableDescription": {"ImportStatus": "COMPLETED"}}, "COMPLETED", False, id="complete"
),
pytest.param(
{
"ImportTableDescription": {
"ImportStatus": "CANCELLING",
"FailureCode": "Failure1",
"FailureMessage": "Message",
}
},
"CANCELLING",
True,
id="cancel",
),
pytest.param(
{"ImportTableDescription": {"ImportStatus": "IN_PROGRESS"}},
"IN_PROGRESS",
False,
id="progress",
),
],
)
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_get_s3_import_status(self, mock_make_api_call, response, status, error):
mock_make_api_call.return_value = response
hook = DynamoDBHook(aws_conn_id="aws_default")
sta, code, msg = hook.get_import_status(import_arn=TEST_IMPORT_ARN)
mock_make_api_call.assert_called_once_with("DescribeImport", {"ImportArn": TEST_IMPORT_ARN})
assert sta == status
if error:
assert code == "Failure1"
assert msg == "Message"
else:
assert code is None
assert msg is None
@pytest.mark.parametrize(
("effect", "error"),
[
pytest.param(
ClientError(
error_response={"Error": {"Message": "Error message", "Code": "GeneralException"}},
operation_name="UnitTest",
),
ClientError,
id="general-exception",
),
pytest.param(
ClientError(
error_response={"Error": {"Message": "Error message", "Code": "ImportNotFoundException"}},
operation_name="UnitTest",
),
AirflowException,
id="not-found-exception",
),
],
)
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_get_s3_import_status_with_error(self, mock_make_api_call, effect, error):
mock_make_api_call.side_effect = effect
hook = DynamoDBHook(aws_conn_id="aws_default")
with pytest.raises(error):
hook.get_import_status(import_arn=TEST_IMPORT_ARN)
def test_hook_has_import_waiters(self):
hook = DynamoDBHook(aws_conn_id="aws_default")
waiter = hook.get_waiter("import_table")
assert waiter is not None
| TestDynamoDBHook |
python | ray-project__ray | rllib/env/wrappers/atari_wrappers.py | {
"start": 5106,
"end": 5611
} | class ____(gym.ObservationWrapper):
def __init__(self, env):
"""No stacking. Trajectory View API takes care of this."""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
assert shp[2] == 1
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1]), dtype=env.observation_space.dtype
)
def observation(self, observation):
return np.squeeze(observation, axis=-1)
@PublicAPI
| FrameStackTrajectoryView |
python | ray-project__ray | python/ray/data/_internal/execution/operators/actor_pool_map_operator.py | {
"start": 27488,
"end": 31426
} | class ____(_ActorTaskSelector):
def __init__(self, actor_pool: "_ActorPool"):
super().__init__(actor_pool)
def select_actors(
self, input_queue: BundleQueue, actor_locality_enabled: bool
) -> Iterator[Tuple[RefBundle, ActorHandle]]:
"""Picks actors for task submission based on busyness and locality."""
if not self._actor_pool.running_actors():
# Actor pool is empty or all actors are still pending.
return
while input_queue:
# Filter out actors that are invalid, i.e. actors with number of tasks in
# flight >= _max_tasks_in_flight or actor_state is not ALIVE.
bundle = input_queue.peek_next()
valid_actors = [
actor
for actor in self._actor_pool.running_actors()
if self._actor_pool.running_actors()[actor].num_tasks_in_flight
< self._actor_pool.max_tasks_in_flight_per_actor()
and not self._actor_pool.running_actors()[actor].is_restarting
]
if not valid_actors:
# All actors are at capacity or actor state is not ALIVE.
return
# Rank all valid actors
ranks = self._rank_actors(
valid_actors, bundle if actor_locality_enabled else None
)
assert len(ranks) == len(
valid_actors
), f"{len(ranks)} != {len(valid_actors)}"
# Pick the actor with the highest rank (lower value, higher rank)
target_actor_idx = min(range(len(valid_actors)), key=lambda idx: ranks[idx])
target_actor = valid_actors[target_actor_idx]
# We remove the bundle and yield the actor to the operator. We do not use pop()
# in case the queue has changed the order of the bundles.
input_queue.remove(bundle)
yield bundle, target_actor
def _rank_actors(
self,
actors: List[ActorHandle],
bundle: Optional[RefBundle],
) -> List[Tuple[int, int]]:
"""Return ranks for each actor based on node affinity with the blocks in the provided
bundle and current Actor's load.
The rank for each actor is a tuple of
1. Locality rank: a rank of a node Actor is scheduled on determined based on
the ranking of preferred locations for provided ``RefBundle`` (defined by
``RefBundle.get_preferred_locations``). Lower is better.
2. Number of tasks currently executed by Actor. Lower is better.
Args:
actors: List of actors to rank
bundle: Optional bundle whose locality preferences should be considered
Returns:
List of (locality_rank, num_tasks) tuples, one per input actor
"""
locs_priorities = (
{
# NOTE: We're negating total bytes to maintain an invariant
# of the rank used -- lower value corresponding to a higher rank
node_id: -total_bytes
for node_id, total_bytes in bundle.get_preferred_object_locations().items()
}
if bundle is not None
else {}
)
# NOTE: Ranks are ordered in descending order (ie rank[0] is the highest
# and rank[-1] is the lowest)
ranks = [
(
# Priority/rank of the location (based on the object size).
# Defaults to int32 max value (ie no rank)
locs_priorities.get(
self._actor_pool.running_actors()[actor].actor_location, INT32_MAX
),
# Number of tasks currently in flight at the given actor
self._actor_pool.running_actors()[actor].num_tasks_in_flight,
)
for actor in actors
]
return ranks
| _ActorTaskSelectorImpl |
python | scikit-learn__scikit-learn | sklearn/multioutput.py | {
"start": 21695,
"end": 30671
} | class ____(BaseEstimator, metaclass=ABCMeta):
_parameter_constraints: dict = {
"base_estimator": [
HasMethods(["fit", "predict"]),
StrOptions({"deprecated"}),
],
"estimator": [
HasMethods(["fit", "predict"]),
Hidden(None),
],
"order": ["array-like", StrOptions({"random"}), None],
"cv": ["cv_object", StrOptions({"prefit"})],
"random_state": ["random_state"],
"verbose": ["boolean"],
}
# TODO(1.9): Remove base_estimator
def __init__(
self,
estimator=None,
*,
order=None,
cv=None,
random_state=None,
verbose=False,
base_estimator="deprecated",
):
self.estimator = estimator
self.base_estimator = base_estimator
self.order = order
self.cv = cv
self.random_state = random_state
self.verbose = verbose
# TODO(1.9): This is a temporary getter method to validate input wrt deprecation.
# It was only included to avoid relying on the presence of self.estimator_
def _get_estimator(self):
"""Get and validate estimator."""
if self.estimator is not None and (self.base_estimator != "deprecated"):
raise ValueError(
"Both `estimator` and `base_estimator` are provided. You should only"
" pass `estimator`. `base_estimator` as a parameter is deprecated in"
" version 1.7, and will be removed in version 1.9."
)
if self.base_estimator != "deprecated":
warning_msg = (
"`base_estimator` as an argument was deprecated in 1.7 and will be"
" removed in 1.9. Use `estimator` instead."
)
warnings.warn(warning_msg, FutureWarning)
return self.base_estimator
else:
return self.estimator
def _log_message(self, *, estimator_idx, n_estimators, processing_msg):
if not self.verbose:
return None
return f"({estimator_idx} of {n_estimators}) {processing_msg}"
def _get_predictions(self, X, *, output_method):
"""Get predictions for each model in the chain."""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse=True, reset=False)
Y_output_chain = np.zeros((X.shape[0], len(self.estimators_)))
Y_feature_chain = np.zeros((X.shape[0], len(self.estimators_)))
# `RegressorChain` does not have a `chain_method_` parameter so we
# default to "predict"
chain_method = getattr(self, "chain_method_", "predict")
hstack = sp.hstack if sp.issparse(X) else np.hstack
for chain_idx, estimator in enumerate(self.estimators_):
previous_predictions = Y_feature_chain[:, :chain_idx]
# if `X` is a scipy sparse dok_array, we convert it to a sparse
# coo_array format before hstacking, it's faster; see
# https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039:
if sp.issparse(X) and not sp.isspmatrix(X) and X.format == "dok":
X = sp.coo_array(X)
X_aug = hstack((X, previous_predictions))
feature_predictions, _ = _get_response_values(
estimator,
X_aug,
response_method=chain_method,
)
Y_feature_chain[:, chain_idx] = feature_predictions
output_predictions, _ = _get_response_values(
estimator,
X_aug,
response_method=output_method,
)
Y_output_chain[:, chain_idx] = output_predictions
inv_order = np.empty_like(self.order_)
inv_order[self.order_] = np.arange(len(self.order_))
Y_output = Y_output_chain[:, inv_order]
return Y_output
@abstractmethod
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method of each step.
.. versionadded:: 0.23
Returns
-------
self : object
Returns a fitted instance.
"""
X, Y = validate_data(self, X, Y, multi_output=True, accept_sparse=True)
random_state = check_random_state(self.random_state)
self.order_ = self.order
if isinstance(self.order_, tuple):
self.order_ = np.array(self.order_)
if self.order_ is None:
self.order_ = np.array(range(Y.shape[1]))
elif isinstance(self.order_, str):
if self.order_ == "random":
self.order_ = random_state.permutation(Y.shape[1])
elif sorted(self.order_) != list(range(Y.shape[1])):
raise ValueError("invalid order")
self.estimators_ = [clone(self._get_estimator()) for _ in range(Y.shape[1])]
if self.cv is None:
Y_pred_chain = Y[:, self.order_]
if sp.issparse(X):
X_aug = sp.hstack((X, Y_pred_chain), format="lil")
X_aug = X_aug.tocsr()
else:
X_aug = np.hstack((X, Y_pred_chain))
elif sp.issparse(X):
# TODO: remove this condition check when the minimum supported scipy version
# doesn't support sparse matrices anymore
if not sp.isspmatrix(X):
# if `X` is a scipy sparse dok_array, we convert it to a sparse
# coo_array format before hstacking, it's faster; see
# https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039:
if X.format == "dok":
X = sp.coo_array(X)
# in case that `X` is a sparse array we create `Y_pred_chain` as a
# sparse array format:
Y_pred_chain = sp.coo_array((X.shape[0], Y.shape[1]))
else:
Y_pred_chain = sp.coo_matrix((X.shape[0], Y.shape[1]))
X_aug = sp.hstack((X, Y_pred_chain), format="lil")
else:
Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
X_aug = np.hstack((X, Y_pred_chain))
del Y_pred_chain
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch(estimator=Bunch(fit=fit_params))
if hasattr(self, "chain_method"):
chain_method = _check_response_method(
self._get_estimator(),
self.chain_method,
).__name__
self.chain_method_ = chain_method
else:
# `RegressorChain` does not have a `chain_method` parameter
chain_method = "predict"
for chain_idx, estimator in enumerate(self.estimators_):
message = self._log_message(
estimator_idx=chain_idx + 1,
n_estimators=len(self.estimators_),
processing_msg=f"Processing order {self.order_[chain_idx]}",
)
y = Y[:, self.order_[chain_idx]]
with _print_elapsed_time("Chain", message):
estimator.fit(
X_aug[:, : (X.shape[1] + chain_idx)],
y,
**routed_params.estimator.fit,
)
if self.cv is not None and chain_idx < len(self.estimators_) - 1:
col_idx = X.shape[1] + chain_idx
cv_result = cross_val_predict(
self._get_estimator(),
X_aug[:, :col_idx],
y=y,
cv=self.cv,
method=chain_method,
)
# `predict_proba` output is 2D, we use only output for classes[-1]
if cv_result.ndim > 1:
cv_result = cv_result[:, 1]
if sp.issparse(X_aug):
X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
else:
X_aug[:, col_idx] = cv_result
return self
def predict(self, X):
"""Predict on the data matrix X using the ClassifierChain model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
Y_pred : array-like of shape (n_samples, n_classes)
The predicted values.
"""
return self._get_predictions(X, output_method="predict")
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse
return tags
| _BaseChain |
python | kamyu104__LeetCode-Solutions | Python/populating-next-right-pointers-in-each-node-ii.py | {
"start": 54,
"end": 241
} | class ____(object):
def __init__(self, val=0, left=None, right=None, next=None):
self.val = val
self.left = left
self.right = right
self.next = next
| Node |
python | django__django | tests/forms_tests/field_tests/test_emailfield.py | {
"start": 168,
"end": 3087
} | class ____(FormFieldAssertionsMixin, SimpleTestCase):
def test_emailfield_1(self):
f = EmailField()
self.assertEqual(f.max_length, 320)
self.assertWidgetRendersTo(
f, '<input type="email" name="f" id="id_f" maxlength="320" required>'
)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual("person@example.com", f.clean("person@example.com"))
with self.assertRaisesMessage(
ValidationError, "'Enter a valid email address.'"
):
f.clean("foo")
self.assertEqual(
"local@domain.with.idn.xyz\xe4\xf6\xfc\xdfabc.part.com",
f.clean("local@domain.with.idn.xyzäöüßabc.part.com"),
)
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for runaway regex security problem. This will take a long time
# if the security fix isn't in place.
addr = "viewx3dtextx26qx3d@yahoo.comx26latlngx3d15854521645943074058"
with self.assertRaisesMessage(ValidationError, "Enter a valid email address."):
f.clean(addr)
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual("", f.clean(""))
self.assertEqual("", f.clean(None))
self.assertEqual("person@example.com", f.clean("person@example.com"))
self.assertEqual(
"example@example.com", f.clean(" example@example.com \t \t ")
)
with self.assertRaisesMessage(
ValidationError, "'Enter a valid email address.'"
):
f.clean("foo")
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(
f,
'<input id="id_f" type="email" name="f" maxlength="15" minlength="10" '
"required>",
)
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at least 10 characters (it has 9).'",
):
f.clean("a@foo.com")
self.assertEqual("alf@foo.com", f.clean("alf@foo.com"))
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at most 15 characters (it has 20).'",
):
f.clean("alf123456788@foo.com")
def test_emailfield_strip_on_none_value(self):
f = EmailField(required=False, empty_value=None)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
def test_emailfield_unable_to_set_strip_kwarg(self):
msg = "got multiple values for keyword argument 'strip'"
with self.assertRaisesMessage(TypeError, msg):
EmailField(strip=False)
| EmailFieldTest |
python | zarr-developers__zarr-python | src/zarr/testing/stateful.py | {
"start": 19480,
"end": 24927
} | class ____(RuleBasedStateMachine):
""" "
Zarr store state machine
This is a subclass of a Hypothesis RuleBasedStateMachine.
It is testing a framework to ensure that the state of a Zarr store matches
an expected state after a set of random operations. It contains a store
(currently, a Zarr MemoryStore) and a model, a simplified version of a
zarr store (in this case, a dict). It also contains rules which represent
actions that can be applied to a zarr store. Rules apply an action to both
the store and the model, and invariants assert that the state of the model
is equal to the state of the store. Hypothesis then generates sequences of
rules, running invariants after each rule. It raises an error if a sequence
produces discontinuity between state of the model and state of the store
(ie. an invariant is violated).
https://hypothesis.readthedocs.io/en/latest/stateful.html
"""
def __init__(self, store: Store) -> None:
super().__init__()
self.model: dict[str, Buffer] = {}
self.store = SyncStoreWrapper(store)
self.prototype = default_buffer_prototype()
@initialize()
def init_store(self) -> None:
self.store.clear()
@rule(key=zarr_keys(), data=st.binary(min_size=0, max_size=MAX_BINARY_SIZE))
def set(self, key: str, data: bytes) -> None:
note(f"(set) Setting {key!r} with {data!r}")
assert not self.store.read_only
data_buf = cpu.Buffer.from_bytes(data)
self.store.set(key, data_buf)
self.model[key] = data_buf
@precondition(lambda self: len(self.model.keys()) > 0)
@rule(key=zarr_keys(), data=st.data())
def get(self, key: str, data: DataObject) -> None:
key = data.draw(
st.sampled_from(sorted(self.model.keys()))
) # hypothesis wants to sample from sorted list
note("(get)")
store_value = self.store.get(key, self.prototype)
# to bytes here necessary because data_buf set to model in set()
assert self.model[key] == store_value
@rule(key=zarr_keys(), data=st.data())
def get_invalid_zarr_keys(self, key: str, data: DataObject) -> None:
note("(get_invalid)")
assume(key not in self.model)
assert self.store.get(key, self.prototype) is None
@precondition(lambda self: len(self.model.keys()) > 0)
@rule(data=st.data())
def get_partial_values(self, data: DataObject) -> None:
key_range = data.draw(
key_ranges(keys=st.sampled_from(sorted(self.model.keys())), max_size=MAX_BINARY_SIZE)
)
note(f"(get partial) {key_range=}")
obs_maybe = self.store.get_partial_values(key_range, self.prototype)
observed = []
for obs in obs_maybe:
assert obs is not None
observed.append(obs.to_bytes())
model_vals_ls = []
for key, byte_range in key_range:
start = byte_range.start
stop = byte_range.end
model_vals_ls.append(self.model[key][start:stop])
assert all(
obs == exp.to_bytes() for obs, exp in zip(observed, model_vals_ls, strict=True)
), (
observed,
model_vals_ls,
)
@precondition(lambda self: self.store.supports_deletes)
@precondition(lambda self: len(self.model.keys()) > 0)
@rule(data=st.data())
def delete(self, data: DataObject) -> None:
key = data.draw(st.sampled_from(sorted(self.model.keys())))
note(f"(delete) Deleting {key=}")
self.store.delete(key)
del self.model[key]
@rule()
def clear(self) -> None:
assert not self.store.read_only
note("(clear)")
self.store.clear()
self.model.clear()
assert self.store.is_empty("")
assert len(self.model.keys()) == len(list(self.store.list())) == 0
@rule()
# Local store can be non-empty when there are subdirectories but no files
@precondition(lambda self: not isinstance(self.store.store, LocalStore))
def is_empty(self) -> None:
note("(is_empty)")
# make sure they either both are or both aren't empty (same state)
assert self.store.is_empty("") == (not self.model)
@rule(key=zarr_keys())
def exists(self, key: str) -> None:
note("(exists)")
assert self.store.exists(key) == (key in self.model)
@invariant()
def check_paths_equal(self) -> None:
note("Checking that paths are equal")
paths = sorted(self.store.list())
assert sorted(self.model.keys()) == paths
@invariant()
def check_vals_equal(self) -> None:
note("Checking values equal")
for key, val in self.model.items():
store_item = self.store.get(key, self.prototype)
assert val == store_item
@invariant()
def check_num_zarr_keys_equal(self) -> None:
note("check num zarr_keys equal")
assert len(self.model) == len(list(self.store.list()))
@invariant()
def check_zarr_keys(self) -> None:
keys = list(self.store.list())
if not keys:
assert self.store.is_empty("") is True
else:
assert self.store.is_empty("") is False
for key in keys:
assert self.store.exists(key) is True
note("checking keys / exists / empty")
| ZarrStoreStateMachine |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/GraphicsItem.py | {
"start": 1096,
"end": 22081
} | class ____(object):
"""
**Bases:** :class:`object`
Abstract class providing useful methods to GraphicsObject and GraphicsWidget.
(This is required because we cannot have multiple inheritance with QObject subclasses.)
A note about Qt's GraphicsView framework:
The GraphicsView system places a lot of emphasis on the notion that the graphics within the scene should be device independent--you should be able to take the same graphics and display them on screens of different resolutions, printers, export to SVG, etc. This is nice in principle, but causes me a lot of headache in practice. It means that I have to circumvent all the device-independent expectations any time I want to operate in pixel coordinates rather than arbitrary scene coordinates. A lot of the code in GraphicsItem is devoted to this task--keeping track of view widgets and device transforms, computing the size and shape of a pixel in local item coordinates, etc. Note that in item coordinates, a pixel does not have to be square or even rectangular, so just asking how to increase a bounding rect by 2px can be a rather complex task.
"""
_pixelVectorGlobalCache = LRU(100)
def __init__(self):
self._pixelVectorCache = [None, None]
self._viewWidget = None
self._viewBox = None
self._connectedView = None
self._exportOpts = False ## If False, not currently exporting. Otherwise, contains dict of export options.
self._cachedView = None
def getViewWidget(self):
"""
Return the view widget for this item.
If the scene has multiple views, only the first view is returned.
The return value is cached; clear the cached value with forgetViewWidget().
If the view has been deleted by Qt, return None.
"""
if self._viewWidget is None:
scene = self.scene()
if scene is None:
return None
views = scene.views()
if len(views) < 1:
return None
self._viewWidget = weakref.ref(self.scene().views()[0])
v = self._viewWidget()
if v is not None and not isQObjectAlive(v):
return None
return v
def forgetViewWidget(self):
self._viewWidget = None
def getViewBox(self):
"""
Return the first ViewBox or GraphicsView which bounds this item's visible space.
If this item is not contained within a ViewBox, then the GraphicsView is returned.
If the item is contained inside nested ViewBoxes, then the inner-most ViewBox is returned.
The result is cached; clear the cache with forgetViewBox()
"""
if self._viewBox is None:
p = self
while True:
try:
p = p.parentItem()
except RuntimeError: ## sometimes happens as items are being removed from a scene and collected.
return None
if p is None:
vb = self.getViewWidget()
if vb is None:
return None
else:
self._viewBox = weakref.ref(vb)
break
if hasattr(p, 'implements') and p.implements('ViewBox'):
self._viewBox = weakref.ref(p)
break
return self._viewBox() ## If we made it this far, _viewBox is definitely not None
def forgetViewBox(self):
self._viewBox = None
def deviceTransform_(self):
"""
Return the transform that converts local item coordinates to device coordinates (usually pixels).
"""
if (view := self.getViewWidget()) is None:
return None
viewportTransform = view.viewportTransform()
dt = self.deviceTransform(viewportTransform)
if dt.determinant() == 0: ## occurs when deviceTransform is invalid because widget has not been displayed
return None
else:
return dt
def viewTransform(self):
"""Return the transform that maps from local coordinates to the item's ViewBox coordinates
If there is no ViewBox, return the scene transform.
Returns None if the item does not have a view."""
view = self.getViewBox()
if view is None:
return None
if hasattr(view, 'implements') and view.implements('ViewBox'):
return self.itemTransform(view.innerSceneItem())[0]
else:
return self.sceneTransform()
def getBoundingParents(self):
"""Return a list of parents to this item that have child clipping enabled."""
p = self
parents = []
while True:
p = p.parentItem()
if p is None:
break
if p.flags() & self.GraphicsItemFlag.ItemClipsChildrenToShape:
parents.append(p)
return parents
def viewRect(self):
"""Return the visible bounds of this item's ViewBox or GraphicsWidget,
in the local coordinate system of the item."""
if self._cachedView is not None:
return self._cachedView
# Note that in cases of early returns here, the view cache stays empty (None).
view = self.getViewBox()
if view is None:
return None
bounds = self.mapRectFromView(view.viewRect())
if bounds is None:
return None
bounds = bounds.normalized()
self._cachedView = bounds
## nah.
#for p in self.getBoundingParents():
#bounds &= self.mapRectFromScene(p.sceneBoundingRect())
return bounds
def pixelVectors(self, direction=None):
"""Return vectors in local coordinates representing the width and height of a view pixel.
If direction is specified, then return vectors parallel and orthogonal to it.
Return (None, None) if pixel size is not yet defined (usually because the item has not yet been displayed)
or if pixel size is below floating-point precision limit.
"""
## This is an expensive function that gets called very frequently.
## We have two levels of cache to try speeding things up.
if (dt := self.deviceTransform_()) is None:
return None, None
## Ignore translation. If the translation is much larger than the scale
## (such as when looking at unix timestamps), we can get floating-point errors.
dt.setMatrix(dt.m11(), dt.m12(), 0, dt.m21(), dt.m22(), 0, 0, 0, 1)
if direction is None:
direction = QtCore.QPointF(1, 0)
elif direction.manhattanLength() == 0:
raise Exception("Cannot compute pixel length for 0-length vector.")
key = (dt.m11(), dt.m21(), dt.m12(), dt.m22(), direction.x(), direction.y())
## check local cache
if key == self._pixelVectorCache[0]:
return tuple(map(Point, self._pixelVectorCache[1])) ## return a *copy*
## check global cache
pv = self._pixelVectorGlobalCache.get(key, None)
if pv is not None:
self._pixelVectorCache = [key, pv]
return tuple(map(Point,pv)) ## return a *copy*
## attempt to re-scale direction vector to fit within the precision of the coordinate system
## Here's the problem: we need to map the vector 'direction' from the item to the device, via transform 'dt'.
## In some extreme cases, this mapping can fail unless the length of 'direction' is cleverly chosen.
## Example:
## dt = [ 1, 0, 2
## 0, 2, 1e20
## 0, 0, 1 ]
## Then we map the origin (0,0) and direction (0,1) and get:
## o' = 2,1e20
## d' = 2,1e20 <-- should be 1e20+2, but this can't be represented with a 32-bit float
##
## |o' - d'| == 0 <-- this is the problem.
## Perhaps the easiest solution is to exclude the transformation column from dt. Does this cause any other problems?
#if direction.x() == 0:
#r = abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))
##r = 1.0/(abs(dt.m12()) + abs(dt.m22()))
#elif direction.y() == 0:
#r = abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))
##r = 1.0/(abs(dt.m11()) + abs(dt.m21()))
#else:
#r = ((abs(dt.m32())/(abs(dt.m12()) + abs(dt.m22()))) * (abs(dt.m31())/(abs(dt.m11()) + abs(dt.m21()))))**0.5
#if r == 0:
#r = 1. ## shouldn't need to do this; probably means the math above is wrong?
#directionr = direction * r
directionr = direction
## map direction vector onto device
#viewDir = Point(dt.map(directionr) - dt.map(Point(0,0)))
#mdirection = dt.map(directionr)
dirLine = QtCore.QLineF(QtCore.QPointF(0,0), directionr)
viewDir = dt.map(dirLine)
if viewDir.length() == 0:
return None, None ## pixel size cannot be represented on this scale
## get unit vector and orthogonal vector (length of pixel)
#orthoDir = Point(viewDir[1], -viewDir[0]) ## orthogonal to line in pixel-space
try:
normView = viewDir.unitVector()
#normView = viewDir.norm() ## direction of one pixel orthogonal to line
normOrtho = normView.normalVector()
#normOrtho = orthoDir.norm()
except:
raise Exception("Invalid direction %s" %directionr)
## map back to item
dti = fn.invertQTransform(dt)
#pv = Point(dti.map(normView)-dti.map(Point(0,0))), Point(dti.map(normOrtho)-dti.map(Point(0,0)))
pv = Point(dti.map(normView).p2()), Point(dti.map(normOrtho).p2())
self._pixelVectorCache[1] = pv
self._pixelVectorCache[0] = key
self._pixelVectorGlobalCache[key] = pv
return self._pixelVectorCache[1]
def pixelLength(self, direction, ortho=False):
"""Return the length of one pixel in the direction indicated (in local coordinates)
If ortho=True, then return the length of one pixel orthogonal to the direction indicated.
Return None if pixel size is not yet defined (usually because the item has not yet been displayed).
"""
normV, orthoV = self.pixelVectors(direction)
if normV is None or orthoV is None:
return None
if ortho:
return orthoV.length()
return normV.length()
def pixelSize(self):
## deprecated
v = self.pixelVectors()
if v == (None, None):
return None, None
return (hypot(v[0].x(), v[0].y()), hypot(v[1].x(), v[1].y())) # lengths
def pixelWidth(self):
## deprecated
if (vt := self.deviceTransform_()) is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 1, 0)).length()
def pixelHeight(self):
## deprecated
if (vt := self.deviceTransform_()) is None:
return 0
vt = fn.invertQTransform(vt)
return vt.map(QtCore.QLineF(0, 0, 0, 1)).length()
#return Point(vt.map(QtCore.QPointF(0, 1))-vt.map(QtCore.QPointF(0, 0))).length()
def mapToDevice(self, obj):
"""
Return *obj* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
if (vt := self.deviceTransform_()) is None:
return None
return vt.map(obj)
def mapFromDevice(self, obj):
"""
Return *obj* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
if (vt := self.deviceTransform_()) is None:
return None
if isinstance(obj, QtCore.QPoint):
obj = QtCore.QPointF(obj)
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectToDevice(self, rect):
"""
Return *rect* mapped from local coordinates to device coordinates (pixels).
If there is no device mapping available, return None.
"""
if (vt := self.deviceTransform_()) is None:
return None
return vt.mapRect(rect)
def mapRectFromDevice(self, rect):
"""
Return *rect* mapped from device coordinates (pixels) to local coordinates.
If there is no device mapping available, return None.
"""
if (vt := self.deviceTransform_()) is None:
return None
vt = fn.invertQTransform(vt)
return vt.mapRect(rect)
def mapToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.map(obj)
def mapRectToView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
return vt.mapRect(obj)
def mapFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.map(obj)
def mapRectFromView(self, obj):
vt = self.viewTransform()
if vt is None:
return None
vt = fn.invertQTransform(vt)
return vt.mapRect(obj)
def viewPos(self):
return self.mapToView(self.mapFromParent(self.pos()))
def transformAngle(self, relativeItem=None):
"""Return the rotation produced by this item's transform (this assumes there is no shear in the transform)
If relativeItem is given, then the angle is determined relative to that item.
"""
if relativeItem is None:
relativeItem = self.parentItem()
tr = self.itemTransform(relativeItem)[0]
vec = tr.map(QtCore.QLineF(0,0,1,0))
return vec.angleTo(QtCore.QLineF(vec.p1(), vec.p1()+QtCore.QPointF(1,0)))
def changeParent(self):
"""Called when the item's parent has changed.
This method handles connecting / disconnecting from ViewBox signals
to make sure viewRangeChanged works properly. It should generally be
extended, not overridden."""
self._updateView()
def _updateView(self):
## called to see whether this item has a new view to connect to
## NOTE: This is called from GraphicsObject.itemChange or GraphicsWidget.itemChange.
if not hasattr(self, '_connectedView'):
# Happens when Python is shutting down.
return
## It is possible this item has moved to a different ViewBox or widget;
## clear out previously determined references to these.
self.forgetViewBox()
self.forgetViewWidget()
## check for this item's current viewbox or view widget
view = self.getViewBox()
#if view is None:
##print " no view"
#return
oldView = None
if self._connectedView is not None:
oldView = self._connectedView()
if view is oldView:
#print " already have view", view
return
## disconnect from previous view
if oldView is not None:
Device = 'Device' if hasattr(oldView, 'sigDeviceRangeChanged') else ''
for signal, slot in [(f'sig{Device}RangeChanged', self.viewRangeChanged),
(f'sig{Device}TransformChanged', self.viewTransformChanged)]:
try:
getattr(oldView, signal).disconnect(slot)
except (TypeError, AttributeError, RuntimeError):
# TypeError and RuntimeError are from pyqt and pyside, respectively
pass
self._connectedView = None
## connect to new view
if view is not None:
#print "connect:", self, view
if hasattr(view, 'sigDeviceRangeChanged'):
# connect signals from GraphicsView
view.sigDeviceRangeChanged.connect(self.viewRangeChanged)
view.sigDeviceTransformChanged.connect(self.viewTransformChanged)
else:
# connect signals from ViewBox
view.sigRangeChanged.connect(self.viewRangeChanged)
view.sigTransformChanged.connect(self.viewTransformChanged)
self._connectedView = weakref.ref(view)
self.viewRangeChanged()
self.viewTransformChanged()
## inform children that their view might have changed
self._replaceView(oldView)
self.viewChanged(view, oldView)
def viewChanged(self, view, oldView):
"""Called when this item's view has changed
(ie, the item has been added to or removed from a ViewBox)"""
def _replaceView(self, oldView, item=None):
if item is None:
item = self
for child in item.childItems():
if isinstance(child, GraphicsItem):
if child.getViewBox() is oldView:
child._updateView()
#self._replaceView(oldView, child)
else:
self._replaceView(oldView, child)
@QtCore.Slot()
def viewRangeChanged(self):
"""
Called whenever the view coordinates of the ViewBox containing this item have changed.
"""
# when this is called, _cachedView is not invalidated.
# this means that for functions overriding viewRangeChanged, viewRect() may be stale.
@QtCore.Slot()
def viewTransformChanged(self):
"""
Called whenever the transformation matrix of the view has changed.
For example, when the view range has changed or the view was resized.
Invalidates the viewRect cache.
"""
self._cachedView = None
def informViewBoundsChanged(self):
"""
Inform this item's container ViewBox that the bounds of this item have changed.
This is used by ViewBox to react if auto-range is enabled.
"""
view = self.getViewBox()
if view is not None and hasattr(view, 'implements') and view.implements('ViewBox'):
view.itemBoundsChanged(self) ## inform view so it can update its range if it wants
def childrenShape(self):
"""Return the union of the shapes of all descendants of this item in local coordinates."""
shapes = [self.mapFromItem(c, c.shape()) for c in self.allChildItems()]
return reduce(operator.add, shapes)
def allChildItems(self, root=None):
"""Return list of the entire item tree descending from this item."""
if root is None:
root = self
tree = []
for ch in root.childItems():
tree.append(ch)
tree.extend(self.allChildItems(ch))
return tree
def setExportMode(self, export, opts=None):
"""
This method is called by exporters to inform items that they are being drawn for export
with a specific set of options. Items access these via self._exportOptions.
When exporting is complete, _exportOptions is set to False.
"""
if opts is None:
opts = {}
if export:
self._exportOpts = opts
#if 'antialias' not in opts:
#self._exportOpts['antialias'] = True
else:
self._exportOpts = False
def getContextMenus(self, event):
return [self.getMenu()] if hasattr(self, "getMenu") else []
def generateSvg(
self,
nodes: dict[str, Element]
) -> Optional[tuple[Element, list[Element]]]:
"""Method to override to manually specify the SVG writer mechanism.
Parameters
----------
nodes
Dictionary keyed by the name of graphics items and the XML
representation of the the item that can be written as valid
SVG.
Returns
-------
tuple
First element is the top level group for this item. The
second element is a list of xml Elements corresponding to the
child nodes of the item.
None
Return None if no XML is needed for rendering
Raises
------
NotImplementedError
override method to implement in subclasses of GraphicsItem
See Also
--------
pyqtgraph.exporters.SVGExporter._generateItemSvg
The generic and default implementation
"""
raise NotImplementedError
| GraphicsItem |
python | readthedocs__readthedocs.org | readthedocs/api/v3/permissions.py | {
"start": 179,
"end": 1014
} | class ____(BasePermission):
"""
Check if the project being accessed has access to the Embed API.
The embedded API V3 allows getting content from external sites tha
aren't attached to a project. Those sites are restricted to the ones
from ``RTD_EMBED_API_EXTERNAL_DOMAINS``, so we just allow that.
"""
message = (
"Content embedding isn't available in your current plan. "
"Upgrade your subscription to enable this feature. "
"https://about.readthedocs.com/pricing/."
)
def has_permission(self, request, view):
project = view._get_project()
# The project is None when the is requesting a section from an external site.
if project and not get_feature(project, feature_type=TYPE_EMBED_API):
return False
return True
| HasEmbedAPIAccess |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 2432,
"end": 2535
} | class ____(Post):
class Meta:
proxy = True
ordering = ["title"]
| PostWithTitleOrdering |
python | doocs__leetcode | solution/2600-2699/2682.Find the Losers of the Circular Game/Solution.py | {
"start": 0,
"end": 283
} | class ____:
def circularGameLosers(self, n: int, k: int) -> List[int]:
vis = [False] * n
i, p = 0, 1
while not vis[i]:
vis[i] = True
i = (i + p * k) % n
p += 1
return [i + 1 for i in range(n) if not vis[i]]
| Solution |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 33700,
"end": 44588
} | class ____:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, *, inplace=False, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
inplace : bool, optional
If `False` (the default), a copy of the model with the fitted
parameters set will be returned. If `True`, the returned model will
be the same instance as the model passed in, and the parameter
values will be changed inplace.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
If ``inplace`` is `False` (the default), this is a copy of the
input model with parameters set by the fitter. If ``inplace`` is
`True`, this is the same model as the input model, with parameters
updated to be those set by the fitter.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(
model, x, y, z, weights=weights, inplace=inplace, **kwargs
)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
niter = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for _ in range(1, self.niter + 1):
niter += 1
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
inplace=inplace,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
inplace=inplace,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": niter}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
| FittingWithOutlierRemoval |
python | RaRe-Technologies__gensim | gensim/test/test_corpora.py | {
"start": 1202,
"end": 8948
} | class ____(unittest.TestCase):
TEST_CORPUS = [[(1, 1.0)], [], [(0, 0.5), (2, 1.0)], []]
def setUp(self):
self.corpus_class = None
self.file_extension = None
def run(self, result=None):
if type(self) is not CorpusTestCase:
super(CorpusTestCase, self).run(result)
def tearDown(self):
# remove all temporary test files
fname = get_tmpfile('gensim_corpus.tst')
extensions = ['', '', '.bz2', '.gz', '.index', '.vocab']
for ext in itertools.permutations(extensions, 2):
try:
os.remove(fname + ext[0] + ext[1])
except OSError:
pass
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_load(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
docs = list(corpus)
# the deerwester corpus always has nine documents
self.assertEqual(len(docs), 9)
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_len(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
# make sure corpus.index works, too
corpus = self.corpus_class(fname)
self.assertEqual(len(corpus), 9)
# for subclasses of IndexedCorpus, we need to nuke this so we don't
# test length on the index, but just testcorpus contents
if hasattr(corpus, 'index'):
corpus.index = None
self.assertEqual(len(corpus), 9)
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_empty_input(self):
tmpf = get_tmpfile('gensim_corpus.tst')
with open(tmpf, 'w') as f:
f.write('')
with open(tmpf + '.vocab', 'w') as f:
f.write('')
corpus = self.corpus_class(tmpf)
self.assertEqual(len(corpus), 0)
docs = list(corpus)
self.assertEqual(len(docs), 0)
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_save(self):
corpus = self.TEST_CORPUS
tmpf = get_tmpfile('gensim_corpus.tst')
# make sure the corpus can be saved
self.corpus_class.save_corpus(tmpf, corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = list(self.corpus_class(tmpf))
self.assertEqual(corpus, corpus2)
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_serialize(self):
corpus = self.TEST_CORPUS
tmpf = get_tmpfile('gensim_corpus.tst')
# make sure the corpus can be saved
self.corpus_class.serialize(tmpf, corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = self.corpus_class(tmpf)
self.assertEqual(corpus, list(corpus2))
# make sure the indexing corpus[i] works
for i in range(len(corpus)):
self.assertEqual(corpus[i], corpus2[i])
# make sure that subclasses of IndexedCorpus support fancy indexing
# after deserialisation
if isinstance(corpus, indexedcorpus.IndexedCorpus):
idx = [1, 3, 5, 7]
self.assertEqual(corpus[idx], corpus2[idx])
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_serialize_compressed(self):
corpus = self.TEST_CORPUS
tmpf = get_tmpfile('gensim_corpus.tst')
for extension in ['.gz', '.bz2']:
fname = tmpf + extension
# make sure the corpus can be saved
self.corpus_class.serialize(fname, corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = self.corpus_class(fname)
self.assertEqual(corpus, list(corpus2))
# make sure the indexing `corpus[i]` syntax works
for i in range(len(corpus)):
self.assertEqual(corpus[i], corpus2[i])
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_switch_id2word(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
if hasattr(corpus, 'id2word'):
firstdoc = next(iter(corpus))
testdoc = set((to_unicode(corpus.id2word[x]), y) for x, y in firstdoc)
self.assertEqual(testdoc, {('computer', 1), ('human', 1), ('interface', 1)})
d = corpus.id2word
d[0], d[1] = d[1], d[0]
corpus.id2word = d
firstdoc2 = next(iter(corpus))
testdoc2 = set((to_unicode(corpus.id2word[x]), y) for x, y in firstdoc2)
self.assertEqual(testdoc2, {('computer', 1), ('human', 1), ('interface', 1)})
@unittest.skipIf(GITHUB_ACTIONS_WINDOWS, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
def test_indexing(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
docs = list(corpus)
for idx, doc in enumerate(docs):
self.assertEqual(doc, corpus[idx])
self.assertEqual(doc, corpus[np.int64(idx)])
self.assertEqual(docs, list(corpus[:]))
self.assertEqual(docs[0:], list(corpus[0:]))
self.assertEqual(docs[0:-1], list(corpus[0:-1]))
self.assertEqual(docs[2:4], list(corpus[2:4]))
self.assertEqual(docs[::2], list(corpus[::2]))
self.assertEqual(docs[::-1], list(corpus[::-1]))
# make sure sliced corpora can be iterated over multiple times
c = corpus[:]
self.assertEqual(docs, list(c))
self.assertEqual(docs, list(c))
self.assertEqual(len(docs), len(corpus))
self.assertEqual(len(docs), len(corpus[:]))
self.assertEqual(len(docs[::2]), len(corpus[::2]))
def _get_slice(corpus, slice_):
# assertRaises for python 2.6 takes a callable
return corpus[slice_]
# make sure proper input validation for sliced corpora is done
self.assertRaises(ValueError, _get_slice, corpus, {1})
self.assertRaises(ValueError, _get_slice, corpus, 1.0)
# check sliced corpora that use fancy indexing
c = corpus[[1, 3, 4]]
self.assertEqual([d for i, d in enumerate(docs) if i in [1, 3, 4]], list(c))
self.assertEqual([d for i, d in enumerate(docs) if i in [1, 3, 4]], list(c))
self.assertEqual(len(corpus[[0, 1, -1]]), 3)
self.assertEqual(len(corpus[np.asarray([0, 1, -1])]), 3)
# check that TransformedCorpus supports indexing when the underlying
# corpus does, and throws an error otherwise
corpus_ = TransformedCorpus(DummyTransformer(), corpus)
if hasattr(corpus, 'index') and corpus.index is not None:
self.assertEqual(corpus_[0][0][1], docs[0][0][1] + 1)
self.assertRaises(ValueError, _get_slice, corpus_, {1})
transformed_docs = [val + 1 for i, d in enumerate(docs) for _, val in d if i in [1, 3, 4]]
self.assertEqual(transformed_docs, list(v for doc in corpus_[[1, 3, 4]] for _, v in doc))
self.assertEqual(3, len(corpus_[[1, 3, 4]]))
else:
self.assertRaises(RuntimeError, _get_slice, corpus_, [1, 3, 4])
self.assertRaises(RuntimeError, _get_slice, corpus_, {1})
self.assertRaises(RuntimeError, _get_slice, corpus_, 1.0)
| CorpusTestCase |
python | sympy__sympy | sympy/physics/units/quantities.py | {
"start": 254,
"end": 4517
} | class ____(AtomicExpr):
"""
Physical quantity: can be a unit of measure, a constant or a generic quantity.
"""
is_commutative = True
is_real = True
is_number = False
is_nonzero = True
is_physical_constant = False
_diff_wrt = True
def __new__(cls, name, abbrev=None,
latex_repr=None, pretty_unicode_repr=None,
pretty_ascii_repr=None, mathml_presentation_repr=None,
is_prefixed=False,
**assumptions):
if not isinstance(name, Symbol):
name = Symbol(name)
if abbrev is None:
abbrev = name
elif isinstance(abbrev, str):
abbrev = Symbol(abbrev)
# HACK: These are here purely for type checking. They actually get assigned below.
cls._is_prefixed = is_prefixed
obj = AtomicExpr.__new__(cls, name, abbrev)
obj._name = name
obj._abbrev = abbrev
obj._latex_repr = latex_repr
obj._unicode_repr = pretty_unicode_repr
obj._ascii_repr = pretty_ascii_repr
obj._mathml_repr = mathml_presentation_repr
obj._is_prefixed = is_prefixed
return obj
def set_global_dimension(self, dimension):
_QuantityMapper._quantity_dimension_global[self] = dimension
def set_global_relative_scale_factor(self, scale_factor, reference_quantity):
"""
Setting a scale factor that is valid across all unit system.
"""
from sympy.physics.units import UnitSystem
scale_factor = sympify(scale_factor)
if isinstance(scale_factor, Prefix):
self._is_prefixed = True
# replace all prefixes by their ratio to canonical units:
scale_factor = scale_factor.replace(
lambda x: isinstance(x, Prefix),
lambda x: x.scale_factor
)
scale_factor = sympify(scale_factor)
UnitSystem._quantity_scale_factors_global[self] = (scale_factor, reference_quantity)
UnitSystem._quantity_dimensional_equivalence_map_global[self] = reference_quantity
@property
def name(self):
return self._name
@property
def dimension(self):
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_default_unit_system()
return unit_system.get_quantity_dimension(self)
@property
def abbrev(self):
"""
Symbol representing the unit name.
Prepend the abbreviation with the prefix symbol if it is defines.
"""
return self._abbrev
@property
def scale_factor(self):
"""
Overall magnitude of the quantity as compared to the canonical units.
"""
from sympy.physics.units import UnitSystem
unit_system = UnitSystem.get_default_unit_system()
return unit_system.get_quantity_scale_factor(self)
def _eval_is_positive(self):
return True
def _eval_is_constant(self):
return True
def _eval_Abs(self):
return self
def _eval_subs(self, old, new):
if isinstance(new, Quantity) and self != old:
return self
def _latex(self, printer):
if self._latex_repr:
return self._latex_repr
else:
return r'\text{{{}}}'.format(self.args[1] \
if len(self.args) >= 2 else self.args[0])
def convert_to(self, other, unit_system="SI"):
"""
Convert the quantity to another quantity of same dimensions.
Examples
========
>>> from sympy.physics.units import speed_of_light, meter, second
>>> speed_of_light
speed_of_light
>>> speed_of_light.convert_to(meter/second)
299792458*meter/second
>>> from sympy.physics.units import liter
>>> liter.convert_to(meter**3)
meter**3/1000
"""
from .util import convert_to
return convert_to(self, other, unit_system)
@property
def free_symbols(self):
"""Return free symbols from quantity."""
return set()
@property
def is_prefixed(self):
"""Whether or not the quantity is prefixed. Eg. `kilogram` is prefixed, but `gram` is not."""
return self._is_prefixed
| Quantity |
python | bottlepy__bottle | bottle.py | {
"start": 76384,
"end": 76981
} | class ____(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
| LocalResponse |
python | getsentry__sentry | tests/sentry/deletions/test_group.py | {
"start": 20300,
"end": 28114
} | class ____(TestCase, SnubaTestCase, OccurrenceTestMixin):
referrer = Referrer.TESTING_TEST.value
def create_occurrence(self, event: Event, type_id: int) -> tuple[IssueOccurrence, Group]:
occurrence, group_info = self.process_occurrence(
project_id=self.project.id,
event_id=event.event_id,
type=type_id,
# Convert event data dict for occurrence processing
event_data=dict(event.data),
)
assert group_info is not None
return occurrence, group_info.group
def select_error_events(self, project_id: int) -> object:
columns = ["event_id", "group_id"]
return self.select_rows(Entity(EntityKey.Events.value), columns, project_id)
def select_issue_platform_events(self, project_id: int) -> object:
columns = ["event_id", "group_id", "occurrence_id"]
return self.select_rows(Entity(EntityKey.IssuePlatform.value), columns, project_id)
def select_rows(
self, entity: Entity, columns: list[str], project_id: int
) -> None | dict[str, object]:
# Adding the random microseconds is to circumvent Snuba's caching mechanism
now = datetime.now()
start_time = now - timedelta(days=1, microseconds=random.randint(0, 100000000))
end_time = now + timedelta(days=1, microseconds=random.randint(0, 100000000))
select = [Column(column) for column in columns]
where = [
Condition(Column("project_id"), Op.IN, Function("tuple", [project_id])),
Condition(Column("timestamp"), Op.GTE, start_time),
Condition(Column("timestamp"), Op.LT, end_time),
]
query = Query(match=entity, select=select, where=where)
request = Request(
# Using IssuePlatform dataset for occurrence queries
dataset=Dataset.IssuePlatform.value,
app_id=self.referrer,
query=query,
tenant_ids=self.tenant_ids,
)
results = bulk_snuba_queries([request])[0]["data"]
return results[0] if results else None
@property
def tenant_ids(self) -> dict[str, str]:
return {"referrer": self.referrer, "organization_id": self.organization.id}
@mock.patch("sentry.deletions.tasks.nodestore.bulk_snuba_queries")
def test_simple_issue_platform(self, mock_bulk_snuba_queries: mock.Mock) -> None:
# Adding this query here to make sure that the cache is not being used
assert self.select_error_events(self.project.id) is None
assert self.select_issue_platform_events(self.project.id) is None
# Create initial error event and occurrence related to it; two different groups will exist
event = self.store_event(data={}, project_id=self.project.id)
# XXX: We need a different way of creating occurrences which will insert into the nodestore
occurrence_event, issue_platform_group = self.create_occurrence(
event, type_id=FeedbackGroup.type_id
)
# Assertions after creation
assert occurrence_event.id != event.event_id
assert event.group_id != issue_platform_group.id
assert event.group.issue_category == GroupCategory.ERROR
assert issue_platform_group.issue_category == GroupCategory.FEEDBACK
assert issue_platform_group.type == FeedbackGroup.type_id
# Assert that the error event has been inserted in the nodestore & Snuba
event_node_id = Event.generate_node_id(event.project_id, event.event_id)
assert nodestore.backend.get(event_node_id)
expected_error = {"event_id": event.event_id, "group_id": event.group_id}
assert self.select_error_events(self.project.id) == expected_error
# Assert that the occurrence event has been inserted in the nodestore & Snuba
expected_occurrence_event = {
"event_id": occurrence_event.event_id,
"group_id": issue_platform_group.id,
"occurrence_id": occurrence_event.id,
}
assert self.select_issue_platform_events(self.project.id) == expected_occurrence_event
# This will delete the group and the events from the node store and Snuba
with self.tasks():
delete_groups_for_project(
object_ids=[issue_platform_group.id],
transaction_id=uuid4().hex,
project_id=self.project.id,
)
# The original error event and group still exist
assert Group.objects.filter(id=event.group_id).exists()
assert nodestore.backend.get(event_node_id)
assert self.select_error_events(self.project.id) == expected_error
# The Issue Platform group and occurrence have been deleted from Postgres
assert not Group.objects.filter(id=issue_platform_group.id).exists()
# assert not nodestore.backend.get(occurrence_node_id)
# Verify that a DELETE query was sent to Snuba with the correct conditions
mock_bulk_snuba_queries.assert_called_once()
requests = mock_bulk_snuba_queries.call_args[0][0]
assert len(requests) == 1
delete_request = requests[0]
assert isinstance(delete_request.query, DeleteQuery)
assert delete_request.dataset == "search_issues"
assert delete_request.query.column_conditions["project_id"] == [self.project.id]
assert delete_request.query.column_conditions["group_id"] == [issue_platform_group.id]
@mock.patch("sentry.deletions.tasks.nodestore.bulk_snuba_queries")
def test_issue_platform_batching(self, mock_bulk_snuba_queries: mock.Mock) -> None:
# Patch max_rows_to_delete to a small value for testing
with (
self.tasks(),
mock.patch("sentry.deletions.tasks.nodestore.ISSUE_PLATFORM_MAX_ROWS_TO_DELETE", 6),
):
# Create three groups with times_seen such that batching is required
group1 = self.create_group(project=self.project)
group2 = self.create_group(project=self.project)
group3 = self.create_group(project=self.project)
group4 = self.create_group(project=self.project)
# Set times_seen for each group
Group.objects.filter(id=group1.id).update(times_seen=3, type=FeedbackGroup.type_id)
Group.objects.filter(id=group2.id).update(times_seen=1, type=FeedbackGroup.type_id)
Group.objects.filter(id=group3.id).update(times_seen=3, type=FeedbackGroup.type_id)
Group.objects.filter(id=group4.id).update(times_seen=3, type=FeedbackGroup.type_id)
# This will delete the group and the events from the node store and Snuba
delete_groups_for_project(
object_ids=[group1.id, group2.id, group3.id, group4.id],
transaction_id=uuid4().hex,
project_id=self.project.id,
)
assert mock_bulk_snuba_queries.call_count == 1
# There should be two batches with max_rows_to_delete=6
# First batch: [group2, group1] (1+3=4 events, under limit)
# Second batch: [group3, group4] (3+3=6 events, at limit)
requests = mock_bulk_snuba_queries.call_args[0][0]
assert len(requests) == 2
first_batch = requests[0].query.column_conditions["group_id"]
second_batch = requests[1].query.column_conditions["group_id"]
# Since we sort by times_seen, the first batch will be [group2, group1]
# and the second batch will be [group3, group4]
assert first_batch == [group2.id, group1.id] # group2 has less times_seen than group1
# group3 and group4 have the same times_seen, thus sorted by id
assert second_batch == [group3.id, group4.id]
| DeleteIssuePlatformTest |
python | pytorch__pytorch | test/functorch/test_aotdispatch.py | {
"start": 323958,
"end": 329745
} | class ____(TestAOTAutograd):
"""
These are the same as TestAOTAutograd tests, but we run dynamo first to get a graph module.
"""
def assertExpectedInline(self, *args, **kwargs):
# These will have different outputs because dynamo returns a different graph module
# But we don't really care about that assertion when testing with dynamo,
# only that the outputs match, etc.
pass
def make_compiler(self, graph_cell):
return make_boxed_compiler(partial(extract_graph, graph_cell=graph_cell))
# Compiler to passes to dynamo
def run_autograd(
self,
f: Callable,
fw_graph_cell: list[Optional[Callable]],
decompositions: Optional[dict],
keep_input_mutations: bool,
dynamic: bool,
):
"""
Runs dynamo and aot_autograd with the specified settings
"""
def dynamo_compiler(gm, inputs, **kwargs):
result = aot_module_simplified(
gm,
inputs,
fw_compiler=self.make_compiler(fw_graph_cell),
bw_compiler=self.make_compiler([None]),
decompositions=decompositions,
keep_inference_input_mutations=keep_input_mutations,
# Dynamic is calculated from whether the inputs have fake tensors
)
return result
def torch_compile_wrapper(*args, **kwargs):
torch._dynamo.reset()
fn = torch.compile(f, backend=dynamo_compiler)
try:
result = fn(*args, **kwargs)
except torch._dynamo.exc.BackendCompilerFailed as e:
# So that assertRaises works properly
raise e.inner_exception from e
return result
return torch_compile_wrapper
def test_inputs_overlapping_unsqueeze_with_mutation(self):
def f(x, y):
x.add_(1)
y.add_(1)
return x
def run(f):
base = torch.ones(10)
inputs = [base.unsqueeze(0), base.unsqueeze(0)]
return f(*inputs)
optf = torch.compile(backend="aot_eager", dynamic=True)(f)
out = run(f)
optout = run(optf)
self.assertEqual(out, optout)
def test_inputs_overlapping_with_mutation_guard_base(self):
def f(x, y):
x.add_(1)
y.add_(1)
return x
def run(f):
base = torch.ones(10)
inputs = [base[1:], base[1:]]
return f(*inputs)
optf = torch.compile(backend="aot_eager", dynamic=True)(f)
out = run(f)
optout = run(optf)
self.assertEqual(out, optout)
def test_mutations_in_bw_detached_from_tangent(self):
class AF(torch.autograd.Function):
@staticmethod
def forward(ctx, dummy, inplace_tensor):
ctx.inplace_tensor = inplace_tensor
return dummy.clone()
@staticmethod
def backward(ctx, grad_output):
inplace_tensor = ctx.inplace_tensor
gradient_attachment = grad_output * 0 + 1
inplace_tensor.add_(1 * gradient_attachment)
return grad_output, None, None
def fn(dummy, inplace_tensor):
return AF.apply(dummy, inplace_tensor)
def _inps():
dummy = torch.zeros((2,), requires_grad=True)
inplace_tensor = torch.zeros((2,), requires_grad=False)
return dummy, inplace_tensor
inps = _inps()
out = fn(*inps)
ref_inps_after_fw = [x.clone().detach() for x in inps]
out.sum().backward()
ref_inps_after_bw = [x.clone().detach() for x in inps]
inps = _inps()
out = torch.compile(fn, backend="aot_eager", fullgraph=True)(*inps)
inps_after_fw = [x.clone().detach() for x in inps]
out.sum().backward()
inps_after_bw = [x.clone().detach() for x in inps]
self.assertEqual(ref_inps_after_fw, inps_after_fw)
self.assertEqual(ref_inps_after_bw, inps_after_bw)
def test_mutation_of_input_in_fw_and_bw(self):
class AF(torch.autograd.Function):
@staticmethod
def forward(ctx, dummy, inplace_tensor):
inplace_tensor.add_(1)
ctx.inplace_tensor = inplace_tensor
return dummy.clone()
@staticmethod
def backward(ctx, grad_output):
inplace_tensor = ctx.inplace_tensor
inplace_tensor.add_(1)
return grad_output, None, None
def fn(dummy, inplace_tensor):
return AF.apply(dummy, inplace_tensor)
def inps():
dummy = torch.randn((2,), requires_grad=True)
inplace_tensor = torch.zeros((2,), requires_grad=False)
return dummy, inplace_tensor
def sc_inps():
dummy = TwoTensor(
torch.randn((2,), requires_grad=True),
torch.randn((2,), requires_grad=True),
)
inplace_tensor = TwoTensor(
torch.zeros((2,), requires_grad=False),
torch.zeros((2,), requires_grad=False),
)
return dummy, inplace_tensor
for _inps in [inps, sc_inps]:
dummy, inplace = _inps()
y = fn(dummy, inplace)
ref0 = inplace.clone().detach()
y.sum().backward()
ref = inplace.clone().detach()
dummy, inplace = _inps()
y = torch.compile(fn, backend="aot_eager", fullgraph=True)(dummy, inplace)
self.assertEqual(ref0, inplace)
y.sum().backward()
self.assertEqual(ref, inplace)
| TestAOTAutogradWithDynamo |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 10705,
"end": 11380
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->InstructBlip
| InstructBlipMLP |
python | sqlalchemy__sqlalchemy | test/orm/test_naturalpks.py | {
"start": 1252,
"end": 21366
} | class ____(fixtures.MappedTest):
# MySQL 5.5 on Windows crashes (the entire server, not the client)
# if you screw around with ON UPDATE CASCADE type of stuff.
__requires__ = ("skip_mysql_on_windows",)
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table(
"users",
metadata,
Column("username", String(50), primary_key=True),
Column("fullname", String(100)),
test_needs_fk=True,
)
Table(
"addresses",
metadata,
Column("email", String(50), primary_key=True),
Column(
"username", String(50), ForeignKey("users.username", **fk_args)
),
test_needs_fk=True,
)
Table(
"items",
metadata,
Column("itemname", String(50), primary_key=True),
Column("description", String(100)),
test_needs_fk=True,
)
Table(
"users_to_items",
metadata,
Column(
"username",
String(50),
ForeignKey("users.username", **fk_args),
primary_key=True,
),
Column(
"itemname",
String(50),
ForeignKey("items.itemname", **fk_args),
primary_key=True,
),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Item(cls.Comparable):
pass
def test_entity(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
assert sess.get(User, "jack") is u1
u1.username = "ed"
sess.flush()
def go():
assert sess.get(User, "ed") is u1
self.assert_sql_count(testing.db, go, 0)
assert sess.get(User, "jack") is None
sess.expunge_all()
u1 = sess.get(User, "ed")
eq_(User(username="ed", fullname="jack"), u1)
def test_load_after_expire(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
assert sess.get(User, "jack") is u1
sess.execute(
users.update().values({User.username: "jack"}), dict(username="ed")
)
# expire/refresh works off of primary key. the PK is gone
# in this case so there's no way to look it up. criterion-
# based session invalidation could solve this [ticket:911]
sess.expire(u1)
assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u1, "username")
sess.expunge_all()
assert sess.get(User, "jack") is None
assert sess.get(User, "ed").fullname == "jack"
@testing.requires.update_returning
def test_update_to_sql_expr(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
# note this is the primary key, so you need UPDATE..RETURNING
# to catch this
u1.username = User.username + " jones"
sess.flush()
eq_(u1.username, "jack jones")
def test_update_to_self_sql_expr(self):
# SQL expression where the PK won't actually change,
# such as to bump a server side trigger
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
u1.username = User.username + ""
sess.flush()
eq_(u1.username, "jack")
def test_flush_new_pk_after_expire(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
assert sess.get(User, "jack") is u1
sess.expire(u1)
u1.username = "ed"
sess.flush()
sess.expunge_all()
assert sess.get(User, "ed").fullname == "jack"
@testing.requires.on_update_cascade
def test_onetomany_passive(self):
self._test_onetomany(True)
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
def _test_onetomany(self, passive_updates):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, passive_updates=passive_updates
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
u1.addresses.append(Address(email="jack1"))
u1.addresses.append(Address(email="jack2"))
sess.add(u1)
sess.flush()
assert sess.get(Address, "jack1") is u1.addresses[0]
u1.username = "ed"
sess.flush()
assert u1.addresses[0].username == "ed"
sess.expunge_all()
eq_(
[Address(username="ed"), Address(username="ed")],
sess.query(Address).all(),
)
u1 = sess.get(User, "ed")
u1.username = "jack"
def go():
sess.flush()
if not passive_updates:
# test passive_updates=False;
# load addresses, update user, update 2 addresses
self.assert_sql_count(testing.db, go, 3)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(
username="jack",
addresses=[Address(username="jack"), Address(username="jack")],
) == sess.get(User, "jack")
u1 = sess.get(User, "jack")
u1.addresses = []
u1.username = "fred"
sess.flush()
sess.expunge_all()
assert sess.get(Address, "jack1").username is None
u1 = sess.get(User, "fred")
eq_(User(username="fred", fullname="jack"), u1)
@testing.requires.on_update_cascade
def test_manytoone_passive(self):
self._test_manytoone(True)
def test_manytoone_nonpassive(self):
self._test_manytoone(False)
@testing.requires.on_update_cascade
def test_manytoone_passive_uselist(self):
self._test_manytoone(True, True)
def test_manytoone_nonpassive_uselist(self):
self._test_manytoone(False, True)
def test_manytoone_nonpassive_cold_mapping(self):
"""test that the mapper-level m2o dependency processor
is set up even if the opposite side relationship
hasn't yet been part of a flush.
"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
with testing.db.begin() as conn:
conn.execute(
users.insert(), dict(username="jack", fullname="jack")
)
conn.execute(
addresses.insert(), dict(email="jack1", username="jack")
)
conn.execute(
addresses.insert(), dict(email="jack2", username="jack")
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"user": relationship(User, passive_updates=False)},
)
sess = fixture_session()
u1 = sess.query(User).first()
a1, a2 = sess.query(Address).all()
u1.username = "ed"
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 2)
def _test_manytoone(self, passive_updates, uselist=False, dynamic=False):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User, uselist=uselist, passive_updates=passive_updates
)
},
)
sess = fixture_session()
a1 = Address(email="jack1")
a2 = Address(email="jack2")
a3 = Address(email="fred")
u1 = User(username="jack", fullname="jack")
if uselist:
a1.user = [u1]
a2.user = [u1]
else:
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.add(a3)
sess.flush()
u1.username = "ed"
def go():
sess.flush()
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 2)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
assert a1.username == a2.username == "ed"
sess.expunge_all()
if uselist:
eq_(
[
Address(email="fred", user=[]),
Address(username="ed"),
Address(username="ed"),
],
sess.query(Address).order_by(Address.email).all(),
)
else:
eq_(
[
Address(email="fred", user=None),
Address(username="ed"),
Address(username="ed"),
],
sess.query(Address).order_by(Address.email).all(),
)
@testing.requires.on_update_cascade
def test_onetoone_passive(self):
self._test_onetoone(True)
def test_onetoone_nonpassive(self):
self._test_onetoone(False)
def _test_onetoone(self, passive_updates):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address, passive_updates=passive_updates, uselist=False
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(username="jack", fullname="jack")
sess.add(u1)
sess.flush()
a1 = Address(email="jack1")
u1.address = a1
sess.add(a1)
sess.flush()
u1.username = "ed"
def go():
sess.flush()
if passive_updates:
sess.expire(u1, ["address"])
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 2)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
eq_([Address(username="ed")], sess.query(Address).all())
@testing.requires.on_update_cascade
def test_bidirectional_passive(self):
self._test_bidirectional(True)
def test_bidirectional_nonpassive(self):
self._test_bidirectional(False)
def _test_bidirectional(self, passive_updates):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User, passive_updates=passive_updates, backref="addresses"
)
},
)
sess = fixture_session(autoflush=False)
a1 = Address(email="jack1")
a2 = Address(email="jack2")
u1 = User(username="jack", fullname="jack")
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.flush()
u1.username = "ed"
(ad1, ad2) = sess.query(Address).all()
eq_([Address(username="jack"), Address(username="jack")], [ad1, ad2])
def go():
sess.flush()
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
# two updates bundled
self.assert_sql_count(testing.db, go, 2)
eq_([Address(username="ed"), Address(username="ed")], [ad1, ad2])
sess.expunge_all()
eq_(
[Address(username="ed"), Address(username="ed")],
sess.query(Address).all(),
)
u1 = sess.get(User, "ed")
assert len(u1.addresses) == 2 # load addresses
u1.username = "fred"
def go():
sess.flush()
# check that the passive_updates is on on the other side
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
# two updates bundled
self.assert_sql_count(testing.db, go, 2)
sess.expunge_all()
eq_(
[Address(username="fred"), Address(username="fred")],
sess.query(Address).all(),
)
@testing.requires.on_update_cascade
def test_manytomany_passive(self):
self._test_manytomany(True)
@testing.fails_if(
testing.requires.on_update_cascade
+ testing.requires.sane_multi_rowcount
)
def test_manytomany_nonpassive(self):
self._test_manytomany(False)
def _test_manytomany(self, passive_updates):
users, items, Item, User, users_to_items = (
self.tables.users,
self.tables.items,
self.classes.Item,
self.classes.User,
self.tables.users_to_items,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"items": relationship(
Item,
secondary=users_to_items,
backref="users",
passive_updates=passive_updates,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
u1 = User(username="jack")
u2 = User(username="fred")
i1 = Item(itemname="item1")
i2 = Item(itemname="item2")
u1.items.append(i1)
u1.items.append(i2)
i2.users.append(u2)
sess.add(u1)
sess.add(u2)
sess.flush()
r = sess.query(Item).all()
# ComparableEntity can't handle a comparison with the backrefs
# involved....
eq_(Item(itemname="item1"), r[0])
eq_(["jack"], [u.username for u in r[0].users])
eq_(Item(itemname="item2"), r[1])
eq_(["jack", "fred"], [u.username for u in r[1].users])
u2.username = "ed"
def go():
sess.flush()
go()
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
r = sess.query(Item).all()
eq_(Item(itemname="item1"), r[0])
eq_(["jack"], [u.username for u in r[0].users])
eq_(Item(itemname="item2"), r[1])
eq_(["ed", "jack"], sorted([u.username for u in r[1].users]))
sess.expunge_all()
u2 = sess.get(User, u2.username)
u2.username = "wendy"
sess.flush()
r = sess.query(Item).filter(with_parent(u2, User.items)).all()
eq_(Item(itemname="item2"), r[0])
def test_manytoone_deferred_relationship_expr(self):
"""for [ticket:4359], test that updates to the columns embedded
in an object expression are also updated."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
passive_updates=testing.requires.on_update_cascade.enabled,
)
},
)
s = fixture_session()
a1 = Address(email="jack1")
u1 = User(username="jack", fullname="jack")
a1.user = u1
# scenario 1. object is still transient, we get a value.
expr = Address.user == u1
eq_(expr.left.callable(), "jack")
# scenario 2. value has been changed while we are transient.
# we get the updated value.
u1.username = "ed"
eq_(expr.left.callable(), "ed")
s.add_all([u1, a1])
s.commit()
eq_(a1.username, "ed")
# scenario 3. the value is changed and flushed, we get the new value.
u1.username = "fred"
s.flush()
eq_(expr.left.callable(), "fred")
# scenario 4. the value is changed, flushed, and expired.
# the callable goes out to get that value.
u1.username = "wendy"
s.commit()
assert "username" not in u1.__dict__
eq_(expr.left.callable(), "wendy")
# scenario 5. the value is changed flushed, expired,
# and then when we hit the callable, we are detached.
u1.username = "jack"
s.commit()
assert "username" not in u1.__dict__
s.expunge(u1)
# InstanceState has a "last known values" feature we use
# to pick up on this
eq_(expr.left.callable(), "jack")
# doesn't unexpire the attribute
assert "username" not in u1.__dict__
# once we are persistent again, we check the DB
s.add(u1)
eq_(expr.left.callable(), "jack")
assert "username" in u1.__dict__
# scenario 6. we are using del
u2 = User(username="jack", fullname="jack")
expr = Address.user == u2
eq_(expr.left.callable(), "jack")
del u2.username
assert_raises_message(
sa.exc.InvalidRequestError,
"Can't resolve value for column users.username",
expr.left.callable,
)
u2.username = "ed"
eq_(expr.left.callable(), "ed")
s.add(u2)
s.commit()
eq_(expr.left.callable(), "ed")
del u2.username
# object is persistent, so since we deleted, we get None
with expect_warnings("Got None for value of column "):
eq_(expr.left.callable(), None)
s.expunge(u2)
# however that None isn't in the dict, that's just the default
# attribute value, so after expunge it's gone
assert "username" not in u2.__dict__
# detached, we don't have it
assert_raises_message(
sa.exc.InvalidRequestError,
"Can't resolve value for column users.username",
expr.left.callable,
)
| NaturalPKTest |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 67038,
"end": 70134
} | class ____(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
float_value = float(self.value)
str_float_value = ("%.330f" % float_value).strip('0')
str_value = Utils.normalise_float_repr(self.value)
if str_value not in (str_float_value, repr(float_value).lstrip('0')):
warning(self.pos, "Using this floating point value with DEF may lose precision, using %r" % float_value)
return float_value
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, str)
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
ctype = PyrexTypes.parse_basic_type(name)
if ctype is not None and env.in_c_type_context:
return ctype
global_scope = env.global_scope()
global_entry = global_scope.lookup(name)
if global_entry and global_entry.is_type:
type = global_entry.type
if type and (type.is_pyobject or env.in_c_type_context):
return type
ctype = ctype or type
# This is fairly heavy, so it's worth trying some easier things above.
from .TreeFragment import TreeFragment
with local_errors(ignore=True):
pos = (pos[0], pos[1], pos[2]-7)
try:
declaration = TreeFragment("sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
except CompileError:
pass
else:
sizeof_node = declaration.root.stats[0].expr
if isinstance(sizeof_node, SizeofTypeNode):
sizeof_node = sizeof_node.analyse_types(env)
if isinstance(sizeof_node, SizeofTypeNode):
type = sizeof_node.arg_type
if type and (type.is_pyobject or env.in_c_type_context):
return type
ctype = ctype or type
return ctype
| FloatNode |
python | wandb__wandb | tools/graphql_codegen/plugin.py | {
"start": 1541,
"end": 17670
} | class ____(Plugin):
"""Plugin to customize generated Python code for the `wandb` package.
For more info about allowed methods, see:
- https://github.com/mirumee/ariadne-codegen/blob/main/PLUGINS.md
- https://github.com/mirumee/ariadne-codegen/blob/main/ariadne_codegen/plugins/base.py
"""
# Inherited
schema: GraphQLSchema
config_dict: dict[str, Any]
package_dir: Path
"""The directory where the generated modules will be added."""
classes_to_drop: set[str]
"""Generated classes that we don't need in the final code."""
# From ariadne-codegen, we don't currently need the generated httpx client,
# base model, exceptions, etc., so drop these generated modules in favor of
# the existing, internal GQL client.
modules_to_drop: ClassVar[set[str]] = {
"async_base_client",
"base_client",
"base_model", # We'll swap in a module with our own custom base class
"client",
"exceptions",
}
def __init__(self, schema: GraphQLSchema, config_dict: dict[str, Any]) -> None:
super().__init__(schema, config_dict)
codegen_config: dict[str, Any] = self.config_dict["tool"]["ariadne-codegen"]
package_path = codegen_config["target_package_path"]
package_name = codegen_config["target_package_name"]
self.package_dir = Path(package_path) / package_name
self.classes_to_drop = set()
# Remove any previously-generated files
self._remove_existing_package_dir()
# HACK: Override the default python type that ariadne-codegen uses for GraphQL's `ID` type.
# See: https://github.com/mirumee/ariadne-codegen/issues/316
if (id_name := "ID") in codegen_config["scalars"]:
from ariadne_codegen.client_generators import constants
constants.SIMPLE_TYPE_MAP.pop(id_name, None)
constants.INPUT_SCALARS_MAP.pop(id_name, None)
def _remove_existing_package_dir(self) -> None:
"""Remove the existing generated files in the target package directory, if any."""
# Only remove existing files if `shutil.rmtree` is safe to use on the current platform.
if not rmtree.avoids_symlink_attacks:
sys.stdout.write(f"Skipping removal of {self.package_dir!s}\n")
return
with suppress(FileNotFoundError):
rmtree(self.package_dir)
sys.stdout.write(f"Removed existing files in: {self.package_dir!s}\n")
def generate_init_code(self, generated_code: str) -> str:
# This should be the last hook in the codegen process, after all modules have been generated.
# So at this step, perform any final cleanup actions.
self._remove_excluded_module_files()
self._run_ruff()
return super().generate_init_code(generated_code)
def _remove_excluded_module_files(self) -> None:
"""Remove any generated module files we don't need."""
paths = (
self.package_dir / f"{name}.py" for name in sorted(self.modules_to_drop)
)
sys.stdout.write("\n========== Removing excluded modules ==========\n")
for path in paths:
sys.stdout.write(f"Removing: {path!s}\n")
path.unlink(missing_ok=True)
def _run_ruff(self) -> None:
"""Autofix and format the generated code via Ruff."""
commands = (
["ruff", "check", "--fix", "--unsafe-fixes", str(self.package_dir)],
["ruff", "format", str(self.package_dir)],
)
sys.stdout.write(f"\n========== Reformatting: {self.package_dir} ==========\n")
for cmd in commands:
try:
subprocess.run(cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
msg = f"Error running command: {cmd!r}. Captured output:\n{e.output.decode('utf-8')}"
raise RuntimeError(msg) from e
def generate_init_module(self, module: ast.Module) -> ast.Module:
return self._rewrite_init_module(module)
def _rewrite_init_module(self, module: ast.Module) -> ast.Module:
"""Remove dropped imports and rewrite `__all__` exports in `__init__`."""
# Drop selected import statements from the __init__ module
kept_import_stmts = list(self._filter_init_imports(module.body))
# Regenerate the `__all__ = [...]` export statement
names_to_export = chain.from_iterable(map(imported_names, kept_import_stmts))
module.body = [
make_all_assignment(names_to_export),
*kept_import_stmts,
]
return ast.fix_missing_locations(module)
def _filter_init_imports(
self, stmts: Iterable[ast.stmt]
) -> Iterator[ast.ImportFrom]:
"""Yield only import statements to keep from the given module statements."""
omit_modules = self.modules_to_drop
omit_names = self.classes_to_drop
for stmt in stmts:
# Keep only imports from modules that aren't being dropped
if is_import_from(imp := stmt) and (imp.module not in omit_modules):
# Keep only imported names that aren't being dropped
kept_names = sorted(set(imported_names(imp)) - omit_names)
yield make_import_from(imp.module, kept_names, level=imp.level)
def process_schema(self, schema: GraphQLSchema) -> GraphQLSchema:
# `ariadne-codegen` doesn't automatically recognize standard introspection fields
# like `__type`, `__schema`, etc., so inject them here on `Query`.
if schema.query_type:
meta_fields = {
"__type": TypeMetaFieldDef,
"__schema": SchemaMetaFieldDef,
}
schema.query_type.fields.update(meta_fields)
return schema
def generate_enums_module(self, module: ast.Module) -> ast.Module:
return self._rewrite_generated_module(module)
def generate_input_field(
self,
field_implementation: ast.AnnAssign,
input_field: GraphQLInputField,
field_name: str,
) -> ast.AnnAssign:
# Apply any `@constraints` from the GraphQL schema to this pydantic field.
if constraints := self._parse_constraints(input_field):
return self._apply_constraints(constraints, field_implementation)
return field_implementation
def generate_input_class(self, class_def: ast.ClassDef, *_, **__) -> ast.ClassDef:
# Replace the default base class: `BaseModel` -> `GQLInput`
return ClassReplacer({BASE_MODEL: GQL_INPUT}).visit(class_def)
def generate_inputs_module(self, module: ast.Module) -> ast.Module:
return self._rewrite_generated_module(module)
def generate_result_field(
self,
field_implementation: ast.AnnAssign,
operation_definition: ExecutableDefinitionNode,
field: FieldNode,
) -> ast.AnnAssign:
# Apply any `@constraints` from the GraphQL schema to this pydantic field.
if (gql_field := self._get_field_def(field, operation_definition)) and (
constraints := self._parse_constraints(gql_field)
):
return self._apply_constraints(constraints, field_implementation)
return field_implementation
def _get_field_def(
self, field: FieldNode, defn: ExecutableDefinitionNode
) -> GraphQLField | None:
"""Get the original GraphQL definition of a field from an operation definition."""
# NOTE:
# - The `field` node here is parsed from the GraphQL _operation_,
# i.e. from inside the query/mutation/fragment/etc.
# - However, it doesn't yet know about the typed field _definition_
# it maps to, i.e. from the original GraphQL schema.
#
# The `Visitor` logic below is needed to:
# - traverse the operation (query/mutation/etc.) to find the current field, and
# - map it back to the original `GraphQLField` definition from the source schema.
#
# It's a bit convoluted, but better this than implementing and maintaining
# the traversal logic from scratch.
class FieldDefFinder(Visitor):
def __init__(self, target: FieldNode, schema: GraphQLSchema) -> None:
super().__init__()
self.type_info = TypeInfo(schema)
self.target = target
self.field_def: GraphQLField | None = None
def enter_field(self, node: FieldNode, *_: Any) -> Any:
if node is self.target:
# On reaching the field we're looking for, `TypeInfo.get_field_def()`
# returns the original `GraphQLField` definition from the schema.
# If this happens, we're done, so `BREAK` immediately.
self.field_def = self.type_info.get_field_def()
return self.BREAK
finder = FieldDefFinder(field, self.schema)
visit(defn, finder)
return finder.field_def
def _apply_constraints(
self,
constraints: ParsedConstraints,
ann: ast.AnnAssign,
# gql_field: GraphQLField | GraphQLInputField,
) -> ast.AnnAssign:
"""Apply any `@constraints(...)` from the GraphQL field definition to this pydantic `Field(...)`.
Should preserve any existing `Field(...)` calls, as well as any assigned default value.
"""
field_kws = constraints.to_ast_keywords()
# Preserve existing `= Field(...)` calls in the annotated assignment.
if is_field_call(pydantic_field := ann.value):
pydantic_field.keywords = [*pydantic_field.keywords, *field_kws]
return ann
# Otherwise, if there's a default value assigned to the field, preserve it.
if (default_expr := ann.value) is not None:
field_kws = [ast.keyword("default", default_expr), *field_kws]
ann.value = ast.Call(ast.Name("Field"), args=[], keywords=field_kws)
return ann
def _parse_constraints(
self, gql_field: GraphQLField | GraphQLInputField
) -> ParsedConstraints | None:
"""Translate the @constraints directive, if present, to python AST keywords for a pydantic `Field`.
Explicit handling by GraphQL type:
- Lists: min/max -> min_length/max_length
- String: min/max/pattern -> min_length/max_length/pattern
- Int, Int64, Float: min/max -> ge/le
Raises:
TypeError: if the directive is present on an unsupported/unexpected GraphQL type.
"""
# Don't bother unless there are actually any `@constraints(...)` on this field.
if not (
(directive_defn := self.schema.get_directive("constraints"))
and (field_defn := gql_field.ast_node)
and (argmap := get_directive_values(directive_defn, field_defn))
):
return None
# Unwrap NonNull types, e.g. `Int! → Int`
gql_type = get_nullable_type(gql_field.type)
# However, DO NOT unwrap List, as this would miss `@constraints` on List types:
# e.g. `tags: [TagInput!]! @constraints(max: 20)`
if is_list_type(gql_type):
return ListConstraints(**argmap)
# Otherwise handle scalar-like named types, e.g. `String`, `Int`, `Float`
named_type = get_named_type(gql_type)
if is_scalar_type(named_type):
if named_type.name in {"String"}:
return StringConstraints(**argmap)
if named_type.name in {"Int", "Int64", "Float"}:
return NumericConstraints(**argmap)
raise TypeError(
f"Unable to parse @constraints on field with GraphQL type: {named_type!r}"
)
def _concrete_typenames(self, gql_name: str) -> list[str] | None:
"""Returns the actual concrete GQL type names from the given GQL type name.
Necessary to accurately constrain the allowed `typename__`
strings on generated fragment classes.
Necessary if the type is a union or interface. Should expect examples like:
- `"ArtifactCollection" -> ["ArtifactPortfolio", "ArtifactSequence"]`
- `"ArtifactSequence" -> ["ArtifactSequence"]`
- `"NotARealType" -> None`
"""
if not (gql_type := self.schema.get_type(gql_name)):
return None
if not (impl_types := self.schema.get_possible_types(gql_type)):
# No implementations/unioned types, so assume it's already a concrete type.
return [gql_name]
return [impl.name for impl in impl_types]
def generate_result_class(
self,
class_def: ast.ClassDef,
operation_definition: ExecutableDefinitionNode,
selection_set: SelectionSetNode,
) -> ast.ClassDef:
# Don't export this class from __init__.py (in a later step) unless:
# - It's the the outermost result type for an operation, or
# - It's a fragment type
if class_def.name.lower() != operation_definition.name.value.lower():
self.classes_to_drop.add(class_def.name)
# Replace the default base class: `BaseModel` -> `GQLResult`
return ClassReplacer({BASE_MODEL: GQL_RESULT}).visit(class_def)
def generate_result_types_module(self, module: ast.Module, *_, **__) -> ast.Module:
return self._rewrite_generated_module(module)
def generate_fragments_module(
self,
module: ast.Module,
fragments_definitions: dict[str, FragmentDefinitionNode],
) -> ast.Module:
# Maps {fragment name -> orig GQL object type names}
# If a fragment was defined on an interface type, `typename__` should
# only allow the names of the interface's implemented object types.
fragment2typenames: dict[str, list[str]] = {
name: typenames
for name, frag in fragments_definitions.items()
if (typenames := self._concrete_typenames(frag.type_condition.name.value))
}
# Rewrite `typename__` fields:
# - BEFORE: `typename__: str = Field(alias="__typename")`
# - AFTER: `typename__: Literal["OrigSchemaTypeName"] = "OrigSchemaTypeName"`
for class_def in filter(is_class_def, module.body):
for stmt in class_def.body:
if (
isinstance(stmt, ast.AnnAssign)
and (stmt.target.id == "typename__")
and (names := fragment2typenames.get(class_def.name))
):
stmt.annotation = make_literal(*names)
# Determine if we prepopulate `typename__` with a default field value
# - assign default: Fragment defined on a GQL object type OR interface with 1 impl.
# - omit default: Fragment defined on a GQL interface with multiple impls.
stmt.value = ast.Constant(names[0]) if len(names) == 1 else None
return self._rewrite_generated_module(module)
def _rewrite_generated_module(self, module: ast.Module) -> ast.Module:
"""Apply common transformations to the generated module, excluding `__init__`."""
module = PydanticModuleRewriter().visit(module)
module = self._replace_redundant_classes(module)
return ast.fix_missing_locations(module)
def _replace_redundant_classes(self, module: ast.Module) -> ast.Module:
# Identify redundant classes that we can drop/replace in the code,
# by mapping `{redundant_class_name -> replacement_class_name}`.
rename_map = {
class_def.name: base_class_names(class_def)[0]
for class_def in filter(is_redundant_class, module.body)
}
# Record replaced classes for later cleanup in __init__.py
self.classes_to_drop.update(rename_map.keys())
# Update any references to redundant classes in the remaining class definitions
# Replace the module body with the cleaned-up statements
return ClassReplacer(rename_map).visit(module)
| GraphQLCodegenPlugin |
python | imageio__imageio | imageio/plugins/grab.py | {
"start": 1236,
"end": 1935
} | class ____(BaseGrabFormat):
"""The ScreenGrabFormat provided a means to grab screenshots using
the uri of "<screen>".
This functionality is provided via Pillow. Note that "<screen>" is
only supported on Windows and OS X.
Parameters for reading
----------------------
No parameters.
"""
def _can_read(self, request):
if request.filename != "<screen>":
return False
return bool(self._init_pillow())
def _get_data(self, index):
ImageGrab = self._init_pillow()
assert ImageGrab
pil_im = ImageGrab.grab()
assert pil_im is not None
im = np.asarray(pil_im)
return im, {}
| ScreenGrabFormat |
python | kamyu104__LeetCode-Solutions | Python/unique-number-of-occurrences.py | {
"start": 50,
"end": 420
} | class ____(object):
def uniqueOccurrences(self, arr):
"""
:type arr: List[int]
:rtype: bool
"""
count = collections.Counter(arr)
lookup = set()
for v in count.itervalues():
if v in lookup:
return False
lookup.add(v)
return True
# Time: O(n)
# Space: O(n)
| Solution |
python | gevent__gevent | src/gevent/_config.py | {
"start": 8180,
"end": 8352
} | class ____(object):
validate = staticmethod(validate_bool)
# Don't do string-to-list conversion.
_convert = staticmethod(convert_str_value_as_is)
| BoolSettingMixin |
python | huggingface__transformers | tests/utils/test_backbone_utils.py | {
"start": 1128,
"end": 12136
} | class ____(unittest.TestCase):
def test_get_aligned_output_features_output_indices(self):
stage_names = ["a", "b", "c"]
# Defaults to last layer if both are None
out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
self.assertEqual(out_features, ["c"])
self.assertEqual(out_indices, [2])
# Out indices set to match out features
out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features set to match out indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features selected from negative indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [-3, -1])
def test_verify_out_features_out_indices(self):
# Stage names must be set
with pytest.raises(ValueError, match="Stage_names must be set for transformers backbones"):
verify_out_features_out_indices(["a", "b"], (0, 1), None)
# Out features must be a list
with pytest.raises(ValueError, match="out_features must be a list got <class 'tuple'>"):
verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
# Out features must be a subset of stage names
with pytest.raises(
ValueError, match=r"out_features must be a subset of stage_names: \['a'\] got \['a', 'b'\]"
):
verify_out_features_out_indices(["a", "b"], [0, 1], ["a"])
# Out features must contain no duplicates
with pytest.raises(ValueError, match=r"out_features must not contain any duplicates, got \['a', 'a'\]"):
verify_out_features_out_indices(["a", "a"], None, ["a"])
# Out indices must be a list
with pytest.raises(ValueError, match="out_indices must be a list, got <class 'int'>"):
verify_out_features_out_indices(None, 0, ["a", "b"])
with pytest.raises(ValueError, match="out_indices must be a list, got <class 'tuple'>"):
verify_out_features_out_indices(None, (0, 1), ["a", "b"])
# Out indices must be a subset of stage names
with pytest.raises(
ValueError, match=r"out_indices must be valid indices for stage_names \['a'\], got \[0, 1\]"
):
verify_out_features_out_indices(None, [0, 1], ["a"])
# Out indices must contain no duplicates
with pytest.raises(ValueError, match=r"out_indices must not contain any duplicates, got \[0, 0\]"):
verify_out_features_out_indices(None, [0, 0], ["a"])
# Out features and out indices must be the same length
with pytest.raises(
ValueError, match="out_features and out_indices should have the same length if both are set"
):
verify_out_features_out_indices(["a", "b"], [0], ["a", "b", "c"])
# Out features should match out indices
with pytest.raises(
ValueError, match="out_features and out_indices should correspond to the same stages if both are set"
):
verify_out_features_out_indices(["a", "b"], [0, 2], ["a", "b", "c"])
# Out features and out indices should be in order
with pytest.raises(
ValueError,
match=r"out_features must be in the same order as stage_names, expected \['a', 'b'\] got \['b', 'a'\]",
):
verify_out_features_out_indices(["b", "a"], [0, 1], ["a", "b"])
with pytest.raises(
ValueError, match=r"out_indices must be in the same order as stage_names, expected \[-2, 1\] got \[1, -2\]"
):
verify_out_features_out_indices(["a", "b"], [1, -2], ["a", "b"])
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"], [0, 1, -1], ["a", "b", "c", "d"])
def test_backbone_mixin(self):
backbone = BackboneMixin()
backbone.stage_names = ["a", "b", "c"]
backbone._out_features = ["a", "c"]
backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [0, 2])
# Check out features and indices are updated correctly
backbone.out_features = ["a", "b"]
self.assertEqual(backbone.out_features, ["a", "b"])
self.assertEqual(backbone.out_indices, [0, 1])
backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [-3, -1])
@slow
@require_torch
def test_load_backbone_from_config(self):
"""
Test that load_backbone correctly loads a backbone from a backbone config.
"""
config = MaskFormerConfig(backbone_config=ResNetConfig(out_indices=(0, 2)))
backbone = load_backbone(config)
self.assertEqual(backbone.out_features, ["stem", "stage2"])
self.assertEqual(backbone.out_indices, (0, 2))
self.assertIsInstance(backbone, ResNetBackbone)
@slow
@require_torch
def test_load_backbone_from_checkpoint(self):
"""
Test that load_backbone correctly loads a backbone from a checkpoint.
"""
config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_config=None)
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, [4])
self.assertEqual(backbone.out_features, ["stage4"])
self.assertIsInstance(backbone, ResNetBackbone)
config = MaskFormerConfig(
backbone="resnet18",
use_timm_backbone=True,
)
backbone = load_backbone(config)
# We can't know ahead of time the exact output features and indices, or the layer names before
# creating the timm model, so it defaults to the last layer (-1,) and has a different layer name
self.assertEqual(backbone.out_indices, (-1,))
self.assertEqual(backbone.out_features, ["layer4"])
self.assertIsInstance(backbone, TimmBackbone)
@slow
@require_torch
def test_load_backbone_backbone_kwargs(self):
"""
Test that load_backbone correctly configures the loaded backbone with the provided kwargs.
"""
config = MaskFormerConfig(backbone="resnet18", use_timm_backbone=True, backbone_kwargs={"out_indices": (0, 1)})
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, (0, 1))
self.assertIsInstance(backbone, TimmBackbone)
config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_kwargs={"out_indices": (0, 2)})
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, (0, 2))
self.assertIsInstance(backbone, ResNetBackbone)
# Check can't be passed with a backone config
with pytest.raises(ValueError):
config = MaskFormerConfig(
backbone="microsoft/resnet-18",
backbone_config=ResNetConfig(out_indices=(0, 2)),
backbone_kwargs={"out_indices": (0, 1)},
)
@slow
@require_torch
def test_load_backbone_in_new_model(self):
"""
Tests that new model can be created, with its weights instantiated and pretrained backbone weights loaded.
"""
# Inherit from PreTrainedModel to ensure that the weights are initialized
class NewModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
self.layer_0 = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.layer_1 = torch.nn.Linear(config.hidden_size, config.hidden_size)
def get_equal_not_equal_weights(model_0, model_1):
equal_weights = []
not_equal_weights = []
for (k0, v0), (k1, v1) in zip(model_0.named_parameters(), model_1.named_parameters()):
self.assertEqual(k0, k1)
weights_are_equal = torch.allclose(v0, v1)
if weights_are_equal:
equal_weights.append(k0)
else:
not_equal_weights.append(k0)
return equal_weights, not_equal_weights
config = MaskFormerConfig(use_pretrained_backbone=False, backbone="microsoft/resnet-18")
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "normalization" not in w]
self.assertEqual(len(equal_weights), 0)
self.assertEqual(len(not_equal_weights), 24)
# Now we create a new model with backbone weights that are pretrained
config.use_pretrained_backbone = True
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "normalization" not in w]
self.assertEqual(len(equal_weights), 20)
# Linear layers are still initialized randomly
self.assertEqual(len(not_equal_weights), 4)
# Check loading in timm backbone
config = DetrConfig(use_pretrained_backbone=False, backbone="resnet18", use_timm_backbone=True)
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w]
self.assertEqual(len(equal_weights), 0)
self.assertEqual(len(not_equal_weights), 24)
# Now we create a new model with backbone weights that are pretrained
config.use_pretrained_backbone = True
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w]
self.assertEqual(len(equal_weights), 20)
# Linear layers are still initialized randomly
self.assertEqual(len(not_equal_weights), 4)
| BackboneUtilsTester |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/tests/test_code_hierarchy_no_skeleton.py | {
"start": 6074,
"end": 13882
} | class ____:
@bar
@barfoo
def bar() -> None:
print("bar")"""
)
assert chunks[1].metadata["module"] == "example.foo"
assert chunks[1].metadata["inclusive_scopes"] == [
{"name": "Foo", "type": "class_definition", "signature": "class Foo:"}
]
assert isinstance(chunks[1].relationships[NodeRelationship.PARENT], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.PARENT]).node_id
== chunks[0].id_
)
assert [c.node_id for c in chunks[1].relationships[NodeRelationship.CHILD]] == [
chunks[2].id_,
]
assert isinstance(chunks[1].relationships[NodeRelationship.SOURCE], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[1].relationships
assert NodeRelationship.NEXT not in chunks[1].relationships
# This is the first method scope
assert (
chunks[2].text
== """\
def bar() -> None:
print("bar")"""
)
assert chunks[2].metadata["module"] == "example.foo"
assert chunks[2].metadata["inclusive_scopes"] == [
{"name": "Foo", "type": "class_definition", "signature": "class Foo:"},
{
"name": "bar",
"type": "function_definition",
"signature": "def bar() -> None:",
},
]
assert isinstance(chunks[2].relationships[NodeRelationship.PARENT], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.PARENT]).node_id
== chunks[1].id_
)
assert chunks[2].relationships[NodeRelationship.CHILD] == []
assert isinstance(chunks[2].relationships[NodeRelationship.SOURCE], RelatedNodeInfo)
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[2].relationships
assert NodeRelationship.NEXT not in chunks[2].relationships
def test_html_code_splitter() -> None:
"""Test case for code splitting using HTML."""
if "CI" in os.environ:
return
code_splitter = CodeHierarchyNodeParser(
language="html",
chunk_min_characters=len(" <title>My Example Page</title>") + 1,
skeleton=False,
)
text = """\
<!DOCTYPE html>
<html>
<head>
<title>My Example Page</title>
</head>
<body>
<h1>Welcome to My Example Page</h1>
<p>This is a basic HTML page example.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<img src="https://example.com/image.jpg" alt="Example Image">
</body>
</html>"""
text_node = TextNode(
text=text,
)
chunks = code_splitter.get_nodes_from_documents([text_node])
# This is the DOCTYPE scope
assert chunks[0].text == text
assert chunks[0].metadata["inclusive_scopes"] == []
assert NodeRelationship.PARENT not in chunks[0].relationships
assert [c.node_id for c in chunks[0].relationships[NodeRelationship.CHILD]] == [
chunks[1].id_
]
assert (
cast(RelatedNodeInfo, chunks[0].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[0].relationships
assert NodeRelationship.NEXT not in chunks[0].relationships
# This is the html scope
assert (
chunks[1].text
== """\
<html>
<head>
<title>My Example Page</title>
</head>
<body>
<h1>Welcome to My Example Page</h1>
<p>This is a basic HTML page example.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<img src="https://example.com/image.jpg" alt="Example Image">
</body>
</html>"""
)
assert chunks[1].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"}
]
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.PARENT]).node_id
== chunks[0].id_
)
assert [c.node_id for c in chunks[1].relationships[NodeRelationship.CHILD]] == [
chunks[2].id_,
chunks[3].id_,
]
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[1].relationships
assert NodeRelationship.NEXT not in chunks[1].relationships
# Head chunk
assert (
chunks[2].text
== """\
<head>
<title>My Example Page</title>
</head>"""
)
assert chunks[2].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"},
{"name": "head", "type": "element", "signature": "<head>"},
]
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.PARENT]).node_id
== chunks[1].id_
) # Parent should be <html>
assert [
c.node_id for c in chunks[2].relationships[NodeRelationship.CHILD]
] == [] # Child should be <title>
assert (
cast(RelatedNodeInfo, chunks[2].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[2].relationships
assert NodeRelationship.NEXT not in chunks[2].relationships
# Test the fourth chunk (<body> tag and its content)
assert (
chunks[3].text
== """\
<body>
<h1>Welcome to My Example Page</h1>
<p>This is a basic HTML page example.</p>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<img src="https://example.com/image.jpg" alt="Example Image">
</body>"""
)
assert chunks[3].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"},
{"name": "body", "type": "element", "signature": "<body>"},
]
assert (
cast(RelatedNodeInfo, chunks[3].relationships[NodeRelationship.PARENT]).node_id
== chunks[1].id_
) # Parent should be <html>
assert chunks[5].id_ in [
c.node_id for c in chunks[3].relationships[NodeRelationship.CHILD]
]
assert (
cast(RelatedNodeInfo, chunks[3].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[3].relationships
assert NodeRelationship.NEXT not in chunks[3].relationships
# Test the seventh chunk (<ul> tag and its content)
assert (
chunks[6].text
== """\
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>"""
)
assert chunks[6].metadata["inclusive_scopes"] == [
{"name": "html", "type": "element", "signature": "<html>"},
{"name": "body", "type": "element", "signature": "<body>"},
{"name": "ul", "type": "element", "signature": "<ul>"},
]
assert (
cast(RelatedNodeInfo, chunks[6].relationships[NodeRelationship.PARENT]).node_id
== chunks[3].id_
) # Parent should be <body>
assert [c.node_id for c in chunks[6].relationships[NodeRelationship.CHILD]] == []
assert (
cast(RelatedNodeInfo, chunks[6].relationships[NodeRelationship.SOURCE]).node_id
== text_node.id_
)
assert NodeRelationship.PREVIOUS not in chunks[6].relationships
assert NodeRelationship.NEXT not in chunks[6].relationships
def test_typescript_code_splitter() -> None:
"""Test case for code splitting using TypeScript."""
if "CI" in os.environ:
return
code_splitter = CodeHierarchyNodeParser(
language="typescript", skeleton=False, chunk_min_characters=0
)
text = """\
function foo() {
console.log("bar");
}
| Foo |
python | getsentry__sentry | src/sentry/preprod/api/bases/preprod_artifact_endpoint.py | {
"start": 666,
"end": 847
} | class ____(APIException):
status_code = status.HTTP_404_NOT_FOUND
default_detail = "The requested base preprod artifact does not exist"
| BasePreprodArtifactResourceDoesNotExist |
python | tensorflow__tensorflow | tensorflow/python/training/slot_creator_test.py | {
"start": 1569,
"end": 12177
} | class ____(test.TestCase):
def testCreateSlotFromVariable(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
slot = slot_creator.create_slot(v, initialized_value(v), name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([1.0, 2.5], self.evaluate(slot))
def testCreateSlotFromTensor(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = constant_op.constant([1.0, 2.5], name="const")
slot = slot_creator.create_slot(v, v * 2, name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([2.0, 5.0], self.evaluate(slot))
def testCreateZerosSlotFromVariable(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
def testCreateZerosSlotFromDynamicShapedVariable(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
dyn_shape = constant_op.constant([2], dtype=dtypes.int32)
dyn_shape = array_ops.placeholder_with_default(dyn_shape,
shape=[None])
v = variable_scope.get_variable(
"var",
initializer=random_ops.random_uniform(dyn_shape,
dtype=dtypes.float64),
validate_shape=False)
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
def testCreateZerosSlotFromTensor(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = constant_op.constant([1.0, 2.5], name="const")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(v, name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
def testCreateZerosSlotFromDynamicShapedTensor(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = random_ops.random_uniform([2], dtype=dtypes.float64)
v = array_ops.placeholder_with_default(v, shape=[None], name="const")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
def testCreateSlotFromVariableRespectsScope(self):
# See discussion on #2740.
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
with variable_scope.variable_scope("scope"):
v = variables.Variable([1.0, 2.5], name="var")
slot = slot_creator.create_slot(v, initialized_value(v), name="slot")
self.assertEqual("scope/scope/var/slot", slot.op.name)
def testCreateSlotFromFirstMDimensionVariable(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.test_session():
s = variables.Variable([1.0, 2.5], name="var")
p_v = variable_scope.get_variable(
"var",
shape=[2, 2],
partitioner=partitioned_variables.fixed_size_partitioner(2))
for i, v in enumerate(p_v):
slot = slot_creator.create_slot(v, initialized_value(s), name="slot")
si = slot._save_slice_info
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([1.0, 2.5], slot)
self.assertAllEqual([2], si.full_shape)
self.assertAllEqual([i], si.var_offset)
self.assertAllEqual([1], si.var_shape)
def testCreateSlotFromScalarVariable(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.test_session():
s = variables.Variable(1.0, name="var")
p_v = variable_scope.get_variable(
"var",
shape=[2, 2],
partitioner=partitioned_variables.fixed_size_partitioner(2))
for i, v in enumerate(p_v):
slot = slot_creator.create_slot(v, initialized_value(s), name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
self.assertEqual([], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual(1.0, slot)
def testCreateSlotFromVariableCopyXlaSharding(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
v = xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
slot = slot_creator.create_slot(
v, initialized_value(v), name="slot", copy_xla_sharding=True)
self.assertEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(slot))
def testCreateZerosSlotFromVariableCopyXlaSharding(self):
# slot_creator is used only in optimizer V1.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
v = xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64, copy_xla_sharding=True)
self.assertEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(slot))
def testCreateSlotWithoutXlaSharding(self):
# slot_creator is used only in optimizer V1.
# The SPMD sharding annotations should not be copied since the primary
# variable and slot variable have different ranks.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
v = xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
with ops.control_dependencies(None):
slot = slot_creator.create_slot(
v,
constant_op.constant(10, name="const"),
name="slot",
copy_xla_sharding=True)
self.assertIsNone(xla_sharding.get_tensor_sharding(slot))
self.assertNotEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(slot))
def testCreateSlotWithCustomReplicatedXlaSharding(self):
# slot_creator is used only in optimizer V1.
# We insert our own custom replicated XLA sharding that overrides the SPMD
# sharding copied over by the slot_creator.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
v = xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64, copy_xla_sharding=True)
slot = xla_sharding.replicate(slot, use_sharding_op=False)
self.assertNotEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(slot))
slot_sharding = xla_sharding.get_tensor_sharding(slot)
slot_proto = xla_data_pb2.OpSharding()
slot_proto.ParseFromString(slot_sharding)
self.assertEqual(
slot_proto,
xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.REPLICATED))
def testCreateSlotWithCustomSplitXlaSharding(self):
# slot_creator is used only in optimizer V1.
# We insert our own custom split XLA sharding that overrides the SPMD
# sharding copied over by the slot_creator.
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable([1.0, 2.5, 10.0, 15.1], name="var")
v = xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64, copy_xla_sharding=True)
slot = xla_sharding.split(
slot, split_dimension=0, num_devices=4, use_sharding_op=False)
self.assertNotEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(slot))
slot_sharding = xla_sharding.get_tensor_sharding(slot)
slot_proto = xla_data_pb2.OpSharding()
slot_proto.ParseFromString(slot_sharding)
self.assertEqual(
slot_proto,
xla_data_pb2.OpSharding(
type=xla_data_pb2.OpSharding.OTHER,
tile_assignment_dimensions=[4],
tile_assignment_devices=range(4)))
if __name__ == "__main__":
test.main()
| SlotCreatorTest |
python | apache__airflow | providers/discord/src/airflow/providers/discord/hooks/discord_webhook.py | {
"start": 7125,
"end": 10423
} | class ____(HttpAsyncHook):
"""
This hook allows you to post messages to Discord using incoming webhooks using async HTTP.
Takes a Discord connection ID with a default relative webhook endpoint. The
default endpoint can be overridden using the webhook_endpoint parameter
(https://discordapp.com/developers/docs/resources/webhook).
Each Discord webhook can be pre-configured to use a specific username and
avatar_url. You can override these defaults in this hook.
:param http_conn_id: Http connection ID with host as "https://discord.com/api/" and
default webhook endpoint in the extra field in the form of
{"webhook_endpoint": "webhooks/{webhook.id}/{webhook.token}"}
:param webhook_endpoint: Discord webhook endpoint in the form of
"webhooks/{webhook.id}/{webhook.token}"
:param message: The message you want to send to your Discord channel
(max 2000 characters)
:param username: Override the default username of the webhook
:param avatar_url: Override the default avatar of the webhook
:param tts: Is a text-to-speech message
:param proxy: Proxy to use to make the Discord webhook call
"""
default_headers = {
"Content-Type": "application/json",
}
conn_name_attr = "http_conn_id"
default_conn_name = "discord_default"
conn_type = "discord"
hook_name = "Async Discord"
def __init__(
self,
*,
http_conn_id: str = "",
webhook_endpoint: str | None = None,
message: str = "",
username: str | None = None,
avatar_url: str | None = None,
tts: bool = False,
proxy: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.http_conn_id = http_conn_id
self.webhook_endpoint = webhook_endpoint
self.message = message
self.username = username
self.avatar_url = avatar_url
self.tts = tts
self.proxy = proxy
self.handler = DiscordCommonHandler()
async def _get_webhook_endpoint(self) -> str:
"""
Return the default webhook endpoint or override if a webhook_endpoint is manually supplied.
:param http_conn_id: The provided connection ID
:param webhook_endpoint: The manually provided webhook endpoint
:return: Webhook endpoint (str) to use
"""
conn = None
if not self.webhook_endpoint and self.http_conn_id:
conn = await get_async_connection(self.http_conn_id)
return self.handler.get_webhook_endpoint(conn, self.webhook_endpoint)
async def execute(self) -> None:
"""Execute the Discord webhook call."""
webhook_endpoint = await self._get_webhook_endpoint()
discord_payload = self.handler.build_discord_payload(
tts=self.tts, message=self.message, username=self.username, avatar_url=self.avatar_url
)
async with aiohttp.ClientSession(proxy=self.proxy) as session:
await super().run(
session=session,
endpoint=webhook_endpoint,
data=discord_payload,
headers=self.default_headers,
)
| DiscordWebhookAsyncHook |
python | pytorch__pytorch | torchgen/_autoheuristic/mm/train_decision_mm.py | {
"start": 299,
"end": 1947
} | class ____(AHTrainDecisionTree):
def __init__(self):
super().__init__()
def add_new_features(self, results):
ops = mm_operations()
added_categorical_features = []
for op in ops:
results[op.name] = results.apply(op.func, axis=1)
if op.is_categorical:
added_categorical_features.append(op.name)
return (results, added_categorical_features)
def get_default_config(self, row):
return "extern_mm"
def get_allowed_wrong_prediction_pct(self):
return 1.0
def get_test_and_val_size(self):
return (0.01, 0.19)
def get_grid_search_values(self):
return {"max_depth": [5], "min_samples_leaf": [0.01], "criterion": ["entropy"]}
def add_training_data(self, df_train, datasets):
# add each dataset to the training data 3 times
# we really want to make sure that the heuristic performs well on these datasets
df_timm_train = datasets["train_timm"]
df_timm_train = df_timm_train.loc[df_timm_train.index.repeat(3)].reset_index(
drop=True
)
df_hf_train = datasets["train_hf"]
df_hf_train = df_hf_train.loc[df_hf_train.index.repeat(3)].reset_index(
drop=True
)
df_train = datasets["train"]
df_train = pd.concat(
[df_train, df_timm_train, df_hf_train],
ignore_index=True,
)
return df_train
def ranking_always_included_choices(self):
return ["extern_mm"]
if __name__ == "__main__":
train = AHTrainDecisionTreeMM()
train.generate_heuristic()
| AHTrainDecisionTreeMM |
python | aimacode__aima-python | learning.py | {
"start": 27263,
"end": 28843
} | class ____:
"""
Single Unit of Multiple Layer Neural Network
inputs: Incoming connections
weights: Weights to incoming connections
"""
def __init__(self, activation=sigmoid, weights=None, inputs=None):
self.weights = weights or []
self.inputs = inputs or []
self.value = None
self.activation = activation
def network(input_units, hidden_layer_sizes, output_units, activation=sigmoid):
"""
Create Directed Acyclic Network of given number layers.
hidden_layers_sizes : List number of neuron units in each hidden layer
excluding input and output layers
"""
layers_sizes = [input_units] + hidden_layer_sizes + [output_units]
net = [[NNUnit(activation) for _ in range(size)] for size in layers_sizes]
n_layers = len(net)
# make connection
for i in range(1, n_layers):
for n in net[i]:
for k in net[i - 1]:
n.inputs.append(k)
n.weights.append(0)
return net
def init_examples(examples, idx_i, idx_t, o_units):
inputs, targets = {}, {}
for i, e in enumerate(examples):
# input values of e
inputs[i] = [e[i] for i in idx_i]
if o_units > 1:
# one-hot representation of e's target
t = [0 for i in range(o_units)]
t[e[idx_t]] = 1
targets[i] = t
else:
# target value of e
targets[i] = [e[idx_t]]
return inputs, targets
def find_max_node(nodes):
return nodes.index(max(nodes, key=lambda node: node.value))
| NNUnit |
python | huggingface__transformers | src/transformers/models/vitmatte/image_processing_vitmatte.py | {
"start": 1413,
"end": 13353
} | class ____(BaseImageProcessor):
r"""
Constructs a ViTMatte image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to make the width and height divisible by `size_divisor`. Can be overridden
by the `do_pad` parameter in the `preprocess` method.
size_divisor (`int`, *optional*, defaults to 32):
The width and height of the image will be padded to be divisible by this number.
"""
model_input_names = ["pixel_values"]
valid_kwargs = VitMatteImageProcessorKwargs
def __init__(
self,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: bool = True,
size_divisor: int = 32,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.do_normalize = do_normalize
self.do_pad = do_pad
self.rescale_factor = rescale_factor
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
size_divisibility = kwargs.get("size_divisibility")
self.size_divisor = size_divisibility if size_divisibility is not None else size_divisor
def pad_image(
self,
image: np.ndarray,
size_divisor: int = 32,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Args:
image (`np.ndarray`):
Image to pad.
size_divisor (`int`, *optional*, defaults to 32):
The width and height of the image will be padded to be divisible by this number.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
height, width = get_image_size(image, input_data_format)
pad_height = 0 if height % size_divisor == 0 else size_divisor - height % size_divisor
pad_width = 0 if width % size_divisor == 0 else size_divisor - width % size_divisor
if pad_width + pad_height > 0:
padding = ((0, pad_height), (0, pad_width))
image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_data_format)
return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
trimaps: ImageInput,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
trimaps (`ImageInput`):
Trimap to preprocess.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The size divisibility to pad the image to if `do_pad` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_pad = do_pad if do_pad is not None else self.do_pad
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
images = make_flat_list_of_images(images)
trimaps = make_flat_list_of_images(trimaps, expected_ndims=2)
if not valid_images(trimaps):
raise ValueError("Invalid trimap type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
trimaps = [to_numpy_array(trimap) for trimap in trimaps]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
trimaps = [
self.rescale(image=trimap, scale=rescale_factor, input_data_format=input_data_format)
for trimap in trimaps
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
# concatenate images and trimaps
axis = -1 if input_data_format == ChannelDimension.LAST else 0
images = [
np.concatenate([image, np.expand_dims(trimap, axis=axis)], axis=axis)
for image, trimap in zip(images, trimaps)
]
if do_pad:
images = [
self.pad_image(image, size_divisor=size_divisor, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image=image, channel_dim=data_format, input_channel_dim=input_data_format)
for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["VitMatteImageProcessor"]
| VitMatteImageProcessor |
python | mlflow__mlflow | examples/paddle/train_low_level_api.py | {
"start": 764,
"end": 2542
} | class ____(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc = Linear(in_features=13, out_features=1)
@paddle.jit.to_static
def forward(self, inputs):
x = self.fc(inputs)
return x
if __name__ == "__main__":
model = Regressor()
model.train()
training_data, test_data = load_data()
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
EPOCH_NUM = 10
BATCH_SIZE = 10
for epoch_id in range(EPOCH_NUM):
np.random.shuffle(training_data)
mini_batches = [
training_data[k : k + BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)
]
for iter_id, mini_batch in enumerate(mini_batches):
x = np.array(mini_batch[:, :-1]).astype("float32")
y = np.array(mini_batch[:, -1:]).astype("float32")
house_features = paddle.to_tensor(x)
prices = paddle.to_tensor(y)
predicts = model(house_features)
loss = F.square_error_cost(predicts, label=prices)
avg_loss = paddle.mean(loss)
if iter_id % 20 == 0:
print(f"epoch: {epoch_id}, iter: {iter_id}, loss is: {avg_loss.numpy()}")
avg_loss.backward()
opt.step()
opt.clear_grad()
with mlflow.start_run() as run:
mlflow.log_param("learning_rate", 0.01)
mlflow.paddle.log_model(model, name="model")
print(f"Model saved in run {mlflow.active_run().info.run_id}")
# load model
model_path = mlflow.get_artifact_uri("model")
pd_model = mlflow.paddle.load_model(model_path)
np_test_data = np.array(test_data).astype("float32")
print(pd_model(np_test_data[:, :-1]))
| Regressor |
python | tensorflow__tensorflow | tensorflow/python/ops/summary_ops_v2.py | {
"start": 16667,
"end": 54033
} | class ____(SummaryWriter):
"""A summary writer that does nothing, for create_noop_writer()."""
def set_as_default(self, step=None):
pass
@tf_contextlib.contextmanager
def as_default(self, step=None):
yield
def init(self):
pass
def flush(self):
pass
def close(self):
pass
@tf_export(v1=["summary.initialize"])
def initialize(
graph=None, # pylint: disable=redefined-outer-name
session=None):
"""Initializes summary writing for graph execution mode.
This operation is a no-op when executing eagerly.
This helper method provides a higher-level alternative to using
`tf.contrib.summary.summary_writer_initializer_op` and
`tf.contrib.summary.graph`.
Most users will also want to call `tf.compat.v1.train.create_global_step`
which can happen before or after this function is called.
Args:
graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
This function will not write the default graph by default. When
writing to an event log file, the associated step will be zero.
session: So this method can call `tf.Session.run`. This defaults
to `tf.compat.v1.get_default_session`.
Raises:
RuntimeError: If the current thread has no default
`tf.contrib.summary.SummaryWriter`.
ValueError: If session wasn't passed and no default session.
"""
if context.executing_eagerly():
return
if _summary_state.writer is None:
raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
if session is None:
session = ops.get_default_session()
if session is None:
raise ValueError("Argument `session must be passed if no default "
"session exists")
session.run(summary_writer_initializer_op())
if graph is not None:
data = _serialize_graph(graph)
x = array_ops.placeholder(dtypes.string)
session.run(graph_v1(x, 0), feed_dict={x: data})
@tf_export("summary.create_file_writer", v1=[])
def create_file_writer_v2(
logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None,
experimental_trackable=False,
experimental_mesh=None,
):
"""Creates a summary file writer for the given log directory.
Args:
logdir: a string specifying the directory in which to write an event file.
max_queue: the largest number of summaries to keep in a queue; will flush
once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: a name for the op that creates the writer.
experimental_trackable: a boolean that controls whether the returned writer
will be a `TrackableResource`, which makes it compatible with SavedModel
when used as a `tf.Module` property.
experimental_mesh: a `tf.experimental.dtensor.Mesh` instance. When running
with DTensor, the mesh (experimental_mesh.host_mesh()) will be used for
bringing all the DTensor logging from accelerator to CPU mesh.
Returns:
A SummaryWriter object.
"""
# TODO(b/291655717): Revisit the experimental_mesh once we have soft placment.
if logdir is None:
raise ValueError("Argument `logdir` cannot be None")
inside_function = ops.inside_function()
with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
# Run init inside an init_scope() to hoist it out of tf.functions.
with ops.init_scope():
if context.executing_eagerly():
_check_create_file_writer_args(
inside_function,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix)
logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
def create_fn():
# Use unique shared_name to prevent resource sharing in eager mode, but
# otherwise use a fixed shared_name to allow SavedModel TF 1.x loading.
if context.executing_eagerly():
shared_name = context.anonymous_name()
else:
shared_name = ops.name_from_scope_name(scope) # pylint: disable=protected-access
return gen_summary_ops.summary_writer(
shared_name=shared_name, name=name)
init_op_fn = functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix)
if experimental_trackable:
return _TrackableResourceSummaryWriter(
create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh
)
else:
return _ResourceSummaryWriter(
create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh
)
def create_file_writer(logdir,
                       max_queue=None,
                       flush_millis=None,
                       filename_suffix=None,
                       name=None):
  """Creates a summary file writer in the current context under the given name.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
      which writes to the directory named by the string. If None, returns
      a mock object which acts like a summary writer but does nothing,
      useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: Shared name for this SummaryWriter resource stored to default
      Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
      summary writer resource with this shared name already exists, the returned
      SummaryWriter wraps that resource and the other arguments have no effect.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    # No-op writer: implements the writer interface but discards all writes.
    return _NoopSummaryWriter()
  logdir = str(logdir)
  # Summary writer resources are always placed on the CPU, regardless of the
  # surrounding device scope.
  with ops.device("cpu:0"):
    if max_queue is None:
      max_queue = constant_op.constant(10)
    if flush_millis is None:
      flush_millis = constant_op.constant(2 * 60 * 1000)
    if filename_suffix is None:
      filename_suffix = constant_op.constant(".v2")
    if name is None:
      name = "logdir:" + logdir
    # A fixed shared_name (derived from the logdir by default) means repeated
    # calls for the same logdir reuse the same underlying resource; see the
    # `name` note in the docstring.
    resource = gen_summary_ops.summary_writer(shared_name=name)
    return _LegacyResourceSummaryWriter(
        resource=resource,
        init_op_fn=functools.partial(
            gen_summary_ops.create_summary_file_writer,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix))
@tf_export("summary.create_noop_writer", v1=[])
def create_noop_writer():
"""Returns a summary writer that does nothing.
This is useful as a placeholder in code that expects a context manager.
"""
return _NoopSummaryWriter()
def _cleanse_string(name, pattern, value):
  """Validates a string argument against `pattern`, then converts it to a tensor.

  Non-string values skip the pattern check and go straight to conversion.
  Raises ValueError when a string `value` does not match `pattern`.
  """
  value_is_string = isinstance(value, str)
  if value_is_string and not pattern.search(value):
    raise ValueError(f"{name} ({value}) must match {pattern.pattern}")
  return ops.convert_to_tensor(value, dtypes.string)
def _nothing():
  """Convenient else branch for when summaries do not record."""
  # Returns a bool tensor matching the `True` returned on a successful write,
  # so both branches of the surrounding smart_cond have the same signature.
  return constant_op.constant(False)
@tf_export(v1=["summary.all_v2_summary_ops"])
def all_v2_summary_ops():
"""Returns all V2-style summary ops defined in the current default graph.
This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
does *not* include TF 1.x tf.summary ops.
Returns:
List of summary ops, or None if called under eager execution.
"""
if context.executing_eagerly():
return None
return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers.

  Returns:
    The initializer ops.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "tf.contrib.summary.summary_writer_initializer_op is only "
        "supported in graph mode.")
  # Writers register their init ops in this collection when constructed.
  return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
# Matches any character NOT permitted in a name scope; summary_scope() strips
# these from the user-provided name before opening the scope (the summary tag
# keeps the original characters).
_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")
@tf_export("summary.experimental.summary_scope", v1=[])
@tf_contextlib.contextmanager
def summary_scope(name, default_name="summary", values=None):
"""Experimental context manager for use when defining a custom summary op.
This behaves similarly to `tf.name_scope`, except that it returns a generated
summary tag in addition to the scope name. The tag is structurally similar to
the scope name - derived from the user-provided name, prefixed with enclosing
name scopes if any - but we relax the constraint that it be uniquified, as
well as the character set limitation (so the user-provided name can contain
characters not legal for scope names; in the scope name these are removed).
This makes the summary tag more predictable and consistent for the user.
For example, to define a new summary op called `my_op`:
```python
def my_op(name, my_value, step):
with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
my_value = tf.convert_to_tensor(my_value)
return tf.summary.write(tag, my_value, step=step)
```
Args:
name: string name for the summary.
default_name: Optional; if provided, used as default name of the summary.
values: Optional; passed as `values` parameter to name_scope.
Yields:
A tuple `(tag, scope)` as described above.
"""
name = name or default_name
current_scope = ops.get_name_scope()
tag = current_scope + "/" + name if current_scope else name
# Strip illegal characters from the scope name, and if that leaves nothing,
# use None instead so we pick up the default name.
name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None
with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope:
yield tag, scope
@tf_export("summary.write", v1=[])
def write(tag, tensor, step=None, metadata=None, name=None):
"""Writes a generic summary to the default SummaryWriter if one exists.
This exists primarily to support the definition of type-specific summary ops
like scalar() and image(), and is not intended for direct use unless defining
a new type-specific summary op.
Args:
tag: string tag used to identify the summary (e.g. in TensorBoard), usually
generated with `tf.summary.summary_scope`
tensor: the Tensor holding the summary data to write or a callable that
returns this Tensor. If a callable is passed, it will only be called when
a default SummaryWriter exists and the recording condition specified by
`record_if()` is met.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
metadata: Optional SummaryMetadata, as a proto or serialized bytes
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_summary") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if metadata is None:
serialized_metadata = b""
elif hasattr(metadata, "SerializeToString"):
serialized_metadata = metadata.SerializeToString()
else:
serialized_metadata = metadata
def record():
"""Record the actual summary and return True."""
if step is None:
raise ValueError("No step set. Please specify one either through the "
"`step` argument or through "
"tf.summary.experimental.set_step()")
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
summary_tensor = tensor() if callable(tensor) else array_ops.identity(
tensor)
# For DTensor, the device scope above doesn't work, we need to
# explicitly copy the resource tensor to host mesh, which is a cpu
# mesh.
writer = _summary_state.writer
summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor)
step_value = _maybe_convert_tensor_to_dtensor(writer, step)
write_summary_op = gen_summary_ops.write_summary(
writer._resource, # pylint: disable=protected-access
step_value,
summary_value,
tag,
serialized_metadata,
name=scope,
)
with ops.control_dependencies([write_summary_op]):
return constant_op.constant(True)
op = smart_cond.smart_cond(
should_record_summaries(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
"""Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.
Experimental: this exists to support the usage of V1-style manual summary
writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
with the V2 summary writing API.
Args:
tensor: the string Tensor holding one or more serialized `Summary` protobufs
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_raw_pb") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if step is None:
raise ValueError("No step set. Please specify one either through the "
"`step` argument or through "
"tf.summary.experimental.set_step()")
def record():
"""Record the actual summary and return True."""
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
raw_summary_op = gen_summary_ops.write_raw_proto_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
step,
array_ops.identity(tensor),
name=scope)
with ops.control_dependencies([raw_summary_op]):
return constant_op.constant(True)
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
should_record_summaries(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  name_scope = ops.get_name_scope()
  if name_scope:
    # Add a slash to allow reentering the name scope.
    name_scope += "/"
  def record():
    # Re-enter the caller's name scope so the generated tag reflects where the
    # summary was requested, then delegate the actual write to `function`.
    with ops.name_scope(name_scope), summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)
  if _summary_state.writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    op = smart_cond.smart_cond(
        _legacy_contrib_should_record_summaries(), record, _nothing, name="")
    if not context.executing_eagerly():
      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
def generic(name, tensor, metadata=None, family=None, step=None):
  """Writes a tensor summary if possible."""
  def function(tag, scope):
    if metadata is None:
      serialized_metadata = constant_op.constant("")
    elif hasattr(metadata, "SerializeToString"):
      serialized_metadata = constant_op.constant(metadata.SerializeToString())
    else:
      # Assume the caller passed already-serialized metadata.
      serialized_metadata = metadata
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        array_ops.identity(tensor),
        tag,
        serialized_metadata,
        name=scope)
  # summary_writer_function handles tag generation and the recording cond.
  return summary_writer_function(name, tensor, function, family=family)
def scalar(name, tensor, family=None, step=None):
  """Writes a scalar summary if possible.

  Unlike `tf.contrib.summary.generic` this op may change the dtype
  depending on the writer, for both practical and efficiency concerns.

  Args:
    name: An arbitrary name for this summary.
    tensor: A `tf.Tensor` Must be one of the following types:
      `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
      `int8`, `uint16`, `half`, `uint32`, `uint64`.
    family: Optional, the summary's family.
    step: The `int64` monotonic step variable, which defaults
      to `tf.compat.v1.train.get_global_step`.

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.
  """
  def function(tag, scope):
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_scalar_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)
  # `function` only runs if recording is enabled for the current context.
  return summary_writer_function(name, tensor, function, family=family)
def histogram(name, tensor, family=None, step=None):
  """Writes a histogram summary if possible."""
  def function(tag, scope):
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_histogram_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)
  # Deferred: `function` only runs when recording is enabled.
  return summary_writer_function(name, tensor, function, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
  """Writes an image summary if possible."""
  def function(tag, scope):
    # Default `bad_color` is opaque red in RGBA uint8.
    bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
                  if bad_color is None else bad_color)
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_image_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        bad_color_,
        max_images,
        name=scope)
  return summary_writer_function(name, tensor, function, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
  """Writes an audio summary if possible."""
  def function(tag, scope):
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_audio_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)
  # Tag generation and the recording condition are handled by the helper.
  return summary_writer_function(name, tensor, function, family=family)
def graph_v1(param, step=None, name=None):
  """Writes a TensorFlow graph to the summary interface.

  The graph summary is, strictly speaking, not a summary. Conditions
  like `tf.summary.should_record_summaries` do not apply. Only
  a single graph can be associated with a particular run. If multiple
  graphs are written, then only the last one will be considered by
  TensorBoard.

  When not using eager execution mode, the user should consider passing
  the `graph` parameter to `tf.compat.v1.summary.initialize` instead of
  calling this function. Otherwise special care needs to be taken when
  using the graph to record the graph.

  Args:
    param: A `tf.Tensor` containing a serialized graph proto. When
      eager execution is enabled, this function will automatically
      coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types.
    step: The global step variable. This doesn't have useful semantics
      for graph summaries, but is used anyway, due to the structure of
      event log files. This defaults to the global step.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.

  Raises:
    TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
  """
  if not context.executing_eagerly() and not isinstance(
      param, tensor_lib.Tensor
  ):
    raise TypeError(
        "graph() needs a argument `param` to be tf.Tensor "
        "(e.g. tf.placeholder) in graph mode, but received "
        f"param={param} of type {type(param).__name__}."
    )
  writer = _summary_state.writer
  if writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
      # Convenience coercion: serialize graph objects on the fly.
      tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
    else:
      tensor = array_ops.identity(param)
    return gen_summary_ops.write_graph_summary(
        writer._resource, _choose_step(step), tensor, name=name)  # pylint: disable=protected-access
@tf_export("summary.graph", v1=[])
def graph(graph_data):
"""Writes a TensorFlow graph summary.
Write an instance of `tf.Graph` or `tf.compat.v1.GraphDef` as summary only
in an eager mode. Please prefer to use the trace APIs (`tf.summary.trace_on`,
`tf.summary.trace_off`, and `tf.summary.trace_export`) when using
`tf.function` which can automatically collect and record graphs from
executions.
Usage Example:
```py
writer = tf.summary.create_file_writer("/tmp/mylogs")
@tf.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with writer.as_default():
tf.summary.graph(f.get_concrete_function().graph)
# Another example: in a very rare use case, when you are dealing with a TF v1
# graph.
graph = tf.Graph()
with graph.as_default():
c = tf.constant(30.0)
with writer.as_default():
tf.summary.graph(graph)
```
Args:
graph_data: The TensorFlow graph to write, as a `tf.Graph` or a
`tf.compat.v1.GraphDef`.
Returns:
True on success, or False if no summary was written because no default
summary writer was available.
Raises:
ValueError: `graph` summary API is invoked in a graph mode.
"""
if not context.executing_eagerly():
raise ValueError("graph() cannot be invoked inside a graph context.")
writer = _summary_state.writer
if writer is None:
return constant_op.constant(False)
with ops.device("cpu:0"):
if not should_record_summaries():
return constant_op.constant(False)
if isinstance(graph_data, (ops.Graph, graph_pb2.GraphDef)):
tensor = ops.convert_to_tensor(
_serialize_graph(graph_data), dtypes.string)
else:
raise ValueError("Argument 'graph_data' is not tf.Graph or "
"tf.compat.v1.GraphDef. Received graph_data="
f"{graph_data} of type {type(graph_data).__name__}.")
gen_summary_ops.write_graph_summary(
writer._resource, # pylint: disable=protected-access
# Graph does not have step. Set to 0.
0,
tensor,
)
return constant_op.constant(True)
def import_event(tensor, name=None):
  """Writes a `tf.compat.v1.Event` binary proto.

  This can be used to import existing event logs into a new summary writer sink.
  Please note that this is lower level than the other summary functions and
  will ignore the `tf.summary.should_record_summaries` setting.

  Args:
    tensor: A `tf.Tensor` of type `string` containing a serialized
      `tf.compat.v1.Event` proto.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation`.
  """
  # Writes unconditionally against the current default writer's resource.
  return gen_summary_ops.import_event(
      _summary_state.writer._resource, tensor, name=name)  # pylint: disable=protected-access
@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
"""Forces summary writer to send any buffered data to storage.
This operation blocks until that finishes.
Args:
writer: The `tf.summary.SummaryWriter` to flush. If None, the current
default writer will be used instead; if there is no current writer, this
returns `tf.no_op`.
name: Ignored legacy argument for a name for the operation.
Returns:
The created `tf.Operation`.
"""
del name # unused
if writer is None:
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
if isinstance(writer, SummaryWriter):
return writer.flush()
raise ValueError("Invalid argument to flush(): %r" % (writer,))
def legacy_raw_flush(writer=None, name=None):
  """Legacy version of flush() that accepts a raw resource tensor for `writer`.

  Do not use this function in any new code. Not supported and not part of the
  public TF APIs.

  Args:
    writer: The `tf.summary.SummaryWriter` to flush. If None, the current
      default writer will be used instead; if there is no current writer, this
      returns `tf.no_op`. For this legacy version only, also accepts a raw
      resource tensor pointing to the underlying C++ writer resource.
    name: Ignored legacy argument for a name for the operation.

  Returns:
    The created `tf.Operation`.
  """
  if writer is None or isinstance(writer, SummaryWriter):
    # Forward to the TF2 implementation of flush() when possible.
    return flush(writer, name)
  # Legacy fallback in case we were passed a raw resource tensor.
  # (No `else` needed: the branch above always returns.)
  with ops.device("cpu:0"):
    return gen_summary_ops.flush_summary_writer(writer, name=name)
def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer.

  Returns `<model_dir>/eval` when `name` is falsy, otherwise
  `<model_dir>/eval_<name>`.
  """
  subdir = "eval_" + name if name else "eval"
  return os.path.join(model_dir, subdir)
@deprecation.deprecated(date=None,
                        instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
  """Please use `tf.contrib.summary.create_file_writer`."""
  # Backwards-compatibility shim: warn, then forward all arguments unchanged.
  logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
                  "to create_file_writer")
  return create_file_writer(*args, **kwargs)
def _serialize_graph(arbitrary_graph):
  """Serializes a `tf.Graph` (or an already-proto graph) to bytes."""
  if isinstance(arbitrary_graph, ops.Graph):
    # Include inferred shapes so downstream consumers see shape info.
    graph_def = arbitrary_graph.as_graph_def(add_shapes=True)
    return graph_def.SerializeToString()
  return arbitrary_graph.SerializeToString()
def _choose_step(step):
  """Returns `step` as an int64 tensor, defaulting to the global step."""
  if step is None:
    return training_util.get_or_create_global_step()
  if isinstance(step, tensor_lib.Tensor):
    # Already a tensor; pass through untouched.
    return step
  return ops.convert_to_tensor(step, dtypes.int64)
def _check_create_file_writer_args(inside_function, **kwargs):
  """Helper to check the validity of arguments to a create_file_writer() call.

  Args:
    inside_function: whether the create_file_writer() call is in a tf.function
    **kwargs: the arguments to check, as kwargs to give them names.

  Raises:
    ValueError: if the arguments are graph tensors.
  """
  for arg_name, arg in kwargs.items():
    # Graph (non-eager) tensors cannot be consumed by the lifted create call.
    is_graph_tensor = (
        tensor_util.is_tf_type(arg) and not isinstance(arg, ops.EagerTensor))
    if not is_graph_tensor:
      continue
    if inside_function:
      raise ValueError(
          f"Invalid graph Tensor argument '{arg_name}={arg}' to "
          "create_file_writer() inside an @tf.function. The create call will "
          "be lifted into the outer eager execution context, so it cannot "
          "consume graph tensors defined inside the function body.")
    raise ValueError(
        f"Invalid graph Tensor argument '{arg_name}={arg}' to eagerly "
        "executed create_file_writer().")
def run_metadata(name, data, step=None):
  """Writes entire RunMetadata summary.

  A RunMetadata can contain DeviceStats, partition graphs, and function graphs.
  Please refer to the proto for definition of each field.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  summary_metadata = summary_pb2.SummaryMetadata()
  # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
  # the rationale.
  summary_metadata.plugin_data.plugin_name = "graph_run_metadata"
  # version number = 1
  summary_metadata.plugin_data.content = b"1"
  with summary_scope(name,
                     "graph_run_metadata_summary",
                     [data, step]) as (tag, _):
    with ops.device("cpu:0"):
      # write() expects a string tensor, so serialize the proto up front.
      tensor = constant_op.constant(data.SerializeToString(),
                                    dtype=dtypes.string)
      return write(
          tag=tag,
          tensor=tensor,
          step=step,
          metadata=summary_metadata)
def run_metadata_graphs(name, data, step=None):
  """Writes graphs from a RunMetadata summary.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  summary_metadata = summary_pb2.SummaryMetadata()
  # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
  # the rationale.
  summary_metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
  # version number = 1
  summary_metadata.plugin_data.content = b"1"
  # Copy only the graph-related fields; other RunMetadata fields are dropped.
  data = config_pb2.RunMetadata(
      function_graphs=data.function_graphs,
      partition_graphs=data.partition_graphs)
  with summary_scope(name,
                     "graph_run_metadata_graph_summary",
                     [data, step]) as (tag, _):
    with ops.device("cpu:0"):
      tensor = constant_op.constant(data.SerializeToString(),
                                    dtype=dtypes.string)
      return write(
          tag=tag,
          tensor=tensor,
          step=step,
          metadata=summary_metadata)
# Records which collection modes (graph and/or profiler) a trace was started
# with, so trace_export()/trace_off() know what to tear down.
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
# Guards reads and writes of _current_trace_context across threads.
_current_trace_context_lock = threading.Lock()
_current_trace_context = None  # Set by trace_on(); cleared by trace_off().
@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False, profiler_outdir=None): # pylint: disable=redefined-outer-name
"""Starts a trace to record computation graphs and profiling information.
Must be invoked in eager mode.
When enabled, TensorFlow runtime will collect information that can later be
exported and consumed by TensorBoard. The trace is activated across the entire
TensorFlow runtime and affects all threads of execution.
To stop the trace and export the collected information, use
`tf.summary.trace_export`. To stop the trace without exporting, use
`tf.summary.trace_off`.
Args:
graph: If True, enables collection of executed graphs. It includes ones from
tf.function invocation and ones from the legacy graph mode. The default is
True.
profiler: If True, enables the advanced profiler. Enabling profiler
implicitly enables the graph collection. The profiler may incur a high
memory overhead. The default is False.
profiler_outdir: Output directory for profiler. It is required when profiler
is enabled when trace was started. Otherwise, it is ignored.
"""
if ops.inside_function():
logging.warn("Cannot enable trace inside a tf.function.")
return
if not context.executing_eagerly():
logging.warn("Must enable trace in eager mode.")
return
global _current_trace_context
with _current_trace_context_lock:
if _current_trace_context:
logging.warn("Trace already enabled")
return
if graph and not profiler:
context.context().enable_graph_collection()
if profiler:
if profiler_outdir is None:
# TODO(b/149431324): Change this to throw a ValueError when Tensorflow
# major version advances. (current version is 2.15)
logging.warn(
"No `profiler_outdir` passed to trace_on(). Profiler won't be"
" enabled."
)
else:
context.context().enable_run_metadata()
_profiler.start(profiler_outdir)
_current_trace_context = _TraceContext(graph=graph, profiler=profiler)
# TODO(b/149431324): Delete `profiler_outdir` arg when Tensorflow major version
# advances. (current version is 2.15)
@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
  """Stops and exports the active trace as a Summary and/or profile file.

  Stops the trace and exports all metadata collected during the trace to the
  default SummaryWriter, if one has been set.

  Args:
    name: A name for the summary to be written.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    profiler_outdir: This arg is a no-op. Please set this in trace_on().

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  # TODO(stephanlee): See if we can remove profiler_outdir and infer it from
  # the SummaryWriter's logdir.
  global _current_trace_context
  if ops.inside_function():
    logging.warn("Cannot export trace inside a tf.function.")
    return
  if not context.executing_eagerly():
    logging.warn("Can only export trace while executing eagerly.")
    return
  with _current_trace_context_lock:
    if _current_trace_context is None:
      raise ValueError("Must enable trace before export through "
                       "tf.summary.trace_on.")
    graph, profiler = _current_trace_context  # pylint: disable=redefined-outer-name
  run_meta = context.context().export_run_metadata()
  # The graph-only variant strips non-graph RunMetadata fields.
  if graph and not profiler:
    run_metadata_graphs(name, run_meta, step)
  else:
    run_metadata(name, run_meta, step)
  if profiler:
    if profiler_outdir:
      logging.warn(
          "Ignoring `profiler_outdir` passed to trace_export(). Please pass it"
          " to trace_on() instead."
      )
    _profiler.stop()
  trace_off()
@tf_export("summary.trace_off", v1=[])
def trace_off():
"""Stops the current trace and discards any collected information."""
global _current_trace_context
with _current_trace_context_lock:
if _current_trace_context is None:
return # tracing already off
graph, profiler = _current_trace_context # pylint: disable=redefined-outer-name, unpacking-non-sequence
_current_trace_context = None
if graph:
# Disabling run_metadata disables graph collection as well.
context.context().disable_run_metadata()
if profiler:
try:
_profiler.stop()
except Exception as e: # pylint: disable=broad-except
logging.warn("Error while stopping profiler: %s", e)
pass
def _maybe_convert_tensor_to_dtensor(writer, tensor):
  """Copies `tensor` to the writer's host (CPU) mesh when running DTensor.

  No-op for writers without a `_mesh` attribute (the non-DTensor case).
  """
  if getattr(writer, "_mesh", None) is not None:
    mesh = writer._mesh.host_mesh()  # pylint: disable=protected-access
    # Use a fully-replicated layout whose rank matches the tensor's rank.
    tensor = dtensor_api.copy_to_mesh(
        tensor, layout_lib.Layout.replicated(mesh, rank=tensor.shape.rank)
    )
  return tensor
| _NoopSummaryWriter |
python | scipy__scipy | scipy/_lib/_array_api_override.py | {
"start": 883,
"end": 5239
} | class ____(enum.Enum):
skip = 0
numpy = 1
array_like = 2
unknown = 3
@lru_cache(100)
def _validate_array_cls(cls: type) -> _ArrayClsInfo:
if issubclass(cls, (list, tuple)):
return _ArrayClsInfo.array_like
# this comes from `_util._asarray_validated`
if issubclass(cls, SparseABC):
msg = ('Sparse arrays/matrices are not supported by this function. '
'Perhaps one of the `scipy.sparse.linalg` functions '
'would work instead.')
raise ValueError(msg)
if issubclass(cls, np.ma.MaskedArray):
raise TypeError("Inputs of type `numpy.ma.MaskedArray` are not supported.")
if issubclass(cls, np.matrix):
raise TypeError("Inputs of type `numpy.matrix` are not supported.")
if issubclass(cls, (np.ndarray, np.generic)):
return _ArrayClsInfo.numpy
# Note: this must happen after the test for np.generic, because
# np.float64 and np.complex128 are subclasses of float and complex respectively.
# This matches the behavior of array_api_compat.
if issubclass(cls, (int, float, complex, bool, type(None))):
return _ArrayClsInfo.skip
return _ArrayClsInfo.unknown
def array_namespace(*arrays: Array) -> ModuleType:
"""Get the array API compatible namespace for the arrays xs.
Parameters
----------
*arrays : sequence of array_like
Arrays used to infer the common namespace.
Returns
-------
namespace : module
Common namespace.
Notes
-----
Wrapper around `array_api_compat.array_namespace`.
1. Check for the global switch `SCIPY_ARRAY_API`. If disabled, just
return array_api_compat.numpy namespace and skip all compliance checks.
2. Check for known-bad array classes.
The following subclasses are not supported and raise and error:
- `numpy.ma.MaskedArray`
- `numpy.matrix`
- NumPy arrays which do not have a boolean or numerical dtype
- `scipy.sparse` arrays
3. Coerce array-likes to NumPy arrays and check their dtype.
Note that non-scalar array-likes can't be mixed with non-NumPy Array
API objects; e.g.
- `array_namespace([1, 2])` returns NumPy namespace;
- `array_namespace(np.asarray([1, 2], [3, 4])` returns NumPy namespace;
- `array_namespace(cp.asarray([1, 2], [3, 4])` raises an error.
"""
if not SCIPY_ARRAY_API:
# here we could wrap the namespace if needed
return np_compat
numpy_arrays = []
api_arrays = []
for array in arrays:
arr_info = _validate_array_cls(type(array))
if arr_info is _ArrayClsInfo.skip:
pass
elif arr_info is _ArrayClsInfo.numpy:
if array.dtype.kind in 'iufcb': # Numeric or bool
numpy_arrays.append(array)
elif array.dtype.kind == 'V' and is_jax_array(array):
# Special case for JAX zero gradient arrays;
# see array_api_compat._common._helpers._is_jax_zero_gradient_array
api_arrays.append(array) # JAX zero gradient array
else:
raise TypeError(f"An argument has dtype `{array.dtype!r}`; "
"only boolean and numerical dtypes are supported.")
elif arr_info is _ArrayClsInfo.unknown and is_array_api_obj(array):
api_arrays.append(array)
else:
# list, tuple, or arbitrary object
try:
array = np.asanyarray(array)
except TypeError:
raise TypeError("An argument is neither array API compatible nor "
"coercible by NumPy.")
if array.dtype.kind not in 'iufcb': # Numeric or bool
raise TypeError(f"An argument has dtype `{array.dtype!r}`; "
"only boolean and numerical dtypes are supported.")
numpy_arrays.append(array)
# When there are exclusively NumPy and ArrayLikes, skip calling
# array_api_compat.array_namespace for performance.
if not api_arrays:
return np_compat
# In case of mix of NumPy/ArrayLike and non-NumPy Array API arrays,
# let array_api_compat.array_namespace raise an error.
return array_api_compat.array_namespace(*numpy_arrays, *api_arrays)
| _ArrayClsInfo |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1522797,
"end": 1524607
} | class ____(Transform):
"""
PivotTransform schema wrapper.
Parameters
----------
pivot : str, :class:`FieldName`
The data field to pivot on. The unique values of this field become new field names
in the output stream.
value : str, :class:`FieldName`
The data field to populate pivoted fields. The aggregate values of this field become
the values of the new pivoted fields.
groupby : Sequence[str, :class:`FieldName`]
The optional data fields to group by. If not specified, a single group containing
all data objects will be used.
limit : float
An optional parameter indicating the maximum number of pivoted fields to generate.
The default (``0``) applies no limit. The pivoted ``pivot`` names are sorted in
ascending order prior to enforcing the limit. **Default value:** ``0``
op : :class:`AggregateOp`, Literal['argmax', 'argmin', 'average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
The aggregation operation to apply to grouped ``value`` field values. **Default
value:** ``sum``
"""
_schema = {"$ref": "#/definitions/PivotTransform"}
def __init__(
self,
pivot: Optional[str | SchemaBase] = Undefined,
value: Optional[str | SchemaBase] = Undefined,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
limit: Optional[float] = Undefined,
op: Optional[SchemaBase | AggregateOp_T] = Undefined,
**kwds,
):
super().__init__(
pivot=pivot, value=value, groupby=groupby, limit=limit, op=op, **kwds
)
| PivotTransform |
python | ansible__ansible | lib/ansible/executor/task_queue_manager.py | {
"start": 2755,
"end": 3489
} | class ____(multiprocessing.queues.SimpleQueue):
def __init__(self, *args, **kwargs):
kwargs['ctx'] = multiprocessing_context
super().__init__(*args, **kwargs)
def send_callback(self, method_name: str, task_result: _RawTaskResult) -> None:
self.put(CallbackSend(method_name=method_name, wire_task_result=task_result.as_wire_task_result()))
def send_task_result(self, task_result: _RawTaskResult) -> None:
self.put(task_result.as_wire_task_result())
def send_display(self, method, *args, **kwargs):
self.put(
DisplaySend(method, *args, **kwargs),
)
def send_prompt(self, **kwargs):
self.put(
PromptSend(**kwargs),
)
| FinalQueue |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 113141,
"end": 113268
} | class ____:
xlSizeIsArea = 1 # from enum XlSizeRepresents
xlSizeIsWidth = 2 # from enum XlSizeRepresents
| SizeRepresents |
python | pytorch__pytorch | torchgen/_autoheuristic/mixed_mm/test_mixed_mm.py | {
"start": 236,
"end": 1114
} | class ____(TestCase):
def test_mixedmm_a100(self) -> None:
run_bash("get_mixedmm_dataset.sh")
run_bash("gen_mixedmm_heuristic_a100.sh")
file_path = "../../../torch/_inductor/autoheuristic/artifacts/_MixedMMA100.py"
a100_heuristic_generated_code = read_file_to_string(file_path)
self.assertExpectedInline(
a100_heuristic_generated_code,
"""\
# flake8: noqa: B950
# fmt: off
# This file was generated by AutoHeuristic. Do not modify it manually!
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
from typing import Optional
from torch._inductor.autoheuristic.autoheuristic_utils import (
AHContext,
AHMetadata,
Choice,
)
from torch._inductor.autoheuristic.learnedheuristic_interface import (
LearnedHeuristicDecision,
)
| TestMixedMM |
python | pytorch__pytorch | test/ao/sparsity/test_structured_sparsifier.py | {
"start": 30926,
"end": 37929
} | class ____(TestCase):
"""
Test case for the implementation of paper:
`Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration <https://arxiv.org/abs/1811.00250>`_.
"""
class SimpleConvFPGM(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv2d1 = nn.Conv2d(
in_channels=1, out_channels=3, kernel_size=3, padding=1, bias=False
)
# Manually set the filter weights for demonstration purposes
"""
Three filters' weight are manually set to values 3.0, 2.0, and 0.1.
Different from the norm-based decision that prunes filter with value 0.1,
FPGM will prune the one with value 2.0.
"""
weights = torch.tensor([3.0, 2.0, 0.1]) # Weight weights for each filter
weights = weights[:, None, None, None] # broadcasting
self.conv2d1.weight.data.copy_(
torch.ones(self.conv2d1.weight.shape) * weights
)
# Second Convolutional Layer
self.conv2d2 = nn.Conv2d(
in_channels=3, out_channels=4, kernel_size=3, padding=1, bias=False
)
weights = torch.tensor([6.0, 7.0, 0.4, 0.5])
weights = weights[:, None, None, None]
self.conv2d2.weight.data.copy_(
torch.ones(self.conv2d2.weight.shape) * weights
)
def forward(self, x):
x = self.conv2d1(x)
x = self.conv2d2(x)
return x
def test_compute_distance(self, device="cpu"):
"""Test the distance computation function"""
model = TestFPGMPruner.SimpleConvFPGM().to(device)
pruner = FPGMPruner(0.3)
dist_conv1 = pruner._compute_distance(model.conv2d1.weight)
# compute the distance matrix using torch.cdist
flattened_filters = torch.Tensor(
[
[
3.0000,
3.0000,
3.0000,
3.0000,
3.0000,
3.0000,
3.0000,
3.0000,
3.0000,
],
[
2.0000,
2.0000,
2.0000,
2.0000,
2.0000,
2.0000,
2.0000,
2.0000,
2.0000,
],
[
0.1000,
0.1000,
0.1000,
0.1000,
0.1000,
0.1000,
0.1000,
0.1000,
0.1000,
],
]
)
"""
Expected distance matrix should have the following values:
[0.0000, 3.0000, 8.7000],
[3.0000, 0.0000, 5.7000],
[8.7000, 5.7000, 0.0000],
the distance should therefore be:
[11.7000, 8.7000, 14.4000]
"""
expected_dist_matrix_conv1 = torch.cdist(
flattened_filters, flattened_filters, p=2
)
expected_dist_conv1 = torch.sum(torch.abs(expected_dist_matrix_conv1), 1)
assert torch.isclose(
dist_conv1, expected_dist_conv1, rtol=1e-05, atol=1e-07
).all()
def _test_update_mask_on_single_layer(self, expected_conv1, device):
"""Test that pruning is conducted based on the pair-wise distance measurement instead of absolute norm value"""
# test pruning with one layer of conv2d
model = TestFPGMPruner.SimpleConvFPGM().to(device)
x = torch.ones((1, 1, 32, 32), device=device)
pruner = FPGMPruner(0.3)
config = [{"tensor_fqn": "conv2d1.weight"}]
pruner.prepare(model, config)
pruner.enable_mask_update = True
pruner.step()
assert (
pruner.groups[0]["module"].parametrizations.weight[0].mask[-1].item()
is not False
), "do not prune the least-norm filter"
# fusion step
pruned_model = pruner.prune()
pruned_y = pruned_model(x)
# assert shapes
expected_conv1 = expected_conv1.to(device)
assert pruned_y.shape == (1, 4, 32, 32)
assert pruned_model.conv2d1.weight.shape == expected_conv1.shape
assert pruned_model.conv2d2.weight.shape == (
4,
2,
3,
3,
), "conv2d2 should have input channel pruned"
# assert value
assert torch.isclose(
pruned_model.conv2d1.weight, expected_conv1, rtol=1e-05, atol=1e-07
).all()
def _test_update_mask_on_multiple_layer(
self, expected_conv1, expected_conv2, device
):
# the second setting
model = TestFPGMPruner.SimpleConvFPGM().to(device)
x = torch.ones((1, 1, 32, 32), device=device)
pruner = FPGMPruner(0.3)
config = [
{"tensor_fqn": "conv2d1.weight"},
{"tensor_fqn": "conv2d2.weight", "sparsity_level": 0.5},
]
pruner.prepare(model, config)
pruner.enable_mask_update = True
pruner.step()
# Get the masks for the two least-norm filters
mask1 = pruner.groups[0]["module"].parametrizations.weight[0].mask[-1]
mask2 = pruner.groups[0]["module"].parametrizations.weight[0].mask[-2]
# Check if either of the least-norm filters is not pruned
assert mask1.item() is not False or mask2.item() is not False, (
"Do not prune all least-norm filters"
)
# fusion step
pruned_model = pruner.prune()
pruned_y = pruned_model(x)
# assert shapes
expected_conv1 = expected_conv1.to(device)
expected_conv2 = expected_conv2.to(device)
assert pruned_y.shape == (1, 2, 32, 32)
assert pruned_model.conv2d1.weight.shape == expected_conv1.shape
assert pruned_model.conv2d2.weight.shape == expected_conv2.shape
# assert values
assert torch.isclose(
pruned_model.conv2d1.weight, expected_conv1, rtol=1e-05, atol=1e-07
).all()
assert torch.isclose(
pruned_model.conv2d2.weight, expected_conv2, rtol=1e-05, atol=1e-07
).all()
def test_update_mask(self):
weights = torch.tensor([3.0, 0.1])
expected_conv1 = torch.ones((2, 1, 3, 3)) * weights[:, None, None, None]
weights = torch.tensor([7.0, 0.4])
expected_conv2 = torch.ones((2, 2, 3, 3)) * weights[:, None, None, None]
for device in DEVICES:
self._test_update_mask_on_single_layer(expected_conv1, device)
self._test_update_mask_on_multiple_layer(
expected_conv1, expected_conv2, device
)
if __name__ == "__main__":
raise_on_run_directly("test/test_ao_sparsity.py")
| TestFPGMPruner |
python | getsentry__sentry | src/social_auth/backends/__init__.py | {
"start": 13175,
"end": 14703
} | class ____(BaseAuth):
"""OAuth base class"""
SETTINGS_KEY_NAME = ""
SETTINGS_SECRET_NAME = ""
SCOPE_VAR_NAME: str | None = None
SCOPE_PARAMETER_NAME = "scope"
DEFAULT_SCOPE: list[str] | None = None
SCOPE_SEPARATOR = " "
def __init__(self, request, redirect):
"""Init method"""
super().__init__(request, redirect)
self.redirect_uri = self.build_absolute_uri(self.redirect)
@classmethod
def get_key_and_secret(cls):
"""Return tuple with Consumer Key and Consumer Secret for current
service provider. Must return (key, secret), order *must* be respected.
"""
return (setting(cls.SETTINGS_KEY_NAME), setting(cls.SETTINGS_SECRET_NAME))
@classmethod
def enabled(cls):
"""Return backend enabled status by checking basic settings"""
return bool(setting(cls.SETTINGS_KEY_NAME) and setting(cls.SETTINGS_SECRET_NAME))
def get_scope(self):
"""Return list with needed access scope"""
scope: list[str] = self.DEFAULT_SCOPE or []
if self.SCOPE_VAR_NAME:
scope = scope + setting(self.SCOPE_VAR_NAME, [])
return scope
def get_scope_argument(self):
param = {}
scope = self.get_scope()
if scope:
param[self.SCOPE_PARAMETER_NAME] = self.SCOPE_SEPARATOR.join(scope)
return param
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service. Implement in subclass"""
return {}
| OAuthAuth |
python | getsentry__sentry | tests/sentry/db/models/fields/bitfield/test_bitfield.py | {
"start": 690,
"end": 1015
} | class ____(models.Model):
class Meta:
app_label = "fixtures"
class flags(TypedClassBitField):
FLAG_0: bool
FLAG_1: bool
FLAG_2: bool
FLAG_3: bool
bitfield_default = ("FLAG_1", "FLAG_2")
bitfield_db_column = "another_name"
| BitFieldTestModelWithDefaultsAsKeyNames |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 2878,
"end": 7170
} | class ____(nn.Module, ABC):
"""This defines the interface expected from all models used commonly for
FSDP unit tests."""
@abstractmethod
def get_input(self, device) -> tuple[torch.Tensor, ...]:
"""Returns an input for the model as as tuple."""
...
@abstractmethod
def get_loss(self, input, output) -> torch.Tensor:
"""Returns the loss given the input and output."""
...
@abstractmethod
def run_backward(self, loss) -> None:
"""Runs the backward pass (e.g. including ``loss.backward()``)."""
...
@staticmethod
@abstractmethod
def init(*args: Any, **kwargs: Any) -> nn.Module:
"""Initializes an instance of this model."""
...
def _assert_module_states(
model: nn.Module,
process_group: dist.ProcessGroup,
assert_fn: Callable,
):
"""
All-gathers module states across ranks and calls ``assert_fn`` on each pair
of corresponding states from rank 0 and a nonzero rank. For example, if
``assert_fn`` is ``self.assertEqual()``, then this checks that all module
states are equal across ranks.
"""
# Include names for debugging convenience
named_module_states = [
(param_name, param.detach().cpu())
for param_name, param in model.named_parameters()
]
named_module_states += [
(buffer_name, buffer.detach().cpu())
for buffer_name, buffer in model.named_buffers()
]
world_size = dist.get_world_size(process_group)
olist = [None for _ in range(world_size)]
dist.all_gather_object(olist, named_module_states, group=process_group)
rank0_states = olist[0]
assert rank0_states is not None # mypy
for state in olist[1:]:
assert state is not None # mypy
for (_, p1), (_, p2) in zip(rank0_states, state, strict=True):
assert_fn(p1, p2)
def get_devtype():
return torch.device(DEVICE_TYPE)
def _zero_model(
model: nn.Module,
zero_buffers: bool = False,
summon_full=True,
):
"""Zeros the parameters and optionally buffers of ``model`` in place."""
ctx = FSDP.summon_full_params(model) if summon_full else nullcontext()
with ctx:
for param in model.parameters():
with torch.no_grad():
param.zero_()
if zero_buffers:
for buffer in model.buffers():
with torch.no_grad():
buffer.zero_()
def _get_state_dict(model, cpu_offload=False, half=False):
if not cpu_offload:
model = model.to(DEVICE_TYPE)
if half:
model.half()
return model.state_dict()
def subtest_name(test_name_mapping, *args):
return "_".join(
[test_name_mapping[str(s)] if s is not None else "none" for s in args]
)
def _broadcast_state_dict(rank, state_dict):
# For non-FSDP roots, some parts of the model state on rank 0 may
# not be on CPU, so we move everything to CPU to avoid issues like:
# https://github.com/pytorch/pytorch/issues/77113.
for param_name, param in state_dict.items():
if param.device != torch.device("cpu"):
state_dict[param_name] = param.cpu()
olist = [state_dict if rank == 0 else None]
dist.broadcast_object_list(olist)
state_dict = cast(dict[str, torch.Tensor], olist[0])
# Ensure that the state is on DEVICE
for param_name in state_dict:
state_dict[param_name] = state_dict[param_name].to(DEVICE_TYPE)
return state_dict
def get_full_params(model: nn.Module, recurse: bool = True):
"""
Returns the full unsharded parameters of ``model``. Any FSDP-managed
parameters offloaded to CPU are moved to GPU in the returned list.
Args:
recurse (bool): If ``False``, only unshards the parameters immediate to
``model``; if ``True``, recurses through the module hierarchy
rooted at ``model``.
"""
with FSDP.summon_full_params(model, recurse=recurse):
return deepcopy(list(model.parameters()))
def _move_to_device(model: nn.Module, move_to_device: bool):
return model.to(DEVICE_TYPE) if move_to_device else model
def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
return model if not wrap_fsdp else FSDP(model, *args, **kwargs)
| FSDPTestModel |
python | gevent__gevent | src/gevent/tests/test__fileobject.py | {
"start": 16919,
"end": 18849
} | class ____(CleanupMixin, greentest.TestCase):
def _getTargetClass(self):
return OpenDescriptor
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _check(self, regex, kind, *args, **kwargs):
with self.assertRaisesRegex(kind, regex):
self._makeOne(*args, **kwargs)
case = lambda re, **kwargs: (re, TypeError, kwargs)
vase = lambda re, **kwargs: (re, ValueError, kwargs)
CASES = (
case('mode', mode=42),
case('buffering', buffering='nope'),
case('encoding', encoding=42),
case('errors', errors=42),
vase('mode', mode='aoeug'),
vase('mode U cannot be combined', mode='wU'),
vase('text and binary', mode='rtb'),
vase('append mode at once', mode='rw'),
vase('exactly one', mode='+'),
vase('take an encoding', mode='rb', encoding='ascii'),
vase('take an errors', mode='rb', errors='strict'),
vase('take a newline', mode='rb', newline='\n'),
)
def test_atomicwrite_fd(self):
from gevent._fileobjectcommon import WriteallMixin
# It basically only does something when buffering is otherwise disabled
fileno, _w = self._pipe()
desc = self._makeOne(fileno, 'wb',
buffering=0,
closefd=False,
atomic_write=True)
self.assertTrue(desc.atomic_write)
fobj = desc.opened()
self.assertIsInstance(fobj, WriteallMixin)
os.close(fileno)
def pop():
for regex, kind, kwargs in TestOpenDescriptor.CASES:
setattr(
TestOpenDescriptor, 'test_' + regex.replace(' ', '_'),
lambda self, _re=regex, _kind=kind, _kw=kwargs: self._check(_re, _kind, 1, **_kw)
)
pop()
@unittest.skipIf(GreenOpenDescriptor is None, "No support for non-blocking IO")
| TestOpenDescriptor |
python | encode__starlette | starlette/_utils.py | {
"start": 1270,
"end": 1377
} | class ____(Awaitable[T_co], AbstractAsyncContextManager[T_co], Protocol[T_co]): ...
| AwaitableOrContextManager |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/hooks/test_display_video.py | {
"start": 7178,
"end": 8161
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.api_version = "v2"
self.hook = GoogleDisplayVideo360Hook(api_version=self.api_version, gcp_conn_id=GCP_CONN_ID)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook._authorize"
)
@mock.patch("airflow.providers.google.marketing_platform.hooks.display_video.build")
def test_get_conn_to_display_video(self, mock_build, mock_authorize):
result = self.hook.get_conn_to_display_video()
mock_build.assert_called_once_with(
"displayvideo",
self.api_version,
http=mock_authorize.return_value,
cache_discovery=False,
)
assert mock_build.return_value == result
| TestGoogleDisplayVideo360v2Hook |
python | pypa__pipenv | pipenv/vendor/tomlkit/exceptions.py | {
"start": 1278,
"end": 1528
} | class ____(ParseError):
"""
A datetime field was improperly specified.
"""
def __init__(self, line: int, col: int) -> None:
message = "Invalid datetime"
super().__init__(line, col, message=message)
| InvalidDateTimeError |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_ai_review_analyze_smoke.py | {
"start": 151,
"end": 4966
} | class ____:
"""Basic smoke tests for the ai-review-analyze command."""
def test_import_and_basic_structure(self):
"""Test that command can be imported and has expected structure."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
assert ai_review_analyze is not None
assert ai_review_analyze.name == "ai-review-analyze"
assert callable(ai_review_analyze)
def test_help_command(self):
"""Test that help command works."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--help"])
assert result.exit_code == 0
assert "ai-review-analyze" in result.output
assert "--human" in result.output
assert "--json" in result.output
assert "--minimal" in result.output
def test_command_execution_without_errors(self):
"""Test command executes without argument parsing errors."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--json"])
# Command may succeed or fail depending on environment,
# but it shouldn't crash due to argument parsing issues
if result.exit_code == 0:
# Success - verify it produces JSON
import json
data = json.loads(result.output)
assert isinstance(data, dict)
else:
# Failure should be graceful with error message
assert "Error" in result.output
@patch("automation.dagster_dev.commands.ai_review_analyze.subprocess.run")
def test_subprocess_error_handling(self, mock_run):
"""Test graceful handling of subprocess errors."""
import subprocess
mock_run.side_effect = subprocess.CalledProcessError(1, ["test"])
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--json"])
assert result.exit_code == 1
assert "Error" in result.output
def test_valid_flags_accepted(self):
"""Test that valid command flags are accepted."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
# Just test that the flags don't cause argument parsing errors
result = runner.invoke(ai_review_analyze, ["--help"])
assert result.exit_code == 0
# Check that key flags are documented
assert "--minimal" in result.output
assert "--human" in result.output
assert "--json" in result.output
@patch("automation.dagster_dev.commands.ai_review_analyze.subprocess.run")
def test_full_command_with_mocked_dependencies(self, mock_run):
"""Test command with all dependencies properly mocked."""
# Mock all subprocess calls that the command makes
mock_responses = {
("git", "branch", "--show-current"): "test-branch\n",
(
"dagster-dev",
"gt-stack",
"--current-only",
): '[{"name": "test-branch", "is_current": true}]',
("dagster-dev", "gt-stack"): '[{"name": "test-branch", "is_current": true}]',
("git", "diff", "--stat", "master..test-branch"): "1 file changed, 5 insertions(+)\n",
("git", "diff", "master..test-branch"): "+added line\n",
("git", "log", "--oneline", "master..test-branch"): "abc123 test commit\n",
("gh", "pr", "view", "--json", "number", "--jq", ".number"): "123\n",
("git", "status", "--porcelain"): "",
("gt", "log", "--stack"): "abc123 test commit\n",
}
def side_effect(cmd, **kwargs):
cmd_key = tuple(cmd)
mock_result = MagicMock()
if cmd_key in mock_responses:
mock_result.stdout = mock_responses[cmd_key]
mock_result.returncode = 0
return mock_result
else:
# Default response for unknown commands
mock_result.stdout = ""
mock_result.returncode = 0
return mock_result
mock_run.side_effect = side_effect
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--json"])
assert result.exit_code == 0
# Should produce valid JSON output
output_data = json.loads(result.output)
assert "current_branch" in output_data
assert "repository_state" in output_data
| TestAiReviewAnalyzeSmoke |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 58315,
"end": 62774
} | class ____(torch.nn.Module):
def forward(self, tangents_1: "f32[8, 8]"):
partitioned_bw_subgraph_0_0 = self.partitioned_bw_subgraph_0_0
invoke_subgraph_3 = torch.ops.higher_order.invoke_subgraph(partitioned_bw_subgraph_0_0, 'partitioned_bw_subgraph_0_0', tangents_1, tangents_1); partitioned_bw_subgraph_0_0 = tangents_1 = None
getitem_2: "f32[8, 8]" = invoke_subgraph_3[0]; invoke_subgraph_3 = None
return (getitem_2,)
class partitioned_bw_subgraph_0_0(torch.nn.Module):
def forward(self, tangents_0: "f32[8, 8]", tangents_1: "f32[8, 8]"):
mul_2: "f32[8, 8]" = torch.ops.aten.mul.Tensor(tangents_1, 3)
mul_3: "f32[8, 8]" = torch.ops.aten.mul.Tensor(tangents_1, 2); tangents_1 = None
add: "f32[8, 8]" = torch.ops.aten.add.Tensor(mul_2, mul_3); mul_2 = mul_3 = None
return (add,)
""",
)
def test_dynamic(self):
@nested_compile_region
def gn(x):
return torch.sin(x)
def fn(x):
return gn(x) + gn(x)
x = torch.randn(8, 8, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
ref = fn(x)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_complex(self):
# Observed in Wan2.1
@nested_compile_region
def gn(x):
return torch.sin(x)
def fn(x):
return gn(x) + gn(x)
x = torch.randn(2, 2, dtype=torch.complex64)
ref = fn(x)
opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_pending_unbacked(self):
@nested_compile_region
def gn(x):
u = x[0].item()
return x * u
def fn(x):
return gn(x)
x = torch.randn(8)
torch._dynamo.mark_dynamic(x, 0)
ref = fn(x)
opt_fn = torch.compile(
fn, backend="eager", fullgraph=True
) # Inductor fails with cpp compilation error
res = opt_fn(x)
self.assertEqual(ref, res)
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_unbacked1(self):
@nested_compile_region
def gn(x, y):
b = x.item()
return y[:b].clone()
def fn(x, y):
return gn(x, y)
x = torch.tensor(4)
y = torch.randn(8)
ref = fn(x, y)
opt_fn = torch.compile(
fn, backend="eager", fullgraph=True
) # Inductor fails with assertion error when lowering aten.sym_constrain_range_for_size.default
res = opt_fn(x, y)
self.assertEqual(ref, res)
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_unbacked2(self):
@nested_compile_region
def gn(x, y):
b = x.item()
torch._check(b >= 0)
torch._check(b < y.shape[0])
return y[:b].clone()
def fn(x, y):
return gn(x, y)
x = torch.tensor(4)
y = torch.randn(8)
ref = fn(x, y)
opt_fn = torch.compile(
fn, backend="eager", fullgraph=True
) # Inductor fails with assertion error when lowering aten.sym_constrain_range_for_size.default
res = opt_fn(x, y)
self.assertEqual(ref, res)
def test_bwd_partitioning(self):
@nested_compile_region
def gn(x, y):
z = torch.matmul(x, y)
return torch.sin(z)
def fn(x, y):
return torch.sin(gn(x, y))
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, 8, requires_grad=True)
y = torch.randn(8, 8, requires_grad=True)
x_clone = x.detach().clone().requires_grad_(True)
y_clone = y.detach().clone().requires_grad_(True)
ref = fn(x, y)
res = opt_fn(x_clone, y_clone)
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
self.assertEqual(y.grad, y_clone.grad)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.fw_graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | wandb__wandb | wandb/sdk/lib/fsm.py | {
"start": 1438,
"end": 1676
} | class ____(Protocol[T_FsmInputs, T_FsmContext_contra]):
@abstractmethod
def on_enter(
self, inputs: T_FsmInputs, context: T_FsmContext_contra
) -> None: ... # pragma: no cover
@runtime_checkable
| FsmStateEnterWithContext |
python | huggingface__transformers | src/transformers/models/rag/configuration_rag.py | {
"start": 4699,
"end": 8518
} | class ____(PreTrainedConfig):
model_type = "rag"
has_no_defaults_at_init = True
def __init__(
self,
vocab_size=None,
is_encoder_decoder=True,
prefix=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
decoder_start_token_id=None,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
retrieval_vector_size=768,
retrieval_batch_size=8,
dataset="wiki_dpr",
dataset_split="train",
index_name="compressed",
index_path=None,
passages_path=None,
use_dummy_dataset=False,
reduce_loss=False,
label_smoothing=0.0,
do_deduplication=True,
exclude_bos_score=False,
do_marginalize=False,
output_retrieved=False,
use_cache=True,
forced_eos_token_id=None,
dataset_revision=None,
**kwargs,
):
super().__init__(
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
is_encoder_decoder=is_encoder_decoder,
prefix=prefix,
vocab_size=vocab_size,
**kwargs,
)
if "question_encoder" not in kwargs or "generator" not in kwargs:
raise ValueError(
f"A configuration of type {self.model_type} cannot be instantiated because "
f"both `question_encoder` and `generator` sub-configurations were not passed, only {kwargs}"
)
question_encoder_config = kwargs.pop("question_encoder")
question_encoder_model_type = question_encoder_config.pop("model_type")
decoder_config = kwargs.pop("generator")
decoder_model_type = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.reduce_loss = reduce_loss
self.label_smoothing = label_smoothing
self.exclude_bos_score = exclude_bos_score
self.do_marginalize = do_marginalize
self.title_sep = title_sep
self.doc_sep = doc_sep
self.n_docs = n_docs
self.max_combined_length = max_combined_length
self.dataset = dataset
self.dataset_split = dataset_split
self.index_name = index_name
self.retrieval_vector_size = retrieval_vector_size
self.retrieval_batch_size = retrieval_batch_size
self.passages_path = passages_path
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.dataset_revision = dataset_revision
self.output_retrieved = output_retrieved
self.do_deduplication = do_deduplication
self.use_cache = use_cache
if forced_eos_token_id is None:
self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
@classmethod
def from_question_encoder_generator_configs(
cls, question_encoder_config: PreTrainedConfig, generator_config: PreTrainedConfig, **kwargs
) -> PreTrainedConfig:
r"""
Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
"""
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
__all__ = ["RagConfig"]
| RagConfig |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 19255,
"end": 23116
} | class ____(Pix2StructPreTrainedModel):
config: Pix2StructVisionConfig
main_input_name = "flattened_patches"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["Pix2StructVisionLayer"]
def __init__(self, config: Pix2StructVisionConfig):
super().__init__(config)
self.config = config
self.embeddings = Pix2StructVisionEmbeddings(config)
self.encoder = Pix2StructVisionEncoder(config)
self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_projection
@auto_docstring
def forward(
self,
flattened_patches: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
[`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original
paper](https://huggingface.co/papers/2210.03347) (figure 5) for more details.
Example:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, Pix2StructVisionModel
>>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 2048, 768]
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if flattened_patches is None:
raise ValueError("You have to specify flattened_patches")
if attention_mask is None:
# check where `flattened_patches` is not 0
attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
embedding_output = self.embeddings(flattened_patches)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
if not return_dict:
head_outputs = (sequence_output,)
return head_outputs + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size
| Pix2StructVisionModel |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/values.py | {
"start": 5685,
"end": 6978
} | class ____(composite_tensor.CompositeTensor):
"""A container that holds a list of values, one value per worker.
`tf.distribute.experimental.coordinator.PerWorkerValues` contains a collection
of values, where each of the values is located on its corresponding worker,
and upon being used as one of the `args` or `kwargs` of
`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule()`, the
value specific to a worker will be passed into the function being executed at
that corresponding worker.
Currently, the only supported path to create an object of
`tf.distribute.experimental.coordinator.PerWorkerValues` is through calling
`iter` on a `ClusterCoordinator.create_per_worker_dataset`-returned
distributed dataset instance. The mechanism to create a custom
`tf.distribute.experimental.coordinator.PerWorkerValues` is not yet supported.
"""
def __init__(self, values):
for v in values:
if not isinstance(v, remote_value.RemoteValue):
raise AssertionError(
"`PerWorkerValues` should only take `RemoteValue`s.")
self._values = tuple(values)
@property
def _type_spec(self):
return PerWorkerValuesTypeSpec(
self._values[0]._type_spec, # pylint: disable=protected-access
type(self))
| PerWorkerValues |
python | scipy__scipy | scipy/stats/tests/test_qmc.py | {
"start": 33423,
"end": 33904
} | class ____:
def test_examples(self):
test_vector = [
# from low_0_bit's docstring
(0b0000, 1),
(0b0001, 2),
(0b0010, 1),
(0b0101, 2),
(0b0111, 4),
# gh-23409
(2 ** 32 - 1, 33),
(2 ** 32, 1),
(2 ** 33 - 1, 34),
(2 ** 64 - 1, 65),
]
for in_, out in test_vector:
assert_equal(_test_low_0_bit(in_), out)
| TestLow0Bit |
python | keon__algorithms | algorithms/linkedlist/merge_two_list.py | {
"start": 213,
"end": 856
} | class ____:
def __init__(self, x):
self.val = x
self.next = None
def merge_two_list(l1, l2):
ret = cur = Node(0)
while l1 and l2:
if l1.val < l2.val:
cur.next = l1
l1 = l1.next
else:
cur.next = l2
l2 = l2.next
cur = cur.next
cur.next = l1 or l2
return ret.next
# recursively
def merge_two_list_recur(l1, l2):
if not l1 or not l2:
return l1 or l2
if l1.val < l2.val:
l1.next = merge_two_list_recur(l1.next, l2)
return l1
else:
l2.next = merge_two_list_recur(l1, l2.next)
return l2
| Node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.