language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/air/tests/execution/test_tracked_actor.py | {
"start": 1037,
"end": 10895
} | class ____:
def __init__(self, **kwargs):
self.kwargs = kwargs
def get_kwargs(self):
return self.kwargs
def task(self, value: Any):
return value
@ray.remote(num_cpus=4)
def fn():
return True
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
@pytest.mark.parametrize("actor_cls", [Actor, ray.remote(Actor)])
@pytest.mark.parametrize("kill", [False, True])
def test_start_stop_actor(ray_start_4_cpus, resource_manager_cls, actor_cls, kill):
"""Test that starting and stopping actors work and invokes a callback.
- Start an actor
- Starting should trigger start callback
- Schedule actor task, which should resolve (meaning actor successfully started)
- Stop actor, which should resolve and trigger stop callback
- Schedule remote fn that takes up all cluster resources. This should resolve,
meaning that the actor was stopped successfully.
"""
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
# Start actor, set callbacks
tracked_actor = actor_manager.add_actor(
cls=actor_cls,
kwargs={"key": "val"},
resource_request=ResourceRequest([{"CPU": 4}]),
on_start=_raise(Started),
on_stop=_raise(Stopped),
on_error=_raise(Failed),
)
# Actor should be started
with pytest.raises(Started):
actor_manager.next()
# Schedule task on actor which should resolve (actor successfully started)
actor_manager.schedule_actor_task(
tracked_actor, "task", (1,), on_result=_raise(Result)
)
with pytest.raises(Result):
actor_manager.next()
# Now we can assert that there are no CPUS resources available anymore.
# Note that actor starting is asynchronous, so we can't assert this right away
# - that's why we wait for the actor task to resolve first.
assert ray.available_resources().get("CPU", 0.0) == 0, ray.available_resources()
# Stop actor
actor_manager.remove_actor(tracked_actor, kill=kill)
with pytest.raises(Stopped):
actor_manager.next()
# This task takes up all the cluster resources. It should resolve now that
# the actor was terminated.
assert ray.get(fn.remote(), timeout=5)
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_start_many_actors(ray_start_4_cpus, resource_manager_cls):
"""Test that starting more actors than fit onto the cluster works.
- Request 10 actors
- 4 can be started. Assert they are started
- Stop 2
- Assert 2 are stopped and 2 new ones are started
"""
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
running_actors = []
# stats keeps track of started/stopped actors
stats = Counter()
def start_callback(tracked_actor):
running_actors.append(tracked_actor)
stats["started"] += 1
def stop_callback(tracked_actor):
running_actors.remove(tracked_actor)
stats["stopped"] += 1
# start 10 actors
expected_actors = []
for i in range(10):
tracked_actor = actor_manager.add_actor(
cls=Actor,
kwargs={"key": "val"},
resource_request=ResourceRequest([{"CPU": 1}]),
on_start=start_callback,
on_stop=stop_callback,
on_error=_raise(Failed),
)
expected_actors.append(tracked_actor)
# wait for some actor starts
for i in range(4):
actor_manager.next()
# we should now have 4 started actors
assert stats["started"] == 4
assert stats["stopped"] == 0
assert len(running_actors) == 4
assert set(running_actors) == set(expected_actors[:4])
# stop 2 actors
actor_manager.remove_actor(running_actors[0])
actor_manager.remove_actor(running_actors[1])
# Wait four times, twice for termination, twice for start
for i in range(4):
actor_manager.next()
# we should have 4 running actors, 6 started and 2 stopped
assert stats["started"] == 6
assert stats["stopped"] == 2
assert len(running_actors) == 4
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
@pytest.mark.parametrize("where", ["init", "fn"])
def test_actor_fail(ray_start_4_cpus, cleanup, resource_manager_cls, where):
"""Test that actor failures are handled properly.
- Start actor that either fails on init or in a task (RayActorError)
- Schedule task on actor
- Assert that the correct callbacks are called
"""
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
# keep track of failed tasks and actors
stats = Counter()
@ray.remote
class FailingActor:
def __init__(self, where):
self._where = where
if self._where == "init":
raise RuntimeError("INIT")
def fn(self):
if self._where == "fn":
# SystemExit will invoke a RayActorError
raise SystemExit
return True
def fail_callback_actor(tracked_actor, exception):
stats["failed_actor"] += 1
def fail_callback_task(tracked_actor, exception):
stats["failed_task"] += 1
# Start actor
tracked_actor = actor_manager.add_actor(
cls=FailingActor,
kwargs={"where": where},
resource_request=ResourceRequest([{"CPU": 1}]),
on_error=fail_callback_actor,
)
if where != "init":
# Wait until it is started. This won't invoke any callback, yet
actor_manager.next()
assert stats["failed_actor"] == 0
assert stats["failed_task"] == 0
# Schedule task
actor_manager.schedule_actor_task(
tracked_actor, "fn", on_error=fail_callback_task
)
# Yield control and wait for task resolution. This will invoke the callback.
actor_manager.next()
assert stats["failed_actor"] == 1
assert stats["failed_task"] == bool(where != "init")
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_stop_actor_before_start(
ray_start_4_cpus, tmp_path, cleanup, resource_manager_cls
):
"""Test that actor failures are handled properly.
- Start actor that either fails on init or in a task (RayActorError)
- Schedule task on actor
- Assert that the correct callbacks are called
"""
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
hang_marker = tmp_path / "hang.txt"
@ray.remote
class HangingActor:
def __init__(self):
while not hang_marker.exists():
time.sleep(0.05)
tracked_actor = actor_manager.add_actor(
HangingActor,
kwargs={},
resource_request=ResourceRequest([{"CPU": 1}]),
on_start=_raise(RuntimeError, "Should not have started"),
on_stop=_raise(RuntimeError, "Should not have stopped"),
)
while not actor_manager.is_actor_started(tracked_actor):
actor_manager.next(0.05)
# Actor started but hasn't triggered on_start, yet
actor_manager.remove_actor(tracked_actor)
hang_marker.write_text("")
while actor_manager.is_actor_started(tracked_actor):
actor_manager.next(0.05)
assert actor_manager.num_live_actors == 0
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
@pytest.mark.parametrize("start_thread", [False, True])
def test_stop_actor_custom_future(
ray_start_4_cpus, tmp_path, cleanup, resource_manager_cls, start_thread
):
"""If we pass a custom stop future, the actor should still be shutdown by GC.
This should also be the case when we start a thread in the background, as we
do e.g. in Ray Tune's function runner.
"""
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
hang_marker = tmp_path / "hang.txt"
actor_name = f"stopping_actor_{resource_manager_cls.__name__}_{start_thread}"
@ray.remote(name=actor_name)
class HangingStopActor:
def __init__(self):
self._thread = None
self._stop_event = threading.Event()
if start_thread:
def entrypoint():
while True:
print("Thread!")
time.sleep(1)
if self._stop_event.is_set():
sys.exit(0)
self._thread = threading.Thread(target=entrypoint)
self._thread.start()
def stop(self):
print("Waiting")
while not hang_marker.exists():
time.sleep(0.05)
self._stop_event.set()
print("stopped")
start_barrier = Barrier(max_results=1)
stop_barrier = Barrier(max_results=1)
tracked_actor = actor_manager.add_actor(
HangingStopActor,
kwargs={},
resource_request=ResourceRequest([{"CPU": 1}]),
on_start=start_barrier.arrive,
on_stop=stop_barrier.arrive,
)
while not start_barrier.completed:
actor_manager.next(0.05)
# Actor is alive
assert ray.get_actor(actor_name)
stop_future = actor_manager.schedule_actor_task(tracked_actor, "stop")
actor_manager.remove_actor(tracked_actor, kill=False, stop_future=stop_future)
assert not stop_barrier.completed
hang_marker.write_text("!")
while not stop_barrier.completed:
actor_manager.next(0.05)
# Actor should have stopped now and should get cleaned up
with pytest.raises(ValueError):
ray.get_actor(actor_name)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| Actor |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 521,
"end": 768
} | class ____(graphene.Enum):
DEPLOYMENT_JOB_RUN_STATE_UNSPECIFIED = 1
NO_VALID_DEPLOYMENT_JOB_FOUND = 2
RUNNING = 3
SUCCEEDED = 4
FAILED = 5
PENDING = 6
APPROVAL = 7
| MlflowModelVersionDeploymentJobStateDeploymentJobRunState |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 18888,
"end": 19423
} | class ____(ASTExpression):
def __eq__(self, other: object) -> bool:
return isinstance(other, ASTThisLiteral)
def __hash__(self) -> int:
return hash('this')
def _stringify(self, transform: StringifyTransform) -> str:
return 'this'
def get_id(self, version: int) -> str:
return 'fpT'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('this', 'this')
| ASTThisLiteral |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 112681,
"end": 112819
} | class ____(MaybeAlignPartitions):
_projection_passthrough = True
_parameters = ["frame", "value"]
_expr_cls = Fillna
| FillnaAlign |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 22227,
"end": 30709
} | class ____:
Nodes = NodesPermissions
Groups = GroupsPermissions
@staticmethod
def alias(
*,
alias: Union[str, Sequence[str]],
collection: Union[str, Sequence[str]],
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(alias, str):
alias = [alias]
if isinstance(collection, str):
collection = [collection]
for a in alias:
for c in collection:
permission = _AliasPermission(alias=a, collection=c, actions=set())
if create:
permission.actions.add(AliasAction.CREATE)
if read:
permission.actions.add(AliasAction.READ)
if update:
permission.actions.add(AliasAction.UPDATE)
if delete:
permission.actions.add(AliasAction.DELETE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def data(
*,
collection: Union[str, Sequence[str]],
tenant: Union[str, Sequence[str], None] = None,
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(collection, str):
collection = [collection]
if tenant is None:
tenant = ["*"]
if isinstance(tenant, str):
tenant = [tenant]
for c in collection:
for t in tenant:
permission = _DataPermission(collection=c, tenant=t, actions=set())
if create:
permission.actions.add(DataAction.CREATE)
if read:
permission.actions.add(DataAction.READ)
if update:
permission.actions.add(DataAction.UPDATE)
if delete:
permission.actions.add(DataAction.DELETE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def collections(
*,
collection: Union[str, Sequence[str]],
create_collection: bool = False,
read_config: bool = False,
update_config: bool = False,
delete_collection: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(collection, str):
collection = [collection]
for c in collection:
permission = _CollectionsPermission(collection=c, actions=set())
if create_collection:
permission.actions.add(CollectionsAction.CREATE)
if read_config:
permission.actions.add(CollectionsAction.READ)
if update_config:
permission.actions.add(CollectionsAction.UPDATE)
if delete_collection:
permission.actions.add(CollectionsAction.DELETE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def tenants(
*,
collection: Union[str, Sequence[str]],
tenant: Union[str, Sequence[str], None] = None,
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(collection, str):
collection = [collection]
if tenant is None:
tenant = ["*"]
if isinstance(tenant, str):
tenant = [tenant]
for c in collection:
for t in tenant:
permission = _TenantsPermission(collection=c, tenant=t, actions=set())
if create:
permission.actions.add(TenantsAction.CREATE)
if read:
permission.actions.add(TenantsAction.READ)
if update:
permission.actions.add(TenantsAction.UPDATE)
if delete:
permission.actions.add(TenantsAction.DELETE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def replicate(
*,
collection: Union[str, Sequence[str]],
shard: Union[str, Sequence[str], None] = None,
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(collection, str):
collection = [collection]
if shard is None:
shard = ["*"]
if isinstance(shard, str):
shard = [shard]
for c in collection:
for s in shard:
permission = _ReplicatePermission(collection=c, shard=s, actions=set())
if create:
permission.actions.add(ReplicateAction.CREATE)
if read:
permission.actions.add(ReplicateAction.READ)
if update:
permission.actions.add(ReplicateAction.UPDATE)
if delete:
permission.actions.add(ReplicateAction.DELETE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def roles(
*,
role: Union[str, Sequence[str]],
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
scope: Optional[RoleScope] = None,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(role, str):
role = [role]
for r in role:
permission = _RolesPermission(role=r, actions=set())
if read:
permission.actions.add(RolesAction.READ)
if create:
permission.actions.add(RolesAction.CREATE)
if update:
permission.actions.add(RolesAction.UPDATE)
if delete:
permission.actions.add(RolesAction.DELETE)
if scope is not None:
permission.scope = scope.value
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def users(
*,
user: Union[str, Sequence[str]],
create: bool = False,
read: bool = False,
update: bool = False,
delete: bool = False,
assign_and_revoke: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(user, str):
user = [user]
for u in user:
permission = _UsersPermission(users=u, actions=set())
if create:
permission.actions.add(UsersAction.CREATE)
if read:
permission.actions.add(UsersAction.READ)
if update:
permission.actions.add(UsersAction.UPDATE)
if delete:
permission.actions.add(UsersAction.DELETE)
if assign_and_revoke:
permission.actions.add(UsersAction.ASSIGN_AND_REVOKE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def backup(
*, collection: Union[str, Sequence[str]], manage: bool = False
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(collection, str):
collection = [collection]
for c in collection:
permission = _BackupsPermission(collection=c, actions=set())
if manage:
permission.actions.add(BackupsAction.MANAGE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
@staticmethod
def cluster(*, read: bool = False) -> PermissionsCreateType:
if read:
return [_ClusterPermission(actions={ClusterAction.READ})]
return []
| Permissions |
python | huggingface__transformers | utils/modular_model_converter.py | {
"start": 10637,
"end": 22632
} | class ____(cst.CSTTransformer):
"""
This Transformer is used to unravel all calls to `super().func(...)` in class methods by the explicit parent's
code. It will also in turn replace all calls of the form `module.Class.func(...)` by a call of the form
`super().func(...)`. Those calls are used to explicitly skip the unravelling of code, but we should still follow
python's standards and use `super().func(...)` instead of `Parent.func(self, ...)`.
"""
def __init__(
self,
python_module: cst.Module,
original_modeling_methods: dict[str, cst.FunctionDef],
modular_methods: dict[str, cst.FunctionDef],
new_bases: list[cst.Arg],
):
self.python_module = python_module
self.original_modeling_methods = original_modeling_methods
self.modular_methods = modular_methods
self.all_assign_target = {}
self.deleted_targets = {} # child node can delete some arguments
new_bases = [get_full_attribute_name(base.value) for base in new_bases]
self.parent_class_call_transformer = ReplaceParentClassCallTransformer(new_bases)
def update_body(self, existing_body, new_statements):
"""
Helper method to update the body by removing duplicates before adding new statements.
`existing_body` is the body of the original method, the parent class
`new_statements` are the additional statements
"""
deduplicated_new_body = []
existing_nodes = set()
for node in new_statements:
if m.matches(node, m.SimpleStatementLine(body=[m.Assign()])):
target = self.python_module.code_for_node(node.body[0].targets[0].target)
self.all_assign_target[target] = node
if m.matches(node, m.SimpleStatementLine(body=[m.Del()])):
target = self.python_module.code_for_node(node.body[0].target)
self.deleted_targets[target] = node
for stmt in existing_body:
if m.matches(stmt, m.SimpleStatementLine(body=[m.Assign()])):
target = self.python_module.code_for_node(stmt.body[0].targets[0].target)
if target in self.deleted_targets:
continue
if target in self.all_assign_target:
stmt = self.all_assign_target[target]
# Skip the docstring (will be added later on, at the beginning)
elif m.matches(stmt, DOCSTRING_NODE):
continue
comment_less_code = re.sub(r"#.*", "", self.python_module.code_for_node(stmt)).strip()
comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
deduplicated_new_body.append(stmt)
existing_nodes.add(comment_less_code)
for node in new_statements:
code = self.python_module.code_for_node(node)
comment_less_code = re.sub(r"#.*", "", code).strip()
comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
if node not in deduplicated_new_body and comment_less_code not in existing_nodes:
if not m.matches(node, m.SimpleStatementLine(body=[m.Del()])):
deduplicated_new_body.append(node)
existing_nodes.add(comment_less_code)
deduplicated_new_body = self._fix_post_init_location(deduplicated_new_body)
return deduplicated_new_body
def _fix_post_init_location(self, new_body: list[cst.CSTNode]):
"""Fix the location of the `post_init()` in the new body, if we added statements after the call to
`super()` (it needs to be the very last statement called)"""
# Fix the post_init() that has to be last
for i, node in enumerate(new_body):
code = self.python_module.code_for_node(node)
comment_less_code = re.sub(r"#.*", "", code).strip()
comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
if "self.post_init(" in comment_less_code and i < len(new_body) - 1:
# Remove it and add it again at the end
new_body.pop(i)
new_body.append(node)
break
return new_body
def _fix_init_location(self, new_body, original_body):
"""
Fix the location of the `super().__init__()` in the new body, if we had new statements before it.
If the original class' `super().__init__()` is not in the beginning, do not fix it and leave where it is.
In some cases we do not want to call super() at the very beginning.
"""
start_index = 0
for i, node in enumerate(original_body):
if m.matches(node, DOCSTRING_NODE) and i == start_index:
start_index += 1
continue
code = self.python_module.code_for_node(node)
comment_less_code = re.sub(r"#.*", "", code).strip()
comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
if "super().__init__" in comment_less_code and i > start_index:
return new_body
start_index = 0
for i, node in enumerate(new_body):
if m.matches(node, DOCSTRING_NODE) and i == start_index:
start_index += 1
continue
code = self.python_module.code_for_node(node)
comment_less_code = re.sub(r"#.*", "", code).strip()
comment_less_code = re.sub(r"\ *\n", "\n", comment_less_code).strip()
if "super().__init__" in comment_less_code and i > start_index:
# Remove it and add it again at the top after the docstrings
node = new_body.pop(i)
new_body = new_body[:start_index] + [node] + new_body[start_index:]
break
return new_body
def is_call_to_super(self, node: cst.BaseStatement, func_name: str):
"""Check whether `node` corresponds to a call to `super().func_name(...)`"""
super_call_node = m.Call(func=m.Attribute(value=m.Call(func=m.Name("super")), attr=m.Name(func_name)))
return m.matches(node, m.SimpleStatementLine(body=[m.Return(super_call_node) | m.Expr(super_call_node)]))
def leave_FunctionDef(self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef) -> cst.FunctionDef:
func_name = updated_node.name.value
self.should_check_statements = False
if func_name in self.modular_methods:
actual_body = updated_node.body.body # first body is an `IndentedBlock` wrapper
new_body = []
for i, base_statement_node in enumerate(actual_body):
if self.is_call_to_super(base_statement_node, func_name):
original_modeling_method_body = self.original_modeling_methods[func_name].body.body
new_body.extend(self.update_body(original_modeling_method_body, actual_body[i + 1 :]))
new_body = self._fix_init_location(new_body, original_modeling_method_body)
# Break here as all future statement were already accounted for in `update_body`
break
# If not a call to super, this will replace all calls of the form `module.Class.func(...)` by a
# call of the form `super().func(...)
new_body.append(base_statement_node.visit(self.parent_class_call_transformer))
return updated_node.with_changes(body=updated_node.body.with_changes(body=new_body))
return updated_node
def find_all_dependencies(
dependency_mapping: dict[str, set],
start_entity: str | None = None,
initial_dependencies: set | None = None,
initial_checked_dependencies: set | None = None,
return_parent: bool = False,
) -> list | set:
"""Return all the dependencies of the given `start_entity` or `initial_dependencies`. This is basically some kind of
BFS traversal algorithm. It can either start from `start_entity`, or `initial_dependencies`.
Args:
dependency_mapping (`Dict[str, set]`):
A mapping from entities (usually function/assignment names), to immediate dependencies. That is, for function names,
a mapping {"foo": {"bar", "test"}} would indicate that functions `bar` and `test` are immediately called
in `foo`'s definition.
start_entity (str | None, *optional*):
A key of `dependency_mapping`, indicating from which entity to start the search.
initial_dependencies (set | None, *optional*):
If `start_entity` is not provided, this can be used as an alternative. In this case, the search will continue
from all the entities in `initial_dependencies`, if they are in `dependency_mapping`.
initial_checked_dependencies (set | None, *optional*):
If provided, entities already present in `initial_checked_dependencies` will not be part of the returned dependencies.
return_parent (bool, *optional*):
If `True`, will return a list consisting of tuples (dependency, parent) instead of a simple set of dependencies. Note
that the order of the items in the list reflects the traversal order. Thus, no parent can ever appear before children.
Returns:
A set of all the dependencies, or a list of tuples `(dependency, parent)` if `return_parent=True`.
Example:
Given the following structure in the `modular_xxx.py` file:
```
def foo1():
pass
def foo2():
pass
def bar():
foo1()
def foobar():
bar()
foo2()
class MyLayer(SomeOtherModelLayer):
def forward(...):
foobar()
```
and the `dependency_mapping` created when visiting the `modular_xxx.py` file, we get:
```
dependency_mapping = {'bar': {'foo1'}, 'foobar': {'bar', 'foo2'}}
find_all_dependencies(dependency_mapping, start_entity='foobar', return_parent=True)
>>> [('bar', 'foobar'), ('foo2', 'foobar'), ('foo1', 'bar')]
```
That is, all the functions needed (and potentially their immediate parent) so that the function to be added
in MyLayer (`foobar`) can work correctly.
"""
if initial_dependencies is None and start_entity is not None:
initial_dependencies = dependency_mapping[start_entity]
if initial_checked_dependencies is None:
initial_checked_dependencies = set()
dependency_queue = deque(initial_dependencies)
all_dependencies = set()
all_dependencies_with_parent = []
checked_dependencies = set(initial_checked_dependencies)
parents = dict.fromkeys(initial_dependencies, start_entity)
while len(dependency_queue) > 0:
# Pick element to visit
current = dependency_queue.popleft()
if current not in checked_dependencies:
# Add the dependencies
all_dependencies.add(current)
all_dependencies_with_parent += [(current, parents[current])]
if current in dependency_mapping:
# Update dependency queue
dependency_queue.extend(dependency_mapping[current])
parents.update(dict.fromkeys(dependency_mapping[current], current))
# add visited node to the list
checked_dependencies.add(current)
if not return_parent:
return all_dependencies
# no child can ever appear before its parent thanks to the queue (needed to add them at the correct location in the body later)
return all_dependencies_with_parent
# Top-level variables that match the following patterns will always use the value in the `modular_xxx.py` file
ASSIGNMENTS_REGEX_TO_KEEP = [r"_CHECKPOINT", r"_EXPECTED", r"_FOR_DOC", r"_HIDDEN_STATES_START_POSITION"]
# Top-level variables that match the following patterns will use the value in the `modular_xxx.py` file only if they are not None
ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE = [r"_DOCSTRING"]
| ReplaceSuperCallTransformer |
python | ipython__ipython | IPython/utils/PyColorize.py | {
"start": 11500,
"end": 17417
} | class ____:
"""Format colored Python source."""
_theme_name: str
out: Any
pos: int
lines: list[int]
raw: str
def __init__(self, out: Any = sys.stdout, *, theme_name: str | None = None) -> None:
"""Create a parser with a specified color table and output channel.
Call format() to process code.
"""
assert theme_name is not None
self.out = out
self.pos = 0
self.lines = []
self.raw = ""
if theme_name is not None:
if theme_name in ["Linux", "LightBG", "Neutral", "NoColor"]:
warnings.warn(
f"Theme names and color schemes are lowercase in IPython 9.0 use {theme_name.lower()} instead",
DeprecationWarning,
stacklevel=2,
)
theme_name = theme_name.lower()
if not theme_name:
self.theme_name = "nocolor"
else:
self.theme_name = theme_name
@property
def theme_name(self) -> str:
return self._theme_name
@theme_name.setter
def theme_name(self, value: str) -> None:
assert value == value.lower()
self._theme_name = value
@property
def style(self) -> str:
assert False
return self._theme_name
@style.setter
def style(self, val: str) -> None:
assert False
assert val == val.lower()
self._theme_name = val
def format(self, raw: str, out: Any = None) -> str | None:
return self.format2(raw, out)[0]
def format2(self, raw: str, out: Any = None) -> tuple[str | None, bool]:
"""Parse and send the colored source.
If out is not specified, the defaults (given to constructor) are used.
out should be a file-type object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == "str" or self.out == "str" or isinstance(self.out, StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO()
string_output = 1
elif out is not None:
self.out = out
else:
raise ValueError(
'`out` or `self.out` should be file-like or the value `"str"`'
)
# Fast return of the unmodified input for nocolor scheme
# TODO:
if self.theme_name == "nocolor":
error = False
self.out.write(raw)
if string_output:
return raw, error
return None, error
# local shorthands
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while True:
pos = raw_find("\n", pos) + 1
if not pos:
break
lines_append(pos)
lines_append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write(
theme_table[self.theme_name].format(
[
(Token, "\n\n"),
(
Token.Error,
f"*** ERROR: {msg}{self.raw[self.lines[line] :]}",
),
(Token, "\n"),
]
)
)
error = True
self.out.write(
theme_table[self.theme_name].format(
[
(Token, "\n"),
]
)
)
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def _inner_call_(
self, toktype: int, toktext: str, start_pos: tuple[int, int]
) -> str:
"""like call but write to a temporary buffer"""
srow, scol = start_pos
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
acc = self.raw[oldpos:newpos]
else:
acc = ""
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return acc
# map token type to a color group
if token.LPAR <= toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
pyg_tok_type = _pygment_token_mapping.get(toktype, Token.Text)
# send text, pygments should take care of splitting on newline and resending
# the correct self.colors after the new line, which is necessary for pagers
acc += theme_table[self.theme_name].format([(pyg_tok_type, toktext)])
return acc
def __call__(
self,
toktype: int,
toktext: str,
start_pos: tuple[int, int],
end_pos: tuple[int, int],
line: str,
) -> None:
"""Token handler, with syntax highlighting."""
self.out.write(self._inner_call_(toktype, toktext, start_pos))
| Parser |
python | getsentry__sentry | src/sentry/replays/lib/new_query/conditions.py | {
"start": 9137,
"end": 9874
} | class ____(GenericBase):
@staticmethod
def visit_eq(expression: Expression, value: Any) -> Condition:
return Condition(Function("has", parameters=[expression, value]), Op.EQ, 1)
@staticmethod
def visit_neq(expression: Expression, value: Any) -> Condition:
return Condition(Function("has", parameters=[expression, value]), Op.EQ, 0)
@staticmethod
def visit_in(expression: Expression, value: list[Any]) -> Condition:
return Condition(Function("hasAny", parameters=[expression, value]), Op.EQ, 1)
@staticmethod
def visit_not_in(expression: Expression, value: list[Any]) -> Condition:
return Condition(Function("hasAny", parameters=[expression, value]), Op.EQ, 0)
| GenericArray |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/output.py | {
"start": 886,
"end": 7825
} | class ____:
"""Defines an output from an op's compute function.
Ops can have multiple outputs, in which case outputs cannot be anonymous.
Many ops have only one output, in which case the user can provide a single output definition
that will be given the default name, "result".
Output definitions may be typed using the Dagster type system.
Args:
dagster_type (Optional[Union[Type, DagsterType]]]): The type of this output.
Users should provide the Python type of the objects that they expect the op to yield
for this output, or a :py:class:`DagsterType` that defines a runtime check that they
want to be run on this output. Defaults to :py:class:`Any`.
name (Optional[str]): Name of the output. (default: "result")
description (Optional[str]): Human-readable description of the output.
is_required (Optional[bool]): Whether the presence of this field is required. (default: True)
io_manager_key (Optional[str]): The resource key of the IOManager used for storing this
output and loading it in downstream steps (default: "io_manager").
metadata (Optional[Dict[str, Any]]): A dict of the metadata for the output.
For example, users can provide a file path if the data object will be stored in a
filesystem, or provide information of a database table when it is going to load the data
into the table.
code_version (Optional[str]): Version of the code that generates this output. In
general, versions should be set only for code that deterministically produces the same
output when given the same inputs.
"""
def __init__(
self,
dagster_type=None,
name: Optional[str] = None,
description: Optional[str] = None,
is_required: bool = True,
io_manager_key: Optional[str] = None,
metadata: Optional[ArbitraryMetadataMapping] = None,
code_version: Optional[str] = None,
# make sure new parameters are updated in combine_with_inferred below
):
self._name = check_valid_name(check.opt_str_param(name, "name", DEFAULT_OUTPUT))
self._type_not_set = dagster_type is None
self._dagster_type = resolve_dagster_type(dagster_type)
self._description = check.opt_str_param(description, "description")
self._is_required = check.bool_param(is_required, "is_required")
self._io_manager_key = check.opt_str_param(
io_manager_key,
"io_manager_key",
default=DEFAULT_IO_MANAGER_KEY,
)
self._code_version = check.opt_str_param(code_version, "code_version")
self._raw_metadata = check.opt_mapping_param(metadata, "metadata", key_type=str)
self._metadata = normalize_metadata(self._raw_metadata, allow_invalid=True)
@property
def name(self) -> str:
return self._name
@property
def dagster_type(self) -> DagsterType:
return self._dagster_type
@property
def description(self) -> Optional[str]:
return self._description
@property
def is_required(self) -> bool:
return self._is_required
@property
def io_manager_key(self) -> str:
return self._io_manager_key
@property
def code_version(self) -> Optional[str]:
return self._code_version
@property
def optional(self) -> bool:
return not self.is_required
@property
def metadata(self) -> ArbitraryMetadataMapping:
return self._raw_metadata
@property
def is_dynamic(self) -> bool:
return False
def mapping_from(
self, node_name: str, output_name: Optional[str] = None, from_dynamic_mapping: bool = False
) -> "OutputMapping":
"""Create an output mapping from an output of a child node.
In a GraphDefinition, you can use this helper function to construct
an :py:class:`OutputMapping` from the output of a child node.
Args:
node_name (str): The name of the child node from which to map this output.
output_name (str): The name of the child node's output from which to map this output.
Examples:
.. code-block:: python
output_mapping = OutputDefinition(Int).mapping_from('child_node')
"""
return OutputMapping(
graph_output_name=self.name,
mapped_node_name=node_name,
mapped_node_output_name=output_name or DEFAULT_OUTPUT,
graph_output_description=self.description,
dagster_type=self.dagster_type,
from_dynamic_mapping=from_dynamic_mapping or self.is_dynamic,
)
@staticmethod
def create_from_inferred(
inferred: Optional[InferredOutputProps], code_version: Optional[str] = None
) -> "OutputDefinition":
if not inferred:
return OutputDefinition(code_version=code_version)
if is_dynamic_output_annotation(inferred.annotation):
return DynamicOutputDefinition(
dagster_type=_checked_inferred_type(inferred.annotation),
description=inferred.description,
code_version=code_version,
)
else:
return OutputDefinition(
dagster_type=_checked_inferred_type(inferred.annotation),
description=inferred.description,
code_version=code_version,
)
def combine_with_inferred(
self: TOutputDefinition, inferred: InferredOutputProps
) -> TOutputDefinition:
dagster_type = self.dagster_type
if self._type_not_set:
dagster_type = _checked_inferred_type(inferred.annotation)
if self.description is None:
description = inferred.description
else:
description = self.description
return self.__class__(
name=self.name,
dagster_type=dagster_type,
description=description,
is_required=self.is_required,
io_manager_key=self.io_manager_key,
metadata=self._metadata,
)
def _checked_inferred_type(inferred: Any) -> DagsterType:
try:
if inferred == inspect.Parameter.empty:
return resolve_dagster_type(None)
elif inferred is None:
# When inferred.annotation is None, it means someone explicitly put "None" as the
# annotation, so want to map it to a DagsterType that checks for the None type
return resolve_dagster_type(type(None))
else:
return resolve_dagster_type(inferred)
except DagsterError as e:
raise DagsterInvalidDefinitionError(
f"Problem using type '{inferred}' from return type annotation, correct the issue "
"or explicitly set the dagster_type via Out()."
) from e
@public
| OutputDefinition |
python | pola-rs__polars | py-polars/src/polars/expr/ext.py | {
"start": 355,
"end": 1543
} | class ____:
"""Namespace for extension type related expressions."""
_accessor = "ext"
def __init__(self, expr: Expr) -> None:
self._pyexpr = expr._pyexpr
@unstable()
def to(
self,
dtype: PolarsDataType | pl.DataTypeExpr,
) -> Expr:
"""
Convert to an extension `dtype`.
The input must be of the storage type of the extension dtype.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
"""
py_dtype = parse_into_datatype_expr(dtype)._pydatatype_expr
return wrap_expr(self._pyexpr.ext_to(py_dtype))
@unstable()
def storage(self) -> Expr:
"""
Get the storage values of an extension data type.
If the input does not have an extension data type, it is returned as-is.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
"""
return wrap_expr(self._pyexpr.ext_storage())
| ExprExtensionNameSpace |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-bst-in-binary-tree.py | {
"start": 217,
"end": 1301
} | class ____(object):
def maxSumBST(self, root):
"""
:type root: TreeNode
:rtype: int
"""
result = 0
stk = [[root, None, []]]
while stk:
node, tmp, ret = stk.pop()
if tmp:
lvalid, lsum, lmin, lmax = tmp[0]
rvalid, rsum, rmin, rmax = tmp[1]
if lvalid and rvalid and lmax < node.val < rmin:
total = lsum + node.val + rsum
result = max(result, total)
ret[:] = [True, total, min(lmin, node.val), max(node.val, rmax)]
continue
ret[:] = [False, 0, 0, 0]
continue
if not node:
ret[:] = [True, 0, float("inf"), float("-inf")]
continue
new_tmp = [[], []]
stk.append([node, new_tmp, ret])
stk.append([node.right, None, new_tmp[1]])
stk.append([node.left, None, new_tmp[0]])
return result
# Time: O(n)
# Space: O(h)
# dfs solution with recursion
| Solution |
python | networkx__networkx | networkx/linalg/algebraicconnectivity.py | {
"start": 329,
"end": 1938
} | class ____:
"""Preconditioned conjugate gradient method.
To solve Ax = b:
M = A.diagonal() # or some other preconditioner
solver = _PCGSolver(lambda x: A * x, lambda x: M * x)
x = solver.solve(b)
The inputs A and M are functions which compute
matrix multiplication on the argument.
A - multiply by the matrix A in Ax=b
M - multiply by M, the preconditioner surrogate for A
Warning: There is no limit on number of iterations.
"""
def __init__(self, A, M):
self._A = A
self._M = M
def solve(self, B, tol):
import numpy as np
# Densifying step - can this be kept sparse?
B = np.asarray(B)
X = np.ndarray(B.shape, order="F")
for j in range(B.shape[1]):
X[:, j] = self._solve(B[:, j], tol)
return X
def _solve(self, b, tol):
import numpy as np
import scipy as sp
A = self._A
M = self._M
tol *= sp.linalg.blas.dasum(b)
# Initialize.
x = np.zeros(b.shape)
r = b.copy()
z = M(r)
rz = sp.linalg.blas.ddot(r, z)
p = z.copy()
# Iterate.
while True:
Ap = A(p)
alpha = rz / sp.linalg.blas.ddot(p, Ap)
x = sp.linalg.blas.daxpy(p, x, a=alpha)
r = sp.linalg.blas.daxpy(Ap, r, a=-alpha)
if sp.linalg.blas.dasum(r) < tol:
return x
z = M(r)
beta = sp.linalg.blas.ddot(r, z)
beta, rz = beta / rz, beta
p = sp.linalg.blas.daxpy(p, z, a=beta)
| _PCGSolver |
python | openai__openai-python | src/openai/resources/uploads/parts.py | {
"start": 7488,
"end": 7709
} | class ____:
def __init__(self, parts: AsyncParts) -> None:
self._parts = parts
self.create = _legacy_response.async_to_raw_response_wrapper(
parts.create,
)
| AsyncPartsWithRawResponse |
python | pytest-dev__pytest | src/_pytest/assertion/rewrite.py | {
"start": 1550,
"end": 1922
} | class ____:
pass
assertstate_key = StashKey["AssertionState"]()
# pytest caches rewritten pycs in pycache dirs
PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
PYC_EXT = ".py" + ((__debug__ and "c") or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
# Special marker that denotes we have just left a scope definition
_SCOPE_END_MARKER = Sentinel()
| Sentinel |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 7792,
"end": 7943
} | class ____(_QuantizerConfigCreate):
@staticmethod
def quantizer_name() -> str:
return "skipDefaultQuantization"
| _UncompressedConfigCreate |
python | streamlit__streamlit | lib/streamlit/elements/widgets/time_widgets.py | {
"start": 14842,
"end": 15702
} | class ____:
value: _DateInputValues
def deserialize(self, ui_value: Any) -> DateWidgetReturn:
return_value: Sequence[date] | None
if ui_value is not None:
return_value = tuple(
datetime.strptime(v, "%Y/%m/%d").date() for v in ui_value
)
else:
return_value = self.value.value
if return_value is None or len(return_value) == 0:
return () if self.value.is_range else None
if not self.value.is_range:
return return_value[0]
return cast("DateWidgetReturn", tuple(return_value))
def serialize(self, v: DateWidgetReturn) -> list[str]:
if v is None:
return []
to_serialize = list(v) if isinstance(v, Sequence) else [v]
return [date.strftime(v, "%Y/%m/%d") for v in to_serialize]
| DateInputSerde |
python | ipython__ipython | tests/test_run.py | {
"start": 6108,
"end": 13103
} | class ____(tt.TempFileMixin):
def test_simpledef(self):
"""Test that simple class definitions work."""
src = "class foo: pass\n" "def f(): return foo()"
self.mktmp(src)
_ip.run_line_magic("run", str(self.fname))
_ip.run_cell("t = isinstance(f(), foo)")
assert _ip.user_ns["t"] is True
@pytest.mark.xfail(
platform.python_implementation() == "PyPy",
reason="expecting __del__ call on exit is unreliable and doesn't happen on PyPy",
)
def test_obj_del(self):
"""Test that object's __del__ methods are called on exit."""
src = (
"class A(object):\n"
" def __del__(self):\n"
" print('object A deleted')\n"
"a = A()\n"
)
self.mktmp(src)
err = None
tt.ipexec_validate(self.fname, "object A deleted", err)
def test_aggressive_namespace_cleanup(self):
"""Test that namespace cleanup is not too aggressive GH-238
Returning from another run magic deletes the namespace"""
# see ticket https://github.com/ipython/ipython/issues/238
with tt.TempFileMixin() as empty:
empty.mktmp("")
# On Windows, the filename will have \users in it, so we need to use the
# repr so that the \u becomes \\u.
src = (
"ip = get_ipython()\n"
"for i in range(5):\n"
" try:\n"
" ip.run_line_magic(%r, %r)\n"
" except NameError as e:\n"
" print(i)\n"
" break\n" % ("run", empty.fname)
)
self.mktmp(src)
_ip.run_line_magic("run", str(self.fname))
_ip.run_cell("ip == get_ipython()")
assert _ip.user_ns["i"] == 4
def test_run_second(self):
"""Test that running a second file doesn't clobber the first, gh-3547"""
self.mktmp("avar = 1\n" "def afunc():\n" " return avar\n")
with tt.TempFileMixin() as empty:
empty.mktmp("")
_ip.run_line_magic("run", self.fname)
_ip.run_line_magic("run", empty.fname)
assert _ip.user_ns["afunc"]() == 1
@pytest.mark.xfail(is_freethreaded, reason="C-third leaks on free-threaded python")
def test_tclass(self):
mydir = os.path.dirname(__file__)
tc = os.path.join(mydir, "tclass")
src = f"""\
import gc
%run "{tc}" C-first
gc.collect(0)
%run "{tc}" C-second
gc.collect(0)
%run "{tc}" C-third
gc.collect(0)
%reset -f
"""
self.mktmp(src, ".ipy")
out = """\
ARGV 1-: ['C-first']
ARGV 1-: ['C-second']
tclass.py: deleting object: C-first
ARGV 1-: ['C-third']
tclass.py: deleting object: C-second
tclass.py: deleting object: C-third
"""
err = None
tt.ipexec_validate(self.fname, out, err)
def test_run_i_after_reset(self):
"""Check that %run -i still works after %reset (gh-693)"""
src = "yy = zz\n"
self.mktmp(src)
_ip.run_cell("zz = 23")
try:
_ip.run_line_magic("run", "-i %s" % self.fname)
assert _ip.user_ns["yy"] == 23
finally:
_ip.run_line_magic("reset", "-f")
_ip.run_cell("zz = 23")
try:
_ip.run_line_magic("run", "-i %s" % self.fname)
assert _ip.user_ns["yy"] == 23
finally:
_ip.run_line_magic("reset", "-f")
def test_unicode(self):
"""Check that files in odd encodings are accepted."""
mydir = os.path.dirname(__file__)
na = os.path.join(mydir, "nonascii.py")
_ip.run_line_magic("run", na)
assert _ip.user_ns["u"] == "Ўт№Ф"
def test_run_py_file_attribute(self):
"""Test handling of `__file__` attribute in `%run <file>.py`."""
src = "t = __file__\n"
self.mktmp(src)
_missing = object()
file1 = _ip.user_ns.get("__file__", _missing)
_ip.run_line_magic("run", self.fname)
file2 = _ip.user_ns.get("__file__", _missing)
# Check that __file__ was equal to the filename in the script's
# namespace.
assert _ip.user_ns["t"] == self.fname
# Check that __file__ was not leaked back into user_ns.
assert file1 == file2
def test_run_ipy_file_attribute(self):
"""Test handling of `__file__` attribute in `%run <file.ipy>`."""
src = "t = __file__\n"
self.mktmp(src, ext=".ipy")
_missing = object()
file1 = _ip.user_ns.get("__file__", _missing)
_ip.run_line_magic("run", self.fname)
file2 = _ip.user_ns.get("__file__", _missing)
# Check that __file__ was equal to the filename in the script's
# namespace.
assert _ip.user_ns["t"] == self.fname
# Check that __file__ was not leaked back into user_ns.
assert file1 == file2
def test_run_formatting(self):
"""Test that %run -t -N<N> does not raise a TypeError for N > 1."""
src = "pass"
self.mktmp(src)
_ip.run_line_magic("run", "-t -N 1 %s" % self.fname)
_ip.run_line_magic("run", "-t -N 10 %s" % self.fname)
def test_ignore_sys_exit(self):
"""Test the -e option to ignore sys.exit()"""
src = "import sys; sys.exit(1)"
self.mktmp(src)
with tt.AssertPrints("SystemExit"):
_ip.run_line_magic("run", self.fname)
with tt.AssertNotPrints("SystemExit"):
_ip.run_line_magic("run", "-e %s" % self.fname)
def test_run_nb(self):
"""Test %run notebook.ipynb"""
pytest.importorskip("nbformat")
from nbformat import v4, writes
nb = v4.new_notebook(
cells=[
v4.new_markdown_cell("The Ultimate Question of Everything"),
v4.new_code_cell("answer=42"),
]
)
src = writes(nb, version=4)
self.mktmp(src, ext=".ipynb")
_ip.run_line_magic("run", self.fname)
assert _ip.user_ns["answer"] == 42
def test_run_nb_error(self):
"""Test %run notebook.ipynb error"""
pytest.importorskip("nbformat")
from nbformat import v4, writes
# %run when a file name isn't provided
pytest.raises(Exception, _ip.run_line_magic, "run")
# %run when a file doesn't exist
pytest.raises(Exception, _ip.run_line_magic, "run", "foobar.ipynb")
# %run on a notebook with an error
nb = v4.new_notebook(cells=[v4.new_code_cell("0/0")])
src = writes(nb, version=4)
self.mktmp(src, ext=".ipynb")
pytest.raises(Exception, _ip.run_line_magic, "run", self.fname)
def test_file_options(self):
src = "import sys\n" 'a = " ".join(sys.argv[1:])\n'
self.mktmp(src)
test_opts = "-x 3 --verbose"
_ip.run_line_magic("run", "{0} {1}".format(self.fname, test_opts))
assert _ip.user_ns["a"] == test_opts
| TestMagicRunSimple |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 19509,
"end": 24951
} | class ____(BaseParam[Range]):
"""Filter on range in between the lower and upper bound."""
def __init__(self, value: Range | None, attribute: InstrumentedAttribute) -> None:
super().__init__(value)
self.attribute: InstrumentedAttribute = attribute
def to_orm(self, select: Select) -> Select:
if self.skip_none is False:
raise ValueError(f"Cannot set 'skip_none' to False on a {type(self)}")
if self.value is None:
return select
if self.value.lower_bound_gte:
select = select.where(self.attribute >= self.value.lower_bound_gte)
if self.value.lower_bound_gt:
select = select.where(self.attribute > self.value.lower_bound_gt)
if self.value.upper_bound_lte:
select = select.where(self.attribute <= self.value.upper_bound_lte)
if self.value.upper_bound_lt:
select = select.where(self.attribute < self.value.upper_bound_lt)
return select
@classmethod
def depends(cls, *args: Any, **kwargs: Any) -> Self:
raise NotImplementedError("Use the `range_filter_factory` function to create the dependency")
def is_active(self) -> bool:
"""Check if the range filter has any active bounds."""
return self.value is not None and (
self.value.lower_bound_gte is not None
or self.value.lower_bound_gt is not None
or self.value.upper_bound_lte is not None
or self.value.upper_bound_lt is not None
)
def datetime_range_filter_factory(
filter_name: str, model: Base, attribute_name: str | None = None
) -> Callable[[datetime | None, datetime | None, datetime | None, datetime | None], RangeFilter]:
def depends_datetime(
lower_bound_gte: datetime | None = Query(alias=f"{filter_name}_gte", default=None),
lower_bound_gt: datetime | None = Query(alias=f"{filter_name}_gt", default=None),
upper_bound_lte: datetime | None = Query(alias=f"{filter_name}_lte", default=None),
upper_bound_lt: datetime | None = Query(alias=f"{filter_name}_lt", default=None),
) -> RangeFilter:
attr = getattr(model, attribute_name or filter_name)
if filter_name in ("start_date", "end_date"):
attr = func.coalesce(attr, func.now())
return RangeFilter(
Range(
lower_bound_gte=lower_bound_gte,
lower_bound_gt=lower_bound_gt,
upper_bound_lte=upper_bound_lte,
upper_bound_lt=upper_bound_lt,
),
attr,
)
return depends_datetime
def float_range_filter_factory(
filter_name: str, model: Base
) -> Callable[[float | None, float | None, float | None, float | None], RangeFilter]:
def depends_float(
lower_bound_gte: float | None = Query(alias=f"{filter_name}_gte", default=None),
lower_bound_gt: float | None = Query(alias=f"{filter_name}_gt", default=None),
upper_bound_lte: float | None = Query(alias=f"{filter_name}_lte", default=None),
upper_bound_lt: float | None = Query(alias=f"{filter_name}_lt", default=None),
) -> RangeFilter:
return RangeFilter(
Range(
lower_bound_gte=lower_bound_gte,
lower_bound_gt=lower_bound_gt,
upper_bound_lte=upper_bound_lte,
upper_bound_lt=upper_bound_lt,
),
getattr(model, filter_name),
)
return depends_float
# Common Safe DateTime
DateTimeQuery = Annotated[str, AfterValidator(_safe_parse_datetime)]
OptionalDateTimeQuery = Annotated[str | None, AfterValidator(_safe_parse_datetime_optional)]
# Dag
QueryLimit = Annotated[LimitFilter, Depends(LimitFilter.depends)]
QueryOffset = Annotated[OffsetFilter, Depends(OffsetFilter.depends)]
QueryPausedFilter = Annotated[
FilterParam[bool | None],
Depends(filter_param_factory(DagModel.is_paused, bool | None, filter_name="paused")),
]
QueryHasImportErrorsFilter = Annotated[
FilterParam[bool | None],
Depends(
filter_param_factory(
DagModel.has_import_errors,
bool | None,
filter_name="has_import_errors",
description="Filter Dags by having import errors. Only Dags that have been successfully loaded before will be returned.",
)
),
]
QueryFavoriteFilter = Annotated[_FavoriteFilter, Depends(_FavoriteFilter.depends)]
QueryExcludeStaleFilter = Annotated[_ExcludeStaleFilter, Depends(_ExcludeStaleFilter.depends)]
QueryDagIdPatternSearch = Annotated[
_SearchParam, Depends(search_param_factory(DagModel.dag_id, "dag_id_pattern"))
]
QueryDagDisplayNamePatternSearch = Annotated[
_SearchParam, Depends(search_param_factory(DagModel.dag_display_name, "dag_display_name_pattern"))
]
QueryBundleNameFilter = Annotated[
FilterParam[str | None],
Depends(filter_param_factory(DagModel.bundle_name, str | None, filter_name="bundle_name")),
]
QueryBundleVersionFilter = Annotated[
FilterParam[str | None],
Depends(filter_param_factory(DagModel.bundle_version, str | None, filter_name="bundle_version")),
]
QueryDagIdPatternSearchWithNone = Annotated[
_SearchParam, Depends(search_param_factory(DagModel.dag_id, "dag_id_pattern", False))
]
QueryTagsFilter = Annotated[_TagsFilter, Depends(_TagsFilter.depends)]
QueryOwnersFilter = Annotated[_OwnersFilter, Depends(_OwnersFilter.depends)]
| RangeFilter |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/layout.py | {
"start": 21627,
"end": 25614
} | class ____(LayoutOperatorBase):
"""Operator for torch.cat() operation."""
def __init__(self):
"""Initialize CatOperator."""
super().__init__("cat")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.cat"
def can_produce(self, output_spec: Spec) -> bool:
"""Cat can produce any tensor output."""
if not isinstance(output_spec, TensorSpec):
return False
# Cat can produce any tensor with at least one dimension
return len(output_spec.size) > 0
def _get_cat_params(self, output_spec: TensorSpec) -> tuple[int, int]:
"""Get consistent cat parameters based on output spec.
This method uses the output_spec to deterministically choose cat parameters,
ensuring that fuzz_inputs_specs and codegen make the same choices.
"""
# Use output_spec properties to seed random choices
# This ensures both methods make the same choices
seed_value = hash((output_spec.size, output_spec.dtype))
rng = random.Random(seed_value)
cat_dim = rng.randint(0, len(output_spec.size) - 1)
num_tensors = rng.randint(2, 4)
return cat_dim, num_tensors
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for cat operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("CatOperator can only produce TensorSpec outputs")
# torch.cat() concatenates tensors along a dimension
# Choose a random dimension to concatenate along
if len(output_spec.size) == 0:
raise ValueError("Cannot concatenate scalar tensors")
cat_dim, num_tensors = self._get_cat_params(output_spec)
# Distribute output size along cat_dim across input tensors
total_size = output_spec.size[cat_dim]
# Use deterministic RNG for splitting sizes
seed_value = hash((output_spec.size, output_spec.dtype))
rng = random.Random(seed_value + 1) # +1 to differentiate from param selection
# Generate sizes for each input tensor along cat_dim
input_sizes_at_cat_dim = []
remaining_size = total_size
for i in range(num_tensors - 1):
if remaining_size > 0:
# Randomly split the remaining size
max_size = max(1, remaining_size - (num_tensors - i - 1))
size_for_this_tensor = rng.randint(1, max_size)
input_sizes_at_cat_dim.append(size_for_this_tensor)
remaining_size -= size_for_this_tensor
else:
input_sizes_at_cat_dim.append(0)
# Last tensor gets the remaining size
input_sizes_at_cat_dim.append(max(0, remaining_size))
# Create input tensor specs
from torchfuzz.tensor_fuzzer import fuzz_valid_stride
input_specs = []
for size_at_cat_dim in input_sizes_at_cat_dim:
input_size = list(output_spec.size)
input_size[cat_dim] = size_at_cat_dim
input_size = tuple(input_size)
input_stride = fuzz_valid_stride(input_size)
input_specs.append(
TensorSpec(
size=input_size, stride=input_stride, dtype=output_spec.dtype
)
)
return input_specs
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for cat operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("CatOperator can only produce TensorSpec outputs")
# Use the same cat_dim that was used in fuzz_inputs_specs
cat_dim, _ = self._get_cat_params(output_spec)
# Generate the cat operation
tensors_str = ", ".join(input_names)
return f"{output_name} = torch.cat([{tensors_str}], dim={cat_dim})"
| CatOperator |
python | lazyprogrammer__machine_learning_examples | rl2/a3c/worker.py | {
"start": 111,
"end": 416
} | class ____:
def __init__(self, state, action, reward, next_state, done):
self.state = state
self.action = action
self.reward = reward
self.next_state = next_state
self.done = done
# Transform raw images for input into neural network
# 1) Convert to grayscale
# 2) Resize
# 3) Crop
| Step |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 16738,
"end": 16947
} | class ____(SentryAppInstallationPermission):
scope_map = {
"POST": ("event:read", "event:write", "event:admin"),
"DELETE": ("event:admin",),
}
| SentryAppInstallationExternalIssuePermission |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 55597,
"end": 57246
} | class ____(ASTBase):
def __init__(
self, ident: ASTNestedName, args: list[ASTMacroParameter] | None
) -> None:
self.ident = ident
self.args = args
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTMacro):
return NotImplemented
return self.ident == other.ident and self.args == other.args
def __hash__(self) -> int:
return hash((self.ident, self.args))
@property
def name(self) -> ASTNestedName:
return self.ident
def get_id(self, version: int, objectType: str, symbol: Symbol) -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.ident)]
if self.args is not None:
res.append('(')
first = True
for arg in self.args:
if not first:
res.append(', ')
first = False
res.append(transform(arg))
res.append(')')
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.ident.describe_signature(signode, mode, env, symbol)
if self.args is None:
return
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
param = addnodes.desc_parameter('', '', noemph=True)
arg.describe_signature(param, 'param', env, symbol=symbol)
paramlist += param
signode += paramlist
| ASTMacro |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/psycopg.py | {
"start": 7422,
"end": 7625
} | class ____(JSON):
def bind_processor(self, dialect):
return self._make_bind_processor(None, dialect._psycopg_Json)
def result_processor(self, dialect, coltype):
return None
| _PGJSON |
python | pytest-dev__pytest | testing/python/fixtures.py | {
"start": 60182,
"end": 65940
} | class ____:
@pytest.fixture
def pytester(self, pytester: Pytester) -> Pytester:
pytester.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True)
def perfunction(request, tmp_path):
pass
@pytest.fixture()
def arg1(tmp_path):
pass
@pytest.fixture(autouse=True)
def perfunction2(arg1):
pass
@pytest.fixture
def fm(request):
return request._fixturemanager
@pytest.fixture
def item(request):
return request._pyfuncitem
"""
)
return pytester
def test_parsefactories_conftest(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
from _pytest.pytester import get_public_names
def test_check_setup(item, fm):
autousenames = list(fm._getautousenames(item))
assert len(get_public_names(autousenames)) == 2
assert "perfunction2" in autousenames
assert "perfunction" in autousenames
"""
)
reprec = pytester.inline_run("-s")
reprec.assertoutcome(passed=1)
def test_two_classes_separated_autouse(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
class TestA(object):
values = []
@pytest.fixture(autouse=True)
def setup1(self):
self.values.append(1)
def test_setup1(self):
assert self.values == [1]
class TestB(object):
values = []
@pytest.fixture(autouse=True)
def setup2(self):
self.values.append(1)
def test_setup2(self):
assert self.values == [1]
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=2)
def test_setup_at_classlevel(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
class TestClass(object):
@pytest.fixture(autouse=True)
def permethod(self, request):
request.instance.funcname = request.function.__name__
def test_method1(self):
assert self.funcname == "test_method1"
def test_method2(self):
assert self.funcname == "test_method2"
"""
)
reprec = pytester.inline_run("-s")
reprec.assertoutcome(passed=2)
@pytest.mark.xfail(reason="'enabled' feature not implemented")
def test_setup_enabled_functionnode(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
def enabled(parentnode, markers):
return "needsdb" in markers
@pytest.fixture(params=[1,2])
def db(request):
return request.param
@pytest.fixture(enabled=enabled, autouse=True)
def createdb(db):
pass
def test_func1(request):
assert "db" not in request.fixturenames
@pytest.mark.needsdb
def test_func2(request):
assert "db" in request.fixturenames
"""
)
reprec = pytester.inline_run("-s")
reprec.assertoutcome(passed=2)
def test_callables_nocode(self, pytester: Pytester) -> None:
"""An imported mock.call would break setup/factory discovery due to
it being callable and __code__ not being a code object."""
pytester.makepyfile(
"""
class _call(tuple):
def __call__(self, *k, **kw):
pass
def __getattr__(self, k):
return self
call = _call()
"""
)
reprec = pytester.inline_run("-s")
reprec.assertoutcome(failed=0, passed=0)
def test_autouse_in_conftests(self, pytester: Pytester) -> None:
a = pytester.mkdir("a")
b = pytester.mkdir("a1")
conftest = pytester.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True)
def hello():
xxx
"""
)
conftest.rename(a.joinpath(conftest.name))
a.joinpath("test_something.py").write_text(
"def test_func(): pass", encoding="utf-8"
)
b.joinpath("test_otherthing.py").write_text(
"def test_func(): pass", encoding="utf-8"
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*1 error*
"""
)
def test_autouse_in_module_and_two_classes(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
values = []
@pytest.fixture(autouse=True)
def append1():
values.append("module")
def test_x():
assert values == ["module"]
class TestA(object):
@pytest.fixture(autouse=True)
def append2(self):
values.append("A")
def test_hello(self):
assert values == ["module", "module", "A"], values
class TestA2(object):
def test_world(self):
assert values == ["module", "module", "A", "module"], values
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=3)
| TestAutouseDiscovery |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB189.py | {
"start": 292,
"end": 355
} | class ____(EnumMeta):
pass
# positives
| CaseInsensitiveEnumMeta |
python | automl__auto-sklearn | test/test_pipeline/components/classification/test_libsvm_svc.py | {
"start": 264,
"end": 2738
} | class ____(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.96
res["default_iris_iterative"] = -1
res["default_iris_proba"] = 0.36298338197586716
res["default_iris_sparse"] = 0.64
res["default_digits"] = 0.096539162112932606
res["default_digits_iterative"] = -1
res["default_digits_binary"] = 0.90103217972070426
res["default_digits_multilabel"] = -1
res["default_digits_multilabel_proba"] = -1
sk_mod = sklearn.svm.SVC
module = LibSVM_SVC
def test_default_configuration_predict_proba_individual(self):
# Leave this additional test here
for i in range(2):
predictions, targets = _test_classifier_predict_proba(
LibSVM_SVC, sparse=True, dataset="digits", train_size_maximum=500
)
self.assertAlmostEqual(
5.273502056835706, sklearn.metrics.log_loss(targets, predictions)
)
for i in range(2):
predictions, targets = _test_classifier_predict_proba(
LibSVM_SVC, sparse=True, dataset="iris"
)
self.assertAlmostEqual(
0.8408320837510618, sklearn.metrics.log_loss(targets, predictions)
)
# 2 class
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(dataset="iris")
remove_training_data = Y_train == 2
remove_test_data = Y_test == 2
X_train = X_train[~remove_training_data]
Y_train = Y_train[~remove_training_data]
X_test = X_test[~remove_test_data]
Y_test = Y_test[~remove_test_data]
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = LibSVM_SVC.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
cls = LibSVM_SVC(
random_state=1,
**{
hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None
},
)
cls = cls.fit(X_train, Y_train)
prediction = cls.predict_proba(X_test)
self.assertAlmostEqual(
sklearn.metrics.log_loss(Y_test, prediction),
0.6927962762794081,
places=4,
)
| LibSVM_SVCComponentTest |
python | openai__openai-python | src/openai/types/beta/chatkit/thread_list_params.py | {
"start": 206,
"end": 812
} | class ____(TypedDict, total=False):
after: str
"""List items created after this thread item ID.
Defaults to null for the first page.
"""
before: str
"""List items created before this thread item ID.
Defaults to null for the newest results.
"""
limit: int
"""Maximum number of thread items to return. Defaults to 20."""
order: Literal["asc", "desc"]
"""Sort order for results by creation time. Defaults to `desc`."""
user: str
"""Filter threads that belong to this user identifier.
Defaults to null to return all users.
"""
| ThreadListParams |
python | django__django | tests/backends/mysql/test_operations.py | {
"start": 242,
"end": 1819
} | class ____(SimpleTestCase):
def test_sql_flush(self):
# allow_cascade doesn't change statements on MySQL.
for allow_cascade in [False, True]:
with self.subTest(allow_cascade=allow_cascade):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
allow_cascade=allow_cascade,
),
[
"SET FOREIGN_KEY_CHECKS = 0;",
"DELETE FROM `backends_person`;",
"DELETE FROM `backends_tag`;",
"SET FOREIGN_KEY_CHECKS = 1;",
],
)
def test_sql_flush_sequences(self):
# allow_cascade doesn't change statements on MySQL.
for allow_cascade in [False, True]:
with self.subTest(allow_cascade=allow_cascade):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
reset_sequences=True,
allow_cascade=allow_cascade,
),
[
"SET FOREIGN_KEY_CHECKS = 0;",
"TRUNCATE `backends_person`;",
"TRUNCATE `backends_tag`;",
"SET FOREIGN_KEY_CHECKS = 1;",
],
)
| MySQLOperationsTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/operators.py | {
"start": 18518,
"end": 20419
} | class ____(Operators):
"""defines ORDER BY operators, which can operate as single expressions
or comma-separated lists
"""
__slots__ = ()
if typing.TYPE_CHECKING:
def operate(
self, op: OperatorType, *other: Any, **kwargs: Any
) -> OrderingOperators: ...
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
) -> OrderingOperators: ...
def desc(self) -> OrderingOperators:
"""Produce a :func:`_expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self) -> OrderingOperators:
"""Produce a :func:`_expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nulls_first(self) -> OrderingOperators:
"""Produce a :func:`_expression.nulls_first` clause against the
parent object.
.. versionchanged:: 1.4 The ``nulls_first()`` operator is
renamed from ``nullsfirst()`` in previous releases.
The previous name remains available for backwards compatibility.
"""
return self.operate(nulls_first_op)
# deprecated 1.4; see #5435
if TYPE_CHECKING:
def nullsfirst(self) -> OrderingOperators: ...
else:
nullsfirst = nulls_first
def nulls_last(self) -> OrderingOperators:
"""Produce a :func:`_expression.nulls_last` clause against the
parent object.
.. versionchanged:: 1.4 The ``nulls_last()`` operator is
renamed from ``nullslast()`` in previous releases.
The previous name remains available for backwards compatibility.
"""
return self.operate(nulls_last_op)
# deprecated 1.4; see #5429
if TYPE_CHECKING:
def nullslast(self) -> OrderingOperators: ...
else:
nullslast = nulls_last
| OrderingOperators |
python | ray-project__ray | python/ray/tune/stopper/timeout.py | {
"start": 178,
"end": 1813
} | class ____(Stopper):
"""Stops all trials after a certain timeout.
This stopper is automatically created when the `time_budget_s`
argument is passed to `tune.RunConfig()`.
Args:
timeout: Either a number specifying the timeout in seconds, or
a `datetime.timedelta` object.
"""
def __init__(self, timeout: Union[int, float, datetime.timedelta]):
from datetime import timedelta
if isinstance(timeout, timedelta):
self._timeout_seconds = timeout.total_seconds()
elif isinstance(timeout, (int, float)):
self._timeout_seconds = timeout
else:
raise ValueError(
"`timeout` parameter has to be either a number or a "
"`datetime.timedelta` object. Found: {}".format(type(timeout))
)
self._budget = self._timeout_seconds
# To account for setup overhead, set the last check time only after
# the first call to `stop_all()`.
self._last_check = None
def __call__(self, trial_id, result):
return False
def stop_all(self):
now = time.time()
if self._last_check:
taken = now - self._last_check
self._budget -= taken
self._last_check = now
if self._budget <= 0:
logger.info(
f"Reached timeout of {self._timeout_seconds} seconds. "
f"Stopping all trials."
)
return True
return False
def __setstate__(self, state: dict):
state["_last_check"] = None
self.__dict__.update(state)
| TimeoutStopper |
python | walkccc__LeetCode | solutions/2663. Lexicographically Smallest Beautiful String/2663.py | {
"start": 0,
"end": 1128
} | class ____:
def smallestBeautifulString(self, s: str, k: int) -> str:
chars = list(s)
for i in reversed(range(len(chars))):
chars[i] = chr(ord(chars[i]) + 1)
while self._containsPalindrome(chars, i):
chars[i] = chr(ord(chars[i]) + 1)
if chars[i] < chr(ord('a') + k):
# If s[i] is among the first k letters, then change the letters after
# s[i] to the smallest ones that don't form any palindrome substring.
return self._changeSuffix(chars, i + 1)
return ''
def _containsPalindrome(self, chars: list[str], i: int) -> bool:
"""Returns True if chars[0..i] contains palindrome."""
return ((i > 0 and chars[i] == chars[i - 1]) or
(i > 1 and chars[i] == chars[i - 2]))
def _changeSuffix(self, chars: list[str], i: int) -> str:
"""
Returns a string, where replacing sb[i..n) with the smallest possible
letters don't form any palindrome substring.
"""
for j in range(i, len(chars)):
chars[j] = 'a'
while self._containsPalindrome(chars, j):
chars[j] = chr(ord(chars[j]) + 1)
return ''.join(chars)
| Solution |
python | getsentry__sentry | src/sentry/utils/locking/backends/__init__.py | {
"start": 0,
"end": 1162
} | class ____:
"""
Interface for providing lock behavior that is used by the
``sentry.utils.locking.Lock`` class.
"""
def acquire(self, key: str, duration: int, routing_key: str | None = None) -> None:
"""
Acquire a lock, represented by the given key for the given duration (in
seconds.) This method should attempt to acquire the lock once, in a
non-blocking fashion, allowing attempt retry policies to be defined
separately. A routing key may also be provided to control placement,
but how or if it is implemented is dependent on the specific backend
implementation.
The return value is not used. If the lock cannot be acquired, an
exception should be raised.
"""
raise NotImplementedError
def release(self, key: str, routing_key: str | None = None) -> None:
"""
Release a lock. The return value is not used.
"""
raise NotImplementedError
def locked(self, key: str, routing_key: str | None = None) -> bool:
"""
Check if a lock has been taken.
"""
raise NotImplementedError
| LockBackend |
python | getsentry__sentry | tests/sentry_plugins/trello/test_plugin.py | {
"start": 1663,
"end": 9710
} | class ____(TrelloPluginTestBase):
def setUp(self) -> None:
self.group = self.create_group(message="Hello world", culprit="foo.bar")
self.plugin.set_option("token", "7c8951d1", self.project)
self.plugin.set_option("key", "39g", self.project)
self.plugin.set_option("organization", "f187", self.project)
self.login_as(self.user)
def test_get_config_no_org(self) -> None:
self.plugin.unset_option("organization", self.project)
out = self.plugin.get_config(self.project)
assert out == [
{
"default": "39g",
"required": True,
"type": "text",
"name": "key",
"label": "Trello API Key",
},
{
"name": "token",
"default": None,
"required": False,
"label": "Trello API Token",
"prefix": "7c895",
"type": "secret",
"has_saved_value": True,
},
]
@responses.activate
def test_get_config_include_additional(self) -> None:
self.plugin.unset_option("organization", self.project)
responses.add(
responses.GET,
"https://api.trello.com/1/members/me/organizations",
json=[{"name": "team 1", "id": "2d8e"}, {"name": "team 2", "id": "d0cc"}],
)
out = self.plugin.get_config(self.project, add_additional_fields=True)
assert out == [
{
"default": "39g",
"required": True,
"type": "text",
"name": "key",
"label": "Trello API Key",
},
{
"name": "token",
"default": None,
"required": False,
"label": "Trello API Token",
"prefix": "7c895",
"type": "secret",
"has_saved_value": True,
},
{
"name": "organization",
"default": None,
"required": False,
"choices": [("2d8e", "team 1"), ("d0cc", "team 2")],
"label": "Trello Organization",
"type": "select",
},
]
@responses.activate
def test_create_issue(self) -> None:
responses.add(responses.POST, "https://api.trello.com/1/cards", json={"shortLink": "rds43"})
form_data = {
"title": "Hello",
"description": "Fix this.",
"board": "ads23f",
"list": "23tds",
}
request = drf_request_from_request(self.make_request(user=self.user, method="POST"))
assert self.plugin.create_issue(request, self.group, form_data) == "rds43"
responses_request = responses.calls[0].request
assert responses_request.url == "https://api.trello.com/1/cards?token=7c8951d1&key=39g"
payload = orjson.loads(responses_request.body)
assert payload == {"name": "Hello", "desc": "Fix this.", "idList": "23tds"}
@responses.activate
def test_link_issue(self) -> None:
responses.add(
responses.GET,
"https://api.trello.com/1/cards/SstgnBIQ",
json={"idShort": 2, "name": "MyTitle", "shortLink": "SstgnBIQ"},
)
responses.add(
responses.POST, "https://api.trello.com/1/cards/SstgnBIQ/actions/comments", json={}
)
form_data = {"comment": "please fix this", "issue_id": "SstgnBIQ"}
request = drf_request_from_request(self.make_request(user=self.user, method="POST"))
assert self.plugin.link_issue(request, self.group, form_data) == {
"title": "MyTitle",
"id": "SstgnBIQ",
}
responses_request = responses.calls[0].request
assert (
responses_request.url
== "https://api.trello.com/1/cards/SstgnBIQ?fields=name%2CshortLink%2CidShort&token=7c8951d1&key=39g"
)
responses_request = responses.calls[1].request
assert (
responses_request.url
== "https://api.trello.com/1/cards/SstgnBIQ/actions/comments?text=please+fix+this&token=7c8951d1&key=39g"
)
@responses.activate
def test_view_options(self) -> None:
responses.add(
responses.GET,
"https://api.trello.com/1/boards/f34/lists",
json=[{"id": "8f3", "name": "list 1"}, {"id": "j8f", "name": "list 2"}],
)
request = drf_request_from_request(
self.make_request(
user=self.user, method="GET", GET={"option_field": "list", "board": "f34"}
)
)
response = self.plugin.view_options(request, self.group)
assert response.data == {"list": [("8f3", "list 1"), ("j8f", "list 2")]}
responses_request = responses.calls[0].request
assert (
responses_request.url
== "https://api.trello.com/1/boards/f34/lists?token=7c8951d1&key=39g"
)
@responses.activate
def test_view_autocomplete(self) -> None:
responses.add(
responses.GET,
"https://api.trello.com/1/search",
json={
"cards": [
{"id": "4fsdafad", "name": "KeyError", "idShort": 1, "shortLink": "0lr"},
{"id": "f4usdfa", "name": "Key Missing", "idShort": 3, "shortLink": "9lf"},
]
},
)
request = drf_request_from_request(
self.make_request(
user=self.user,
method="GET",
GET={"autocomplete_field": "issue_id", "autocomplete_query": "Key"},
)
)
response = self.plugin.view_autocomplete(request, self.group)
assert response.data == {
"issue_id": [
{"id": "0lr", "text": "(#1) KeyError"},
{"id": "9lf", "text": "(#3) Key Missing"},
]
}
responses_request = responses.calls[0].request
url = urlparse(responses_request.url)
query = dict(parse_qsl(url.query))
assert url.path == "/1/search"
assert query == {
"cards_limit": "100",
"partial": "true",
"modelTypes": "cards",
"token": "7c8951d1",
"card_fields": "name,shortLink,idShort",
"key": "39g",
"query": "Key",
"idOrganizations": "f187",
}
@responses.activate
def test_view_autocomplete_no_org(self) -> None:
self.plugin.unset_option("organization", self.project)
responses.add(
responses.GET,
"https://api.trello.com/1/search",
json={
"cards": [
{"id": "4fsdafad", "name": "KeyError", "idShort": 1, "shortLink": "0lr"},
{"id": "f4usdfa", "name": "Key Missing", "idShort": 3, "shortLink": "9lf"},
]
},
)
request = drf_request_from_request(
self.make_request(
user=self.user,
method="GET",
GET={"autocomplete_field": "issue_id", "autocomplete_query": "Key"},
)
)
response = self.plugin.view_autocomplete(request, self.group)
assert response.data == {
"issue_id": [
{"id": "0lr", "text": "(#1) KeyError"},
{"id": "9lf", "text": "(#3) Key Missing"},
]
}
responses_request = responses.calls[0].request
url = urlparse(responses_request.url)
query = dict(parse_qsl(url.query))
assert url.path == "/1/search"
assert query == {
"cards_limit": "100",
"partial": "true",
"modelTypes": "cards",
"token": "7c8951d1",
"card_fields": "name,shortLink,idShort",
"key": "39g",
"query": "Key",
}
| TrelloPluginApiTests |
python | ethereum__web3.py | ens/_normalization.py | {
"start": 2562,
"end": 16814
} | class ____:
labels: list[Label]
def __init__(self, normalized_labels: list[Label]) -> None:
self.labels = normalized_labels
@property
def as_text(self) -> str:
return ".".join(label.text for label in self.labels)
# -----
GROUP_COMBINED_VALID_CPS = []
for d in NORMALIZATION_SPEC["groups"]:
GROUP_COMBINED_VALID_CPS.extend(d["primary"])
GROUP_COMBINED_VALID_CPS.extend(d["secondary"])
VALID_BY_GROUPS = {
d["name"]: set(d["primary"] + d["secondary"]) for d in NORMALIZATION_SPEC["groups"]
}
def _extract_valid_codepoints() -> set[int]:
all_valid = set()
for _name, valid_cps in VALID_BY_GROUPS.items():
all_valid.update(valid_cps)
all_valid.update(map(ord, NFD("".join(map(chr, all_valid)))))
return all_valid
def _construct_whole_confusable_map() -> dict[int, set[str]]:
"""
Create a mapping, per confusable, that contains all the groups in the cp's whole
confusable excluding the confusable extent of the cp itself - as per the spec at
https://docs.ens.domains/ens-improvement-proposals/ensip-15-normalization-standard
"""
whole_map: dict[int, set[str]] = {}
for whole in NORMALIZATION_SPEC["wholes"]:
whole_confusables: set[int] = set(whole["valid"] + whole["confused"])
confusable_extents: list[tuple[set[int], set[str]]] = []
for confusable_cp in whole_confusables:
# create confusable extents for all whole confusables
groups: set[str] = set()
for gn, gv in VALID_BY_GROUPS.items():
if confusable_cp in gv:
groups.add(gn)
if len(confusable_extents) == 0:
confusable_extents.append(({confusable_cp}, groups))
else:
extent_exists = False
for entry in confusable_extents:
if any(g in entry[1] for g in groups):
extent_exists = True
entry[0].update({confusable_cp})
entry[1].update(groups)
break
if not extent_exists:
confusable_extents.append(({confusable_cp}, groups))
for confusable_cp in whole_confusables:
confusable_cp_extent_groups: set[str] = set()
if confusable_cp in whole["confused"]:
whole_map[confusable_cp] = set()
for ce in confusable_extents:
if confusable_cp in ce[0]:
confusable_cp_extent_groups.update(ce[1])
else:
whole_map[confusable_cp].update(ce[1])
# remove the groups from confusable_cp's confusable extent
whole_map[confusable_cp] = whole_map[confusable_cp].difference(
confusable_cp_extent_groups
)
return whole_map
WHOLE_CONFUSABLE_MAP = _construct_whole_confusable_map()
VALID_CODEPOINTS = _extract_valid_codepoints()
MAX_LEN_EMOJI_PATTERN = max(len(e) for e in NORMALIZATION_SPEC["emoji"])
NSM_MAX = NORMALIZATION_SPEC["nsm_max"]
def _is_fenced(cp: int) -> bool:
return cp in [fenced[0] for fenced in NORMALIZATION_SPEC["fenced"]]
def _codepoints_to_text(cps: list[list[int]] | list[int]) -> str:
return "".join(
chr(cp) if isinstance(cp, int) else _codepoints_to_text(cp) for cp in cps
)
def _validate_tokens_and_get_label_type(tokens: list[Token]) -> str:
"""
Validate tokens and return the label type.
:param List[Token] tokens: the tokens to validate
:raises InvalidName: if any of the tokens are invalid
"""
if all(token.type == TokenType.EMOJI for token in tokens):
return "emoji"
label_text = "".join(token.text for token in tokens)
concat_text_tokens_as_str = "".join(
t.text for t in tokens if t.type == TokenType.TEXT
)
all_token_cps = [cp for t in tokens for cp in t.codepoints]
if len(tokens) == 1 and tokens[0].type == TokenType.TEXT:
# if single text token
encoded = concat_text_tokens_as_str.encode()
try:
encoded.decode("ascii") # if label is ascii
if "_" in concat_text_tokens_as_str[concat_text_tokens_as_str.count("_") :]:
raise InvalidName(
"Underscores '_' may only occur at the start of a label: "
f"'{label_text}'"
)
elif concat_text_tokens_as_str[2:4] == "--":
raise InvalidName(
"A label's third and fourth characters cannot be hyphens '-': "
f"'{label_text}'"
)
return "ascii"
except UnicodeDecodeError:
pass
if 95 in all_token_cps[all_token_cps.count(95) :]:
raise InvalidName(
f"Underscores '_' may only occur at the start of a label: '{label_text}'"
)
if _is_fenced(all_token_cps[0]) or _is_fenced(all_token_cps[-1]):
raise InvalidName(
f"Label cannot start or end with a fenced codepoint: '{label_text}'"
)
for cp_index, cp in enumerate(all_token_cps):
if cp_index == len(all_token_cps) - 1:
break
next_cp = all_token_cps[cp_index + 1]
if _is_fenced(cp) and _is_fenced(next_cp):
raise InvalidName(
f"Label cannot contain two fenced codepoints in a row: '{label_text}'"
)
if any(
t.codepoints[0] in NORMALIZATION_SPEC["cm"]
for t in tokens
if t.type == TokenType.TEXT
):
raise InvalidName(
"At least one text token in label starts with a "
f"combining mark: '{label_text}'"
)
# find first group that contains all chars in label
text_token_cps_set = {
cp
for token in tokens
if token.type == TokenType.TEXT
for cp in token.codepoints
}
chars_group_name = None
for group_name, group_cps in VALID_BY_GROUPS.items():
if text_token_cps_set.issubset(group_cps):
chars_group_name = group_name
break
if not chars_group_name:
raise InvalidName(
f"Label contains codepoints from multiple groups: '{label_text}'"
)
# apply NFD and check contiguous NSM sequences
for group in NORMALIZATION_SPEC["groups"]:
if group["name"] == chars_group_name:
if "cm" not in group:
nfd_cps = [
ord(nfd_c) for c in concat_text_tokens_as_str for nfd_c in NFD(c)
]
next_index = -1
for cp_i, cp in enumerate(nfd_cps):
if cp_i <= next_index:
continue
if cp in NORMALIZATION_SPEC["nsm"]:
if cp_i == len(nfd_cps) - 1:
break
contiguous_nsm_cps = [cp]
next_index = cp_i + 1
next_cp = nfd_cps[next_index]
while next_cp in NORMALIZATION_SPEC["nsm"]:
contiguous_nsm_cps.append(next_cp)
if len(contiguous_nsm_cps) > NSM_MAX:
raise InvalidName(
"Contiguous NSM sequence for label greater than NSM"
f" max of {NSM_MAX}: '{label_text}'"
)
next_index += 1
if next_index == len(nfd_cps):
break
next_cp = nfd_cps[next_index]
if not len(contiguous_nsm_cps) == len(set(contiguous_nsm_cps)):
raise InvalidName(
"Contiguous NSM sequence for label contains duplicate "
f"codepoints: '{label_text}'"
)
break
# check wholes
# start with set of all groups with confusables
retained_groups = set(VALID_BY_GROUPS.keys())
confused_chars = set()
buffer = set()
for char_cp in text_token_cps_set:
groups_excluding_ce = WHOLE_CONFUSABLE_MAP.get(char_cp)
if groups_excluding_ce and len(groups_excluding_ce) > 0:
if len(retained_groups) == 0:
break
else:
retained_groups = retained_groups.intersection(groups_excluding_ce)
confused_chars.add(char_cp)
elif GROUP_COMBINED_VALID_CPS.count(char_cp) == 1:
return chars_group_name
else:
buffer.add(char_cp)
if len(confused_chars) > 0:
for retained_group_name in retained_groups:
if all(cp in VALID_BY_GROUPS[retained_group_name] for cp in buffer):
# Though the spec doesn't mention this explicitly, if the buffer is
# empty, the label is confusable. This allows for using ``all()`` here
# since that yields ``True`` on empty sets.
# e.g. ``all(cp in group_cps for cp in set())`` is ``True``
# for any ``group_cps``.
if len(buffer) == 0:
msg = (
f"All characters in label are confusable: "
f"'{label_text}' ({chars_group_name} / "
)
msg += (
f"{[rgn for rgn in retained_groups]})"
if len(retained_groups) > 1
else f"{retained_group_name})"
)
else:
msg = (
f"Label is confusable: '{label_text}' "
f"({chars_group_name} / {retained_group_name})"
)
raise InvalidName(msg)
return chars_group_name
def _build_and_validate_label_from_tokens(tokens: list[Token]) -> Label:
for token in tokens:
if token.type == TokenType.TEXT:
# apply NFC normalization to text tokens
chars = [chr(cp) for cp in token._original_codepoints]
nfc = NFC(chars)
token._normalized_codepoints = [ord(c) for c in nfc]
label_type = _validate_tokens_and_get_label_type(tokens)
label = Label()
label.type = label_type
label.tokens = tokens
return label
def _buffer_codepoints_to_chars(buffer: list[int] | list[list[int]]) -> str:
return "".join(
"".join(chr(c) for c in char) if isinstance(char, list) else chr(char)
for char in buffer
)
# -----
def normalize_name_ensip15(name: str) -> ENSNormalizedName:
"""
Normalize an ENS name according to ENSIP-15
https://docs.ens.domains/ens-improvement-proposals/ensip-15-normalization-standard
:param str name: the dot-separated ENS name
:raises InvalidName: if ``name`` has invalid syntax
"""
if not name:
return ENSNormalizedName([])
elif isinstance(name, (bytes, bytearray)):
name = name.decode("utf-8")
raw_labels = name.split(".")
if any(len(label) == 0 for label in raw_labels):
raise InvalidName("Labels cannot be empty")
normalized_labels = []
for label_str in raw_labels:
# _input takes the label and breaks it into a list of unicode code points
# e.g. "xyz👨🏻" -> [120, 121, 122, 128104, 127995]
_input = [ord(c) for c in label_str]
buffer: list[int] = []
tokens: list[Token] = []
while len(_input) > 0:
emoji_codepoint = None
end_index = 1
while end_index <= len(_input):
current_emoji_sequence = _input[:end_index]
if len(current_emoji_sequence) > MAX_LEN_EMOJI_PATTERN:
# if we've reached the max length of all known emoji patterns
break
# remove 0xFE0F (65039)
elif 65039 in current_emoji_sequence:
current_emoji_sequence.remove(65039)
_input.remove(65039)
if len(_input) == 0:
raise InvalidName("Empty name after removing 65039 (0xFE0F)")
end_index -= 1 # reset end_index after removing 0xFE0F
if current_emoji_sequence in NORMALIZATION_SPEC["emoji"]:
emoji_codepoint = current_emoji_sequence
end_index += 1
if emoji_codepoint:
if len(buffer) > 0:
# emit `Text` token with values in buffer
tokens.append(TextToken(buffer))
buffer = [] # clear the buffer
# emit `Emoji` token with values in emoji_codepoint
tokens.append(EmojiToken(emoji_codepoint))
_input = _input[len(emoji_codepoint) :]
else:
leading_codepoint = _input.pop(0)
if leading_codepoint in NORMALIZATION_SPEC["ignored"]:
pass
elif leading_codepoint in NORMALIZATION_SPEC["mapped"]:
mapped = NORMALIZATION_SPEC["mapped"][leading_codepoint]
for cp in mapped:
buffer.append(cp)
else:
if leading_codepoint in VALID_CODEPOINTS:
buffer.append(leading_codepoint)
else:
raise InvalidName(
f"Invalid character: '{chr(leading_codepoint)}' | "
f"codepoint {leading_codepoint} ({hex(leading_codepoint)})"
)
if len(buffer) > 0 and len(_input) == 0:
tokens.append(TextToken(buffer))
# create a `Label` instance from tokens
# - Apply NFC to each `Text` token
# - Run tokens through "Validation" section of ENSIP-15
normalized_label = _build_and_validate_label_from_tokens(tokens)
normalized_labels.append(normalized_label)
# - join labels back together after normalization
return ENSNormalizedName(normalized_labels)
| ENSNormalizedName |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solverHigherOrder11.py | {
"start": 410,
"end": 771
} | class ____:
def method(self, v: T) -> T: ...
def func1(a: Proto1[T], b: T) -> T: ...
v1 = func1(a=Impl1(), b="abc")
reveal_type(v1, expected_text="str")
v2 = func1(b="abc", a=Impl1())
reveal_type(v2, expected_text="str")
v3 = func1(a=Impl1(), b=1)
reveal_type(v3, expected_text="int")
v4 = func1(b=1, a=Impl1())
reveal_type(v4, expected_text="int")
| Impl1 |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_indexing.py | {
"start": 61419,
"end": 64924
} | class ____:
@pytest.mark.parametrize(
"key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]
)
def test_getitem_dict_and_set_deprecated(self, key):
# GH#42825 enforced in 2.0
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
with pytest.raises(TypeError, match="as an indexer is not supported"):
df.loc[key]
@pytest.mark.parametrize(
"key",
[
{1},
{1: 1},
(({1}, 2), "a"),
(({1: 1}, 2), "a"),
((1, 2), {"a"}),
((1, 2), {"a": "a"}),
],
)
def test_getitem_dict_and_set_deprecated_multiindex(self, key):
# GH#42825 enforced in 2.0
df = DataFrame(
[[1, 2], [3, 4]],
columns=["a", "b"],
index=MultiIndex.from_tuples([(1, 2), (3, 4)]),
)
with pytest.raises(TypeError, match="as an indexer is not supported"):
df.loc[key]
@pytest.mark.parametrize(
"key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]
)
def test_setitem_dict_and_set_disallowed(self, key):
# GH#42825 enforced in 2.0
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
with pytest.raises(TypeError, match="as an indexer is not supported"):
df.loc[key] = 1
@pytest.mark.parametrize(
"key",
[
{1},
{1: 1},
(({1}, 2), "a"),
(({1: 1}, 2), "a"),
((1, 2), {"a"}),
((1, 2), {"a": "a"}),
],
)
def test_setitem_dict_and_set_disallowed_multiindex(self, key):
# GH#42825 enforced in 2.0
df = DataFrame(
[[1, 2], [3, 4]],
columns=["a", "b"],
index=MultiIndex.from_tuples([(1, 2), (3, 4)]),
)
with pytest.raises(TypeError, match="as an indexer is not supported"):
df.loc[key] = 1
def test_adding_new_conditional_column() -> None:
# https://github.com/pandas-dev/pandas/issues/55025
df = DataFrame({"x": [1]})
df.loc[df["x"] == 1, "y"] = "1"
expected = DataFrame({"x": [1], "y": ["1"]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"x": [1]})
# try inserting something which numpy would store as 'object'
value = lambda x: x
df.loc[df["x"] == 1, "y"] = value
expected = DataFrame({"x": [1], "y": [value]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
("dtype", "infer_string"),
[
(object, False),
(pd.StringDtype(na_value=np.nan), True),
],
)
def test_adding_new_conditional_column_with_string(dtype, infer_string) -> None:
# https://github.com/pandas-dev/pandas/issues/56204
df = DataFrame({"a": [1, 2], "b": [3, 4]})
with pd.option_context("future.infer_string", infer_string):
df.loc[df["a"] == 1, "c"] = "1"
expected = DataFrame({"a": [1, 2], "b": [3, 4], "c": ["1", float("nan")]}).astype(
{"a": "int64", "b": "int64", "c": dtype}
)
tm.assert_frame_equal(df, expected)
def test_add_new_column_infer_string():
# GH#55366
df = DataFrame({"x": [1]})
with pd.option_context("future.infer_string", True):
df.loc[df["x"] == 1, "y"] = "1"
expected = DataFrame(
{"x": [1], "y": Series(["1"], dtype=pd.StringDtype(na_value=np.nan))},
columns=Index(["x", "y"], dtype="str"),
)
tm.assert_frame_equal(df, expected)
| TestDeprecatedIndexers |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 10661,
"end": 11451
} | class ____(torch.autograd.Function):
generate_vmap_rule = True
@staticmethod
def forward(x, ind, ind_inv, dim):
return torch.take_along_dim(x, ind, dim)
@staticmethod
def setup_context(ctx, inputs, outputs):
_x, ind, ind_inv, dim = inputs
ctx.save_for_backward(ind, ind_inv)
ctx.save_for_forward(ind, ind_inv)
ctx.dim = dim
@staticmethod
def backward(ctx, grad_output):
ind, ind_inv = ctx.saved_tensors
result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
return result, None, None, None
@staticmethod
def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
ind, ind_inv = ctx.saved_tensors
return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
| TakeGenVmap |
python | jazzband__django-model-utils | tests/models.py | {
"start": 5323,
"end": 5875
} | class ____(models.Model):
published = models.BooleanField(default=False)
confirmed = models.BooleanField(default=False)
order = models.IntegerField()
objects = models.Manager()
public: ClassVar[QueryManager[Post]] = QueryManager(published=True)
public_confirmed: ClassVar[QueryManager[Post]] = QueryManager(
models.Q(published=True) & models.Q(confirmed=True))
public_reversed: ClassVar[QueryManager[Post]] = QueryManager(
published=True).order_by("-order")
class Meta:
ordering = ("order",)
| Post |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 35555,
"end": 35960
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
arg: SymbolicCallArg
graph: GraphLowering
def codegen(self, code: IndentedBuffer) -> None:
self.wrapper._generate_symbolic_call_arg_helper(self.arg, self.graph)
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_symbolic_call_arg
@dataclasses.dataclass
| SymbolicCallArgLine |
python | wandb__wandb | wandb/sdk/artifacts/_validators.py | {
"start": 12134,
"end": 12281
} | class ____(ArtifactPath):
"""Same as ArtifactPath, but with all parts required."""
name: str
project: str
prefix: str
| FullArtifactPath |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/optimization/test_manual_optimization.py | {
"start": 1187,
"end": 5174
} | class ____(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx):
opt_a, opt_b = self.optimizers()
# make sure there are no grads
assert_emtpy_grad(self.layer.weight.grad)
loss_1 = self.step(batch[0])
self.manual_backward(loss_1)
opt_a.step()
opt_a.zero_grad()
assert_emtpy_grad(self.layer.weight.grad)
loss_2 = self.step(batch[0])
# ensure we forward the correct params to the optimizer
# without retain_graph we can't do multiple backward passes
self.manual_backward(loss_2, retain_graph=True)
self.manual_backward(loss_2)
assert self.layer.weight.grad is not None
opt_b.step()
opt_b.zero_grad()
assert_emtpy_grad(self.layer.weight.grad)
return loss_2
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return optimizer, optimizer_2
@pytest.mark.parametrize(
"kwargs",
[{}, pytest.param({"accelerator": "gpu", "devices": 1, "precision": "16-mixed"}, marks=RunIf(min_cuda_gpus=1))],
)
def test_multiple_optimizers_manual_call_counts(tmp_path, kwargs):
model = ManualOptModel()
limit_train_batches = 2
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
max_epochs=1,
log_every_n_steps=1,
enable_model_summary=False,
**kwargs,
)
if kwargs.get("precision") == "16-mixed":
# mock the scaler instead of the optimizer step because it can be skipped with NaNs
scaler_step_patch = mock.patch.object(
trainer.precision_plugin.scaler, "step", wraps=trainer.precision_plugin.scaler.step
)
scaler_step = scaler_step_patch.start()
with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
trainer.fit(model)
assert bwd_mock.call_count == limit_train_batches * 3
assert trainer.global_step == limit_train_batches * 2
if kwargs.get("precision") == "16-mixed":
scaler_step_patch.stop()
assert scaler_step.call_count == len(model.optimizers()) * limit_train_batches
def test_multiple_optimizers_manual_log(tmp_path):
class TestModel(ManualOptModel):
def training_step(self, batch, batch_idx):
loss_2 = super().training_step(batch, batch_idx)
self.log("a", loss_2, on_epoch=True)
model = TestModel()
limit_train_batches = 2
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
max_epochs=1,
log_every_n_steps=1,
enable_model_summary=False,
)
with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
trainer.fit(model)
assert bwd_mock.call_count == limit_train_batches * 3
assert set(trainer.logged_metrics) == {"a_step", "a_epoch"}
# precision = 16 not yet working properly with mps backend
@pytest.mark.parametrize("accelerator", [pytest.param("gpu", marks=RunIf(min_cuda_gpus=1))])
def test_multiple_optimizers_manual_amp(tmp_path, accelerator):
model = ManualOptModel()
model.val_dataloader = None
limit_train_batches = 2
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=limit_train_batches,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
enable_model_summary=False,
precision="16-mixed",
accelerator=accelerator,
devices=1,
)
with mock.patch.object(Strategy, "backward", wraps=trainer.strategy.backward) as bwd_mock:
trainer.fit(model)
assert bwd_mock.call_count == limit_train_batches * 3
| ManualOptModel |
python | doocs__leetcode | solution/1200-1299/1202.Smallest String With Swaps/Solution.py | {
"start": 0,
"end": 537
} | class ____:
def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
n = len(s)
p = list(range(n))
for a, b in pairs:
p[find(a)] = find(b)
d = defaultdict(list)
for i, c in enumerate(s):
d[find(i)].append(c)
for i in d.keys():
d[i].sort(reverse=True)
return "".join(d[find(i)].pop() for i in range(n))
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_parent_decorator.py | {
"start": 423,
"end": 566
} | class ____(TestC):
def __init__(self, foo, bar, baz):
_test_sink(foo)
_test_sink(bar)
_test_sink(baz)
@d1
@d2
| TestC_2 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 15442,
"end": 15646
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("AZURE_DEVOPS", "BITBUCKET_SERVER", "GITHUB_ARCHIVE")
| MigrationSourceType |
python | tiangolo__fastapi | docs_src/graphql/tutorial001.py | {
"start": 168,
"end": 424
} | class ____:
@strawberry.field
def user(self) -> User:
return User(name="Patrick", age=100)
schema = strawberry.Schema(query=Query)
graphql_app = GraphQLRouter(schema)
app = FastAPI()
app.include_router(graphql_app, prefix="/graphql")
| Query |
python | getsentry__sentry | src/sentry/models/releasefile.py | {
"start": 5224,
"end": 6880
} | class ____:
"""Read-only view of uploaded ZIP-archive of release files"""
def __init__(self, fileobj: IO):
self._fileobj = fileobj
self._zip_file = zipfile.ZipFile(self._fileobj)
self.manifest = self._read_manifest()
self.artifact_count = len(self.manifest.get("files", {}))
files = self.manifest.get("files", {})
self._entries_by_url = {entry["url"]: (path, entry) for path, entry in files.items()}
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.close()
def close(self):
self._zip_file.close()
self._fileobj.close()
def info(self, filename: str) -> zipfile.ZipInfo:
return self._zip_file.getinfo(filename)
def read(self, filename: str) -> bytes:
return self._zip_file.read(filename)
def _read_manifest(self) -> dict:
manifest_bytes = self.read("manifest.json")
return json.loads(manifest_bytes.decode("utf-8"))
def get_file_by_url(self, url: str) -> tuple[IO[bytes], dict]:
"""Return file-like object and headers.
The caller is responsible for closing the returned stream.
May raise ``KeyError``
"""
filename, entry = self._entries_by_url[url]
return self._zip_file.open(filename), entry.get("headers", {})
def extract(self) -> TemporaryDirectory:
"""Extract contents to a temporary directory.
The caller is responsible for cleanup of the temporary files.
"""
temp_dir = TemporaryDirectory()
safe_extract_zip(self._fileobj, temp_dir.name)
return temp_dir
| ReleaseArchive |
python | fsspec__filesystem_spec | fsspec/implementations/cached.py | {
"start": 26296,
"end": 33819
} | class ____(WholeFileCacheFileSystem):
"""Caches whole remote files on first access
This class is intended as a layer over any other file system, and
will make a local copy of each file accessed, so that all subsequent
reads are local. This implementation only copies whole files, and
does not keep any metadata about the download time or file details.
It is therefore safer to use in multi-threaded/concurrent situations.
This is the only of the caching filesystems that supports write: you will
be given a real local open file, and upon close and commit, it will be
uploaded to the target filesystem; the writability or the target URL is
not checked until that time.
"""
protocol = "simplecache"
local_file = True
transaction_type = WriteCachedTransaction
def __init__(self, **kwargs):
kw = kwargs.copy()
for key in ["cache_check", "expiry_time", "check_files"]:
kw[key] = False
super().__init__(**kw)
for storage in self.storage:
if not os.path.exists(storage):
os.makedirs(storage, exist_ok=True)
def _check_file(self, path):
self._check_cache()
sha = self._mapper(path)
for storage in self.storage:
fn = os.path.join(storage, sha)
if os.path.exists(fn):
return fn
def save_cache(self):
pass
def load_cache(self):
pass
def pipe_file(self, path, value=None, **kwargs):
if self._intrans:
with self.open(path, "wb") as f:
f.write(value)
else:
super().pipe_file(path, value)
def ls(self, path, detail=True, **kwargs):
path = self._strip_protocol(path)
details = []
try:
details = self.fs.ls(
path, detail=True, **kwargs
).copy() # don't edit original!
except FileNotFoundError as e:
ex = e
else:
ex = None
if self._intrans:
path1 = path.rstrip("/") + "/"
for f in self.transaction.files:
if f.path == path:
details.append(
{"name": path, "size": f.size or f.tell(), "type": "file"}
)
elif f.path.startswith(path1):
if f.path.count("/") == path1.count("/"):
details.append(
{"name": f.path, "size": f.size or f.tell(), "type": "file"}
)
else:
dname = "/".join(f.path.split("/")[: path1.count("/") + 1])
details.append({"name": dname, "size": 0, "type": "directory"})
if ex is not None and not details:
raise ex
if detail:
return details
return sorted(_["name"] for _ in details)
def info(self, path, **kwargs):
path = self._strip_protocol(path)
if self._intrans:
f = [_ for _ in self.transaction.files if _.path == path]
if f:
size = os.path.getsize(f[0].fn) if f[0].closed else f[0].tell()
return {"name": path, "size": size, "type": "file"}
f = any(_.path.startswith(path + "/") for _ in self.transaction.files)
if f:
return {"name": path, "size": 0, "type": "directory"}
return self.fs.info(path, **kwargs)
def pipe(self, path, value=None, **kwargs):
if isinstance(path, str):
self.pipe_file(self._strip_protocol(path), value, **kwargs)
elif isinstance(path, dict):
for k, v in path.items():
self.pipe_file(self._strip_protocol(k), v, **kwargs)
else:
raise ValueError("path must be str or dict")
async def _cat_file(self, path, start=None, end=None, **kwargs):
logger.debug("async cat_file %s", path)
path = self._strip_protocol(path)
sha = self._mapper(path)
fn = self._check_file(path)
if not fn:
fn = os.path.join(self.storage[-1], sha)
await self.fs._get_file(path, fn, **kwargs)
with open(fn, "rb") as f: # noqa ASYNC230
if start:
f.seek(start)
size = -1 if end is None else end - f.tell()
return f.read(size)
async def _cat_ranges(
self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
):
logger.debug("async cat ranges %s", paths)
lpaths = []
rset = set()
download = []
rpaths = []
for p in paths:
fn = self._check_file(p)
if fn is None and p not in rset:
sha = self._mapper(p)
fn = os.path.join(self.storage[-1], sha)
download.append(fn)
rset.add(p)
rpaths.append(p)
lpaths.append(fn)
if download:
await self.fs._get(rpaths, download, on_error=on_error)
return LocalFileSystem().cat_ranges(
lpaths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
)
def cat_ranges(
self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
):
logger.debug("cat ranges %s", paths)
lpaths = [self._check_file(p) for p in paths]
rpaths = [p for l, p in zip(lpaths, paths) if l is False]
lpaths = [l for l, p in zip(lpaths, paths) if l is False]
self.fs.get(rpaths, lpaths)
paths = [self._check_file(p) for p in paths]
return LocalFileSystem().cat_ranges(
paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
)
def _open(self, path, mode="rb", **kwargs):
path = self._strip_protocol(path)
sha = self._mapper(path)
if "r" not in mode:
fn = os.path.join(self.storage[-1], sha)
user_specified_kwargs = {
k: v
for k, v in kwargs.items()
if k not in ["autocommit", "block_size", "cache_options"]
} # those were added by open()
return LocalTempFile(
self,
path,
mode=mode,
autocommit=not self._intrans,
fn=fn,
**user_specified_kwargs,
)
fn = self._check_file(path)
if fn:
return open(fn, mode)
fn = os.path.join(self.storage[-1], sha)
logger.debug("Copying %s to local cache", path)
kwargs["mode"] = mode
self._mkcache()
self._cache_size = None
if self.compression:
with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
if isinstance(f, AbstractBufferedFile):
# want no type of caching if just downloading whole thing
f.cache = BaseCache(0, f.cache.fetcher, f.size)
comp = (
infer_compression(path)
if self.compression == "infer"
else self.compression
)
f = compr[comp](f, mode="rb")
data = True
while data:
block = getattr(f, "blocksize", 5 * 2**20)
data = f.read(block)
f2.write(data)
else:
self.fs.get_file(path, fn)
return self._open(path, mode)
| SimpleCacheFileSystem |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 105477,
"end": 105833
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("path", "contents")
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
contents = sgqlc.types.Field(
sgqlc.types.non_null(Base64String), graphql_name="contents"
)
| FileAddition |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 40975,
"end": 62551
} | class ____(Glm4vPreTrainedModel):
base_model_prefix = "model"
_checkpoint_conversion_mapping = {}
# Reference: fix gemma3 grad acc #37208
accepts_loss_kwargs = False
config: Glm4vConfig
_no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"]
def __init__(self, config):
super().__init__(config)
self.visual = Glm4vVisionModel._from_config(config.vision_config)
self.language_model = Glm4vTextModel._from_config(config.text_config)
self.rope_deltas = None # cache rope_deltas here
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_rope_index(
self,
input_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Temporal (Time): 3 patches, representing different segments of the video in time.
Height: 2 patches, dividing each frame vertically.
Width: 2 patches, dividing each frame horizontally.
We also have some important parameters:
fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [101, 102, 103, 104, 105]
text height position_ids: [101, 102, 103, 104, 105]
text width position_ids: [101, 102, 103, 104, 105]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
"""
spatial_merge_size = self.config.vision_config.spatial_merge_size
image_token_id = self.config.image_token_id
video_start_token_id = self.config.video_start_token_id
video_end_token_id = self.config.video_end_token_id
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is None:
attention_mask = torch.ones_like(total_input_ids)
position_ids = torch.ones(
3,
input_ids.shape[0],
input_ids.shape[1],
dtype=input_ids.dtype,
device=input_ids.device,
)
image_index, video_index = 0, 0
video_group_index = 0
attention_mask = attention_mask.to(total_input_ids.device)
for i, input_ids in enumerate(total_input_ids):
input_ids = input_ids[attention_mask[i] == 1]
input_tokens = input_ids.tolist()
input_token_type = []
video_check_flg = False
for token in input_tokens:
if token == video_start_token_id:
video_check_flg = True
elif token == video_end_token_id:
video_check_flg = False
if token == image_token_id and not video_check_flg:
input_token_type.append("image")
elif token == image_token_id and video_check_flg:
input_token_type.append("video")
else:
input_token_type.append("text")
input_type_group = []
for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
group = list(group)
start_index = group[0][0]
end_index = group[-1][0] + 1
input_type_group.append((key, start_index, end_index))
llm_pos_ids_list = []
video_frame_num = 1
for modality_type, start_idx, end_idx in input_type_group:
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
if modality_type == "image":
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
image_index += 1
video_frame_num = 1
elif modality_type == "video":
t, h, w = (
video_frame_num,
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
llm_grid_t, llm_grid_h, llm_grid_w = (
t,
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
for t_idx in range(llm_grid_t):
t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
video_group_index += 1
if video_group_index >= video_grid_thw[video_index][0]:
video_index += 1
video_group_index = 0
video_frame_num += 1
else:
text_len = end_idx - start_idx
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
video_frame_num = 1
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
return position_ids, mrope_position_deltas
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
else:
position_ids = (
torch.arange(input_ids.shape[1], device=input_ids.device)
.view(1, 1, -1)
.expand(3, input_ids.shape[0], -1)
)
mrope_position_deltas = torch.zeros(
[input_ids.shape[0], 1],
device=input_ids.device,
dtype=input_ids.dtype,
)
return position_ids, mrope_position_deltas
def get_video_features(
self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
):
"""
Encodes videos into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
"""
pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
# reshape video_grid_thw -> [b, 3] -> [1, h, w] * frames
temp_frames_hw = []
for t, h, w in video_grid_thw:
repeated_row = torch.tensor([1, h.item(), w.item()]).unsqueeze(0).repeat(t, 1)
temp_frames_hw.append(repeated_row)
flattened_video_grid_thw = torch.cat(temp_frames_hw, dim=0)
video_embeds = self.visual(pixel_values_videos, grid_thw=flattened_video_grid_thw)
split_sizes = (video_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
video_embeds = torch.split(video_embeds, split_sizes)
return video_embeds
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
"""
pixel_values = pixel_values.type(self.visual.dtype)
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
image_embeds = torch.split(image_embeds, split_sizes)
return image_embeds
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: Optional[torch.FloatTensor] = None,
video_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
else:
# GLM-4.1V and GLM-4.5V special_video_mask is special_image_mask
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
)
return special_image_mask, special_video_mask
@auto_docstring
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.Tensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
rope_deltas: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Glm4vModelOutputWithPast]:
r"""
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_embeds = self.get_image_features(pixel_values, image_grid_thw)
image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
image_mask, _ = self.get_placeholder_mask(input_ids, inputs_embeds, image_features=image_embeds)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
_, video_mask = self.get_placeholder_mask(input_ids, inputs_embeds, video_features=video_embeds)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
if position_ids is None:
attention_mask_tensor = (
attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"]
)
if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2)
# Only apply conversion for floating point tensors (inverted masks)
if attention_mask_tensor.dtype.is_floating_point:
attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min
attention_mask_tensor = (1.0 - attention_mask_tensor).int()
# Calculate RoPE index once per generation in the pre-fill stage only.
# When compiling, we can't check tensor values thus we check only input length
# It is safe to assume that `length!=1` means we're in pre-fill because compiled
# models currently cannot do asssisted decoding
prefill_compiled_stage = is_torchdynamo_compiling() and (
(input_ids is not None and input_ids.shape[1] != 1)
or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
)
prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
(cache_position is not None and cache_position[0] == 0)
or (past_key_values is None or past_key_values.get_seq_length() == 0)
)
if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
position_ids, rope_deltas = self.get_rope_index(
input_ids,
image_grid_thw,
video_grid_thw,
attention_mask=attention_mask_tensor,
)
self.rope_deltas = rope_deltas
# then use the prev pre-calculated rope-deltas to get the correct position ids
else:
batch_size, seq_length, _ = inputs_embeds.shape
delta = (
(cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
if cache_position is not None
else 0
)
position_ids = torch.arange(seq_length, device=inputs_embeds.device)
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
if cache_position is not None: # otherwise `deltas` is an int `0`
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
position_ids = position_ids.add(delta)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
outputs = self.language_model(
input_ids=None,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
return Glm4vModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
rope_deltas=self.rope_deltas,
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for Glm4v causal language model (or autoregressive) outputs.
"""
)
| Glm4vModel |
python | pytorch__pytorch | test/distributed/_composable/test_composability/test_2d_composability.py | {
"start": 2080,
"end": 2523
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.net1 = nn.Linear(5, 8)
self.relu = nn.ReLU()
self.net2 = nn.Linear(8, 4)
self.net3 = nn.Linear(4, 12)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
def get_input(self):
return torch.rand(4, 5, device=device_type)
| SimpleModel |
python | pydantic__pydantic | tests/test_types.py | {
"start": 141420,
"end": 141513
} | class ____(Secret[date]):
def _display(self) -> str:
return '****/**/**'
| SecretDate |
python | django__django | tests/admin_inlines/models.py | {
"start": 4641,
"end": 4893
} | class ____(Person):
fullname = models.CharField(max_length=100)
nationality = models.CharField(max_length=100)
residency = models.CharField(max_length=100)
siblings = models.IntegerField()
children = models.IntegerField()
| Photographer |
python | numba__numba | numba/parfors/parfor.py | {
"start": 54340,
"end": 63774
} | class ____(object):
"""Preprocessing for the Parfor pass. It mostly inlines parallel
implementations of numpy functions if available.
"""
def __init__(self, func_ir, typemap, calltypes, typingctx, targetctx,
options, swapped=None, replace_functions_map=None):
if swapped is None:
swapped = {}
self.func_ir = func_ir
self.typemap = typemap
self.calltypes = calltypes
self.typingctx = typingctx
self.targetctx = targetctx
self.options = options
# diagnostics
self.swapped = swapped
if replace_functions_map is None:
replace_functions_map = swap_functions_map
self.replace_functions_map = replace_functions_map
self.stats = {
'replaced_func': 0,
'replaced_dtype': 0,
}
def run(self):
"""Run pre-parfor processing pass.
"""
# e.g. convert A.sum() to np.sum(A) for easier match and optimization
canonicalize_array_math(self.func_ir, self.typemap,
self.calltypes, self.typingctx)
if self.options.numpy:
self._replace_parallel_functions(self.func_ir.blocks)
self.func_ir.blocks = simplify_CFG(self.func_ir.blocks)
def _replace_parallel_functions(self, blocks):
"""
Replace functions with their parallel implementation in
replace_functions_map if available.
The implementation code is inlined to enable more optimization.
"""
swapped = self.swapped
from numba.core.inline_closurecall import inline_closure_call
work_list = list(blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
lhs_typ = self.typemap[lhs.name]
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == 'call':
# Try and inline known calls with their parallel implementations
def replace_func():
func_def = get_definition(self.func_ir, expr.func)
callname = find_callname(self.func_ir, expr)
repl_func = self.replace_functions_map.get(callname, None)
# Handle method on array type
if (repl_func is None and
len(callname) == 2 and
isinstance(callname[1], ir.Var) and
isinstance(self.typemap[callname[1].name],
types.npytypes.Array)):
repl_func = replace_functions_ndarray.get(callname[0], None)
if repl_func is not None:
# Add the array that the method is on to the arg list.
expr.args.insert(0, callname[1])
require(repl_func is not None)
typs = tuple(self.typemap[x.name] for x in expr.args)
kws_typs = {k: self.typemap[x.name] for k, x in expr.kws}
try:
new_func = repl_func(lhs_typ, *typs, **kws_typs)
except:
new_func = None
require(new_func is not None)
# bind arguments to the new_func
typs = utils.pysignature(new_func).bind(*typs, **kws_typs).args
g = copy.copy(self.func_ir.func_id.func.__globals__)
g['numba'] = numba
g['np'] = numpy
g['math'] = math
# if the function being inlined has a function
# checking the inputs, find it and add it to globals
check = replace_functions_checkers_map.get(callname,
None)
if check is not None:
g[check.name] = check.func
# inline the parallel implementation
new_blocks, _ = inline_closure_call(self.func_ir, g,
block, i, new_func, self.typingctx, self.targetctx,
typs, self.typemap, self.calltypes, work_list)
call_table = get_call_table(new_blocks, topological_ordering=False)
# find the prange in the new blocks and record it for use in diagnostics
for call in call_table:
for k, v in call.items():
if v[0] == 'internal_prange':
swapped[k] = [callname, repl_func.__name__, func_def, block.body[i].loc]
break
return True
if guard(replace_func):
self.stats['replaced_func'] += 1
break
elif (isinstance(expr, ir.Expr) and expr.op == 'getattr' and
expr.attr == 'dtype'):
# Replace getattr call "A.dtype" with numpy.dtype(<actual type>).
# This helps remove superfluous dependencies from parfor.
typ = self.typemap[expr.value.name]
if isinstance(typ, types.npytypes.Array):
# Convert A.dtype to four statements.
# 1) Get numpy global.
# 2) Create var for known type of array as string
# constant. e.g. 'float64'
# 3) Get dtype function from numpy module.
# 4) Create var for numpy.dtype(var from #2).
# Create var for numpy module.
dtype = typ.dtype
scope = block.scope
loc = instr.loc
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
self.typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global('np', numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# Create var for the inferred type of the array
# e.g., 'float64'
dtype_str = str(dtype)
if dtype_str == 'bool':
dtype_str = 'bool_'
typ_var = ir.Var(
scope, mk_unique_var("$np_typ_var"), loc)
self.typemap[typ_var.name] = types.StringLiteral(
dtype_str)
typ_var_assign = ir.Assign(
ir.Const(dtype_str, loc), typ_var, loc)
# Get the dtype function from the numpy module.
dtype_attr_var = ir.Var(scope, mk_unique_var("$dtype_attr_var"), loc)
temp = find_template(numpy.dtype)
tfunc = numba.core.types.Function(temp)
tfunc.get_call_type(self.typingctx, (self.typemap[typ_var.name],), {})
self.typemap[dtype_attr_var.name] = types.functions.Function(temp)
dtype_attr_getattr = ir.Expr.getattr(g_np_var, 'dtype', loc)
dtype_attr_assign = ir.Assign(dtype_attr_getattr, dtype_attr_var, loc)
# Call numpy.dtype on the statically coded type two steps above.
dtype_var = ir.Var(scope, mk_unique_var("$dtype_var"), loc)
self.typemap[dtype_var.name] = types.npytypes.DType(dtype)
dtype_getattr = ir.Expr.call(dtype_attr_var, [typ_var], (), loc)
dtype_assign = ir.Assign(dtype_getattr, dtype_var, loc)
self.calltypes[dtype_getattr] = signature(
self.typemap[dtype_var.name], self.typemap[typ_var.name])
# The original A.dtype rhs is replaced with result of this call.
instr.value = dtype_var
# Add statements to body of the code.
block.body.insert(0, dtype_assign)
block.body.insert(0, dtype_attr_assign)
block.body.insert(0, typ_var_assign)
block.body.insert(0, g_np_assign)
self.stats['replaced_dtype'] += 1
break
def find_template(op):
for ft in numba.core.typing.templates.builtin_registry.functions:
if ft.key == op:
return ft
| PreParforPass |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 86016,
"end": 87632
} | class ____(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if is_peft_available():
from peft.tuners.lora import LoraLayer
if is_peft_available():
if isinstance(self.kernel, LoraLayer):
warnings.warn(
"Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
"You should exclude TDNNLayer from LoRA's target modules.",
)
# for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
hidden_states = hidden_states.transpose(1, 2)
weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
@auto_docstring(
custom_intro="""
Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification.
"""
)
| TDNNLayer |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/resource.py | {
"start": 30324,
"end": 30680
} | class ____(Generic[V]):
def __set_name__(self, _owner, name):
self._name = name
def __get__(self, obj: "ConfigurableResourceFactory", owner: Any) -> V:
return getattr(obj, self._name)
def __set__(self, obj: Optional[object], value: ResourceOrPartialOrValue[V]) -> None:
setattr(obj, self._name, value)
| ResourceDependency |
python | ray-project__ray | python/ray/serve/_private/deployment_state.py | {
"start": 51015,
"end": 55662
} | class ____:
"""Container for mapping ReplicaStates to lists of DeploymentReplicas."""
def __init__(self):
self._replicas: Dict[ReplicaState, List[DeploymentReplica]] = defaultdict(list)
def add(self, state: ReplicaState, replica: DeploymentReplica):
"""Add the provided replica under the provided state.
Args:
state: state to add the replica under.
replica: replica to add.
"""
assert isinstance(state, ReplicaState), f"Type: {type(state)}"
replica.update_state(state)
self._replicas[state].append(replica)
def get(
self, states: Optional[List[ReplicaState]] = None
) -> List[DeploymentReplica]:
"""Get all replicas of the given states.
This does not remove them from the container. Replicas are returned
in order of state as passed in.
Args:
states: states to consider. If not specified, all replicas
are considered.
"""
if states is None:
states = ALL_REPLICA_STATES
assert isinstance(states, list)
return sum((self._replicas[state] for state in states), [])
def pop(
self,
exclude_version: Optional[DeploymentVersion] = None,
states: Optional[List[ReplicaState]] = None,
max_replicas: Optional[int] = math.inf,
) -> List[DeploymentReplica]:
"""Get and remove all replicas of the given states.
This removes the replicas from the container. Replicas are returned
in order of state as passed in.
Args:
exclude_version: if specified, replicas of the
provided version will *not* be removed.
states: states to consider. If not specified, all replicas
are considered.
max_replicas: max number of replicas to return. If not
specified, will pop all replicas matching the criteria.
"""
if states is None:
states = ALL_REPLICA_STATES
assert exclude_version is None or isinstance(exclude_version, DeploymentVersion)
assert isinstance(states, list)
replicas = []
for state in states:
popped = []
remaining = []
for replica in self._replicas[state]:
if len(replicas) + len(popped) == max_replicas:
remaining.append(replica)
elif exclude_version is not None and replica.version == exclude_version:
remaining.append(replica)
else:
popped.append(replica)
self._replicas[state] = remaining
replicas.extend(popped)
return replicas
def count(
self,
exclude_version: Optional[DeploymentVersion] = None,
version: Optional[DeploymentVersion] = None,
states: Optional[List[ReplicaState]] = None,
):
"""Get the total count of replicas of the given states.
Args:
exclude_version: version to exclude. If not
specified, all versions are considered.
version: version to filter to. If not specified,
all versions are considered.
states: states to consider. If not specified, all replicas
are considered.
"""
if states is None:
states = ALL_REPLICA_STATES
assert isinstance(states, list)
assert exclude_version is None or isinstance(exclude_version, DeploymentVersion)
assert version is None or isinstance(version, DeploymentVersion)
if exclude_version is None and version is None:
return sum(len(self._replicas[state]) for state in states)
elif exclude_version is None and version is not None:
return sum(
len(list(filter(lambda r: r.version == version, self._replicas[state])))
for state in states
)
elif exclude_version is not None and version is None:
return sum(
len(
list(
filter(
lambda r: r.version != exclude_version,
self._replicas[state],
)
)
)
for state in states
)
else:
raise ValueError(
"Only one of `version` or `exclude_version` may be provided."
)
def __str__(self):
return str(self._replicas)
def __repr__(self):
return repr(self._replicas)
| ReplicaStateContainer |
python | optuna__optuna | tutorial/20_recipes/013_wilcoxon_pruner.py | {
"start": 3263,
"end": 13348
} | class ____:
max_iter: int = 10000
T0: float = 1.0
alpha: float = 2.0
patience: int = 50
def tsp_cost(vertices: np.ndarray, idxs: np.ndarray) -> float:
return norm(vertices[idxs] - vertices[np.roll(idxs, 1)], axis=-1).sum()
###################################################################################################
# Greedy solution for initial guess.
def tsp_greedy(vertices: np.ndarray) -> np.ndarray:
idxs = [0]
for _ in range(len(vertices) - 1):
dists_from_last = norm(vertices[idxs[-1], None] - vertices, axis=-1)
dists_from_last[idxs] = np.inf
idxs.append(np.argmin(dists_from_last))
return np.array(idxs)
###################################################################################################
# .. note::
# For simplicity of implementation, we use SA with the 2-opt neighborhood to solve TSP,
# but note that this is far from the "best" way to solve TSP. There are significantly more
# advanced methods than this method.
###################################################################################################
# The implementation of SA with 2-opt neighborhood is following.
def tsp_simulated_annealing(vertices: np.ndarray, options: SAOptions) -> np.ndarray:
def temperature(t: float):
assert 0.0 <= t and t <= 1.0
return options.T0 * (1 - t) ** options.alpha
N = len(vertices)
idxs = tsp_greedy(vertices)
cost = tsp_cost(vertices, idxs)
best_idxs = idxs.copy()
best_cost = cost
remaining_patience = options.patience
for iter in range(options.max_iter):
i = np.random.randint(0, N)
j = (i + 2 + np.random.randint(0, N - 3)) % N
i, j = min(i, j), max(i, j)
# Reverse the order of vertices between range [i+1, j].
# cost difference by 2-opt reversal
delta_cost = (
-norm(vertices[idxs[(i + 1) % N]] - vertices[idxs[i]])
- norm(vertices[idxs[j]] - vertices[idxs[(j + 1) % N]])
+ norm(vertices[idxs[i]] - vertices[idxs[j]])
+ norm(vertices[idxs[(i + 1) % N]] - vertices[idxs[(j + 1) % N]])
)
temp = temperature(iter / options.max_iter)
if delta_cost <= 0.0 or np.random.random() < math.exp(-delta_cost / temp):
# accept the 2-opt reversal
cost += delta_cost
idxs[i + 1 : j + 1] = idxs[i + 1 : j + 1][::-1]
if cost < best_cost:
best_idxs[:] = idxs
best_cost = cost
remaining_patience = options.patience
if cost > best_cost:
# If the best solution is not updated for "patience" iteratoins,
# restart from the best solution.
remaining_patience -= 1
if remaining_patience == 0:
idxs[:] = best_idxs
cost = best_cost
remaining_patience = options.patience
return best_idxs
###################################################################################################
# We make a random dataset of TSP.
def make_dataset(num_vertex: int, num_problem: int, seed: int = 0) -> np.ndarray:
rng = np.random.default_rng(seed=seed)
return rng.random((num_problem, num_vertex, 2))
dataset = make_dataset(
num_vertex=100,
num_problem=50,
)
N_TRIALS = 50
###################################################################################################
# We set a very small number of SA iterations for demonstration purpose.
# In practice, you should set a larger number of iterations (e.g., 1000000).
N_SA_ITER = 10000
###################################################################################################
# We counts the number of evaluation to know how many problems is pruned.
num_evaluation = 0
###################################################################################################
# In this tutorial, we optimize three parameters: ``T0``, ``alpha``, and ``patience``.
#
# ``T0`` and ``alpha`` defining the temperature schedule
# ---------------------------------------------------------------------------------------
#
# In simulated annealing, it is important to determine a good temperature scheduling, but
# there is no "silver schedule" that is good for all problems, so we must tune the schedule
# for this problem.
# This code parametrizes the temperature as a monomial function ``T0 * (1 - t) ** alpha``, where
# `t` progresses from 0 to 1. We try to optimize the two parameters ``T0`` and ``alpha``.
#
# ``patience``
# -----------------------------
#
# This parameter specifies a threshold of how many iterations we allow the annealing process
# continue without updating the best value. Practically, simulated annealing often drives
# the solution far away from the current best solution, and rolling back to the best solution
# periodically often improves optimization efficiency a lot. However, if the rollback happens
# too often, the optimization may get stuck in a local optimum, so we must tune the threshold
# to a sensible amount.
#
# .. note::
# Some samplers, including the default ``TPESampler``, currently cannot utilize the
# information of pruned trials effectively (especially when the last intermediate value
# is not the best approximation to the final objective function).
# As a workaround for this issue, you can return an estimation of the final value
# (e.g., the average of all evaluated values) when ``trial.should_prune()`` returns ``True``,
# instead of `raise optuna.TrialPruned()`.
# This will improve the sampler performance.
###################################################################################################
# We define the objective function to be optimized as follows.
# We early stop the evaluation by using the pruner.
def objective(trial: optuna.Trial) -> float:
global num_evaluation
options = SAOptions(
max_iter=N_SA_ITER,
T0=trial.suggest_float("T0", 0.01, 10.0, log=True),
alpha=trial.suggest_float("alpha", 1.0, 10.0, log=True),
patience=trial.suggest_int("patience", 10, 1000, log=True),
)
results = []
# For best results, shuffle the evaluation order in each trial.
instance_ids = np.random.permutation(len(dataset))
for instance_id in instance_ids:
num_evaluation += 1
result_idxs = tsp_simulated_annealing(vertices=dataset[instance_id], options=options)
result_cost = tsp_cost(dataset[instance_id], result_idxs)
results.append(result_cost)
trial.report(result_cost, instance_id)
if trial.should_prune():
# Return the current predicted value instead of raising `TrialPruned`.
# This is a workaround to tell the Optuna about the evaluation
# results in pruned trials.
return sum(results) / len(results)
return sum(results) / len(results)
###################################################################################################
# We use ``TPESampler`` with ``WilcoxonPruner``.
np.random.seed(0)
sampler = optuna.samplers.TPESampler(seed=1)
pruner = optuna.pruners.WilcoxonPruner(p_threshold=0.1)
study = optuna.create_study(direction="minimize", sampler=sampler, pruner=pruner)
study.enqueue_trial({"T0": 1.0, "alpha": 2.0, "patience": 50}) # default params
study.optimize(objective, n_trials=N_TRIALS)
###################################################################################################
# We can show the optimization results as:
print(f"The number of trials: {len(study.trials)}")
print(f"Best value: {study.best_value} (params: {study.best_params})")
print(f"Number of evaluation: {num_evaluation} / {len(dataset) * N_TRIALS}")
###################################################################################################
# Visualize the optimization history.
# Note that this plot shows both completed and pruned trials in same ways.
optuna.visualization.plot_optimization_history(study)
###################################################################################################
# Visualize the number of evaluations in each trial.
x_values = [x for x in range(len(study.trials)) if x != study.best_trial.number]
y_values = [
len(t.intermediate_values) for t in study.trials if t.number != study.best_trial.number
]
best_trial_y = [len(study.best_trial.intermediate_values)]
best_trial_x = [study.best_trial.number]
fig = go.Figure()
fig.add_trace(go.Bar(x=x_values, y=y_values, name="Evaluations"))
fig.add_trace(go.Bar(x=best_trial_x, y=best_trial_y, name="Best Trial", marker_color="red"))
fig.update_layout(
title="Number of evaluations in each trial",
xaxis_title="Trial number",
yaxis_title="Number of evaluations before pruned",
)
fig
###################################################################################################
# Visualize the greedy solution (used by initial guess) of a TSP problem.
d = dataset[0]
result_idxs = tsp_greedy(d)
result_idxs = np.append(result_idxs, result_idxs[0])
fig = go.Figure()
fig.add_trace(go.Scatter(x=d[result_idxs, 0], y=d[result_idxs, 1], mode="lines+markers"))
fig.update_layout(
title=f"greedy solution (initial guess), cost: {tsp_cost(d, result_idxs):.3f}",
xaxis=dict(scaleanchor="y", scaleratio=1),
)
fig
###################################################################################################
# Visualize the solution found by ``tsp_simulated_annealing`` of the same TSP problem.
params = study.best_params
options = SAOptions(
max_iter=N_SA_ITER,
patience=params["patience"],
T0=params["T0"],
alpha=params["alpha"],
)
result_idxs = tsp_simulated_annealing(d, options)
result_idxs = np.append(result_idxs, result_idxs[0])
fig = go.Figure()
fig.add_trace(go.Scatter(x=d[result_idxs, 0], y=d[result_idxs, 1], mode="lines+markers"))
fig.update_layout(
title=f"n_iter: {options.max_iter}, cost: {tsp_cost(d, result_idxs):.3f}",
xaxis=dict(scaleanchor="y", scaleratio=1),
)
fig
| SAOptions |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 3896,
"end": 4126
} | class ____(PrefectBaseModel):
"""
Class for storing the concurrency limit config in database.
"""
limit: int
collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.ENQUEUE
| ConcurrencyLimitConfig |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/winres.py | {
"start": 827,
"end": 1392
} | class ____(c_preproc.c_parser):
def filter_comments(self, node):
code = node.read()
if c_preproc.use_trigraphs:
for (a, b) in c_preproc.trig_def:
code = code.split(a).join(b)
code = c_preproc.re_nl.sub('', code)
code = c_preproc.re_cpp.sub(c_preproc.repl, code)
ret = []
for m in re.finditer(re_lines, code):
if m.group(2):
ret.append((m.group(2), m.group(3)))
else:
ret.append(('include', m.group(5)))
return ret
| rc_parser |
python | explosion__spaCy | spacy/lang/xx/__init__.py | {
"start": 35,
"end": 266
} | class ____(Language):
"""Language class to be used for models that support multiple languages.
This module allows models to specify their language ID as 'xx'.
"""
lang = "xx"
__all__ = ["MultiLanguage"]
| MultiLanguage |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 22243,
"end": 25807
} | class ____(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.transformer = SqueezeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| SqueezeBertForSequenceClassification |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_thinking_turns_param.py | {
"start": 222,
"end": 348
} | class ____(TypedDict, total=False):
type: Required[Literal["thinking_turns"]]
value: Required[int]
| BetaThinkingTurnsParam |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 92311,
"end": 94417
} | class ____(TestCase):
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
def test_roll2d(self):
x2 = np.reshape(np.arange(10), (2, 5))
x2r = np.roll(x2, 1)
assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
x2r = np.roll(x2, 1, axis=0)
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
# Roll multiple axes at once.
x2r = np.roll(x2, 1, axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (-1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (0, 1), axis=(0, 1))
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, (0, -1), axis=(0, 1))
assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
x2r = np.roll(x2, (1, 1), axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (-1, -1), axis=(0, 1))
assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
# Roll the same axis multiple times.
x2r = np.roll(x2, 1, axis=(0, 0))
assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
x2r = np.roll(x2, 1, axis=(1, 1))
assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
# Roll more than one turn in either direction.
x2r = np.roll(x2, 6, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, -4, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
| TestRoll |
python | python__mypy | mypy/types.py | {
"start": 121173,
"end": 124968
} | class ____(ProperType):
"""The union type Union[T1, ..., Tn] (at least one type argument)."""
__slots__ = (
"items",
"is_evaluated",
"uses_pep604_syntax",
"original_str_expr",
"original_str_fallback",
)
def __init__(
self,
items: Sequence[Type],
line: int = -1,
column: int = -1,
*,
is_evaluated: bool = True,
uses_pep604_syntax: bool = False,
) -> None:
super().__init__(line, column)
# We must keep this false to avoid crashes during semantic analysis.
# TODO: maybe switch this to True during type-checking pass?
self.items = flatten_nested_unions(items, handle_type_alias_type=False)
# is_evaluated should be set to false for type comments and string literals
self.is_evaluated = is_evaluated
# uses_pep604_syntax is True if Union uses OR syntax (X | Y)
self.uses_pep604_syntax = uses_pep604_syntax
# The meaning of these two is the same as for UnboundType. A UnionType can be
# return by type parser from a string "A|B", and we need to be able to fall back
# to plain string, when such a string appears inside a Literal[...].
self.original_str_expr: str | None = None
self.original_str_fallback: str | None = None
def can_be_true_default(self) -> bool:
return any(item.can_be_true for item in self.items)
def can_be_false_default(self) -> bool:
return any(item.can_be_false for item in self.items)
def __hash__(self) -> int:
return hash(frozenset(self.items))
def __eq__(self, other: object) -> bool:
if not isinstance(other, UnionType):
return NotImplemented
if self is other:
return True
return frozenset(self.items) == frozenset(other.items)
@overload
@staticmethod
def make_union(
items: Sequence[ProperType], line: int = -1, column: int = -1
) -> ProperType: ...
@overload
@staticmethod
def make_union(items: Sequence[Type], line: int = -1, column: int = -1) -> Type: ...
@staticmethod
def make_union(items: Sequence[Type], line: int = -1, column: int = -1) -> Type:
if len(items) > 1:
return UnionType(items, line, column)
elif len(items) == 1:
return items[0]
else:
return UninhabitedType()
def length(self) -> int:
return len(self.items)
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_union_type(self)
def relevant_items(self) -> list[Type]:
"""Removes NoneTypes from Unions when strict Optional checking is off."""
if state.strict_optional:
return self.items
else:
return [i for i in self.items if not isinstance(get_proper_type(i), NoneType)]
def serialize(self) -> JsonDict:
return {
".class": "UnionType",
"items": [t.serialize() for t in self.items],
"uses_pep604_syntax": self.uses_pep604_syntax,
}
@classmethod
def deserialize(cls, data: JsonDict) -> UnionType:
assert data[".class"] == "UnionType"
return UnionType(
[deserialize_type(t) for t in data["items"]],
uses_pep604_syntax=data["uses_pep604_syntax"],
)
def write(self, data: WriteBuffer) -> None:
write_tag(data, UNION_TYPE)
write_type_list(data, self.items)
write_bool(data, self.uses_pep604_syntax)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> UnionType:
ret = UnionType(read_type_list(data), uses_pep604_syntax=read_bool(data))
assert read_tag(data) == END_TAG
return ret
| UnionType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass13.py | {
"start": 187,
"end": 334
} | class ____:
id: int
x: int = field()
# This should generate an error because it will result in a runtime exception
y = field()
| MyClass |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/mro1.py | {
"start": 199,
"end": 223
} | class ____(A, B):
pass
| C |
python | ipython__ipython | tests/test_pretty.py | {
"start": 383,
"end": 881
} | class ____(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for i, child in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
| MyList |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_nixl_connector.py | {
"start": 316,
"end": 1839
} | class ____:
@pytest.fixture
def nixl_backend(self, engine_id: str):
"""Fixture for the NixlConnectorBackend."""
return NixlConnectorBackend(
llm_config=LLMConfig(
model_loading_config=dict(
model_id="Qwen/Qwen3-0.6B",
),
engine_kwargs=dict(
kv_transfer_config=dict(
kv_connector="NixlConnector",
kv_role="kv_both",
engine_id=engine_id,
)
),
),
)
@pytest.mark.parametrize(
"env_vars",
[
{},
{"VLLM_NIXL_SIDE_CHANNEL_PORT": "8080"},
{"VLLM_NIXL_SIDE_CHANNEL_HOST": "127.0.0.1"},
{
"VLLM_NIXL_SIDE_CHANNEL_PORT": "8080",
"VLLM_NIXL_SIDE_CHANNEL_HOST": "127.0.0.1",
},
],
)
def test_setup_environment_variables(self, nixl_backend, env_vars, engine_id: str):
"""Test that setup configures environment variables and overrides engine_id correctly."""
with patch.dict("os.environ", env_vars, clear=True):
nixl_backend.setup()
assert "VLLM_NIXL_SIDE_CHANNEL_PORT" in os.environ
assert "VLLM_NIXL_SIDE_CHANNEL_HOST" in os.environ
assert engine_id in nixl_backend.kv_transfer_config["engine_id"]
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestNixlConnectorBackend |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_string.py | {
"start": 6460,
"end": 7788
} | class ____:
def test_to_string_header_false(self):
# GH#49230
df = DataFrame([1, 2])
df.index.name = "a"
s = df.to_string(header=False)
expected = "a \n0 1\n1 2"
assert s == expected
df = DataFrame([[1, 2], [3, 4]])
df.index.name = "a"
s = df.to_string(header=False)
expected = "a \n0 1 2\n1 3 4"
assert s == expected
def test_to_string_multindex_header(self):
# GH#16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
| TestDataFrameToStringHeader |
python | ray-project__ray | python/ray/dashboard/modules/serve/tests/test_serve_dashboard.py | {
"start": 22229,
"end": 47115
} | class ____:
def _run_serve_deploy(self, config_path: Path):
proc = subprocess.run(
[
"serve",
"deploy",
"-a",
"http://localhost:8265",
str(config_path),
],
capture_output=True,
)
assert proc.returncode == 0, proc.stderr.decode("utf-8")
def _get_deployment_details(
self, app_name="test_app", deployment_name="hello_world"
):
"""Get deployment details from serve instance."""
serve_details = ServeInstanceDetails(**requests.get(SERVE_HEAD_URL).json())
app_details = serve_details.applications[app_name]
return app_details.deployments[deployment_name]
def _scale_and_verify_deployment(
self,
num_replicas,
app_name="test_app",
deployment_name="hello_world",
verify_actual_replicas=True,
):
"""Scale a deployment and verify both target and actual replica counts."""
response = requests.post(
SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
app_name=app_name, deployment_name=deployment_name
),
json={"target_num_replicas": num_replicas},
timeout=30,
)
response_data = response.json()
assert response.status_code == 200
assert "message" in response_data
assert (
"Scaling request received. Deployment will get scaled asynchronously."
in response_data["message"]
)
self._verify_deployment_details(
app_name=app_name,
deployment_name=deployment_name,
target_num_replicas=num_replicas,
verify_actual_replicas=verify_actual_replicas,
)
def _verify_deployment_details(
self,
app_name="test_app",
deployment_name="hello_world",
target_num_replicas=None,
deployment_status=None,
verify_actual_replicas=True,
):
deployment_details = self._get_deployment_details(app_name, deployment_name)
if target_num_replicas is not None:
assert deployment_details.target_num_replicas == target_num_replicas
if deployment_status is not None:
assert deployment_details.status == deployment_status
if verify_actual_replicas:
wait_for_condition(
lambda: get_num_alive_replicas(deployment_name, app_name)
== target_num_replicas,
timeout=30,
)
return True
def test_scale_deployment_endpoint_comprehensive(self, ray_start_stop):
serve.run(DeploymentClass.bind(), name="test_app", external_scaler_enabled=True)
wait_for_condition(
lambda: self._get_deployment_details().status == DeploymentStatus.HEALTHY
) # Wait for deployment to be healthy
self._scale_and_verify_deployment(
3
) # Test 1: Basic scaling up and down with actual replica verification
self._scale_and_verify_deployment(1)
self._scale_and_verify_deployment(0) # Test 2: Scale to zero replicas
self._scale_and_verify_deployment(2) # Test 3: Scale from zero replicas
def test_scale_deployment_during_application_startup(self, ray_start_stop):
semaphore = Semaphore.remote(value=0)
serve._run(
DeploymentClassWithBlockingInit.bind(semaphore),
name="test_app",
_blocking=False,
external_scaler_enabled=True,
)
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=2,
deployment_status=DeploymentStatus.UPDATING,
verify_actual_replicas=False,
timeout=30,
)
self._scale_and_verify_deployment(4, verify_actual_replicas=False)
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=4,
deployment_status=DeploymentStatus.UPDATING,
verify_actual_replicas=False,
timeout=30,
)
ray.get(semaphore.release.remote())
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=4,
deployment_status=DeploymentStatus.HEALTHY,
verify_actual_replicas=True,
timeout=30,
)
def test_scale_deployment_during_application_upgrade(self, ray_start_stop):
semaphore = Semaphore.remote(value=1)
serve._run(
DeploymentClass.bind(),
name="test_app",
_blocking=False,
external_scaler_enabled=True,
)
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=1,
deployment_status=DeploymentStatus.HEALTHY,
verify_actual_replicas=True,
timeout=30,
)
serve._run(
DeploymentClassWithBlockingInit.bind(semaphore),
name="test_app",
_blocking=False,
external_scaler_enabled=True,
)
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=2,
deployment_status=DeploymentStatus.UPDATING,
verify_actual_replicas=False,
timeout=30,
)
assert (
get_num_alive_replicas(deployment_name="hello_world", app_name="test_app")
== 1
)
self._scale_and_verify_deployment(3, verify_actual_replicas=False)
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=3,
deployment_status=DeploymentStatus.UPDATING,
verify_actual_replicas=False,
timeout=30,
)
ray.get(
semaphore.release.remote()
) # Release the semaphore to allow the second and third replica to start
wait_for_condition(
self._verify_deployment_details,
target_num_replicas=3,
deployment_status=DeploymentStatus.HEALTHY,
verify_actual_replicas=True,
timeout=30,
)
def test_scale_deployment_during_application_deletion(self, ray_start_stop):
    """Scaling a deployment that is being deleted must fail with HTTP 412."""
    signal_actor = SignalActor.remote()

    @serve.deployment(name="hello_world", num_replicas=1)
    class DeploymentClassWithBlockingDel:
        def __init__(self, signal_actor_handle):
            self.signal_actor_handle = signal_actor_handle

        def __del__(self):
            # Block teardown on the signal actor, keeping the application
            # in the deleting state until the test sends the signal.
            ray.get(self.signal_actor_handle.wait.remote())

        def __call__(self):
            return "test"

    serve._run(
        DeploymentClassWithBlockingDel.bind(signal_actor),
        name="test_app",
        _blocking=False,
        external_scaler_enabled=True,
    )
    wait_for_condition(
        lambda: self._get_deployment_details().status == DeploymentStatus.HEALTHY
    )  # Wait for deployment to be healthy

    # Start a non-blocking delete; the replica's __del__ is now parked
    # inside the signal actor (one waiter).
    serve.delete("test_app", _blocking=False)
    wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)

    # Scaling a deleting deployment is a precondition failure (412).
    response = requests.post(
        SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
            app_name="test_app", deployment_name="hello_world"
        ),
        json={"target_num_replicas": 5},
        timeout=30,
    )
    assert response.status_code == 412
    assert "Deployment is deleted" in response.json()["error"]

    # Unblock __del__ so the deletion can complete.
    ray.get(signal_actor.send.remote())
def test_scale_deployment_retention_across_application_upgrade(
    self, ray_start_stop
):
    """Test that replica counts set via /scale are retained across application upgrade."""
    with tempfile.TemporaryDirectory() as tmpdir:
        config_file = Path(tmpdir) / "config_v1.yaml"
        config_file.write_text(CONFIG_FILE_TEXT)

        # Initial deploy: one healthy replica.
        self._run_serve_deploy(config_file)
        wait_for_condition(
            self._verify_deployment_details,
            deployment_status=DeploymentStatus.HEALTHY,
            target_num_replicas=1,
            timeout=30,
        )

        # Scale to 3 replicas through the /scale endpoint.
        self._scale_and_verify_deployment(3, verify_actual_replicas=False)
        wait_for_condition(
            self._verify_deployment_details,
            target_num_replicas=3,
            deployment_status=DeploymentStatus.HEALTHY,
            verify_actual_replicas=True,
            timeout=30,
        )

        # Redeploying the same config must not reset the scaled count.
        self._run_serve_deploy(config_file)
        wait_for_condition(
            self._verify_deployment_details,
            target_num_replicas=3,
            deployment_status=DeploymentStatus.HEALTHY,
            verify_actual_replicas=True,
            timeout=30,
        )
def test_scale_deployment_retention_during_serve_controller_restart(
    self, ray_start_stop
):
    """Test that replica counts set via /scale are retained after serve controller restart."""
    serve.start()
    with tempfile.TemporaryDirectory() as tmpdir:
        config_file = Path(tmpdir) / "config_v1.yaml"
        config_file.write_text(CONFIG_FILE_TEXT)

        # Deploy and wait for one healthy replica.
        self._run_serve_deploy(config_file)
        wait_for_condition(
            self._verify_deployment_details,
            deployment_status=DeploymentStatus.HEALTHY,
            target_num_replicas=1,
            timeout=30,
        )

        # Scale to 3 replicas through the /scale endpoint.
        self._scale_and_verify_deployment(3, verify_actual_replicas=False)
        wait_for_condition(
            self._verify_deployment_details,
            target_num_replicas=3,
            deployment_status=DeploymentStatus.HEALTHY,
            verify_actual_replicas=True,
            timeout=30,
        )

        # Kill the controller (it is restarted) and verify the scaled
        # replica count survives the restart.
        ray.kill(serve.context._get_global_client()._controller, no_restart=False)
        wait_for_condition(
            self._verify_deployment_details,
            target_num_replicas=3,
            deployment_status=DeploymentStatus.HEALTHY,
            verify_actual_replicas=True,
            timeout=30,
        )
def test_error_case(self, ray_start_stop):
    """Malformed /scale requests are rejected with HTTP 400."""
    serve.start()

    # Unknown application -> 400 with a "not found" error.
    response = requests.post(
        SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
            app_name="nonexistent", deployment_name="hello_world"
        ),
        json={"target_num_replicas": 2},
        timeout=30,
    )
    assert response.status_code == 400
    assert "not found" in response.json()["error"].lower()

    # Unrecognized body field -> 400 with an "invalid request body" error.
    response = requests.post(
        SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
            app_name="test_app", deployment_name="hello_world"
        ),
        json={"invalid_field": 2},
        timeout=30,
    )
    assert response.status_code == 400
    assert "invalid request body" in response.json()["error"].lower()
def test_external_scaler_enabled_switchback(self, ray_start_stop):
    """Test switching external_scaler_enabled on and off without restarting Serve.

    This test verifies that:
    1. Scaling fails when external_scaler_enabled is False
    2. Scaling succeeds when external_scaler_enabled is True
    3. The switchback (False -> True -> False) works correctly
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        tmp_path = Path(tmpdir)
        config_disabled_file = tmp_path / "config_disabled.yaml"
        config_enabled_file = tmp_path / "config_enabled.yaml"
        config_disabled_file.write_text(CONFIG_FILE_TEXT_EXTERNAL_SCALER_DISABLED)
        config_enabled_file.write_text(CONFIG_FILE_TEXT)

        # Step 1: Deploy with external_scaler_enabled=False
        self._run_serve_deploy(config_disabled_file)
        wait_for_condition(
            self._verify_deployment_details,
            deployment_status=DeploymentStatus.HEALTHY,
            target_num_replicas=1,
            verify_actual_replicas=True,
            timeout=30,
        )

        # Step 2: Try to scale - should fail (precondition failed, 412)
        response = requests.post(
            SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
                app_name="test_app", deployment_name="hello_world"
            ),
            json={"target_num_replicas": 3},
            timeout=30,
        )
        assert response.status_code == 412
        assert (
            "Current value: external_scaler_enabled=false. To use this API, redeploy your application with 'external_scaler_enabled: true' in the config."
            in response.json()["error"]
        )
        # Verify replicas didn't change
        assert self._get_deployment_details().target_num_replicas == 1

        # Step 3: Enable external_scaler_enabled
        self._run_serve_deploy(config_enabled_file)
        wait_for_condition(
            self._verify_deployment_details,
            deployment_status=DeploymentStatus.HEALTHY,
            target_num_replicas=1,
            verify_actual_replicas=True,
            timeout=30,
        )

        # Step 4: Scale - should succeed
        self._scale_and_verify_deployment(3, verify_actual_replicas=True)

        # Step 5: Disable external_scaler_enabled again
        self._run_serve_deploy(config_disabled_file)
        # The deployment should maintain 3 replicas from the previous scale
        # operation but external scaler should be disabled
        wait_for_condition(
            self._verify_deployment_details,
            deployment_status=DeploymentStatus.HEALTHY,
            target_num_replicas=3,
            verify_actual_replicas=True,
            timeout=30,
        )

        # Step 6: Try to scale again - should fail
        response = requests.post(
            SERVE_HEAD_DEPLOYMENT_SCALE_URL.format(
                app_name="test_app", deployment_name="hello_world"
            ),
            json={"target_num_replicas": 5},
            timeout=30,
        )
        assert response.status_code == 412
        assert (
            "Current value: external_scaler_enabled=false. To use this API, redeploy your application with 'external_scaler_enabled: true' in the config."
            in response.json()["error"]
        )
        # Verify replicas stayed at 3
        assert self._get_deployment_details().target_num_replicas == 3
@pytest.mark.skipif(
    sys.platform == "darwin" and not TEST_ON_DARWIN, reason="Flaky on OSX."
)
def test_get_serve_instance_details_api_type_filtering(ray_start_stop):
    """
    Test the api_type query parameter for filtering applications by API type.
    Tests both declarative and imperative applications.
    """
    # First, deploy declarative applications
    world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
    declarative_config = {
        "applications": [
            {
                "name": "declarative_app1",
                "route_prefix": "/declarative1",
                "import_path": world_import_path,
            },
            {
                "name": "declarative_app2",
                "route_prefix": "/declarative2",
                "import_path": world_import_path,
            },
        ],
    }
    deploy_config_multi_app(declarative_config, SERVE_HEAD_URL)

    # Wait for declarative apps to be running
    def declarative_apps_running():
        response = requests.get(SERVE_HEAD_URL, timeout=15)
        assert response.status_code == 200
        serve_details = ServeInstanceDetails(**response.json())
        return len(serve_details.applications) == 2 and all(
            app.status == ApplicationStatus.RUNNING
            for app in serve_details.applications.values()
        )

    wait_for_condition(declarative_apps_running, timeout=15)
    print("Declarative applications are running.")

    # Deploy imperative applications using subprocess - the script runs
    # serve.run() programmatically in a separate driver process.
    deploy = subprocess.run(
        [
            sys.executable,
            str(Path(__file__).parent / "deploy_imperative_serve_apps.py"),
        ],
        capture_output=True,
        universal_newlines=True,
    )
    assert deploy.returncode == 0

    # Wait for imperative apps to be running
    def all_apps_running():
        response = requests.get(SERVE_HEAD_URL, timeout=15)
        assert response.status_code == 200
        serve_details = ServeInstanceDetails(**response.json())
        return len(
            serve_details.applications
        ) == 4 and all(  # 2 declarative + 2 imperative
            app.status == ApplicationStatus.RUNNING
            for app in serve_details.applications.values()
        )

    wait_for_condition(all_apps_running, timeout=15)
    print("All applications (declarative + imperative) are running.")

    # Test 1: No api_type parameter - should return all applications
    response = requests.get(SERVE_HEAD_URL, timeout=15)
    assert response.status_code == 200
    serve_details = ServeInstanceDetails(**response.json())
    assert len(serve_details.applications) == 4
    app_names = set(serve_details.applications.keys())
    assert app_names == {"declarative_app1", "declarative_app2", "app1", "app2"}

    # Test 2: Filter by declarative applications
    response = requests.get(SERVE_HEAD_URL + "?api_type=declarative", timeout=15)
    assert response.status_code == 200
    serve_details = ServeInstanceDetails(**response.json())
    assert len(serve_details.applications) == 2
    app_names = set(serve_details.applications.keys())
    assert app_names == {"declarative_app1", "declarative_app2"}
    for app in serve_details.applications.values():
        assert app.source == "declarative"

    # Test 3: Filter by imperative applications
    response = requests.get(SERVE_HEAD_URL + "?api_type=imperative", timeout=15)
    assert response.status_code == 200
    serve_details = ServeInstanceDetails(**response.json())
    assert len(serve_details.applications) == 2
    app_names = set(serve_details.applications.keys())
    assert app_names == {"app1", "app2"}
    for app in serve_details.applications.values():
        assert app.source == "imperative"

    # Test 4: Filter by unknown - should return 400 error (unknown is not a valid user input)
    response = requests.get(SERVE_HEAD_URL + "?api_type=unknown", timeout=15)
    assert response.status_code == 400
    assert "Invalid 'api_type' value" in response.text
    assert "Must be one of: imperative, declarative" in response.text
@pytest.mark.skipif(
    sys.platform == "darwin" and not TEST_ON_DARWIN, reason="Flaky on OSX."
)
def test_get_serve_instance_details_invalid_api_type(ray_start_stop):
    """
    Test that invalid api_type values return appropriate error responses.
    """
    # A bogus filter value is rejected with 400 and a hint listing the
    # accepted values.
    resp = requests.get(SERVE_HEAD_URL + "?api_type=invalid_type", timeout=15)
    assert resp.status_code == 400
    assert "Invalid 'api_type' value" in resp.text
    assert "Must be one of: imperative, declarative" in resp.text

    # A plausible-looking but unsupported value is rejected as well.
    resp = requests.get(SERVE_HEAD_URL + "?api_type=python", timeout=15)
    assert resp.status_code == 400
    assert "Invalid 'api_type' value" in resp.text
@pytest.mark.skipif(
    sys.platform == "darwin" and not TEST_ON_DARWIN, reason="Flaky on OSX."
)
def test_get_serve_instance_details_api_type_case_insensitive(ray_start_stop):
    """
    Test that api_type parameter is case insensitive.
    """
    # Deploy a single declarative application to filter on.
    world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
    deploy_config_multi_app(
        {
            "applications": [
                {
                    "name": "test_app",
                    "route_prefix": "/test",
                    "import_path": world_import_path,
                }
            ],
        },
        SERVE_HEAD_URL,
    )

    def app_running():
        response = requests.get(SERVE_HEAD_URL, timeout=15)
        assert response.status_code == 200
        details = ServeInstanceDetails(**response.json())
        return (
            len(details.applications) == 1
            and details.applications["test_app"].status
            == ApplicationStatus.RUNNING
        )

    wait_for_condition(app_running, timeout=15)

    # Every casing of "declarative" should match the deployed app.
    for spelling in ("DECLARATIVE", "Declarative", "declarative", "DeClArAtIvE"):
        response = requests.get(
            f"{SERVE_HEAD_URL}?api_type={spelling}", timeout=15
        )
        assert response.status_code == 200
        details = ServeInstanceDetails(**response.json())
        assert len(details.applications) == 1
        assert "test_app" in details.applications
@pytest.mark.skipif(
    sys.platform == "darwin" and not TEST_ON_DARWIN, reason="Flaky on OSX."
)
def test_get_serve_instance_details_external_scaler_enabled(ray_start_stop):
    """
    Test that external_scaler_enabled is correctly returned in the API response.

    This test verifies that when applications are deployed with different
    external_scaler_enabled values, the /api/serve/applications/ endpoint
    correctly returns the external_scaler_enabled field for each application.
    """
    world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
    config = {
        "applications": [
            {
                "name": "app_with_scaler",
                "route_prefix": "/with_scaler",
                "import_path": world_import_path,
                "external_scaler_enabled": True,
            },
            {
                "name": "app_without_scaler",
                "route_prefix": "/without_scaler",
                "import_path": world_import_path,
                "external_scaler_enabled": False,
            },
        ],
    }
    deploy_config_multi_app(config, SERVE_HEAD_URL)

    def both_apps_running():
        response = requests.get(SERVE_HEAD_URL, timeout=15)
        assert response.status_code == 200
        serve_details = ServeInstanceDetails(**response.json())
        return (
            len(serve_details.applications) == 2
            and serve_details.applications["app_with_scaler"].status
            == ApplicationStatus.RUNNING
            and serve_details.applications["app_without_scaler"].status
            == ApplicationStatus.RUNNING
        )

    wait_for_condition(both_apps_running, timeout=15)

    # Verify both apps have correct external_scaler_enabled values
    response = requests.get(SERVE_HEAD_URL, timeout=15)
    assert response.status_code == 200
    serve_details = ServeInstanceDetails(**response.json())
    assert len(serve_details.applications) == 2
    assert "app_with_scaler" in serve_details.applications
    assert "app_without_scaler" in serve_details.applications
    assert serve_details.applications["app_with_scaler"].external_scaler_enabled is True
    assert (
        serve_details.applications["app_without_scaler"].external_scaler_enabled
        is False
    )

    # Test default value (when external_scaler_enabled is not specified).
    # NOTE(review): the later `len(...) == 1` check implies this deploy
    # replaces the previous applications — confirm against deploy semantics.
    config_default = {
        "applications": [
            {
                "name": "app_default",
                "route_prefix": "/default",
                "import_path": world_import_path,
            }
        ],
    }
    deploy_config_multi_app(config_default, SERVE_HEAD_URL)

    def app_default_running():
        response = requests.get(SERVE_HEAD_URL, timeout=15)
        assert response.status_code == 200
        serve_details = ServeInstanceDetails(**response.json())
        return (
            len(serve_details.applications) == 1
            and serve_details.applications["app_default"].status
            == ApplicationStatus.RUNNING
        )

    wait_for_condition(app_default_running, timeout=15)

    # Verify default value is False
    response = requests.get(SERVE_HEAD_URL, timeout=15)
    assert response.status_code == 200
    serve_details = ServeInstanceDetails(**response.json())
    assert "app_default" in serve_details.applications
    assert serve_details.applications["app_default"].external_scaler_enabled is False
if __name__ == "__main__":
    # Allow running this test module directly; exit with pytest's status code.
    sys.exit(pytest.main(["-v", __file__]))
| TestScaleDeploymentEndpoint |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 4464,
"end": 4621
} | class ____(scale_color_brewer):
"""
Sequential, diverging and qualitative color scales
"""
_aesthetics = ["fill"]
@dataclass
| scale_fill_brewer |
python | geekcomputers__Python | insta_monitering/insta_api.py | {
"start": 3575,
"end": 4280
} | class ____(tornado.web.RequestHandler):
def get(self):
try:
q = self.get_argument("q")
user = self.get_argument("userId")
type = self.get_argument("type")
productId = self.get_argument("productId")
date = self.get_argument("date")
limit = self.get_argument("limit")
except:
self.send_error(400)
recordsobj = DBDataFetcher(user=user, tags=q, type=type, productId=productId)
data = recordsobj.DBFetcherLess(limit=limit, date=date)
# print("{0}, {1}, {2}, {3}".format(temp["userId"], temp["productId"], temp["query"], temp["status"]))
self.write(data)
| SenderHandlerinstaLess |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 3296,
"end": 3750
} | class ____(BaseModel):
type: Literal["wait"]
"""Specifies the event type.
For a wait action, this property is always set to `wait`.
"""
Action: TypeAlias = Annotated[
Union[
ActionClick,
ActionDoubleClick,
ActionDrag,
ActionKeypress,
ActionMove,
ActionScreenshot,
ActionScroll,
ActionType,
ActionWait,
],
PropertyInfo(discriminator="type"),
]
| ActionWait |
python | yaml__pyyaml | setup.py | {
"start": 4126,
"end": 5893
} | class ____(_Distribution):
def __init__(self, attrs=None):
_Distribution.__init__(self, attrs)
if not self.ext_modules:
return
for idx in range(len(self.ext_modules)-1, -1, -1):
ext = self.ext_modules[idx]
if not isinstance(ext, Extension):
continue
setattr(self, ext.attr_name, None)
self.global_options = [
(ext.option_name, None,
"include %s (default if %s is available)"
% (ext.feature_description, ext.feature_name)),
(ext.neg_option_name, None,
"exclude %s" % ext.feature_description),
] + self.global_options
self.negative_opt = self.negative_opt.copy()
self.negative_opt[ext.neg_option_name] = ext.option_name
def has_ext_modules(self):
if not self.ext_modules:
return False
for ext in self.ext_modules:
with_ext = self.ext_status(ext)
if with_ext is None or with_ext:
return True
return False
def ext_status(self, ext):
implementation = platform.python_implementation()
if implementation not in ['CPython', 'PyPy']:
return False
if isinstance(ext, Extension):
# the "build by default" behavior is implemented by this returning None
with_ext = getattr(self, ext.attr_name) or os.environ.get('PYYAML_FORCE_{0}'.format(ext.feature_name.upper()))
try:
with_ext = int(with_ext) # attempt coerce envvar to int
except TypeError:
pass
return with_ext
else:
return True
| Distribution |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 72643,
"end": 72993
} | class ____(Visitor):
"""Drops all mutable parameters.
Drops all mutable parameters. This visitor differs from
transforms.RemoveMutableParameters in that the latter absorbs mutable
parameters into the signature, while this one blindly drops them.
"""
def VisitParameter(self, p):
return p.Replace(mutated_type=None)
| DropMutableParameters |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 6421,
"end": 7667
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
task = CloudMemorystoreFailoverInstanceOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
data_protection_mode=TEST_DATA_PROTECTION_MODE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.failover_instance.assert_called_once_with(
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
data_protection_mode=TEST_DATA_PROTECTION_MODE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreFailoverInstanceOperator |
python | pennersr__django-allauth | allauth/idp/oidc/contrib/ninja/security.py | {
"start": 301,
"end": 1561
} | class ____(AuthBase):
"""
Use the OIDC access token to authenticate and the scopes attached to the
token to authorize the request.
"""
openapi_type: str = "apiKey"
scope = None
def __init__(self, scope: Union[str, list, dict]):
"""The scope passed can either be:
- a single scope (``str``),
- a list of scopes, all of which should be granted.
- a list of scope lists. Your token should match at least all scopes of one of the scope lists.
- A dictionary, with the request method (e.g. ``GET``) as key, and one
of the scope values from the previous bullet. The scopes to match are
then dynamically selected based on the request.
"""
super().__init__()
self.scope = scope
def __call__(self, request: HttpRequest):
server = get_server()
orequest = extract_params(request)
valid, ctx = server.verify_request(*orequest, scopes=[])
if not valid:
return None
if not is_scope_granted(self.scope, ctx.access_token, request.method):
return None
if ctx.access_token and ctx.access_token.user:
request.user = ctx.access_token.user
return ctx.access_token
| TokenAuth |
python | squidfunk__mkdocs-material | material/plugins/blog/plugin.py | {
"start": 2420,
"end": 45378
} | class ____(BasePlugin[BlogConfig]):
supports_multiple_instances = True
# Initialize plugin
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize incremental builds
self.is_serve = False
self.is_dirty = False
# Initialize temporary directory
self.temp_dir = mkdtemp()
# Determine whether we're serving the site
def on_startup(self, *, command, dirty):
self.is_serve = command == "serve"
self.is_dirty = dirty
# Initialize authors and set defaults
def on_config(self, config):
if not self.config.enabled:
return
# Initialize entrypoint
self.blog: View
# Initialize and resolve authors, if enabled
if self.config.authors:
self.authors = self._resolve_authors(config)
# By default, drafts are rendered when the documentation is served,
# but not when it is built, for a better user experience
if self.is_serve and self.config.draft_on_serve:
self.config.draft = True
# Resolve and load posts and generate views (run later) - we want to allow
# other plugins to add generated posts or views, so we run this plugin as
# late as possible. We also need to remove the posts from the navigation
# before navigation is constructed, as the entrypoint should be considered
# to be the active page for each post. The URLs of posts are computed before
# Markdown processing, so that when linking to and from posts, behavior is
# exactly the same as with regular documentation pages. We create all pages
# related to posts as part of this plugin, so we control the entire process.
@event_priority(-50)
def on_files(self, files, *, config):
if not self.config.enabled:
return
# Resolve path to entrypoint and site directory
root = posixpath.normpath(self.config.blog_dir)
site = config.site_dir
# Compute and normalize path to posts directory
path = self.config.post_dir.format(blog = root)
path = posixpath.normpath(path)
# Adjust destination paths for media files
for file in files.media_files():
if not file.src_uri.startswith(path):
continue
# We need to adjust destination paths for assets to remove the
# purely functional posts directory prefix when building
file.dest_uri = file.dest_uri.replace(path, root)
file.abs_dest_path = os.path.join(site, file.dest_path)
file.url = file.url.replace(path, root)
# Resolve entrypoint and posts sorted by descending date - if the posts
# directory or entrypoint do not exist, they are automatically created
self.blog = self._resolve(files, config)
self.blog.posts = sorted(
self._resolve_posts(files, config),
key = lambda post: (
post.config.pin,
post.config.date.created
),
reverse = True
)
# Generate views for archive
if self.config.archive:
views = self._generate_archive(config, files)
self.blog.views.extend(views)
# Generate views for categories
if self.config.categories:
views = self._generate_categories(config, files)
# We always sort the list of categories by name first, so that any
# custom sorting function that returns the same value for two items
# returns them in a predictable and logical order, because sorting
# in Python is stable, i.e., order of equal items is preserved
self.blog.views.extend(sorted(
sorted(views, key = view_name),
key = self.config.categories_sort_by,
reverse = self.config.categories_sort_reverse
))
# Generate views for profiles
if self.config.authors_profiles:
views = self._generate_profiles(config, files)
self.blog.views.extend(views)
# Generate pages for views
for view in self._resolve_views(self.blog):
if self._config_pagination(view):
for page in self._generate_pages(view, config, files):
view.pages.append(page)
# Ensure that entrypoint is always included in navigation
self.blog.file.inclusion = InclusionLevel.INCLUDED
# Attach posts and views to navigation (run later) - again, we allow other
# plugins to alter the navigation before we start to attach posts and views
# generated by this plugin at the correct locations in the navigation. Also,
# we make sure to correct links to the parent and siblings of each page.
@event_priority(-50)
def on_nav(self, nav, *, config, files):
if not self.config.enabled:
return
# If we're not building a standalone blog, the entrypoint will always
# have a parent when it is included in the navigation. The parent is
# essential to correctly resolve the location where the archive and
# category views are attached. If the entrypoint doesn't have a parent,
# we know that the author did not include it in the navigation, so we
# explicitly mark it as not included.
if not self.blog.parent and self.config.blog_dir != ".":
self.blog.file.inclusion = InclusionLevel.NOT_IN_NAV
# Attach posts to entrypoint without adding them to the navigation, so
# that the entrypoint is considered to be the active page for each post
self._attach(self.blog, [None, *reversed(self.blog.posts), None])
for post in self.blog.posts:
post.file.inclusion = InclusionLevel.NOT_IN_NAV
# Revert temporary exclusion of views from navigation
for view in self._resolve_views(self.blog):
view.file.inclusion = self.blog.file.inclusion
for page in view.pages:
page.file.inclusion = self.blog.file.inclusion
# Attach views for archive
if self.config.archive:
title = self._translate(self.config.archive_name, config)
views = [_ for _ in self.blog.views if isinstance(_, Archive)]
# Attach and link views for archive
if self.blog.file.inclusion.is_in_nav():
self._attach_to(self.blog, Section(title, views), nav)
# Attach views for categories
if self.config.categories:
title = self._translate(self.config.categories_name, config)
views = [_ for _ in self.blog.views if isinstance(_, Category)]
# Attach and link views for categories, if any
if self.blog.file.inclusion.is_in_nav() and views:
self._attach_to(self.blog, Section(title, views), nav)
# Attach views for profiles
if self.config.authors_profiles:
title = self._translate(self.config.authors_profiles_name, config)
views = [_ for _ in self.blog.views if isinstance(_, Profile)]
# Attach and link views for categories, if any
if self.blog.file.inclusion.is_in_nav() and views:
self._attach_to(self.blog, Section(title, views), nav)
# Attach pages for views
for view in self._resolve_views(self.blog):
if self._config_pagination(view):
for at in range(1, len(view.pages)):
self._attach_at(view.parent, view, view.pages[at])
# Prepare post for rendering (run later) - allow other plugins to alter
# the contents or metadata of a post before it is rendered and make sure
# that the post includes a separator, which is essential for rendering
# excerpts that should be included in views
@event_priority(-50)
def on_page_markdown(self, markdown, *, page, config, files):
if not self.config.enabled:
return
# Skip if page is not a post managed by this instance - this plugin has
# support for multiple instances, which is why this check is necessary
if page not in self.blog.posts:
if not self._config_pagination(page):
return
# We set the contents of the view to its title if pagination should
# not keep the content of the original view on paginated views
if not self.config.pagination_keep_content:
view = self._resolve_original(page)
if view in self._resolve_views(self.blog):
# If the current view is paginated, use the rendered title
# of the original view in case the author set the title in
# the page's contents, or it would be overridden with the
# one set in mkdocs.yml, leading to inconsistent headings
assert isinstance(view, View)
if view != page:
name = view._title_from_render or view.title
return f"# {name}"
# Nothing more to be done for views
return
# Extract and assign authors to post, if enabled
if self.config.authors:
for id in page.config.authors:
if id not in self.authors:
raise PluginError(f"Couldn't find author '{id}'")
# Append to list of authors
page.authors.append(self.authors[id])
# Extract settings for excerpts
separator = self.config.post_excerpt_separator
max_authors = self.config.post_excerpt_max_authors
max_categories = self.config.post_excerpt_max_categories
# Ensure presence of separator and throw, if its absent and required -
# we append the separator to the end of the contents of the post, if it
# is not already present, so we can remove footnotes or other content
# from the excerpt without affecting the content of the excerpt
if separator not in page.markdown:
if self.config.post_excerpt == "required":
docs = os.path.relpath(config.docs_dir)
path = os.path.relpath(page.file.abs_src_path, docs)
raise PluginError(
f"Couldn't find '{separator}' in post '{path}' in '{docs}'"
)
# Create excerpt for post and inherit authors and categories - excerpts
# can contain a subset of the authors and categories of the post
page.excerpt = Excerpt(page, config, files)
page.excerpt.authors = page.authors[:max_authors]
page.excerpt.categories = page.categories[:max_categories]
# Process posts
def on_page_content(self, html, *, page, config, files):
if not self.config.enabled:
return
# Skip if page is not a post managed by this instance - this plugin has
# support for multiple instances, which is why this check is necessary
if page not in self.blog.posts:
return
# Compute readtime of post, if enabled and not explicitly set
if self.config.post_readtime:
words_per_minute = self.config.post_readtime_words_per_minute
if not page.config.readtime:
page.config.readtime = readtime(html, words_per_minute)
# Register template filters for plugin
def on_env(self, env, *, config, files):
if not self.config.enabled:
return
# Transform links to point to posts and pages
for post in self.blog.posts:
self._generate_links(post, config, files)
# Filter for formatting dates related to posts
def date_filter(date: datetime):
return self._format_date_for_post(date, config)
# Fetch URL template filter from environment - the filter might
# be overridden by other plugins, so we must retrieve and wrap it
url_filter = env.filters["url"]
# Patch URL template filter to add support for paginated views, i.e.,
# that paginated views never link to themselves but to the main vie
@pass_context
def url_filter_with_pagination(context: Context, url: str | None):
page = context["page"]
# If the current page is a view, check if the URL links to the page
# itself, and replace it with the URL of the main view
if isinstance(page, View):
view = self._resolve_original(page)
if page.url == url:
url = view.url
# Forward to original template filter
return url_filter(context, url)
# Register custom template filters
env.filters["date"] = date_filter
env.filters["url"] = url_filter_with_pagination
# Prepare view for rendering (run latest) - views are rendered last, as we
# need to mutate the navigation to account for pagination. The main problem
# is that we need to replace the view in the navigation, because otherwise
# the view would not be considered active.
@event_priority(-100)
def on_page_context(self, context, *, page, config, nav):
if not self.config.enabled:
return
# Skip if page is not a view managed by this instance - this plugin has
# support for multiple instances, which is why this check is necessary
view = self._resolve_original(page)
if view not in self._resolve_views(self.blog):
return
# Render excerpts and prepare pagination
posts, pagination = self._render(page)
# Render pagination links
def pager(args: object):
return pagination.pager(
format = self.config.pagination_format,
show_if_single_page = self.config.pagination_if_single_page,
**args
)
# Assign posts and pagination to context
context["posts"] = posts
context["pagination"] = pager if pagination else None
# Remove temporary directory on shutdown
def on_shutdown(self):
rmtree(self.temp_dir)
# -------------------------------------------------------------------------
# Check if the given post is excluded
def _is_excluded(self, post: Post):
if self.config.draft:
return False
# If a post was not explicitly marked or unmarked as draft, and the
# date should be taken into account, we automatically mark it as draft
# if the publishing date is in the future. This, of course, is opt-in
# and must be explicitly enabled by the author.
if not isinstance(post.config.draft, bool):
if self.config.draft_if_future_date:
return post.config.date.created > datetime.now(timezone.utc)
# Post might be a draft
return bool(post.config.draft)
# -------------------------------------------------------------------------
# Resolve entrypoint - the entrypoint of the blog must have been created
# if it did not exist before, and hosts all posts sorted by descending date
def _resolve(self, files: Files, config: MkDocsConfig):
path = os.path.join(self.config.blog_dir, "index.md")
path = os.path.normpath(path)
# Create entrypoint, if it does not exist - note that the entrypoint is
# created in the docs directory, not in the temporary directory
docs = os.path.relpath(config.docs_dir)
name = os.path.join(docs, path)
if not os.path.isfile(name):
file = self._path_to_file(path, config, temp = False)
files.append(file)
# Create file in docs directory
self._save_to_file(file.abs_src_path, "# Blog\n\n")
# Create and return entrypoint
file = files.get_file_from_path(path)
return View(None, file, config)
# Resolve post - the caller must make sure that the given file points to an
# actual post (and not a page), or behavior might be unpredictable
def _resolve_post(self, file: File, config: MkDocsConfig):
post = Post(file, config)
# Compute path and create a temporary file for path resolution
path = self._format_path_for_post(post, config)
temp = self._path_to_file(path, config, temp = False)
# Replace destination file system path and URL
file.dest_uri = temp.dest_uri
file.abs_dest_path = temp.abs_dest_path
file.url = temp.url
# Replace canonical URL and return post
post._set_canonical_url(config.site_url)
return post
# Resolve posts from directory - traverse all documentation pages and filter
# and yield those that are located in the posts directory
def _resolve_posts(self, files: Files, config: MkDocsConfig):
path = self.config.post_dir.format(blog = self.config.blog_dir)
path = os.path.normpath(path)
# Create posts directory, if it does not exist
docs = os.path.relpath(config.docs_dir)
name = os.path.join(docs, path)
if not os.path.isdir(name):
os.makedirs(name, exist_ok = True)
# Filter posts from pages
for file in files.documentation_pages():
if not file.src_path.startswith(path):
continue
# Temporarily remove post from navigation
file.inclusion = InclusionLevel.EXCLUDED
# Resolve post - in order to determine whether a post should be
# excluded, we must load it and analyze its metadata. All posts
# marked as drafts are excluded, except for when the author has
# configured drafts to be included in the navigation.
post = self._resolve_post(file, config)
if not self._is_excluded(post):
yield post
# Resolve authors - check if there's an authors file at the configured
# location, and if one was found, load and validate it
def _resolve_authors(self, config: MkDocsConfig):
path = self.config.authors_file.format(blog = self.config.blog_dir)
path = os.path.normpath(path)
# Resolve path relative to docs directory
docs = os.path.relpath(config.docs_dir)
file = os.path.join(docs, path)
# If the authors file does not exist, return here
config: Authors = Authors()
if not os.path.isfile(file):
return config.authors
# Open file and parse as YAML
with open(file, encoding = "utf-8-sig") as f:
config.config_file_path = os.path.abspath(file)
try:
config.load_dict(yaml.load(f, SafeLoader) or {})
# The authors file could not be loaded because of a syntax error,
# which we display to the author with a nice error message
except Exception as e:
raise PluginError(
f"Error reading authors file '{path}' in '{docs}':\n"
f"{e}"
)
# Validate authors and throw if errors occurred
errors, warnings = config.validate()
for _, w in warnings:
log.warning(w)
for _, e in errors:
raise PluginError(
f"Error reading authors file '{path}' in '{docs}':\n"
f"{e}"
)
# Return authors
return config.authors
# Resolve views of the given view in pre-order
def _resolve_views(self, view: View):
yield view
# Resolve views recursively
for page in view.views:
for next in self._resolve_views(page):
assert isinstance(next, View)
yield next
# Resolve siblings of a navigation item
def _resolve_siblings(self, item: StructureItem, nav: Navigation):
if isinstance(item.parent, Section):
return item.parent.children
else:
return nav.items
# Resolve original page or view (e.g. for paginated views)
def _resolve_original(self, page: Page):
if isinstance(page, View) and page.pages:
return page.pages[0]
else:
return page
# -------------------------------------------------------------------------
# Generate views for archive - analyze posts and generate the necessary
# views, taking the date format provided by the author into account
def _generate_archive(self, config: MkDocsConfig, files: Files):
for post in self.blog.posts:
date = post.config.date.created
# Compute name and path of archive view
name = self._format_date_for_archive(date, config)
path = self._format_path_for_archive(post, config)
# Create file for view, if it does not exist
file = files.get_file_from_path(path)
if not file:
file = self._path_to_file(path, config)
files.append(file)
# Create file in temporary directory
self._save_to_file(file.abs_src_path, f"# {name}")
# Temporarily remove view from navigation
file.inclusion = InclusionLevel.EXCLUDED
# Create and yield view
if not isinstance(file.page, Archive):
yield Archive(name, file, config)
# Assign post to archive
assert isinstance(file.page, Archive)
file.page.posts.append(post)
# Generate views for categories - analyze posts and generate the necessary
# views, taking the allowed categories as set by the author into account
def _generate_categories(self, config: MkDocsConfig, files: Files):
for post in self.blog.posts:
for name in post.config.categories:
path = self._format_path_for_category(name)
# Ensure category is in non-empty allow list
categories = self.config.categories_allowed or [name]
if name not in categories:
docs = os.path.relpath(config.docs_dir)
path = os.path.relpath(post.file.abs_src_path, docs)
raise PluginError(
f"Error reading categories of post '{path}' in "
f"'{docs}': category '{name}' not in allow list"
)
# Create file for view, if it does not exist
file = files.get_file_from_path(path)
if not file:
file = self._path_to_file(path, config)
files.append(file)
# Create file in temporary directory
self._save_to_file(file.abs_src_path, f"# {name}")
# Temporarily remove view from navigation
file.inclusion = InclusionLevel.EXCLUDED
# Create and yield view
if not isinstance(file.page, Category):
yield Category(name, file, config)
# Assign post to category and vice versa
assert isinstance(file.page, Category)
file.page.posts.append(post)
post.categories.append(file.page)
# Generate views for profiles - analyze posts and generate the necessary
# views to provide a profile page for each author listing all posts
def _generate_profiles(self, config: MkDocsConfig, files: Files):
for post in self.blog.posts:
for id in post.config.authors:
author = self.authors[id]
path = self._format_path_for_profile(id, author)
# Create file for view, if it does not exist
file = files.get_file_from_path(path)
if not file:
file = self._path_to_file(path, config)
files.append(file)
# Create file in temporary directory
self._save_to_file(file.abs_src_path, f"# {author.name}")
# Temporarily remove view from navigation and assign profile
# URL to author, if not explicitly set
file.inclusion = InclusionLevel.EXCLUDED
if not author.url:
author.url = file.url
# Create and yield view
if not isinstance(file.page, Profile):
yield Profile(author.name, file, config)
# Assign post to profile
assert isinstance(file.page, Profile)
file.page.posts.append(post)
# Generate pages for pagination - analyze view and generate the necessary
# pages, creating a chain of views for simple rendering and replacement
def _generate_pages(self, view: View, config: MkDocsConfig, files: Files):
yield view
# Compute pagination boundaries and create pages - pages are internally
# handled as copies of a view, as they map to the same source location
step = self._config_pagination_per_page(view)
for at in range(step, len(view.posts), step):
path = self._format_path_for_pagination(view, 1 + at // step)
# Create file for view, if it does not exist
file = files.get_file_from_path(path)
if not file:
file = self._path_to_file(path, config)
files.append(file)
# Copy file to temporary directory
copy_file(view.file.abs_src_path, file.abs_src_path)
# Temporarily remove view from navigation
file.inclusion = InclusionLevel.EXCLUDED
# Create and yield view
if not isinstance(file.page, View):
yield view.__class__(None, file, config)
# Assign pages and posts to view
assert isinstance(file.page, View)
file.page.pages = view.pages
file.page.posts = view.posts
# Generate links from the given post to other posts, pages, and sections -
# this can only be done once all posts and pages have been parsed
def _generate_links(self, post: Post, config: MkDocsConfig, files: Files):
if not post.config.links:
return
# Resolve path relative to docs directory for error reporting
docs = os.path.relpath(config.docs_dir)
path = os.path.relpath(post.file.abs_src_path, docs)
# Find all links to pages and replace them with references - while all
# internal links are processed, external links remain as they are
for link in _find_links(post.config.links.items):
url = urlparse(link.url)
if url.scheme:
continue
# Resolve file for link, and throw if the file could not be found -
# authors can link to other pages, as well as to assets or files of
# any kind, but it is essential that the file that is linked to is
# found, so errors are actually catched and reported
file = files.get_file_from_path(url.path)
if not file:
log.warning(
f"Error reading metadata of post '{path}' in '{docs}':\n"
f"Couldn't find file for link '{url.path}'"
)
continue
# If the file linked to is not a page, but an asset or any other
# file, we resolve the destination URL and continue
if not isinstance(file.page, Page):
link.url = file.url
continue
# Cast link to reference
link.__class__ = Reference
assert isinstance(link, Reference)
# Assign page title, URL and metadata to link
link.title = link.title or file.page.title
link.url = file.page.url
link.meta = copy(file.page.meta)
# If the link has no fragment, we can continue - if it does, we
# need to find the matching anchor in the table of contents
if not url.fragment:
continue
# If we're running under dirty reload, MkDocs will reset all pages,
# so it's not possible to resolve anchor links. Thus, the only way
# to make this work is to skip the entire process of anchor link
# resolution in case of a dirty reload.
if self.is_dirty:
continue
# Resolve anchor for fragment, and throw if the anchor could not be
# found - authors can link to any anchor in the table of contents
anchor = _find_anchor(file.page.toc, url.fragment)
if not anchor:
log.warning(
f"Error reading metadata of post '{path}' in '{docs}':\n"
f"Couldn't find anchor '{url.fragment}' in '{url.path}'"
)
# Restore link to original state
link.url = url.geturl()
continue
# Append anchor to URL and set subtitle
link.url += f"#{anchor.id}"
link.meta["subtitle"] = anchor.title
# -------------------------------------------------------------------------
# Attach a list of pages to each other and to the given parent item without
# explicitly adding them to the navigation, which can be done by the caller
def _attach(self, parent: StructureItem, pages: list[Page]):
for tail, page, head in zip(pages, pages[1:], pages[2:]):
# Link page to parent and siblings
page.parent = parent
page.previous_page = tail
page.next_page = head
# If the page is a view, we know that we generated it and need to
# link its siblings back to the view
if isinstance(page, View):
view = self._resolve_original(page)
if tail: tail.next_page = view
if head: head.previous_page = view
# Attach a page to the given parent and link it to the previous and next
# page of the given host - this is exclusively used for paginated views
def _attach_at(self, parent: StructureItem, host: Page, page: Page):
self._attach(parent, [host.previous_page, page, host.next_page])
# Attach a section as a sibling to the given view, make sure its pages are
# part of the navigation, and ensure all pages are linked correctly
def _attach_to(self, view: View, section: Section, nav: Navigation):
section.parent = view.parent
# Resolve siblings, which are the children of the parent section, or
# the top-level list of navigation items if the view is at the root of
# the project, and append the given section to it. It's currently not
# possible to chose the position of a section.
items = self._resolve_siblings(view, nav)
items.append(section)
# Find last sibling that is a page, skipping sections, as we need to
# append the given section after all other pages
tail = next(item for item in reversed(items) if isinstance(item, Page))
head = tail.next_page
# Attach section to navigation and pages to each other
nav.pages.extend(section.children)
self._attach(section, [tail, *section.children, head])
# -------------------------------------------------------------------------
# Render excerpts and pagination for the given view
def _render(self, view: View):
posts, pagination = view.posts, None
# Create pagination, if enabled
if self._config_pagination(view):
at = view.pages.index(view)
# Compute pagination boundaries
step = self._config_pagination_per_page(view)
p, q = at * step, at * step + step
# Extract posts in pagination boundaries
posts = view.posts[p:q]
pagination = self._render_pagination(view, (p, q))
# Render excerpts for selected posts
posts = [
self._render_post(post.excerpt, view)
for post in posts if post.excerpt
]
# Return posts and pagination
return posts, pagination
# Render excerpt in the context of the given view
def _render_post(self, excerpt: Excerpt, view: View):
excerpt.render(view, self.config.post_excerpt_separator)
# Attach top-level table of contents item to view if it should be added
# and both, the view and excerpt contain table of contents items
toc = self._config_toc(view)
if toc and excerpt.toc.items and view.toc.items:
view.toc.items[0].children.append(excerpt.toc.items[0])
# Return excerpt
return excerpt
# Create pagination for the given view and range
def _render_pagination(self, view: View, range: tuple[int, int]):
p, q = range
# Create URL from the given page to another page
def url_maker(n: int):
return get_relative_url(view.pages[n - 1].url, view.url)
# Return pagination
return Pagination(
view.posts, page = q // (q - p),
items_per_page = q - p,
url_maker = url_maker
)
# -------------------------------------------------------------------------
# Retrieve configuration value or return default
def _config(self, key: str, default: any):
return default if self.config[key] is None else self.config[key]
# Retrieve configuration value for table of contents
def _config_toc(self, view: View):
default = self.config.blog_toc
if isinstance(view, Archive):
return self._config("archive_toc", default)
if isinstance(view, Category):
return self._config("categories_toc", default)
if isinstance(view, Profile):
return self._config("authors_profiles_toc", default)
else:
return default
# Retrieve configuration value for pagination
def _config_pagination(self, view: View):
default = self.config.pagination
if isinstance(view, Archive):
return self._config("archive_pagination", default)
if isinstance(view, Category):
return self._config("categories_pagination", default)
if isinstance(view, Profile):
return self._config("authors_profiles_pagination", default)
else:
return default
# Retrieve configuration value for pagination per page
def _config_pagination_per_page(self, view: View):
default = self.config.pagination_per_page
if isinstance(view, Archive):
return self._config("archive_pagination_per_page", default)
if isinstance(view, Category):
return self._config("categories_pagination_per_page", default)
if isinstance(view, Profile):
return self._config("authors_profiles_pagination_per_page", default)
else:
return default
# -------------------------------------------------------------------------
# Format path for post
def _format_path_for_post(self, post: Post, config: MkDocsConfig):
categories = post.config.categories[:self.config.post_url_max_categories]
categories = [self._slugify_category(name) for name in categories]
# Replace placeholders in format string
date = post.config.date.created
path = self.config.post_url_format.format(
categories = "/".join(categories),
date = self._format_date_for_post_url(date, config),
file = post.file.name,
slug = post.config.slug or self._slugify_post(post)
)
# Normalize path and strip slashes at the beginning and end
path = posixpath.normpath(path.strip("/"))
return posixpath.join(self.config.blog_dir, f"{path}.md")
# Format path for archive
def _format_path_for_archive(self, post: Post, config: MkDocsConfig):
date = post.config.date.created
path = self.config.archive_url_format.format(
date = self._format_date_for_archive_url(date, config)
)
# Normalize path and strip slashes at the beginning and end
path = posixpath.normpath(path.strip("/"))
return posixpath.join(self.config.blog_dir, f"{path}.md")
# Format path for category
def _format_path_for_category(self, name: str):
path = self.config.categories_url_format.format(
slug = self._slugify_category(name)
)
# Normalize path and strip slashes at the beginning and end
path = posixpath.normpath(path.strip("/"))
return posixpath.join(self.config.blog_dir, f"{path}.md")
# Format path for profile
def _format_path_for_profile(self, id: str, author: Author):
path = self.config.authors_profiles_url_format.format(
slug = author.slug or id,
name = author.name
)
# Normalize path and strip slashes at the beginning and end
path = posixpath.normpath(path.strip("/"))
return posixpath.join(self.config.blog_dir, f"{path}.md")
# Format path for pagination
def _format_path_for_pagination(self, view: View, page: int):
path = self.config.pagination_url_format.format(
page = page
)
# Compute base path for pagination - if the given view is an index file,
# we need to pop the file name from the base so it's not part of the URL
# and we need to append `index` to the path, so the paginated view is
# also an index page - see https://t.ly/71MKF
base, _ = posixpath.splitext(view.file.src_uri)
if view.is_index:
base = posixpath.dirname(base)
path = posixpath.join(path, "index")
# Normalize path and strip slashes at the beginning and end
path = posixpath.normpath(path.strip("/"))
return posixpath.join(base, f"{path}.md")
# -------------------------------------------------------------------------
# Format date - if the given format string refers to a predefined format,
# we format the date without a time component in order to keep sane default
# behavior, since authors will not expect time to be relevant for most posts
# as by our assumptions - see https://t.ly/Yi7ZC
def _format_date(self, date: datetime, format: str, config: MkDocsConfig):
locale: str = config.theme["language"].replace("-", "_")
if format in ["full", "long", "medium", "short"]:
return format_date(date, format = format, locale = locale)
else:
return format_datetime(date, format = format, locale = locale)
# Format date for post
def _format_date_for_post(self, date: datetime, config: MkDocsConfig):
format = self.config.post_date_format
return self._format_date(date, format, config)
# Format date for post URL
def _format_date_for_post_url(self, date: datetime, config: MkDocsConfig):
format = self.config.post_url_date_format
return self._format_date(date, format, config)
# Format date for archive
def _format_date_for_archive(self, date: datetime, config: MkDocsConfig):
format = self.config.archive_date_format
return self._format_date(date, format, config)
# Format date for archive URL
def _format_date_for_archive_url(self, date: datetime, config: MkDocsConfig):
format = self.config.archive_url_date_format
return self._format_date(date, format, config)
# -------------------------------------------------------------------------
# Slugify post title
def _slugify_post(self, post: Post):
separator = self.config.post_slugify_separator
return self.config.post_slugify(post.title, separator)
# Slugify category
def _slugify_category(self, name: str):
separator = self.config.categories_slugify_separator
return self.config.categories_slugify(name, separator)
# -------------------------------------------------------------------------
# Create a file for the given path, which must point to a valid source file,
# either inside the temporary directory or the docs directory
def _path_to_file(self, path: str, config: MkDocsConfig, *, temp = True):
assert path.endswith(".md")
file = File(
path,
config.docs_dir if not temp else self.temp_dir,
config.site_dir,
config.use_directory_urls
)
# Hack: mark file as generated, so other plugins don't think it's part
# of the file system. This is more or less a new quasi-standard that
# still needs to be adopted by MkDocs, and was introduced by the
# git-revision-date-localized-plugin - see https://bit.ly/3ZUmdBx
if temp:
file.generated_by = "material/blog"
# Return file
return file
# Create a file with the given content on disk
def _save_to_file(self, path: str, content: str):
os.makedirs(os.path.dirname(path), exist_ok = True)
with open(path, "w", encoding = "utf-8") as f:
f.write(content)
# -------------------------------------------------------------------------
# Translate the placeholder referenced by the given key
def _translate(self, key: str, config: MkDocsConfig) -> str:
env = config.theme.get_env()
template = env.get_template(
"partials/language.html", globals = { "config": config }
)
# Translate placeholder
return template.module.t(key)
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
# Find all links in the given list of items
def _find_links(items: list[StructureItem]):
for item in items:
# Resolve link
if isinstance(item, Link):
yield item
# Resolve sections recursively
if isinstance(item, Section):
for item in _find_links(item.children):
assert isinstance(item, Link)
yield item
# Find anchor in table of contents for the given id
def _find_anchor(toc: TableOfContents, id: str):
for anchor in toc:
if anchor.id == id:
return anchor
# Resolve anchors recursively
anchor = _find_anchor(anchor.children, id)
if isinstance(anchor, AnchorLink):
return anchor
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.blog")
| BlogPlugin |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/configurable.py | {
"start": 9832,
"end": 15079
} | class ____(DynamicRunnable[Input, Output]):
"""`Runnable` that can be dynamically configured.
A `RunnableConfigurableFields` should be initiated using the
`configurable_fields` method of a `Runnable`.
Here is an example of using a `RunnableConfigurableFields` with LLMs:
```python
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
# This creates a RunnableConfigurableFields for a chat model.
# When invoking the created RunnableSequence, you can pass in the
# value for your ConfigurableField's id which in this case
# will be change in temperature
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.invoke({"x": 0}, config={"configurable": {"temperature": 0.9}})
```
Here is an example of using a `RunnableConfigurableFields` with `HubRunnables`:
```python
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
# Invoking prompt with `with_config` method
prompt.invoke(
{"question": "foo", "context": "bar"},
config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}},
)
```
"""
fields: dict[str, AnyConfigurableField]
"""The configurable fields to use."""
@property
def config_specs(self) -> list[ConfigurableFieldSpec]:
"""Get the configuration specs for the `RunnableConfigurableFields`.
Returns:
The configuration specs.
"""
config_specs = []
default_fields = type(self.default).model_fields
for field_name, spec in self.fields.items():
if isinstance(spec, ConfigurableField):
config_specs.append(
ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description
or default_fields[field_name].description,
annotation=spec.annotation
or default_fields[field_name].annotation,
default=getattr(self.default, field_name),
is_shared=spec.is_shared,
)
)
else:
config_specs.append(
make_options_spec(spec, default_fields[field_name].description)
)
config_specs.extend(self.default.config_specs)
return get_unique_config_specs(config_specs)
@override
def configurable_fields(
self, **kwargs: AnyConfigurableField
) -> RunnableSerializable[Input, Output]:
return self.default.configurable_fields(**{**self.fields, **kwargs})
def _prepare(
self, config: RunnableConfig | None = None
) -> tuple[Runnable[Input, Output], RunnableConfig]:
config = ensure_config(config)
specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()}
configurable_fields = {
specs_by_id[k][0]: v
for k, v in config.get("configurable", {}).items()
if k in specs_by_id and isinstance(specs_by_id[k][1], ConfigurableField)
}
configurable_single_options = {
k: v.options[(config.get("configurable", {}).get(v.id) or v.default)]
for k, v in self.fields.items()
if isinstance(v, ConfigurableFieldSingleOption)
}
configurable_multi_options = {
k: [
v.options[o]
for o in config.get("configurable", {}).get(v.id, v.default)
]
for k, v in self.fields.items()
if isinstance(v, ConfigurableFieldMultiOption)
}
configurable = {
**configurable_fields,
**configurable_single_options,
**configurable_multi_options,
}
if configurable:
init_params = {
k: v
for k, v in self.default.__dict__.items()
if k in type(self.default).model_fields
}
return (
self.default.__class__(**{**init_params, **configurable}),
config,
)
return (self.default, config)
# Before Python 3.11 native StrEnum is not available
| RunnableConfigurableFields |
python | realpython__materials | game-of-life-python/source_code_step_2/rplife/patterns.py | {
"start": 47,
"end": 114
} | class ____:
name: str
alive_cells: set[tuple[int, int]]
| Pattern |
python | apache__airflow | providers/apache/beam/src/airflow/providers/apache/beam/operators/beam.py | {
"start": 37384,
"end": 38236
} | class ____(_GoArtifact):
def __init__(self, file: str) -> None:
self.file = file
self.should_init_go_module = False
def is_located_on_gcs(self) -> bool:
return _object_is_located_on_gcs(self.file)
def download_from_gcs(self, gcs_hook: GCSHook, tmp_dir: str) -> None:
self.file = _download_object_from_gcs(gcs_hook=gcs_hook, uri=self.file, tmp_dir=tmp_dir)
self.should_init_go_module = True
def start_pipeline(
self,
beam_hook: BeamHook,
variables: dict,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
beam_hook.start_go_pipeline(
variables=variables,
go_file=self.file,
process_line_callback=process_line_callback,
should_init_module=self.should_init_go_module,
)
| _GoFile |
python | PyCQA__pylint | tests/functional/m/metaclass_attr_access.py | {
"start": 306,
"end": 460
} | class ____:
"""metaclassed class"""
__metaclass__ = Meta
_meta_args = ('foo', 'bar')
def __init__(self):
print('__init__', self)
| Test |
python | kamyu104__LeetCode-Solutions | Python/maximum-score-from-grid-operations.py | {
"start": 48,
"end": 1499
} | class ____(object):
def maximumScore(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
prefix = [0]*(len(grid)+1)
for i in xrange(len(grid)):
prefix[i+1] = prefix[i]+grid[i][0]
result = 0
# dp[0][i]: the maximum score from 0 to the current column, and the current column has i black cells, without scoring the white cells of the current column
# dp[1][i]: the maximum score from 0 to the current column, and the current column has i black cells, with scoring the white cells of the current column
dp = [[0]*(len(grid)+1) for _ in xrange(2)]
for j in xrange(1, len(grid[0])):
new_prefix = [0]*(len(grid)+1)
for i in xrange(len(grid)):
new_prefix[i+1] = new_prefix[i]+grid[i][j]
new_dp = [[0]*(len(grid)+1) for _ in xrange(2)]
for i in xrange(len(grid)+1):
for k in xrange(i+1):
new_dp[0][i] = max(new_dp[0][i], (prefix[i]-prefix[k])+dp[0][k])
new_dp[0][i] = max(new_dp[0][i], max(dp[1]))
for k in xrange(i+1, len(grid)+1):
new_dp[1][i] = max(new_dp[1][i], dp[1][k]+(new_prefix[k]-new_prefix[i]))
new_dp[1][i] = max(new_dp[1][i], new_dp[0][i])
dp, prefix = new_dp, new_prefix
return max(dp[1])
# Time: O(n^3)
# Space: O(n)
# prefix sum, dp
| Solution |
python | huggingface__transformers | tests/models/chinese_clip/test_image_processing_chinese_clip.py | {
"start": 1021,
"end": 3196
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_image_shape(self, images):
return 3, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| ChineseCLIPImageProcessingTester |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec52.py | {
"start": 259,
"end": 372
} | class ____:
# This should generate an error.
x: A[P]
# This should generate an error, not crash.
B().x(1)
| B |
python | Textualize__textual | src/textual/widgets/_radio_set.py | {
"start": 505,
"end": 11217
} | class ____(VerticalScroll, can_focus=True, can_focus_children=False):
"""Widget for grouping a collection of radio buttons into a set.
When a collection of [`RadioButton`][textual.widgets.RadioButton]s are
grouped with this widget, they will be treated as a mutually-exclusive
grouping. If one button is turned on, the previously-on button will be
turned off.
"""
ALLOW_SELECT = False
ALLOW_MAXIMIZE = True
DEFAULT_CSS = """
RadioSet {
border: tall $border-blurred;
background: $surface;
padding: 0 1;
height: auto;
width: 1fr;
&.-textual-compact {
border: none !important;
padding: 0;
}
& > RadioButton {
background: transparent;
border: none;
padding: 0;
width: 1fr;
& > .toggle--button {
color: $panel-darken-2;
background: $panel;
}
&.-selected {
background: $block-cursor-blurred-background;
}
}
& > RadioButton.-on .toggle--button {
color: $text-success;
}
&:focus {
/* The following rules/styles mimic similar ToggleButton:focus rules in
* ToggleButton. If those styles ever get updated, these should be too.
*/
border: tall $border;
background-tint: $foreground 5%;
& > RadioButton.-selected {
color: $block-cursor-foreground;
text-style: $block-cursor-text-style;
background: $block-cursor-background;
}
}
}
"""
BINDINGS: ClassVar[list[BindingType]] = [
Binding("down,right", "next_button", "Next option", show=False),
Binding("enter,space", "toggle_button", "Toggle", show=False),
Binding("up,left", "previous_button", "Previous option", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| enter, space | Toggle the currently-selected button. |
| left, up | Select the previous radio button in the set. |
| right, down | Select the next radio button in the set. |
"""
_selected: var[int | None] = var[Optional[int]](None)
"""The index of the currently-selected radio button."""
compact: reactive[bool] = reactive(False, toggle_class="-textual-compact")
"""Enable compact display?"""
@rich.repr.auto
class Changed(Message):
"""Posted when the pressed button in the set changes.
This message can be handled using an `on_radio_set_changed` method.
"""
ALLOW_SELECTOR_MATCH = {"pressed"}
"""Additional message attributes that can be used with the [`on` decorator][textual.on]."""
def __init__(self, radio_set: RadioSet, pressed: RadioButton) -> None:
"""Initialise the message.
Args:
pressed: The radio button that was pressed.
"""
super().__init__()
self.radio_set = radio_set
"""A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed."""
self.pressed = pressed
"""The [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change."""
self.index = radio_set.pressed_index
"""The index of the [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change."""
@property
def control(self) -> RadioSet:
"""A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.
This is an alias for [`Changed.radio_set`][textual.widgets.RadioSet.Changed.radio_set]
and is used by the [`on`][textual.on] decorator.
"""
return self.radio_set
def __rich_repr__(self) -> rich.repr.Result:
yield "radio_set", self.radio_set
yield "pressed", self.pressed
yield "index", self.index
def __init__(
self,
*buttons: str | RadioButton,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
tooltip: RenderableType | None = None,
compact: bool = False,
) -> None:
"""Initialise the radio set.
Args:
buttons: The labels or [`RadioButton`][textual.widgets.RadioButton]s to group together.
name: The name of the radio set.
id: The ID of the radio set in the DOM.
classes: The CSS classes of the radio set.
disabled: Whether the radio set is disabled or not.
tooltip: Optional tooltip.
compact: Enable compact radio set style
Note:
When a `str` label is provided, a
[RadioButton][textual.widgets.RadioButton] will be created from
it.
"""
self._pressed_button: RadioButton | None = None
"""Holds the radio buttons we're responsible for."""
super().__init__(
*[
(button if isinstance(button, RadioButton) else RadioButton(button))
for button in buttons
],
name=name,
id=id,
classes=classes,
disabled=disabled,
)
if tooltip is not None:
self.tooltip = tooltip
self.compact = compact
def _on_mount(self, _: Mount) -> None:
"""Perform some processing once mounted in the DOM."""
# If there are radio buttons, select the first available one.
self.action_next_button()
# Get all the buttons within us; we'll be doing a couple of things
# with that list.
buttons = list(self.query(RadioButton))
# RadioButtons can have focus, by default. But we're going to take
# that over and handle movement between them. So here we tell them
# all they can't focus.
for button in buttons:
button.can_focus = False
# It's possible for the user to pass in a collection of radio
# buttons, with more than one set to on; they shouldn't, but we
# can't stop them. So here we check for that and, for want of a
# better approach, we keep the first one on and turn all the others
# off.
switched_on = [button for button in buttons if button.value]
with self.prevent(RadioButton.Changed):
for button in switched_on[1:]:
button.value = False
# Keep track of which button is initially pressed.
if switched_on:
self._pressed_button = switched_on[0]
def watch__selected(self) -> None:
self.query(RadioButton).remove_class("-selected")
if self._selected is not None:
self._nodes[self._selected].add_class("-selected")
self._scroll_to_selected()
def _on_radio_button_changed(self, event: RadioButton.Changed) -> None:
"""Respond to the value of a button in the set being changed.
Args:
event: The event.
"""
# We're going to consume the underlying radio button events, making
# it appear as if they don't emit their own, as far as the caller is
# concerned. As such, stop the event bubbling and also prohibit the
# same event being sent out if/when we make a value change in here.
event.stop()
with self.prevent(RadioButton.Changed):
# If the message pertains to a button being clicked to on...
if event.radio_button.value:
# If there's a button pressed right now and it's not really a
# case of the user mashing on the same button...
if (
self._pressed_button is not None
and self._pressed_button != event.radio_button
):
self._pressed_button.value = False
# Make the pressed button this new button.
self._pressed_button = event.radio_button
# Emit a message to say our state has changed.
self.post_message(self.Changed(self, event.radio_button))
else:
# We're being clicked off, we don't want that.
event.radio_button.value = True
def _on_radio_set_changed(self, event: RadioSet.Changed) -> None:
"""Handle a change to which button in the set is pressed.
This handler ensures that, when a button is pressed, it's also the
selected button.
"""
self._selected = event.index
async def _on_click(self, _: Click) -> None:
"""Handle a click on or within the radio set.
This handler ensures that focus moves to the clicked radio set, even
if there's a click on one of the radio buttons it contains.
"""
self.focus()
@property
def pressed_button(self) -> RadioButton | None:
"""The currently-pressed [`RadioButton`][textual.widgets.RadioButton], or `None` if none are pressed."""
return self._pressed_button
@property
def pressed_index(self) -> int:
"""The index of the currently-pressed [`RadioButton`][textual.widgets.RadioButton], or -1 if none are pressed."""
return (
self._nodes.index(self._pressed_button)
if self._pressed_button is not None
else -1
)
def action_previous_button(self) -> None:
"""Navigate to the previous button in the set.
Note that this will wrap around to the end if at the start.
"""
self._selected = _widget_navigation.find_next_enabled(
self.children,
anchor=self._selected,
direction=-1,
)
def action_next_button(self) -> None:
"""Navigate to the next button in the set.
Note that this will wrap around to the start if at the end.
"""
self._selected = _widget_navigation.find_next_enabled(
self.children,
anchor=self._selected,
direction=1,
)
def action_toggle_button(self) -> None:
"""Toggle the state of the currently-selected button."""
if self._selected is not None:
button = self._nodes[self._selected]
assert isinstance(button, RadioButton)
button.toggle()
def _scroll_to_selected(self) -> None:
"""Ensure that the selected button is in view."""
if self._selected is not None:
button = self._nodes[self._selected]
self.call_after_refresh(self.scroll_to_widget, button, animate=False)
| RadioSet |
python | doocs__leetcode | lcof2/剑指 Offer II 039. 直方图最大矩形面积/Solution.py | {
"start": 0,
"end": 662
} | class ____:
def largestRectangleArea(self, heights: List[int]) -> int:
n = len(heights)
left = [-1] * n
right = [n] * n
stk = []
for i, x in enumerate(heights):
while stk and heights[stk[-1]] >= x:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
while stk and heights[stk[-1]] >= heights[i]:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
return max(x * (r - l - 1) for x, l, r in zip(heights, left, right))
| Solution |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_meta.py | {
"start": 1292,
"end": 4689
} | class ____(OrganizationEventsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get_features(self, organization: Organization, request: Request) -> dict[str, bool | None]:
feature_names = [
"organizations:dashboards-mep",
"organizations:mep-rollout-flag",
"organizations:performance-use-metrics",
"organizations:profiling",
"organizations:dynamic-sampling",
"organizations:use-metrics-layer",
"organizations:starfish-view",
]
batch_features = features.batch_has(
feature_names,
organization=organization,
actor=request.user,
)
all_features = (
batch_features.get(f"organization:{organization.id}", {})
if batch_features is not None
else {}
)
for feature_name in feature_names:
if feature_name not in all_features:
all_features[feature_name] = features.has(
feature_name, organization=organization, actor=request.user
)
return all_features
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response({"count": 0})
dataset = self.get_dataset(request)
batch_features = self.get_features(organization, request)
use_metrics = (
(
batch_features.get("organizations:mep-rollout-flag", False)
and batch_features.get("organizations:dynamic-sampling", False)
)
or batch_features.get("organizations:performance-use-metrics", False)
or batch_features.get("organizations:dashboards-mep", False)
)
with handle_query_errors():
if dataset in RPC_DATASETS:
result = dataset.run_table_query(
params=snuba_params,
query_string=request.query_params.get("query"),
selected_columns=["count()"],
orderby=None,
offset=0,
limit=1,
referrer=Referrer.API_ORGANIZATION_EVENTS_META,
config=SearchResolverConfig(),
)
return Response({"count": result["data"][0]["count()"]})
else:
result = dataset.query(
selected_columns=["count()"],
snuba_params=snuba_params,
query=request.query_params.get("query"),
referrer=Referrer.API_ORGANIZATION_EVENTS_META.value,
has_metrics=use_metrics,
use_metrics_layer=batch_features.get("organizations:use-metrics-layer", False),
# TODO: @athena - add query_source when all datasets support it
# query_source=(
# QuerySource.FRONTEND if is_frontend_request(request) else QuerySource.API
# ),
fallback_to_transactions=True,
)
return Response({"count": result["data"][0]["count"]})
UNESCAPED_QUOTE_RE = re.compile('(?<!\\\\)"')
@region_silo_endpoint
| OrganizationEventsMetaEndpoint |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 21984,
"end": 22397
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->MegatronBert
| MegatronBertOnlyNSPHead |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_common.py | {
"start": 2512,
"end": 5726
} | class ____(Enum):
"""Describes the training state of one FSDP state / parameter group."""
# Transition to forward starting pre-forward until post-forward
FORWARD = auto()
# Transition to pre-backward when unsharding in backward
PRE_BACKWARD = auto()
# Transition to post-backward when resharding and reducing gradients
POST_BACKWARD = auto()
# Idle before/after forward or before pre-backward/after post-backward
IDLE = auto()
def _raise_assert_with_print(*args: Any, **kwargs: Any):
print(f"[Rank {dist.get_rank()}] ", end="")
print(*args, **kwargs)
traceback.print_stack()
raise AssertionError(*args, **kwargs)
def _is_composable_with_fsdp(module: nn.Module) -> bool:
registry = _get_registry(module)
if registry is None:
return True
# Registry keys by function name
return "replicate" not in registry
def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size:
padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor
return torch.Size([padded_dim0]) + tensor_size[1:]
def _chunk_with_empty(
tensor: torch.Tensor, num_chunks: int, dim: int
) -> list[torch.Tensor]:
chunks = list(torch.chunk(tensor, num_chunks, dim=dim))
while len(chunks) < num_chunks:
chunks.append(chunks[0].new_empty(0))
return chunks
def _get_dim_chunked_size(
chunk: torch.Tensor, unchunked_size: torch.Size, dim: int
) -> torch.Size:
if chunk.numel() > 0:
return chunk.size()
# For 0 numel, we need to preserve nonzero-sized dims for DTensor APIs
return unchunked_size[:dim] + torch.Size([0]) + unchunked_size[dim + 1 :]
def _from_local_no_grad(
local_tensor: torch.Tensor,
sharding_spec: DTensorSpec,
) -> DTensor:
"""
This method is similar to ``DTensor.from_local()`` except that in eager mode
it avoids some CPU overhead by avoiding default args and not being differentiable.
"""
if not compiled_autograd_enabled():
# pyrefly: ignore [bad-argument-type]
return DTensor(
# Use the local tensor directly instead of constructing a new tensor
# variable, e.g. with `view_as()`, since this is not differentiable
# pyrefly: ignore [bad-argument-count]
local_tensor,
sharding_spec,
# pyrefly: ignore [unexpected-keyword]
requires_grad=local_tensor.requires_grad,
)
else:
return DTensor.from_local(
local_tensor,
sharding_spec.mesh,
sharding_spec.placements,
shape=sharding_spec.shape,
stride=sharding_spec.stride,
)
def _to_dtype_if_needed(
tensor: torch.Tensor, dtype: Optional[torch.dtype]
) -> torch.Tensor:
if dtype is not None and tensor.dtype != dtype:
return tensor.to(dtype)
return tensor
def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor:
if (
not isinstance(x, torch.Tensor)
or not torch.is_floating_point(x)
or x.dtype == dtype
):
return x
return x.to(dtype)
def is_bw() -> bool:
return torch._C._current_graph_task_id() != -1
| TrainingState |
python | walkccc__LeetCode | solutions/1707. Maximum XOR With an Element From Array/1707.py | {
"start": 1055,
"end": 1705
} | class ____:
def maximizeXor(self, nums: list[int], queries: list[list[int]]) -> list[int]:
ans = [-1] * len(queries)
maxBit = int(math.log2(max(max(nums), max(x for x, _ in queries))))
bitTrie = BitTrie(maxBit)
nums.sort()
i = 0 # nums' index
for queryIndex, x, m in sorted([IndexedQuery(i, x, m)
for i, (x, m) in enumerate(queries)],
key=lambda x: x.m):
while i < len(nums) and nums[i] <= m:
bitTrie.insert(nums[i])
i += 1
if i > 0 and nums[i - 1] <= m:
ans[queryIndex] = bitTrie.getMaxXor(x)
return ans
| Solution |
python | great-expectations__great_expectations | tests/integration/integration_test_fixture.py | {
"start": 160,
"end": 1877
} | class ____:
"""IntegrationTestFixture
Configurations for integration tests are defined as IntegrationTestFixture dataclass objects.
Individual tests can also be run by setting the '-k' flag and referencing the name of test, like the following example:
pytest -v --docs-tests -k "test_docs[migration_guide_spark_v2_api]" tests/integration/test_script_runner.py
Args:
name: Name for integration test. Individual tests can be run by using the -k option and specifying the name of the test.
user_flow_script: Required script for integration test.
backend_dependencies: Flag allows you to tie an individual test with a BackendDependency. Allows for tests to be run / disabled using cli flags (like --aws which enables AWS integration tests). If no backends are required, must explicitly set to empty list.
data_context_dir: Path of great_expectations/ that is used in the test.
data_dir: Folder that contains data used in the test.
other_files: other files (like credential information) to copy into the test environment. These are presented as Tuple(path_to_source_file, path_to_target_file), where path_to_target_file is relative to the test_script.py file in our test environment
util_script: Path of optional util script that is used in test script (for loading test_specific methods like load_data_into_test_database())
""" # noqa: E501 # FIXME CoP
name: str
user_flow_script: str
backend_dependencies: List[BackendDependencies]
data_context_dir: Optional[str] = None
data_dir: Optional[str] = None
other_files: Optional[Tuple[Tuple[str, str]]] = None
util_script: Optional[str] = None
| IntegrationTestFixture |
python | tiangolo__fastapi | docs_src/request_form_models/tutorial002_pv1.py | {
"start": 84,
"end": 272
} | class ____(BaseModel):
username: str
password: str
class Config:
extra = "forbid"
@app.post("/login/")
async def login(data: FormData = Form()):
return data
| FormData |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance8.py | {
"start": 273,
"end": 653
} | class ____(ABC):
@abstractmethod
def f(self) -> None: ...
def func1(cls: Any):
assert issubclass(cls, Base)
reveal_type(cls, expected_text="type[Base]")
_ = cls()
def func2(cls: Any):
assert isinstance(cls, type)
reveal_type(cls, expected_text="type")
assert issubclass(cls, Base)
reveal_type(cls, expected_text="type[Base]")
_ = cls()
| Base |
python | huggingface__transformers | src/transformers/models/eomt/modeling_eomt.py | {
"start": 41584,
"end": 42438
} | class ____(nn.Module):
def __init__(self, config: EomtConfig):
super().__init__()
hidden_size = config.hidden_size
self.conv1 = nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2)
self.activation = ACT2FN[config.hidden_act]
self.conv2 = nn.Conv2d(
hidden_size,
hidden_size,
kernel_size=3,
padding=1,
groups=hidden_size,
bias=False,
)
self.layernorm2d = EomtLayerNorm2d(hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layernorm2d(hidden_states)
return hidden_states
| EomtScaleLayer |
python | getsentry__sentry | tests/sentry/replays/endpoints/test_project_replay_recording_segment_details.py | {
"start": 3723,
"end": 4545
} | class ____(EnvironmentBase, ReplaysSnubaTestCase):
def init_environment(self) -> None:
metadata = RecordingSegmentStorageMeta(
project_id=self.project.id,
replay_id=self.replay_id,
segment_id=self.segment_id,
retention_days=30,
)
self.segment_filename = make_recording_filename(metadata)
self.store_replays(
mock_replay(
datetime.datetime.now() - datetime.timedelta(seconds=22),
metadata.project_id,
metadata.replay_id,
segment_id=metadata.segment_id,
retention_days=metadata.retention_days,
)
)
StorageBlob().set(metadata, zlib.compress(pack(self.segment_data, None)))
| PackedStorageReplayRecordingSegmentDetailsTestCase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 88347,
"end": 93198
} | class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "goals_and_funnels_report_hourly"
report_file = "goals_and_funnels_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "goals_and_funnels_report_hourly_incremental"
report_file_with_records_further_start_date = "goals_and_funnels_report_hourly_with_records_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = "goals_and_funnels_report_hourly_incremental_with_records_further_cursor"
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "GoalsAndFunnelsReport", "ReturnOnlyCompleteData": false, "Type": "GoalsAndFunnelsReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Keyword", "KeywordId", "Goal", "AllConversions", "Assists", "AllRevenue", "GoalId", "DeviceType", "DeviceOS", "AccountStatus", "CampaignStatus", "AdGroupStatus", "KeywordStatus", "GoalType", "ViewThroughConversions", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "GoalsAndFunnelsReport", "ReturnOnlyCompleteData": false, "Type": "GoalsAndFunnelsReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Keyword", "KeywordId", "Goal", "AllConversions", "Assists", "AllRevenue", "GoalId", "DeviceType", "DeviceOS", "AccountStatus", "CampaignStatus", "AdGroupStatus", "KeywordStatus", "GoalType", "ViewThroughConversions", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "GoalsAndFunnelsReport", "ReturnOnlyCompleteData": false, "Type": "GoalsAndFunnelsReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "Keyword", "KeywordId", "Goal", "AllConversions", "Assists", "AllRevenue", "GoalId", "DeviceType", "DeviceOS", "AccountStatus", "CampaignStatus", "AdGroupStatus", "KeywordStatus", "GoalType", "ViewThroughConversions", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestGoalsAndFunnelsReportHourlyStream |
python | jazzband__django-model-utils | model_utils/fields.py | {
"start": 577,
"end": 960
} | class ____(DateTimeFieldBase):
"""
A DateTimeField that automatically populates itself at
object creation.
By default, sets editable=False, default=datetime.now.
"""
def __init__(self, *args: Any, **kwargs: Any):
kwargs.setdefault('editable', False)
kwargs.setdefault('default', now)
super().__init__(*args, **kwargs)
| AutoCreatedField |
python | doocs__leetcode | solution/2300-2399/2367.Number of Arithmetic Triplets/Solution2.py | {
"start": 0,
"end": 183
} | class ____:
def arithmeticTriplets(self, nums: List[int], diff: int) -> int:
vis = set(nums)
return sum(x + diff in vis and x + diff * 2 in vis for x in nums)
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 713534,
"end": 721708
} | class ____(
DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefnumber
):
"""
StrokeOpacityDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeOpacity"
@overload
def bandPosition(self, _: float, /) -> StrokeOpacityDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeOpacityDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeOpacityDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> StrokeOpacityDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> StrokeOpacityDatum: ...
@overload
def type(self, _: Type_T, /) -> StrokeOpacityDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
| StrokeOpacityDatum |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.