language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 52776,
"end": 55218
} | class ____(Operation):
def __init__(self, from_logits=False, *, name=None):
super().__init__(name=name)
self.from_logits = from_logits
def call(self, target, output):
return backend.nn.binary_crossentropy(
target, output, from_logits=self.from_logits
)
def compute_output_spec(self, target, output):
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
return KerasTensor(output.shape, dtype=output.dtype)
@keras_export(
[
"keras.ops.binary_crossentropy",
"keras.ops.nn.binary_crossentropy",
]
)
def binary_crossentropy(target, output, from_logits=False):
"""Computes binary cross-entropy loss between target and output tensor.
The binary cross-entropy loss is commonly used in binary
classification tasks where each input sample belongs to one
of the two classes. It measures the dissimilarity between the
target and output probabilities or logits.
Args:
target: The target tensor representing the true binary labels.
Its shape should match the shape of the `output` tensor.
output: The output tensor representing the predicted probabilities
or logits. Its shape should match the shape of the
`target` tensor.
from_logits: (optional) Whether `output` is a tensor of logits or
probabilities.
Set it to `True` if `output` represents logits; otherwise,
set it to `False` if `output` represents probabilities.
Defaults to `False`.
Returns:
Integer tensor: The computed binary cross-entropy loss between
`target` and `output`.
Example:
>>> target = keras.ops.convert_to_tensor([0, 1, 1, 0])
>>> output = keras.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2])
>>> binary_crossentropy(target, output)
array([0.10536054 0.10536054 0.22314355 0.22314355],
shape=(4,), dtype=float32)
"""
if any_symbolic_tensors((target, output)):
return BinaryCrossentropy(from_logits=from_logits).symbolic_call(
target, output
)
return backend.nn.binary_crossentropy(
target, output, from_logits=from_logits
)
| BinaryCrossentropy |
python | docker__docker-py | docker/types/services.py | {
"start": 226,
"end": 2574
} | class ____(dict):
"""
Describe the task specification to be used when creating or updating a
service.
Args:
container_spec (ContainerSpec): Container settings for containers
started as part of this task.
log_driver (DriverConfig): Log configuration for containers created as
part of the service.
resources (Resources): Resource requirements which apply to each
individual container created as part of the service.
restart_policy (RestartPolicy): Specification for the restart policy
which applies to containers created as part of this service.
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`NetworkAttachmentConfig` to attach the service to.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
"""
def __init__(self, container_spec, resources=None, restart_policy=None,
placement=None, log_driver=None, networks=None,
force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
if restart_policy:
self['RestartPolicy'] = restart_policy
if placement:
if isinstance(placement, list):
placement = Placement(constraints=placement)
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
if networks:
self['Networks'] = convert_service_networks(networks)
if force_update is not None:
if not isinstance(force_update, int):
raise TypeError('force_update must be an integer')
self['ForceUpdate'] = force_update
@property
def container_spec(self):
return self.get('ContainerSpec')
@property
def resources(self):
return self.get('Resources')
@property
def restart_policy(self):
return self.get('RestartPolicy')
@property
def placement(self):
return self.get('Placement')
| TaskTemplate |
python | pypa__setuptools | setuptools/_vendor/jaraco/functools/__init__.py | {
"start": 8022,
"end": 16642
} | class ____:
"""Rate-limit a function (or other callable)."""
def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler):
func = func.func
self.func = func
self.max_rate = max_rate
self.reset()
def reset(self):
self.last_called = 0
def __call__(self, *args, **kwargs):
self._wait()
return self.func(*args, **kwargs)
def _wait(self):
"""Ensure at least 1/max_rate seconds from last call."""
elapsed = time.time() - self.last_called
must_wait = 1 / self.max_rate - elapsed
time.sleep(max(0, must_wait))
self.last_called = time.time()
def __get__(self, obj, owner=None):
return first_invoke(self._wait, functools.partial(self.func, obj))
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
method_caller = first_invoke(
lambda: warnings.warn(
'`jaraco.functools.method_caller` is deprecated, '
'use `operator.methodcaller` instead',
DeprecationWarning,
stacklevel=3,
),
operator.methodcaller,
)
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
"""
Given a callable func, trap the indicated exceptions
for up to 'retries' times, invoking cleanup on the
exception. On the final attempt, allow any exceptions
to propagate.
"""
attempts = itertools.count() if retries == float('inf') else range(retries)
for _ in attempts:
try:
return func()
except trap:
cleanup()
return func()
def retry(*r_args, **r_kwargs):
"""
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs)
return wrapper
return decorate
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements.
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.consume, print_all, func)
return functools.wraps(func)(print_results)
def pass_none(func):
"""
Wrap func so it's not called if its first param is None.
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, /, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return None
return wrapper
def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns)
def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instance to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, /, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
def except_(*exceptions, replace=None, use=None):
"""
Replace the indicated exceptions, if raised, with the indicated
literal replacement or evaluated expression (if present).
>>> safe_int = except_(ValueError)(int)
>>> safe_int('five')
>>> safe_int('5')
5
Specify a literal replacement with ``replace``.
>>> safe_int_r = except_(ValueError, replace=0)(int)
>>> safe_int_r('five')
0
Provide an expression to ``use`` to pass through particular parameters.
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
>>> safe_int_pt('five')
'five'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
try:
return eval(use)
except TypeError:
return replace
return wrapper
return decorate
def identity(x):
"""
Return the argument.
>>> o = object()
>>> identity(o) is o
True
"""
return x
def bypass_when(check, *, _op=identity):
"""
Decorate a function to return its parameter when ``check``.
>>> bypassed = [] # False
>>> @bypass_when(bypassed)
... def double(x):
... return x * 2
>>> double(2)
4
>>> bypassed[:] = [object()] # True
>>> double(2)
2
"""
def decorate(func):
@functools.wraps(func)
def wrapper(param, /):
return param if _op(check) else func(param)
return wrapper
return decorate
def bypass_unless(check):
"""
Decorate a function to return its parameter unless ``check``.
>>> enabled = [object()] # True
>>> @bypass_unless(enabled)
... def double(x):
... return x * 2
>>> double(2)
4
>>> del enabled[:] # False
>>> double(2)
2
"""
return bypass_when(check, _op=operator.not_)
@functools.singledispatch
def _splat_inner(args, func):
"""Splat args to func."""
return func(*args)
@_splat_inner.register
def _(args: collections.abc.Mapping, func):
"""Splat kargs to func as kwargs."""
return func(**args)
def splat(func):
"""
Wrap func to expect its parameters to be passed positionally in a tuple.
Has a similar effect to that of ``itertools.starmap`` over
simple ``map``.
>>> pairs = [(-1, 1), (0, 2)]
>>> more_itertools.consume(itertools.starmap(print, pairs))
-1 1
0 2
>>> more_itertools.consume(map(splat(print), pairs))
-1 1
0 2
The approach generalizes to other iterators that don't have a "star"
equivalent, such as a "starfilter".
>>> list(filter(splat(operator.add), pairs))
[(0, 2)]
Splat also accepts a mapping argument.
>>> def is_nice(msg, code):
... return "smile" in msg or code == 0
>>> msgs = [
... dict(msg='smile!', code=20),
... dict(msg='error :(', code=1),
... dict(msg='unknown', code=0),
... ]
>>> for msg in filter(splat(is_nice), msgs):
... print(msg)
{'msg': 'smile!', 'code': 20}
{'msg': 'unknown', 'code': 0}
"""
return functools.wraps(func)(functools.partial(_splat_inner, func=func))
| Throttler |
python | django__django | django/db/models/deletion.py | {
"start": 277,
"end": 465
} | class ____(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super().__init__(msg, protected_objects)
| ProtectedError |
python | walkccc__LeetCode | solutions/1798. Maximum Number of Consecutive Values You Can Make/1798.py | {
"start": 0,
"end": 228
} | class ____:
def getMaximumConsecutive(self, coins: list[int]) -> int:
ans = 1 # the next value we want to make
for coin in sorted(coins):
if coin > ans:
return ans
ans += coin
return ans
| Solution |
python | sympy__sympy | sympy/polys/matrices/exceptions.py | {
"start": 492,
"end": 564
} | class ____(DMError):
"""domains do not match"""
pass
| DMDomainError |
python | kamyu104__LeetCode-Solutions | Python/all-oone-data-structure.py | {
"start": 941,
"end": 3109
} | class ____(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.bucket_of_key = {}
self.buckets = LinkedList()
def inc(self, key):
"""
Inserts a new key <Key> with value 1. Or increments an existing key by 1.
:type key: str
:rtype: void
"""
if key not in self.bucket_of_key:
self.bucket_of_key[key] = self.buckets.insert(self.buckets.begin(), Node(0, set([key])))
bucket, next_bucket = self.bucket_of_key[key], self.bucket_of_key[key].next
if next_bucket is self.buckets.end() or next_bucket.value > bucket.value+1:
next_bucket = self.buckets.insert(next_bucket, Node(bucket.value+1, set()))
next_bucket.keys.add(key)
self.bucket_of_key[key] = next_bucket
bucket.keys.remove(key)
if not bucket.keys:
self.buckets.erase(bucket)
def dec(self, key):
"""
Decrements an existing key by 1. If Key's value is 1, remove it from the data structure.
:type key: str
:rtype: void
"""
if key not in self.bucket_of_key:
return
bucket, prev_bucket = self.bucket_of_key[key], self.bucket_of_key[key].prev
self.bucket_of_key.pop(key, None)
if bucket.value > 1:
if bucket is self.buckets.begin() or prev_bucket.value < bucket.value-1:
prev_bucket = self.buckets.insert(bucket, Node(bucket.value-1, set()))
prev_bucket.keys.add(key)
self.bucket_of_key[key] = prev_bucket
bucket.keys.remove(key)
if not bucket.keys:
self.buckets.erase(bucket)
def getMaxKey(self):
"""
Returns one of the keys with maximal value.
:rtype: str
"""
if self.buckets.empty():
return ""
return iter(self.buckets.back().keys).next()
def getMinKey(self):
"""
Returns one of the keys with Minimal value.
:rtype: str
"""
if self.buckets.empty():
return ""
return iter(self.buckets.front().keys).next()
| AllOne |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_emr.py | {
"start": 9230,
"end": 10340
} | class ____(BaseAwsLinksTestCase):
link_class = EmrServerlessS3LogsLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "us-west-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"log_uri": "s3://bucket-name/logs/",
"application_id": "app-id",
"job_run_id": "job-run-id",
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/s3/buckets/bucket-name?region=us-west-1&prefix=logs/applications/app-id/jobs/job-run-id/"
),
region_name="us-west-1",
aws_partition="aws",
log_uri="s3://bucket-name/logs/",
application_id="app-id",
job_run_id="job-run-id",
)
| TestEmrServerlessS3LogsLink |
python | optuna__optuna | optuna/terminator/erroreval.py | {
"start": 3536,
"end": 4145
} | class ____(BaseErrorEvaluator):
"""An error evaluator that always returns a constant value.
This evaluator can be used to terminate the optimization when the evaluated improvement
potential is below the fixed threshold.
Args:
constant:
A user-specified constant value to always return as an error estimate.
"""
def __init__(self, constant: float) -> None:
self._constant = constant
def evaluate(
self,
trials: list[FrozenTrial],
study_direction: StudyDirection,
) -> float:
return self._constant
| StaticErrorEvaluator |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 30449,
"end": 33852
} | class ____(Response):
"""
Response of queues.get_all endpoint.
:param queues: Queues list
:type queues: Sequence[Queue]
"""
_service = "queues"
_action = "get_all"
_version = "2.9"
_schema = {
"definitions": {
"entry": {
"properties": {
"added": {
"description": "Time this entry was added to the queue",
"format": "date-time",
"type": ["string", "null"],
},
"task": {
"description": "Queued task ID",
"type": ["string", "null"],
},
},
"type": "object",
},
"queue": {
"properties": {
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Queue creation time",
"format": "date-time",
"type": ["string", "null"],
},
"entries": {
"description": "List of ordered queue entries",
"items": {"$ref": "#/definitions/entry"},
"type": ["array", "null"],
},
"id": {"description": "Queue id", "type": ["string", "null"]},
"name": {"description": "Queue name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"queues": {
"description": "Queues list",
"items": {"$ref": "#/definitions/queue"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, queues: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.queues = queues
@schema_property("queues")
def queues(self) -> Optional[List[Any]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Queue.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "queues", Queue, is_array=True)
self._property_queues = value
| GetAllResponse |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 140168,
"end": 140922
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
cluster_id: str = Field(
..., description="Canonical identifier for the cluster. This field is required."
)
details: EventDetails = Field(
..., description="The event details. This field is required."
)
timestamp: Optional[int] = Field(
None,
description=(
"The timestamp when the event occurred, stored as the number of"
" milliseconds since the unix epoch. Assigned by the Timeline service."
),
)
type: ClusterEventType = Field(
..., description="The event type. This field is required."
)
| ClusterEvent |
python | explosion__spaCy | website/setup/jinja_to_js.py | {
"start": 2394,
"end": 4061
} | class ____(Exception):
"""
Raised when an {% extends %} is encountered. At this point the parent template is
loaded and all blocks defined in the current template passed to it.
"""
pass
@contextlib.contextmanager
def option(current_kwargs, **kwargs):
"""
Context manager for temporarily setting a keyword argument and
then restoring it to whatever it was before.
"""
tmp_kwargs = dict((key, current_kwargs.get(key)) for key, value in kwargs.items())
current_kwargs.update(kwargs)
yield
current_kwargs.update(tmp_kwargs)
def is_method_call(node, method_name):
"""
Returns True if `node` is a method call for `method_name`. `method_name`
can be either a string or an iterable of strings.
"""
if not isinstance(node, nodes.Call):
return False
if isinstance(node.node, nodes.Getattr):
# e.g. foo.bar()
method = node.node.attr
elif isinstance(node.node, nodes.Name):
# e.g. bar()
method = node.node.name
elif isinstance(node.node, nodes.Getitem):
# e.g. foo["bar"]()
method = node.node.arg.value
else:
return False
if isinstance(method_name, (list, tuple)):
return method in method_name
return method == method_name
def is_loop_helper(node):
"""
Returns True is node is a loop helper e.g. {{ loop.index }} or {{ loop.first }}
"""
return (
hasattr(node, "node")
and isinstance(node.node, nodes.Name)
and node.node.name == "loop"
)
def temp_var_names_generator():
x = 0
while True:
yield "__$%s" % x
x += 1
| ExtendsException |
python | ansible__ansible | lib/ansible/cli/playbook.py | {
"start": 1066,
"end": 10480
} | class ____(CLI):
""" the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
See the project home page (https://docs.ansible.com) for more information. """
name = 'ansible-playbook'
USES_CONNECTION = True
def init_parser(self):
# create parser for CLI options
super(PlaybookCLI, self).init_parser(
desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
opt_help.add_connect_options(self.parser)
opt_help.add_meta_options(self.parser)
opt_help.add_runas_options(self.parser)
opt_help.add_subset_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
# ansible playbook specific opts
self.parser.add_argument('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
self.parser.add_argument('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
self.parser.add_argument('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
def post_process_args(self, options):
# for listing, we need to know if user had tag input
# capture here as parent function sets defaults for tags
havetags = bool(options.tags or options.skip_tags)
options = super(PlaybookCLI, self).post_process_args(options)
if options.listtags:
# default to all tags (including never), when listing tags
# unless user specified tags
if not havetags:
options.tags = ['never', 'all']
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def run(self):
super(PlaybookCLI, self).run()
# Note: slightly wrong, this is written so that implicit localhost
# manages passwords
sshpass = None
becomepass = None
passwords = {}
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
# also prep plugin paths
b_playbook_dirs = []
for playbook in context.CLIARGS['args']:
# resolve if it is collection playbook with FQCN notation, if not, leaves unchanged
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_collection = resource[2]
else:
# not an FQCN so must be a file
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# check if playbook is from collection (path can be passed directly)
playbook_collection = _get_collection_name_from_path(playbook)
# don't add collection playbooks to adjacency search path
if not playbook_collection:
# setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
add_all_plugin_dirs(b_playbook_dir)
b_playbook_dirs.append(b_playbook_dir)
if b_playbook_dirs:
# allow collections adjacent to these playbooks
# we use list copy to avoid opening up 'adjacency' in the previous loop
AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
# don't deal with privilege escalation or passwords when we don't need to
if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# create base objects
loader, inventory, variable_manager = self._play_prereqs()
# (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
CLI.get_host_list(inventory, context.CLIARGS['subset'])
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
variable_manager=variable_manager, loader=loader,
passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
if play._included_path is not None:
loader.set_basedir(play._included_path)
else:
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
# show host list if we were able to template into a list
try:
host_list = ','.join(play.hosts)
except TypeError:
host_list = ''
msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if context.CLIARGS['listhosts']:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
taskmsg = ''
if context.CLIARGS['listtasks']:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action in C._ACTION_META and task.implicit:
continue
all_tags.update(task.tags)
if context.CLIARGS['listtasks']:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(play=play)
for block in play.compile():
block = block.filter_tagged_tasks(all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if context.CLIARGS['listtags']:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
def main(args=None):
PlaybookCLI.cli_executor(args)
if __name__ == '__main__':
main()
| PlaybookCLI |
python | TheAlgorithms__Python | data_structures/linked_list/__init__.py | {
"start": 299,
"end": 431
} | class ____:
def __init__(self, item: Any, next: Any) -> None: # noqa: A002
self.item = item
self.next = next
| Node |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/dqn_tf.py | {
"start": 1277,
"end": 7001
} | class ____:
def __init__(self, D, K, hidden_layer_sizes, gamma, max_experiences=10000, min_experiences=100, batch_sz=32):
self.K = K
# create the graph
self.layers = []
M1 = D
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, K, lambda x: x)
self.layers.append(layer)
# collect params for copy
self.params = []
for layer in self.layers:
self.params += layer.params
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')
self.G = tf.placeholder(tf.float32, shape=(None,), name='G')
self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions')
# calculate output and cost
Z = self.X
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = Z
self.predict_op = Y_hat
selected_action_values = tf.reduce_sum(
Y_hat * tf.one_hot(self.actions, K),
reduction_indices=[1]
)
cost = tf.reduce_sum(tf.square(self.G - selected_action_values))
self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost)
# self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost)
# create replay memory
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = max_experiences
self.min_experiences = min_experiences
self.batch_sz = batch_sz
self.gamma = gamma
def set_session(self, session):
self.session = session
def copy_from(self, other):
# collect all the ops
ops = []
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = self.session.run(q)
op = p.assign(actual)
ops.append(op)
# now run them all
self.session.run(ops)
def predict(self, X):
X = np.atleast_2d(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
def train(self, target_network):
# sample a random batch from buffer, do an iteration of GD
if len(self.experience['s']) < self.min_experiences:
# don't do anything if we don't have enough experience
return
# randomly select a batch
idx = np.random.choice(len(self.experience['s']), size=self.batch_sz, replace=False)
# print("idx:", idx)
states = [self.experience['s'][i] for i in idx]
actions = [self.experience['a'][i] for i in idx]
rewards = [self.experience['r'][i] for i in idx]
next_states = [self.experience['s2'][i] for i in idx]
dones = [self.experience['done'][i] for i in idx]
next_Q = np.max(target_network.predict(next_states), axis=1)
targets = [r + self.gamma*next_q if not done else r for r, next_q, done in zip(rewards, next_Q, dones)]
# call optimizer
self.session.run(
self.train_op,
feed_dict={
self.X: states,
self.G: targets,
self.actions: actions
}
)
def add_experience(self, s, a, r, s2, done):
if len(self.experience['s']) >= self.max_experiences:
self.experience['s'].pop(0)
self.experience['a'].pop(0)
self.experience['r'].pop(0)
self.experience['s2'].pop(0)
self.experience['done'].pop(0)
self.experience['s'].append(s)
self.experience['a'].append(a)
self.experience['r'].append(r)
self.experience['s2'].append(s2)
self.experience['done'].append(done)
def sample_action(self, x, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
X = np.atleast_2d(x)
return np.argmax(self.predict(X)[0])
def play_one(env, model, tmodel, eps, gamma, copy_period):
global global_iters
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
totalreward += reward
if done:
reward = -200
# update the model
model.add_experience(prev_observation, action, reward, observation, done)
model.train(tmodel)
iters += 1
global_iters += 1
if global_iters % copy_period == 0:
tmodel.copy_from(model)
return totalreward
def main():
env = gym.make('CartPole-v0')
gamma = 0.99
copy_period = 50
D = len(env.observation_space.sample())
K = env.action_space.n
sizes = [200,200]
model = DQN(D, K, sizes, gamma)
tmodel = DQN(D, K, sizes, gamma)
init = tf.global_variables_initializer()
session = tf.InteractiveSession()
session.run(init)
model.set_session(session)
tmodel.set_session(session)
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 500
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
eps = 1.0/np.sqrt(n+1)
totalreward = play_one(env, model, tmodel, eps, gamma, copy_period)
totalrewards[n] = totalreward
if n % 100 == 0:
print("episode:", n, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean())
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
if __name__ == '__main__':
main()
| DQN |
python | kamyu104__LeetCode-Solutions | Python/the-k-th-lexicographical-string-of-all-happy-strings-of-length-n.py | {
"start": 29,
"end": 551
} | class ____(object):
def getHappyString(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
base = 2**(n-1)
if k > 3*base:
return ""
result = [chr(ord('a')+(k-1)//base)]
while base > 1:
k -= (k-1)//base*base
base //= 2
result.append(('a' if result[-1] != 'a' else 'b') if (k-1)//base == 0 else
('c' if result[-1] != 'c' else 'b'))
return "".join(result)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1066432,
"end": 1066891
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (Enterprise, Organization)
########################################################################
# Schema Entry Points
########################################################################
# Wire the generated root operation types into the schema object.
graphql_schema.query_type = Query
graphql_schema.mutation_type = Mutation
# No subscription root type is defined for this schema.
graphql_schema.subscription_type = None
| VerifiableDomainOwner |
python | tiangolo__fastapi | docs_src/dependencies/tutorial008d.py | {
"start": 71,
"end": 691
} | class ____(Exception):
pass
def get_username():
    """Yield a fixed username as a FastAPI dependency.

    If the request handler raises InternalError, it propagates back to the
    `yield` here; we log that fact and re-raise so the error is NOT swallowed.
    """
    try:
        yield "Rick"
    except InternalError:
        print("We don't swallow the internal error here, we raise again 😎")
        raise
@app.get("/items/{item_id}")
def get_item(item_id: str, username: str = Depends(get_username)):
    """Return `item_id` if it is "plumbus".

    Raises:
        InternalError: for "portal-gun" (propagates through the
            `get_username` dependency, which re-raises it).
        HTTPException: 404 for any other unknown item.
    """
    if item_id == "portal-gun":
        raise InternalError(
            f"The portal gun is too dangerous to be owned by {username}"
        )
    if item_id != "plumbus":
        raise HTTPException(
            status_code=404, detail="Item not found, there's only a plumbus here"
        )
    return item_id
| InternalError |
python | ray-project__ray | rllib/connectors/common/add_time_dim_to_batch_and_zero_pad.py | {
"start": 741,
"end": 12363
} | class ____(ConnectorV2):
"""Adds an extra time dim (axis=1) to all data currently in the batch.
Note: This is one of the default env-to-module or Learner ConnectorV2 pieces that
are added automatically by RLlib into every env-to-module/Learner connector
pipeline, unless `config.add_default_connectors_to_env_to_module_pipeline` or
`config.add_default_connectors_to_learner_pipeline ` are set to
False.
The default env-to-module connector pipeline is:
[
[0 or more user defined ConnectorV2 pieces],
AddObservationsFromEpisodesToBatch,
AddTimeDimToBatchAndZeroPad,
AddStatesFromEpisodesToBatch,
AgentToModuleMapping, # only in multi-agent setups!
BatchIndividualItems,
NumpyToTensor,
]
The default Learner connector pipeline is:
[
[0 or more user defined ConnectorV2 pieces],
AddObservationsFromEpisodesToBatch,
AddColumnsFromEpisodesToTrainBatch,
AddTimeDimToBatchAndZeroPad,
AddStatesFromEpisodesToBatch,
AgentToModuleMapping, # only in multi-agent setups!
BatchIndividualItems,
NumpyToTensor,
]
If the RLModule is stateful, an extra time dim at axis=1 is added to all data in the
batch.
Also, all data (observations, rewards, etc.. if applicable) will be properly
reshaped into (B, T=max_seq_len (learner) or 1 (env-to-module), ...) and will be
zero-padded, if necessary.
This ConnectorV2:
- Operates on a list of Episode objects.
- Adds a time dim at axis=1 to all columns already in the batch.
- In case of a learner connector pipeline, zero-pads the data according to the
module's `self.model_config["max_seq_len"]` setting and reshapes all data to
(B, T, ...). The connector also adds SEQ_LENS information and loss mask
information to the batch based on the added zero-padding.
- Does NOT alter any data in the given episodes.
- Can be used in EnvToModule and Learner connector pipelines.
.. testcode::
from ray.rllib.connectors.common import AddTimeDimToBatchAndZeroPad
from ray.rllib.core.columns import Columns
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.test_utils import check
# Create a simple dummy class, pretending to be an RLModule with
# `get_initial_state`, `is_stateful` and `model_config` property defined:
class MyStateModule:
# dummy config
model_config = {"max_seq_len": 3}
def is_stateful(self):
return True
def get_initial_state(self):
return 0.0
# Create an already reset episode. Expect the connector to add a time-dim to the
# reset observation.
episode = SingleAgentEpisode(observations=[0])
rl_module = MyStateModule()
# Create an instance of this class (as an env-to-module connector).
connector = AddTimeDimToBatchAndZeroPad(as_learner_connector=False)
# Call the connector.
output_batch = connector(
rl_module=rl_module,
batch={Columns.OBS: [0]},
episodes=[episode],
shared_data={},
)
# The output data's OBS key should now be reshaped to (B, T)
check(output_batch[Columns.OBS], [[0]])
# Create a SingleAgentEpisodes containing 5 observations,
# 4 actions and 4 rewards.
episode = SingleAgentEpisode(
observations=[0, 1, 2, 3, 4],
actions=[1, 2, 3, 4],
rewards=[1.0, 2.0, 3.0, 4.0],
len_lookback_buffer=0,
)
# Call the connector.
output_batch = connector(
rl_module=rl_module,
batch={Columns.OBS: [4]},
episodes=[episode],
shared_data={},
)
# The output data's OBS, ACTIONS, and REWARDS keys should now all have a time
# rank.
check(
# Expect the episode's last OBS.
output_batch[Columns.OBS], [[4]],
)
# Create a new connector as a learner connector with a RNN seq len of 4 (for
# testing purposes only). Passing the same data through this learner connector,
# we expect the data to also be zero-padded.
connector = AddTimeDimToBatchAndZeroPad(as_learner_connector=True)
# Call the connector.
output_batch = connector(
rl_module=rl_module,
batch={Columns.OBS: {(episode.id_,): [0, 1, 2, 3]}},
episodes=[episode],
shared_data={},
)
check(output_batch[Columns.OBS], {(episode.id_,): [[0, 1, 2], [3, 0, 0]]})
"""
    def __init__(
        self,
        input_observation_space: Optional[gym.Space] = None,
        input_action_space: Optional[gym.Space] = None,
        *,
        as_learner_connector: bool = False,
        **kwargs,
    ):
        """Initializes an AddTimeDimToBatchAndZeroPad instance.

        Args:
            as_learner_connector: Whether this connector is part of a Learner
                connector pipeline, as opposed to an env-to-module pipeline.
                As a Learner connector, batch data is split into chunks of
                `max_seq_len` and zero-padded; as an env-to-module connector,
                only a single-timestep time rank is added.
        """
        super().__init__(
            input_observation_space=input_observation_space,
            input_action_space=input_action_space,
            **kwargs,
        )
        # Controls the branch taken in `__call__` (zero-pad vs. single-ts rank).
        self._as_learner_connector = as_learner_connector
    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        """Adds a time dim to batch columns; zero-pads when a learner connector.

        Mutates `batch` in place (and flags progress in `shared_data` for
        downstream pipeline pieces), then returns it.
        """
        # If not stateful OR STATE_IN already in data, early out.
        if (
            rl_module is None
            or not rl_module.is_stateful()
            or Columns.STATE_IN in batch
        ):
            return batch

        # Make all inputs (other than STATE_IN) have an additional T-axis.
        # Since data has not been batched yet (we are still operating on lists in the
        # batch), we add this time axis as 0 (not 1). When we batch, the batch axis will
        # be 0 and the time axis will be 1.
        # Also, let module-to-env pipeline know that we had added a single timestep
        # time rank to the data (to remove it again).
        if not self._as_learner_connector:
            for column in batch.keys():
                self.foreach_batch_item_change_in_place(
                    batch=batch,
                    column=column,
                    func=lambda item, eps_id, aid, mid: (
                        item
                        if mid is not None and not rl_module[mid].is_stateful()
                        # Expand on axis 0 (the to-be-time-dim) if item has not been
                        # batched yet, otherwise axis=1 (the time-dim).
                        else tree.map_structure(
                            lambda s: np.expand_dims(
                                s, axis=(1 if isinstance(s, BatchedNdArray) else 0)
                            ),
                            item,
                        )
                    ),
                )
            shared_data["_added_single_ts_time_rank"] = True
        else:
            # Before adding STATE_IN to the `data`, zero-pad existing data and batch
            # into max_seq_len chunks.
            for column, column_data in batch.copy().items():
                # Do not zero-pad INFOS column.
                if column == Columns.INFOS:
                    continue
                for key, item_list in column_data.items():
                    # Multi-agent case AND RLModule is not stateful -> Do not zero-pad
                    # for this model.
                    # Keys are (eps_id,) or (eps_id, agent_id, module_id) tuples.
                    assert isinstance(key, tuple)
                    mid = None
                    if len(key) == 3:
                        eps_id, aid, mid = key
                        if not rl_module[mid].is_stateful():
                            continue
                    column_data[key] = split_and_zero_pad(
                        item_list,
                        max_seq_len=self._get_max_seq_len(rl_module, module_id=mid),
                    )
                    # TODO (sven): Remove this hint/hack once we are not relying on
                    #  SampleBatch anymore (which has to set its property
                    #  zero_padded=True when shuffling).
                    shared_data[
                        (
                            "_zero_padded_for_mid="
                            f"{mid if mid is not None else DEFAULT_MODULE_ID}"
                        )
                    ] = True

        for sa_episode in self.single_agent_episode_iterator(
            # If Learner connector, get all episodes (for train batch).
            # If EnvToModule, get only those ongoing episodes that just had their
            # agent step (b/c those are the ones we need to compute actions for next).
            episodes,
            agents_that_stepped_only=False,
        ):
            # Multi-agent case: Extract correct single agent RLModule (to get its
            # individual state).
            if sa_episode.module_id is not None:
                sa_module = rl_module[sa_episode.module_id]
            else:
                sa_module = (
                    rl_module[DEFAULT_MODULE_ID]
                    if isinstance(rl_module, MultiRLModule)
                    else rl_module
                )
            # This single-agent RLModule is NOT stateful -> Skip.
            if not sa_module.is_stateful():
                continue

            max_seq_len = sa_module.model_config["max_seq_len"]

            # Also, create the loss mask (b/c of our now possibly zero-padded data)
            # as well as the seq_lens array and add these to `data` as well.
            mask, seq_lens = create_mask_and_seq_lens(len(sa_episode), max_seq_len)
            self.add_n_batch_items(
                batch=batch,
                column=Columns.SEQ_LENS,
                items_to_add=seq_lens,
                num_items=len(seq_lens),
                single_agent_episode=sa_episode,
            )
            if not shared_data.get("_added_loss_mask_for_valid_episode_ts"):
                self.add_n_batch_items(
                    batch=batch,
                    column=Columns.LOSS_MASK,
                    items_to_add=mask,
                    num_items=len(mask),
                    single_agent_episode=sa_episode,
                )
        return batch
def _get_max_seq_len(self, rl_module, module_id=None):
if not isinstance(rl_module, MultiRLModule):
mod = rl_module
elif module_id:
mod = rl_module[module_id]
else:
mod = next(iter(rl_module.values()))
if "max_seq_len" not in mod.model_config:
raise ValueError(
"You are using a stateful RLModule and are not providing a "
"'max_seq_len' key inside your `model_config`. You can set this "
"dict and/or override keys in it via `config.rl_module("
"model_config={'max_seq_len': [some int]})`."
)
return mod.model_config["max_seq_len"]
| AddTimeDimToBatchAndZeroPad |
python | google__jax | tests/nn_test.py | {
"start": 3305,
"end": 31473
} | class ____(jtu.JaxTestCase):
@parameterized.product(
contract=[160, 96],
lhs_non_contract=[240, 100],
dtype=[jnp.float16, jnp.bfloat16, jnp.float32],
)
def testScaledMatmul(self, contract, lhs_non_contract, dtype):
if not jtu.is_cuda_compute_capability_at_least("10.0"):
raise unittest.SkipTest("Needs compute capability 10.0 or higher.")
# Check if float8_e8m0fnu is available
configs = create_mxfp8_configs_if_available()
batch, rhs_non_contract = 4, 256
a, b, a_q, b_q, a_scales, b_scales = _generate_quantized_tensors(
batch, lhs_non_contract, contract, rhs_non_contract,
configs, dtype=dtype,
)
out = nn.scaled_matmul(a_q, b_q, a_scales, b_scales,
preferred_element_type=dtype)
out_ref = jnp.matmul(a.astype(jnp.float32),
jnp.transpose(b, (0, 2, 1)).astype(jnp.float32))
self.assertArraysAllClose(
out, out_ref.astype(dtype), rtol=1e-3, atol=1e-3
)
@parameterized.product(
is_training=[True, False],
output_type=[jnp.float16, jnp.bfloat16, jnp.float32],
)
def testScaledDotGeneral(
self, is_training, output_type):
if not jtu.is_cuda_compute_capability_at_least("10.0"):
raise unittest.SkipTest("Needs compute capability 10.0 or higher.")
configs = create_mxfp8_configs_if_available()
cast_to_representable = partial(
quantize_dequantize,
scale=jnp.ones((1,)),
compute_dtype=jnp.float32,
)
k1, k2 = jax.random.split(jax.random.key(0), 2)
a_shape = [2, 256, 96]
b_shape = [2, 96, 160]
dimension_numbers = (([2], [1]), ([0], [0]))
a = cast_to_representable(
jax.random.uniform(k1, a_shape, minval=-1.0, dtype=output_type),
configs[0].data_type,
)
b = cast_to_representable(
jax.random.uniform(k2, b_shape, minval=-1.0, dtype=output_type),
configs[1].data_type,
)
scaled_dot_general_fn = partial(
nn.scaled_dot_general, configs=configs
)
def fwd(a, b, is_ref=False):
fn = jax.lax.dot_general if is_ref else scaled_dot_general_fn
y = fn(a, b, dimension_numbers,
preferred_element_type=output_type)
return jnp.sum(y)
if is_training:
j_train = jax.jit(jax.value_and_grad(fwd, argnums=[0, 1]))
j_train_ref = jax.jit(
jax.value_and_grad(partial(fwd, is_ref=True), argnums=[0, 1])
)
out, (x_grad, w_grad) = j_train(a, b)
out_ref, (x_grad_ref, w_grad_ref) = j_train_ref(a, b)
self.assertArraysAllClose(out, out_ref, rtol=1e-2, atol=1e-2)
self.assertArraysAllClose(x_grad, x_grad_ref, rtol=1e-2, atol=1e1)
self.assertArraysAllClose(w_grad, w_grad_ref, rtol=1e-2, atol=1e1)
else:
j_inference = jax.jit(fwd)
j_inference_ref = jax.jit(partial(fwd, is_ref=True))
out = j_inference(a, b)
out_ref = j_inference_ref(a, b)
self.assertArraysAllClose(out, out_ref, rtol=1e-2, atol=1e-2)
@parameterized.product(
dtype=[jnp.bfloat16, jnp.float16],
group_num=[1, 2, 4],
use_vmap=[False, True],
impl=['cudnn', 'xla'],
)
def testDotProductAttention(self, dtype, group_num, use_vmap, impl):
if impl == 'cudnn' and not jtu.is_cuda_compute_capability_at_least("8.0"):
raise unittest.SkipTest("Needs compute capability 8.0 or higher.")
if impl == 'cudnn' and dtype == jnp.float32:
raise unittest.SkipTest("cuDNN only supports fp16 or bf16.")
if impl == 'cudnn' and jtu.is_cuda_version_at_least(13, 0):
raise unittest.SkipTest("cuDNN creates no execution plans on CUDA 13.0.")
B, S, T, N, H, G = 2, 128, 128, 4, 32, group_num
keys = random.split(random.PRNGKey(0), 5)
Q = random.normal(keys[0], (B, T, N, H), dtype)
K = random.normal(keys[1], (B, S, N // G, H), dtype)
V = random.normal(keys[2], (B, S, N // G, H), dtype)
grad = random.normal(keys[3], (B, T, N, H), dtype)
lse_grad = random.normal(keys[4], (B, T, N), dtype)
bias, mask = None, None
sdpa = nn.dot_product_attention
sdpa_ref = partial(sdpa, implementation=None)
sdpa_ans = partial(sdpa, implementation=impl)
sdpa_ref_lse = partial(sdpa, implementation=None, return_residual=True)
sdpa_ans_lse = partial(sdpa, implementation=impl, return_residual=True)
if use_vmap:
sdpa_ans = jax.vmap(sdpa_ans, in_axes=(0, 0, 0, None, None), out_axes=0)
spda_ans_lse = jax.vmap(
sdpa_ans_lse, in_axes=(0, 0, 0, None, None), out_axes=0
)
# For testing purposes, we call the non-GQA version without vmap in the
# reference code
K_ref = jnp.repeat(K, G, axis=2)
V_ref = jnp.repeat(V, G, axis=2)
out_ref, sdpa_vjp_ref = jax.vjp(sdpa_ref, Q, K_ref, V_ref, bias, mask)
out_ans, sdpa_vjp_ans = jax.vjp(sdpa_ans, Q, K, V, bias, mask)
out_ref_lse, sdpa_vjp_ref_lse = jax.vjp(sdpa_ref_lse, Q, K_ref, V_ref, bias, mask)
out_ans_lse, sdpa_vjp_ans_lse = jax.vjp(sdpa_ans_lse, Q, K, V, bias, mask)
dQ_ref, dK_ref, dV_ref = sdpa_vjp_ref(grad)[:3]
dQ_ans, dK_ans, dV_ans = sdpa_vjp_ans(grad)[:3]
dK_ref = dK_ref.reshape(B, S, N // G, G, H).sum(axis=3)
dV_ref = dV_ref.reshape(B, S, N // G, G, H).sum(axis=3)
dQ_ref_lse, dK_ref_lse, dV_ref_lse = sdpa_vjp_ref_lse((grad, lse_grad))[:3]
dQ_ans_lse, dK_ans_lse, dV_ans_lse = sdpa_vjp_ans_lse((grad, lse_grad))[:3]
dK_ref_lse = dK_ref_lse.reshape(B, S, N // G, G, H).sum(axis=3)
dV_ref_lse = dV_ref_lse.reshape(B, S, N // G, G, H).sum(axis=3)
if impl == 'cudnn':
self.assertTrue(_check_cudnn_backend(sdpa_ans, Q, K, V, bias, mask))
self.assertTrue(_check_cudnn_backend(sdpa_vjp_ans, grad))
self.assertTrue(_check_cudnn_backend(sdpa_ans_lse, Q, K, V, bias, mask))
self.assertTrue(_check_cudnn_backend(sdpa_vjp_ans_lse, (grad, lse_grad)))
self.assertAllClose(out_ref, out_ans, atol=.01, rtol=.01)
self.assertAllClose(dQ_ref, dQ_ans, rtol=.01, atol=.01)
self.assertAllClose(dK_ref, dK_ans, rtol=.01, atol=.01)
self.assertAllClose(dV_ref, dV_ans, rtol=.01, atol=.01)
self.assertAllClose(out_ref_lse[0], out_ans_lse[0], atol=.01, rtol=.01)
self.assertAllClose(out_ref_lse[1], out_ans_lse[1], atol=.01, rtol=.01)
self.assertAllClose(dQ_ref_lse, dQ_ans_lse, rtol=.01, atol=.01)
self.assertAllClose(dK_ref_lse, dK_ans_lse, rtol=.01, atol=.01)
self.assertAllClose(dV_ref_lse, dV_ans_lse, rtol=.01, atol=.01)
@parameterized.product(
mask_mode=['bias', 'causal', 'padding', 'custom', ('causal', 'padding'),
('custom', 'padding'), ('bias', 'causal'),
('causal', 'sliding_window')],
)
def testDotProductAttentionMask(self, mask_mode):
if isinstance(mask_mode, str):
mask_mode = (mask_mode,)
if not jtu.is_cuda_compute_capability_at_least("8.0"):
raise unittest.SkipTest("Requires compute capability 8.0 or higher.")
if jtu.is_cuda_version_at_least(13, 0):
raise unittest.SkipTest("cuDNN creates no execution plans on CUDA 13.0.")
dtype = jnp.bfloat16
B, S, T, N, H = 2, 128, 128, 4, 32
keys = random.split(random.PRNGKey(0), 4)
Q = random.normal(keys[0], (B, T, N, H), dtype)
K = random.normal(keys[1], (B, S, N, H), dtype)
V = random.normal(keys[2], (B, S, N, H), dtype)
grad = random.normal(keys[3], (B, T, N, H), dtype)
bias, mask = None, None
q_seqlen, kv_seqlen = None, None
window_size = None
is_causal = 'causal' in mask_mode
if 'padding' in mask_mode:
q_seqlen = jnp.array([T // 2, T // 4], dtype=jnp.int32)
kv_seqlen = jnp.array([S // 4, S // 2], dtype=jnp.int32)
if 'custom' in mask_mode:
# Use a generated causal mask as the custom mask.
custom_mask = jnp.tril(jnp.ones((T, S), dtype=jnp.bool_))
mask = custom_mask[None, None, :, :]
if 'bias' in mask_mode:
bias = random.normal(keys[4], (1, N, T, S), dtype)
if 'sliding_window' in mask_mode:
window_size = (3, 2) if is_causal else (3, 0)
sdpa = nn.dot_product_attention
sdpa_ref = partial(sdpa, is_causal=is_causal, implementation=None)
sdpa_ans = partial(sdpa, is_causal=is_causal, implementation='cudnn')
args = (Q, K, V, bias, mask)
kwargs = {'query_seq_lengths': q_seqlen, 'key_value_seq_lengths': kv_seqlen}
# Convert the kargs to positional args for the jax.vjp.
fn_ref = lambda q, k, v, b, m, qs, kvs: sdpa_ref(
q, k, v, b, m, query_seq_lengths=qs, key_value_seq_lengths=kvs,
local_window_size=window_size,
)
fn_ans = lambda q, k, v, b, m, qs, kvs: sdpa_ans(
q, k, v, b, m, query_seq_lengths=qs, key_value_seq_lengths=kvs,
local_window_size=window_size,
)
out_ref, sdpa_vjp_ref = jax.vjp(fn_ref, *args, q_seqlen, kv_seqlen)
out_ans, sdpa_vjp_ans = jax.vjp(fn_ans, *args, q_seqlen, kv_seqlen)
dQ_ref, dK_ref, dV_ref, dbias_ref = sdpa_vjp_ref(grad)[:4]
dQ_ans, dK_ans, dV_ans, dbias_ans = sdpa_vjp_ans(grad)[:4]
# Check if cudnn backend is called.
self.assertTrue(_check_cudnn_backend(sdpa_ans, *args, **kwargs))
self.assertTrue(_check_cudnn_backend(sdpa_vjp_ans, grad))
self.assertAllClose(out_ref, out_ans, atol=.01, rtol=.01)
self.assertAllClose(dQ_ref, dQ_ans, rtol=.02, atol=.02)
self.assertAllClose(dK_ref, dK_ans, rtol=.02, atol=.02)
self.assertAllClose(dV_ref, dV_ans, rtol=.01, atol=.01)
self.assertAllClose(dbias_ref, dbias_ans, rtol=.02, atol=.02)
@parameterized.product(
batch_size=[1, 16],
use_vmap=[False, True],
)
def testDotProductAttentionBiasGradient(self, batch_size, use_vmap):
if not jtu.is_cuda_compute_capability_at_least("8.0"):
raise unittest.SkipTest("Requires compute capability 8.0 or higher.")
if jtu.is_cuda_version_at_least(13, 0):
raise unittest.SkipTest("cuDNN creates no execution plans on CUDA 13.0.")
dtype = jnp.bfloat16
B, S, N, H = batch_size, 128, 4, 32
keys = random.split(random.PRNGKey(0), 2)
x = random.normal(keys[0], (B, S, N, H), dtype)
bias = random.normal(keys[1], (B, N, S, S), dtype=dtype)
mask = jnp.ones((1, 1, S), dtype=jnp.bool_)
def attention(x, bias, mask, impl):
return jax.nn.dot_product_attention(
query=x,
key=x,
value=x,
bias=bias,
mask=mask,
is_causal=False,
implementation=impl,
)
attn_ref = partial(attention, impl=None)
attn_ans = partial(attention, impl='cudnn')
if use_vmap:
attn_batched_ref = jax.vmap(attn_ref, in_axes=(0, 0, None))
attn_batched_ans = jax.vmap(attn_ans, in_axes=(0, 0, None))
else:
attn_batched_ref = attn_ref
attn_batched_ans = attn_ans
fwd_ref = jax.jit(attn_batched_ref)
fwd_ans = jax.jit(attn_batched_ans)
y_ref = fwd_ref(x, bias, mask)
y_ans = fwd_ans(x, bias, mask)
self.assertAllClose(y_ref, y_ans)
@jax.jit
def bwd_ref(x, bias, mask):
_, f_vjp = jax.vjp(attn_ref, x, bias, mask)
return f_vjp(x)
@jax.jit
def bwd_ans(x, bias, mask):
_, f_vjp = jax.vjp(attn_ans, x, bias, mask)
return f_vjp(x)
_, dbias_ref, _ = bwd_ref(x, bias, mask)
_, dbias_ans, _ = bwd_ans(x, bias, mask)
self.assertAllClose(dbias_ans, dbias_ref, rtol=0.1, atol=0.1)
def testSoftplusGrad(self):
check_grads(nn.softplus, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["fwd"])
check_grads(nn.softplus, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["rev"])
def testSoftplusGradZero(self):
check_grads(nn.softplus, (0.,), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSoftplusGradInf(self):
self.assertAllClose(
1., jax.grad(nn.softplus)(float('inf')))
def testSoftplusGradNegInf(self):
check_grads(nn.softplus, (-float('inf'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSoftplusGradNan(self):
check_grads(nn.softplus, (float('nan'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
@parameterized.parameters([int, float] + jtu.dtypes.floating + jtu.dtypes.integer)
def testSoftplusZero(self, dtype):
self.assertEqual(jnp.log(dtype(2)), nn.softplus(dtype(0)))
def testSparseplusGradZero(self):
check_grads(nn.sparse_plus, (-2.,), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSparseplusGrad(self):
check_grads(nn.sparse_plus, (0.,), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSparseplusAndSparseSigmoid(self):
self.assertAllClose(
jax.grad(nn.sparse_plus)(0.), nn.sparse_sigmoid(0.),
check_dtypes=False)
self.assertAllClose(
jax.grad(nn.sparse_plus)(2.), nn.sparse_sigmoid(2.),
check_dtypes=False)
self.assertAllClose(
jax.grad(nn.sparse_plus)(-2.), nn.sparse_sigmoid(-2.),
check_dtypes=False)
def testSquareplusGrad(self):
check_grads(nn.squareplus, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["fwd"])
check_grads(nn.squareplus, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["rev"])
def testSquareplusGradZero(self):
check_grads(nn.squareplus, (0.,), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSquareplusGradNegInf(self):
check_grads(nn.squareplus, (-float('inf'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testSquareplusGradNan(self):
check_grads(nn.squareplus, (float('nan'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
@parameterized.parameters([float] + jtu.dtypes.floating)
def testSquareplusZero(self, dtype):
self.assertEqual(dtype(1), nn.squareplus(dtype(0), dtype(4)))
def testMishGrad(self):
check_grads(nn.mish, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["fwd"])
check_grads(nn.mish, (1e-8,), order=4,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None,
modes=["rev"])
def testMishGradZero(self):
check_grads(nn.mish, (0.,), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testMishGradNegInf(self):
check_grads(nn.mish, (-float('inf'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
def testMishGradNan(self):
check_grads(nn.mish, (float('nan'),), order=1,
rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
@parameterized.parameters([float] + jtu.dtypes.floating)
def testMishZero(self, dtype):
self.assertEqual(dtype(0), nn.mish(dtype(0)))
def testReluGrad(self):
rtol = 1e-2 if jtu.test_device_matches(["tpu"]) else None
check_grads(nn.relu, (1.,), order=3, rtol=rtol)
check_grads(nn.relu, (-1.,), order=3, rtol=rtol)
jaxpr = jax.make_jaxpr(jax.grad(nn.relu))(0.)
self.assertGreaterEqual(len(jaxpr.jaxpr.eqns), 2)
def testReluGradAtZero(self):
# https://dl.acm.org/doi/10.5555/3540261.3540297
grad = jax.grad(nn.relu)(0.)
self.assertEqual(grad, 0.)
def testRelu6Grad(self):
rtol = 1e-2 if jtu.test_device_matches(["tpu"]) else None
check_grads(nn.relu6, (1.,), order=3, rtol=rtol)
check_grads(nn.relu6, (-1.,), order=3, rtol=rtol)
self.assertAllClose(jax.grad(nn.relu6)(0.), 0., check_dtypes=False)
self.assertAllClose(jax.grad(nn.relu6)(6.), 0., check_dtypes=False)
def testSoftplusValue(self):
val = nn.softplus(89.)
self.assertAllClose(val, 89., check_dtypes=False)
def testSparseplusValue(self):
val = nn.sparse_plus(89.)
self.assertAllClose(val, 89., check_dtypes=False)
def testSparsesigmoidValue(self):
self.assertAllClose(nn.sparse_sigmoid(-2.), 0., check_dtypes=False)
self.assertAllClose(nn.sparse_sigmoid(2.), 1., check_dtypes=False)
self.assertAllClose(nn.sparse_sigmoid(0.), .5, check_dtypes=False)
def testSquareplusValue(self):
val = nn.squareplus(1e3)
self.assertAllClose(val, 1e3, check_dtypes=False, atol=1e-3)
def testMishValue(self):
val = nn.mish(1e3)
self.assertAllClose(val, 1e3, check_dtypes=False, atol=1e-3)
def testEluGrad(self):
check_grads(nn.elu, (1e4,), order=4, eps=1., modes=["fwd"])
check_grads(nn.elu, (1e4,), order=4, eps=1., modes=["rev"])
    def testEluValue(self):
        # For large positive inputs ELU acts as the identity.
        val = nn.elu(1e4)
        self.assertAllClose(val, 1e4, check_dtypes=False)
    def testGluValue(self):
        # glu halves the input along `axis` into (a, b) and returns
        # a * sigmoid(b): here 1.0 * sigmoid(0.0) == 0.5.
        val = nn.glu(jnp.array([1.0, 0.0]), axis=0)
        self.assertAllClose(val, jnp.array([0.5]))
@parameterized.parameters(False, True)
def testGeluIntType(self, approximate):
val_float = nn.gelu(jnp.array(-1.0), approximate=approximate)
val_int = nn.gelu(jnp.array(-1), approximate=approximate)
self.assertAllClose(val_float, val_int)
@parameterized.parameters(False, True)
def testGelu(self, approximate):
def gelu_reference(x):
return x * scipy.stats.norm.cdf(x)
args_maker = lambda: [jnp.linspace(-12, 5, 10000, dtype=jnp.float32)]
rtol = 2e-5
atol = 1e-3 if approximate else 0
self._CheckAgainstNumpy(
gelu_reference,
partial(nn.gelu, approximate=approximate),
args_maker,
check_dtypes=False,
tol=0,
rtol=rtol,
atol=atol,
)
@parameterized.parameters(*itertools.product(
(jnp.float32, jnp.bfloat16, jnp.float16),
(partial(nn.gelu, approximate=False),
partial(nn.gelu, approximate=True),
nn.relu, nn.identity, nn.softplus, nn.sparse_plus, nn.sigmoid, nn.squareplus, nn.mish)))
def testDtypeMatchesInput(self, dtype, fn):
x = jnp.zeros((), dtype=dtype)
out = fn(x)
self.assertEqual(out.dtype, dtype)
def testEluMemory(self):
# see https://github.com/jax-ml/jax/pull/1640
with jax.enable_checks(False): # With checks we materialize the array
jax.make_jaxpr(lambda: nn.elu(jnp.ones((10 ** 12,)))) # don't oom
def testHardTanhMemory(self):
# see https://github.com/jax-ml/jax/pull/1640
with jax.enable_checks(False): # With checks we materialize the array
jax.make_jaxpr(lambda: nn.hard_tanh(jnp.ones((10 ** 12,)))) # don't oom
@parameterized.parameters([nn.softmax, nn.log_softmax])
def testSoftmaxEmptyArray(self, fn):
x = jnp.array([], dtype=float)
self.assertArraysEqual(fn(x), x)
@parameterized.parameters([nn.softmax, nn.log_softmax])
def testSoftmaxEmptyMask(self, fn):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.zeros_like(x, dtype=bool)
expected = jnp.full_like(x, 0.0 if fn is nn.softmax else -jnp.inf)
self.assertArraysEqual(fn(x, where=m), expected)
@parameterized.parameters([nn.softmax, nn.log_softmax])
def testSoftmaxWhereMask(self, fn):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])
out = fn(x, where=m)
self.assertAllClose(out[m], fn(x[m]))
probs = out if fn is nn.softmax else jnp.exp(out)
self.assertAllClose(probs.sum(), 1.0)
# TODO(mattjj): include log_softmax in these extra tests if/when we add a
# custom_jvp rule for it (since otherwise it doesn't pass the numerical
# checks below).
if fn is nn.softmax and config.softmax_custom_jvp.value:
g_fun = lambda x: jnp.take(fn(x, where=m, initial=-jnp.inf),
jnp.array([0, 2, 3]))
jtu.check_grads(g_fun, (x,), order=2)
@parameterized.parameters([nn.softmax, nn.log_softmax])
def testSoftmaxWhereGrad(self, fn):
# regression test for https://github.com/jax-ml/jax/issues/19490
x = jnp.array([36., 10000.])
mask = x < 1000
f = lambda x, mask: fn(x, where=mask)[0]
self.assertAllClose(jax.grad(f)(x, mask), jnp.zeros_like(x))
def testSoftmaxGrad(self):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
jtu.check_grads(nn.softmax, (x,), order=2, atol=5e-3)
def testSoftmaxGradResiduals(self):
if not config.softmax_custom_jvp.value:
raise unittest.SkipTest("only applies when upgrade flag enabled")
x = jnp.array([5.5, 1.3, -4.2, 0.9])
res = ad_checkpoint.saved_residuals(nn.softmax, x)
self.assertLen(res, 1)
def testSoftmaxGradFlag(self):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
with jax.softmax_custom_jvp(False):
res = ad_checkpoint.saved_residuals(nn.softmax, x)
self.assertLen(res, 3)
self.assertEqual(sum(a.size for a, _ in res), 6)
with jax.softmax_custom_jvp(True):
res = ad_checkpoint.saved_residuals(nn.softmax, x)
self.assertLen(res, 1)
self.assertEqual(sum(a.size for a, _ in res), 4)
def testStandardizeWhereMask(self):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])
x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
out_masked = jnp.take(nn.standardize(x, where=m), jnp.array([0, 2, 3]))
out_filtered = nn.standardize(x_filtered)
self.assertAllClose(out_masked, out_filtered)
    def testOneHot(self):
        # In-range indices select rows of the identity matrix.
        actual = nn.one_hot(jnp.array([0, 1, 2]), 3)
        expected = jnp.array([[1., 0., 0.],
                              [0., 1., 0.],
                              [0., 0., 1.]])
        self.assertAllClose(actual, expected, check_dtypes=False)

        # Row order of the output follows the order of the input indices.
        actual = nn.one_hot(jnp.array([1, 2, 0]), 3)
        expected = jnp.array([[0., 1., 0.],
                              [0., 0., 1.],
                              [1., 0., 0.]])
        self.assertAllClose(actual, expected, check_dtypes=False)
def testOneHotOutOfBound(self):
actual = nn.one_hot(jnp.array([-1, 3]), 3)
expected = jnp.array([[0., 0., 0.],
[0., 0., 0.]])
self.assertAllClose(actual, expected, check_dtypes=False)
def testOneHotNonArrayInput(self):
actual = nn.one_hot([0, 1, 2], 3)
expected = jnp.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
self.assertAllClose(actual, expected, check_dtypes=False)
def testOneHotCustomDtype(self):
actual = nn.one_hot(jnp.array([0, 1, 2]), 3, dtype=jnp.bool_)
expected = jnp.array([[True, False, False],
[False, True, False],
[False, False, True]])
self.assertAllClose(actual, expected)
def testOneHotConcretizationError(self):
# https://github.com/jax-ml/jax/issues/3654
msg = r"in jax.nn.one_hot argument `num_classes`"
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
jax.jit(nn.one_hot)(3, 5)
def testOneHotAxis(self):
expected = jnp.array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]]).T
actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=0)
self.assertAllClose(actual, expected, check_dtypes=False)
actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=-2)
self.assertAllClose(actual, expected, check_dtypes=False)
def testOneHotNonInteger(self):
with self.assertDeprecationWarnsOrRaises("jax-nn-one-hot-float-input",
"jax.nn.one_hot input should be integer-typed"):
nn.one_hot(jnp.array([1.0]), 3)
def testTanhExists(self):
nn.tanh # doesn't crash
def testCustomJVPLeak(self):
# https://github.com/jax-ml/jax/issues/8171
@jax.jit
def fwd():
a = jnp.array(1.)
def f(hx, _):
hx = jax.nn.sigmoid(hx + a)
return hx, None
hx = jnp.array(0.)
jax.lax.scan(f, hx, None, length=2)
with jax.checking_leaks():
fwd() # doesn't crash
def testCustomJVPLeak2(self):
# https://github.com/jax-ml/jax/issues/8171
# The above test uses jax.nn.sigmoid, as in the original #8171, but that
# function no longer actually has a custom_jvp! So we inline the old def.
@jax.custom_jvp
def sigmoid(x):
one = jnp.float32(1)
return jax.lax.div(one, jax.lax.add(one, jax.lax.exp(jax.lax.neg(x))))
sigmoid.defjvps(lambda g, ans, x: g * ans * (jnp.float32(1) - ans))
@jax.jit
def fwd():
a = jnp.array(1., 'float32')
def f(hx, _):
hx = sigmoid(hx + a)
return hx, None
hx = jnp.array(0., 'float32')
jax.lax.scan(f, hx, None, length=2)
with jax.checking_leaks():
fwd() # doesn't crash
@parameterized.product(
shape=[(5,), (3, 5), (2, 3, 5)],
use_where=[True, False],
keepdims=[True, False],
)
def testLogMeanExp(self, shape, use_where, keepdims):
x = self.rng().rand(*shape) * 2 - 1
axis = self.rng().randint(0, x.ndim)
if use_where:
where = self.rng().randint(0, 2, size=shape).astype(bool)
else:
where = None
got = nn.logmeanexp(x, axis=axis, where=where, keepdims=keepdims)
expected = jnp.log(jnp.mean(jnp.exp(x), axis=axis, where=where, keepdims=keepdims))
self.assertAllClose(got, expected, atol=1e-3)
  def testLog1mExp(self):
    # Table of (input, expected) pairs for log(1 - exp(-x)): interior
    # values, the boundaries 0 and +inf, tiny and huge magnitudes, and
    # negative inputs (outside the domain, hence NaN expected).
    x, expected = jnp.array([
        [0.1, jnp.log(1 - jnp.exp(-0.1))],
        [1.1, jnp.log(1 - jnp.exp(-1.1))],
        [0, -jnp.inf],
        [1, -0.45867515],
        [1e2, 0.0],
        [1e-5, jnp.log(1e-5)],
        [-1, jnp.nan],
        [-1e-2, jnp.nan],
        [-1e2, jnp.nan],
        [jnp.inf, 0.0],
    ]).T
    got = nn.log1mexp(x)
    self.assertAllClose(got, expected, rtol=1e-3, atol=1e-3)
  def testLog1mExpGrad(self):
    # First-order gradient check across several input magnitudes;
    # TPU gets a looser relative tolerance.
    check_grads(
        nn.log1mexp,
        (jnp.array([1e-2, 1e-1, 1e0, 1e1, 1e2]),),
        order=1,
        rtol=1e-2 if jtu.test_device_matches(["tpu"]) else 1e-3,
        atol=1e-3,
    )
# One initializer configuration under test: a display name, the initializer
# factory, the shapes to exercise, and the dtypes to exercise.
InitializerRecord = collections.namedtuple(
    "InitializerRecord", ["name", "initializer", "shapes", "dtypes"])

# Candidate array shapes, from scalar () up to rank 4.
ALL_SHAPES = [(), (2,), (2, 2), (2, 3), (3, 2), (2, 3, 4), (4, 3, 2),
              (2, 3, 4, 5)]
def initializer_record(name, initializer, dtypes, min_dims=2, max_dims=4):
  """Build an InitializerRecord restricted to shapes of rank in
  [min_dims, max_dims] (inclusive), drawn from ALL_SHAPES."""
  eligible = [s for s in ALL_SHAPES if min_dims <= len(s) <= max_dims]
  return InitializerRecord(name, initializer, eligible, dtypes)
# All initializer configurations exercised by the parameterized initializer
# tests. The trailing positional args to initializer_record are
# (min_dims, max_dims): e.g. orthogonal is restricted to exactly rank 2 and
# delta_orthogonal to exactly rank 4.
INITIALIZER_RECS = [
    initializer_record("uniform", nn.initializers.uniform, jtu.dtypes.floating, 1),
    initializer_record("normal", nn.initializers.normal, jtu.dtypes.inexact, 1),
    initializer_record("he_normal", nn.initializers.he_normal, jtu.dtypes.inexact),
    initializer_record("he_uniform", nn.initializers.he_uniform, jtu.dtypes.inexact),
    initializer_record("glorot_normal", nn.initializers.glorot_normal, jtu.dtypes.inexact),
    initializer_record("glorot_uniform", nn.initializers.glorot_uniform, jtu.dtypes.inexact),
    initializer_record("lecun_normal", nn.initializers.lecun_normal, jtu.dtypes.inexact),
    initializer_record("lecun_uniform", nn.initializers.lecun_uniform, jtu.dtypes.inexact),
    initializer_record("orthogonal", nn.initializers.orthogonal, jtu.dtypes.floating, 2, 2),
    initializer_record("truncated_normal", nn.initializers.truncated_normal, jtu.dtypes.floating, 1),
    initializer_record("delta_orthogonal", nn.initializers.delta_orthogonal, jtu.dtypes.floating, 4, 4),
    initializer_record(
        "variance_scaling_fan_geo_avg",
        partial(nn.initializers.variance_scaling, 1, "fan_geo_avg", "normal"),
        jtu.dtypes.floating,
    ),
    # NOTE(review): the three records below all share the name
    # "variance_scaling_fan_in" despite differing in_axis/out_axis settings —
    # presumably intentional (the name only labels the generated test cases);
    # confirm that the duplication is deliberate.
    initializer_record(
        "variance_scaling_fan_in",
        partial(nn.initializers.variance_scaling, 1, "fan_in", "normal", in_axis=[0], out_axis=[]),
        jtu.dtypes.floating,
        min_dims=1,
    ),
    initializer_record(
        "variance_scaling_fan_in",
        partial(nn.initializers.variance_scaling, 1, "fan_in", "normal", in_axis=[], out_axis=[0]),
        jtu.dtypes.floating,
        min_dims=1,
    ),
    initializer_record(
        "variance_scaling_fan_in",
        partial(nn.initializers.variance_scaling, 1, "fan_in", "normal", in_axis=[], out_axis=[]),
        jtu.dtypes.floating,
        min_dims=0,
    ),
]
@jtu.with_config(jax_legacy_prng_key="allow")
| NNFunctionsTest |
python | PyCQA__pylint | tests/functional/r/regression/regression_property_no_member_2641.py | {
"start": 168,
"end": 441
} | class ____(metaclass=ABCMeta):
@abstractmethod
def __init__(self, name, age):
self.name = name
self.age = age
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
| Person |
python | kamyu104__LeetCode-Solutions | Python/peaks-in-array.py | {
"start": 57,
"end": 1790
} | class ____(object):
def countOfPeaks(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
class BIT(object): # 0-indexed.
def __init__(self, nums):
self.__bit = [0]*(len(nums)+1) # Extra one for dummy node.
for i in xrange(1, len(self.__bit)):
self.__bit[i] = nums[i-1] + self.__bit[i-1]
for i in reversed(xrange(1, len(self.__bit))):
last_i = i - (i & -i)
self.__bit[i] -= self.__bit[last_i]
def add(self, i, val):
i += 1 # Extra one for dummy node.
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1 # Extra one for dummy node.
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
def check(i):
return nums[i-1] < nums[i] > nums[i+1]
def update(x, d):
for i in xrange(max(x-1, 1), min((x+1)+1, len(nums)-1)):
if check(i):
bit.add(i, d)
bit = BIT([int(1 <= i <= len(nums)-2 and check(i)) for i in xrange(len(nums))])
result = []
for t, x, y in queries:
if t == 1:
result.append(bit.query(y-1)-bit.query((x+1)-1) if y-1 >= x+1 else 0)
continue
update(x, -1)
nums[x] = y
update(x, +1)
return result
# Time: O(nlogn + qlogn)
# Space: O(n)
# bit, fenwick tree
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/missing_type.py | {
"start": 228,
"end": 499
} | class ____:
def source(self) -> None:
pass
unknown = source # revealed type is `unknown`
def test_unknown_source_def(x: UnknownSourceDef) -> None:
# TODO(T90322028): we don't find the flow here.
y = x.unknown()
_test_sink(y)
| UnknownSourceDef |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/atrous_conv2d_test.py | {
"start": 2226,
"end": 7413
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DForward(self):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y2 = nn_ops.conv2d(
x, f_up, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
parameters, 'SAME' `padding`, and `filters` with odd heights/ widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
is equivalent to:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
"""
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.session():
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
for kernel in [1, 3, 5]: # The kernel size needs to be odd.
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [kernel, kernel, 2, 2]
f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1, y2, rtol=1e-2, atol=1e-2)
@test_util.run_deprecated_v1
def testGradient(self):
with self.session():
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [3, 3, 2, 2]
# Output: [batch, height, width, output_depth]
y_shape = [2, 5, 6, 2]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
for rate in range(1, 4):
output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f],
[x_shape, f_shape],
output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 4e-3 if test_util.is_xla_enabled() else 1e-3
self.assertLess(err, err_tolerance)
@test_util.run_deprecated_v1
def testAtrousConv2DInvalid(self):
with self.session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = nn_ops.atrous_conv2d(
value=np.ones((1, 1, 1, 5)),
filters=np.ones((1, 1, 5, 1)),
rate=2147483647,
padding="SAME")
self.evaluate(op)
| AtrousConv2DTest |
python | django__django | tests/flatpages_tests/test_middleware.py | {
"start": 5830,
"end": 8243
} | class ____(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get("/flatpage_root/flatpage")
self.assertRedirects(response, "/flatpage_root/flatpage/", status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"""
A nonexistent flatpage raises 404 when served through a view and
should not add a slash.
"""
response = self.client.get("/flatpage_root/no_such_flatpage")
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"""
A flatpage can be served by the fallback middleware and should add a
slash
"""
response = self.client.get("/flatpage")
self.assertRedirects(response, "/flatpage/", status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage raises a 404 when served by the fallback
middleware and should not add a slash.
"""
response = self.client.get("/no_such_flatpage")
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"""
A flatpage with special chars in the URL can be served by the fallback
middleware and should add a slash.
"""
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get("/some.very_special~chars-here")
self.assertRedirects(
response, "/some.very_special~chars-here/", status_code=301
)
def test_redirect_fallback_flatpage_root(self):
"""
A flatpage at / should not cause a redirect loop when APPEND_SLASH is
set
"""
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get("/")
self.assertContains(response, "<p>Root</p>")
| FlatpageMiddlewareAppendSlashTests |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 198815,
"end": 201321
} | class ____(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
| NetworkConnectionNoServer |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/recursive.py | {
"start": 2059,
"end": 3784
} | class ____(SearchStrategy):
def __init__(self, base, extend, max_leaves):
super().__init__()
self.max_leaves = max_leaves
self.base = base
self.limited_base = LimitedStrategy(base)
self.extend = extend
strategies = [self.limited_base, self.extend(self.limited_base)]
while 2 ** (len(strategies) - 1) <= max_leaves:
strategies.append(extend(OneOfStrategy(tuple(strategies))))
self.strategy = OneOfStrategy(strategies)
def __repr__(self) -> str:
if not hasattr(self, "_cached_repr"):
self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % (
self.base,
get_pretty_function_description(self.extend),
self.max_leaves,
)
return self._cached_repr
def do_validate(self) -> None:
check_strategy(self.base, "base")
extended = self.extend(self.limited_base)
check_strategy(extended, f"extend({self.limited_base!r})")
self.limited_base.validate()
extended.validate()
check_type(int, self.max_leaves, "max_leaves")
if self.max_leaves <= 0:
raise InvalidArgument(
f"max_leaves={self.max_leaves!r} must be greater than zero"
)
def do_draw(self, data):
count = 0
while True:
try:
with self.limited_base.capped(self.max_leaves):
return data.draw(self.strategy)
except LimitReached:
if count == 0:
msg = f"Draw for {self!r} exceeded max_leaves and had to be retried"
data.events[msg] = ""
count += 1
| RecursiveStrategy |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/mvdefaults/package.py | {
"start": 216,
"end": 637
} | class ____(Package):
homepage = "http://www.example.com"
url = "http://www.example.com/mvdefaults-1.0.tar.gz"
version("1.0", md5="abcdef1234567890abcdef1234567890")
version("0.9", md5="abcdef1234567890abcdef1234567890")
variant("foo", values=("a", "b", "c"), default=("a", "b", "c"), multi=True, description="")
conflicts("foo:=a,b", when="@0.9")
depends_on("pkg-b", when="foo:=b,c")
| Mvdefaults |
python | apache__avro | lang/py/avro/schema.py | {
"start": 20022,
"end": 21049
} | class ____(FixedSchema, DecimalLogicalSchema):
def __init__(
self,
size,
name,
precision,
scale=0,
namespace=None,
names=None,
other_props=None,
validate_names: bool = True,
):
max_precision = int(math.floor(math.log10(2) * (8 * size - 1)))
DecimalLogicalSchema.__init__(self, precision, scale, max_precision)
FixedSchema.__init__(self, name, namespace, size, names, other_props, validate_names=validate_names)
self.set_prop("precision", precision)
self.set_prop("scale", scale)
# read-only properties
@property
def precision(self):
return self.get_prop("precision")
@property
def scale(self):
return self.get_prop("scale")
def to_json(self, names=None):
return self.props
def validate(self, datum):
"""Return self if datum is a Decimal object, else None."""
return self if isinstance(datum, decimal.Decimal) else None
| FixedDecimalSchema |
python | sympy__sympy | sympy/core/facts.py | {
"start": 17328,
"end": 17479
} | class ____(ValueError):
def __str__(self):
kb, fact, value = self.args
return "%s, %s=%s" % (kb, fact, value)
| InconsistentAssumptions |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/collective_ops_test.py | {
"start": 18384,
"end": 20718
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication,
max_subdivs_per_device):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
tokens = {}
for dev in [dev0, dev1]:
with ops.device(dev):
tokens[dev] = create_ordering_token()
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
if max_subdivs_per_device == -1:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication)
else:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication,
max_subdivs_per_device=max_subdivs_per_device)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev0],
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
ordering_token=tokens[dev1],
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
combinations.combine(required_physical_gpus=2, mode='eager'))
| AllReduceWithSubdivisionsTest |
python | ray-project__ray | python/ray/dag/compiled_dag_node.py | {
"start": 12867,
"end": 15138
} | class ____:
"""Wraps the normal Ray DAGNode with some metadata."""
def __init__(self, idx: int, dag_node: "ray.dag.DAGNode"):
"""
Args:
idx: A unique index into the original DAG.
dag_node: The original DAG node created by the user.
"""
self.idx = idx
self.dag_node = dag_node
# Dict from task index to actor handle for immediate downstream tasks.
self.downstream_task_idxs: Dict[int, "ray.actor.ActorHandle"] = {}
# Case 1: The task represents a ClassMethodNode.
#
# Multiple return values are written to separate `output_channels`.
# `output_idxs` represents the tuple index of the output value for
# multiple returns in a tuple. If an output index is None, it means
# the complete return value is written to the output channel.
# Otherwise, the return value is a tuple and the index is used
# to extract the value to be written to the output channel.
#
# Case 2: The task represents an InputNode.
#
# `output_idxs` can be an integer or a string to retrieve the
# corresponding value from `args` or `kwargs` in the DAG's input.
self.output_channels: List[ChannelInterface] = []
self.output_idxs: List[Optional[Union[int, str]]] = []
# The DAGNodes that are arguments to this task.
# This is used for lazy resolution of the arguments' type hints.
self.arg_nodes: List["ray.dag.DAGNode"] = []
# idxs of possible ClassMethodOutputNodes if they exist, used for visualization
self.output_node_idxs: List[int] = []
@property
def args(self) -> Tuple[Any]:
return self.dag_node.get_args()
@property
def kwargs(self) -> Dict[str, Any]:
return self.dag_node.get_kwargs()
@property
def num_readers(self) -> int:
return len(self.downstream_task_idxs)
@property
def arg_type_hints(self) -> List["ChannelOutputType"]:
return [arg_node.type_hint for arg_node in self.arg_nodes]
def __str__(self) -> str:
return f"""
Node: {self.dag_node}
Arguments: {self.args}
Output: {self.output_channels}
"""
| CompiledTask |
python | huggingface__transformers | src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py | {
"start": 34060,
"end": 38020
} | class ____(RecurrentGemmaPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = RecurrentGemmaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
# Ignore copy
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
use_cache: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RecurrentGemmaForCausalLM
>>> model = RecurrentGemmaForCausalLM.from_pretrained("google/recurrentgemma-2b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/recurrentgemma-2b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True
outputs = self.model(
input_ids=input_ids,
position_ids=position_ids,
cache_position=cache_position,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
# Soft-cap the logits TODO remove if always done.
# if self.config.logits_soft_cap is not None:
cap = self.config.logits_soft_cap
logits = nn.functional.tanh(logits / cap) * cap
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
__all__ = ["RecurrentGemmaForCausalLM", "RecurrentGemmaModel", "RecurrentGemmaPreTrainedModel"]
| RecurrentGemmaForCausalLM |
python | yaml__pyyaml | lib/yaml/events.py | {
"start": 2281,
"end": 2335
} | class ____(CollectionEndEvent):
pass
| SequenceEndEvent |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchaudio_models.py | {
"start": 25344,
"end": 26386
} | class ____(torch.nn.Module):
def __init__(self, query_proj, key_proj, value_proj):
r"""A in-proj container to process inputs.
Args:
query_proj: a proj layer for query.
key_proj: a proj layer for key.
value_proj: a proj layer for value.
"""
super().__init__()
self.query_proj = query_proj
self.key_proj = key_proj
self.value_proj = value_proj
def forward(
self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Projects the input sequences using in-proj layers.
Args:
query, key, value (Tensors): sequence to be projected
Shape:
- query, key, value: :math:`(S, N, E)`
- Output: :math:`(S, N, E)`
where S is the sequence length, N is the batch size, and E is the embedding dimension.
"""
return self.query_proj(query), self.key_proj(key), self.value_proj(value)
| InProjContainer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/automap.py | {
"start": 51788,
"end": 61724
} | class ____:
__slots__ = ("table_keys",)
table_keys: Set[str]
def automap_base(
declarative_base: Optional[Type[Any]] = None, **kw: Any
) -> Any:
r"""Produce a declarative automap base.
This function produces a new base class that is a product of the
:class:`.AutomapBase` class as well a declarative base produced by
:func:`.declarative.declarative_base`.
All parameters other than ``declarative_base`` are keyword arguments
that are passed directly to the :func:`.declarative.declarative_base`
function.
:param declarative_base: an existing class produced by
:func:`.declarative.declarative_base`. When this is passed, the function
no longer invokes :func:`.declarative.declarative_base` itself, and all
other keyword arguments are ignored.
:param \**kw: keyword arguments are passed along to
:func:`.declarative.declarative_base`.
"""
if declarative_base is None:
Base = _declarative_base(**kw)
else:
Base = declarative_base
return type(
Base.__name__,
(AutomapBase, Base),
{
"__abstract__": True,
"classes": util.Properties({}),
"by_module": util.Properties({}),
"_sa_automapbase_bookkeeping": _Bookkeeping(set()),
},
)
def _is_many_to_many(
automap_base: Type[Any], table: Table
) -> Tuple[
Optional[Table], Optional[Table], Optional[list[ForeignKeyConstraint]]
]:
fk_constraints = [
const
for const in table.constraints
if isinstance(const, ForeignKeyConstraint)
]
if len(fk_constraints) != 2:
return None, None, None
cols: List[Column[Any]] = sum(
[
[fk.parent for fk in fk_constraint.elements]
for fk_constraint in fk_constraints
],
[],
)
if set(cols) != set(table.c):
return None, None, None
return (
fk_constraints[0].elements[0].column.table,
fk_constraints[1].elements[0].column.table,
fk_constraints,
)
def _relationships_for_fks(
automap_base: Type[Any],
map_config: _DeferredDeclarativeConfig,
table_to_map_config: Union[
Dict[Optional[Table], _DeferredDeclarativeConfig],
Dict[Table, _DeferredDeclarativeConfig],
],
collection_class: type,
name_for_scalar_relationship: NameForScalarRelationshipType,
name_for_collection_relationship: NameForCollectionRelationshipType,
generate_relationship: GenerateRelationshipType,
) -> None:
local_table = cast("Optional[Table]", map_config.local_table)
local_cls = cast(
"Optional[Type[Any]]", map_config.cls
) # derived from a weakref, may be None
if local_table is None or local_cls is None:
return
for constraint in local_table.constraints:
if isinstance(constraint, ForeignKeyConstraint):
fks = constraint.elements
referred_table = fks[0].column.table
referred_cfg = table_to_map_config.get(referred_table, None)
if referred_cfg is None:
continue
referred_cls = referred_cfg.cls
if local_cls is not referred_cls and issubclass(
local_cls, referred_cls
):
continue
relationship_name = name_for_scalar_relationship(
automap_base, local_cls, referred_cls, constraint
)
backref_name = name_for_collection_relationship(
automap_base, referred_cls, local_cls, constraint
)
o2m_kws: Dict[str, Union[str, bool]] = {}
nullable = False not in {fk.parent.nullable for fk in fks}
if not nullable:
o2m_kws["cascade"] = "all, delete-orphan"
if (
constraint.ondelete
and constraint.ondelete.lower() == "cascade"
):
o2m_kws["passive_deletes"] = True
else:
if (
constraint.ondelete
and constraint.ondelete.lower() == "set null"
):
o2m_kws["passive_deletes"] = True
create_backref = backref_name not in referred_cfg.properties
if relationship_name not in map_config.properties:
if create_backref:
backref_obj = generate_relationship(
automap_base,
interfaces.ONETOMANY,
backref,
backref_name,
referred_cls,
local_cls,
collection_class=collection_class,
**o2m_kws,
)
else:
backref_obj = None
rel = generate_relationship(
automap_base,
interfaces.MANYTOONE,
relationship,
relationship_name,
local_cls,
referred_cls,
foreign_keys=[fk.parent for fk in constraint.elements],
backref=backref_obj,
remote_side=[fk.column for fk in constraint.elements],
)
if rel is not None:
map_config.properties[relationship_name] = rel
if not create_backref:
referred_cfg.properties[
backref_name
].back_populates = relationship_name # type: ignore[union-attr] # noqa: E501
elif create_backref:
rel = generate_relationship(
automap_base,
interfaces.ONETOMANY,
relationship,
backref_name,
referred_cls,
local_cls,
foreign_keys=[fk.parent for fk in constraint.elements],
back_populates=relationship_name,
collection_class=collection_class,
**o2m_kws,
)
if rel is not None:
referred_cfg.properties[backref_name] = rel
map_config.properties[
relationship_name
].back_populates = backref_name # type: ignore[union-attr]
def _m2m_relationship(
automap_base: Type[Any],
lcl_m2m: Table,
rem_m2m: Table,
m2m_const: List[ForeignKeyConstraint],
table: Table,
table_to_map_config: Union[
Dict[Optional[Table], _DeferredDeclarativeConfig],
Dict[Table, _DeferredDeclarativeConfig],
],
collection_class: type,
name_for_scalar_relationship: NameForCollectionRelationshipType,
name_for_collection_relationship: NameForCollectionRelationshipType,
generate_relationship: GenerateRelationshipType,
) -> None:
map_config = table_to_map_config.get(lcl_m2m, None)
referred_cfg = table_to_map_config.get(rem_m2m, None)
if map_config is None or referred_cfg is None:
return
local_cls = map_config.cls
referred_cls = referred_cfg.cls
relationship_name = name_for_collection_relationship(
automap_base, local_cls, referred_cls, m2m_const[0]
)
backref_name = name_for_collection_relationship(
automap_base, referred_cls, local_cls, m2m_const[1]
)
create_backref = backref_name not in referred_cfg.properties
if table in table_to_map_config:
overlaps = "__*"
else:
overlaps = None
if relationship_name not in map_config.properties:
if create_backref:
backref_obj = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
backref,
backref_name,
referred_cls,
local_cls,
collection_class=collection_class,
overlaps=overlaps,
)
else:
backref_obj = None
rel = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
relationship,
relationship_name,
local_cls,
referred_cls,
overlaps=overlaps,
secondary=table,
primaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[0].elements
), # type: ignore [arg-type]
secondaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[1].elements
), # type: ignore [arg-type]
backref=backref_obj,
collection_class=collection_class,
)
if rel is not None:
map_config.properties[relationship_name] = rel
if not create_backref:
referred_cfg.properties[
backref_name
].back_populates = relationship_name # type: ignore[union-attr] # noqa: E501
elif create_backref:
rel = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
relationship,
backref_name,
referred_cls,
local_cls,
overlaps=overlaps,
secondary=table,
primaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[1].elements
), # type: ignore [arg-type]
secondaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[0].elements
), # type: ignore [arg-type]
back_populates=relationship_name,
collection_class=collection_class,
)
if rel is not None:
referred_cfg.properties[backref_name] = rel
map_config.properties[
relationship_name
].back_populates = backref_name # type: ignore[union-attr]
| _Bookkeeping |
python | falconry__falcon | falcon/media/msgpack.py | {
"start": 3857,
"end": 3952
} | class ____(Protocol):
def __call__(self, data: bytes, raw: bool = ...) -> Any: ...
| UnpackMethod |
python | tiangolo__fastapi | docs_src/response_model/tutorial004_py39.py | {
"start": 104,
"end": 627
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: float = 10.5
tags: list[str] = []
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The bartenders", "price": 62, "tax": 20.2},
"baz": {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5, "tags": []},
}
@app.get("/items/{item_id}", response_model=Item, response_model_exclude_unset=True)
async def read_item(item_id: str):
return items[item_id]
| Item |
python | optuna__optuna | optuna/storages/_cached_storage.py | {
"start": 1251,
"end": 12261
} | class ____(BaseStorage, BaseHeartbeat):
"""A wrapper class of storage backends.
This class is used in :func:`~optuna.get_storage` function and automatically
wraps :class:`~optuna.storages.RDBStorage` class.
:class:`~optuna.storages._CachedStorage` meets the following **Data persistence** requirements.
**Data persistence**
:class:`~optuna.storages._CachedStorage` does not guarantee that write operations are logged
into a persistent storage, even when write methods succeed.
Thus, when process failure occurs, some writes might be lost.
As exceptions, when a persistent storage is available, any writes on any attributes
of `Study` and writes on `state` of `Trial` are guaranteed to be persistent.
Additionally, any preceding writes on any attributes of `Trial` are guaranteed to
be written into a persistent storage before writes on `state` of `Trial` succeed.
The same applies for `param`, `user_attrs', 'system_attrs' and 'intermediate_values`
attributes.
Args:
backend:
:class:`~optuna.storages.RDBStorage` class instance to wrap.
"""
def __init__(self, backend: RDBStorage) -> None:
self._backend = backend
self._studies: dict[int, _StudyInfo] = {}
self._trial_id_to_study_id_and_number: dict[int, tuple[int, int]] = {}
self._study_id_and_number_to_trial_id: dict[tuple[int, int], int] = {}
self._lock = threading.Lock()
def __getstate__(self) -> dict[Any, Any]:
state = self.__dict__.copy()
del state["_lock"]
return state
def __setstate__(self, state: dict[Any, Any]) -> None:
self.__dict__.update(state)
self._lock = threading.Lock()
def create_new_study(
self, directions: Sequence[StudyDirection], study_name: str | None = None
) -> int:
study_id = self._backend.create_new_study(directions=directions, study_name=study_name)
with self._lock:
study = _StudyInfo()
study.name = study_name
study.directions = list(directions)
self._studies[study_id] = study
return study_id
def delete_study(self, study_id: int) -> None:
with self._lock:
if study_id in self._studies:
for trial_number in self._studies[study_id].trials:
trial_id = self._study_id_and_number_to_trial_id.get((study_id, trial_number))
if trial_id in self._trial_id_to_study_id_and_number:
del self._trial_id_to_study_id_and_number[trial_id]
if (study_id, trial_number) in self._study_id_and_number_to_trial_id:
del self._study_id_and_number_to_trial_id[(study_id, trial_number)]
del self._studies[study_id]
self._backend.delete_study(study_id)
def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None:
self._backend.set_study_user_attr(study_id, key, value)
def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None:
self._backend.set_study_system_attr(study_id, key, value)
def get_study_id_from_name(self, study_name: str) -> int:
return self._backend.get_study_id_from_name(study_name)
def get_study_name_from_id(self, study_id: int) -> str:
with self._lock:
if study_id in self._studies:
name = self._studies[study_id].name
if name is not None:
return name
name = self._backend.get_study_name_from_id(study_id)
with self._lock:
if study_id not in self._studies:
self._studies[study_id] = _StudyInfo()
self._studies[study_id].name = name
return name
def get_study_directions(self, study_id: int) -> list[StudyDirection]:
with self._lock:
if study_id in self._studies:
directions = self._studies[study_id].directions
if directions is not None:
return directions
directions = self._backend.get_study_directions(study_id)
with self._lock:
if study_id not in self._studies:
self._studies[study_id] = _StudyInfo()
self._studies[study_id].directions = directions
return directions
def get_study_user_attrs(self, study_id: int) -> dict[str, Any]:
return self._backend.get_study_user_attrs(study_id)
def get_study_system_attrs(self, study_id: int) -> dict[str, Any]:
return self._backend.get_study_system_attrs(study_id)
def get_all_studies(self) -> list[FrozenStudy]:
return self._backend.get_all_studies()
def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int:
frozen_trial = self._backend._create_new_trial(study_id, template_trial)
trial_id = frozen_trial._trial_id
with self._lock:
if study_id not in self._studies:
self._studies[study_id] = _StudyInfo()
self._add_trials_to_cache(study_id, [frozen_trial])
return trial_id
def set_trial_param(
self,
trial_id: int,
param_name: str,
param_value_internal: float,
distribution: distributions.BaseDistribution,
) -> None:
with self._lock:
study_id, _ = self._trial_id_to_study_id_and_number[trial_id]
cached_dist = self._studies[study_id].param_distribution.get(param_name)
self._backend._set_trial_param(
trial_id, param_name, param_value_internal, distribution, cached_dist
)
if cached_dist is None:
self._studies[study_id].param_distribution[param_name] = distribution
def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int:
key = (study_id, trial_number)
with self._lock:
if key in self._study_id_and_number_to_trial_id:
return self._study_id_and_number_to_trial_id[key]
return self._backend.get_trial_id_from_study_id_trial_number(study_id, trial_number)
def get_best_trial(self, study_id: int) -> FrozenTrial:
_directions = self.get_study_directions(study_id)
if len(_directions) > 1:
raise RuntimeError(
"Best trial can be obtained only for single-objective optimization."
)
direction = _directions[0]
trial_id = self._backend._get_best_trial_id(study_id, direction)
return self.get_trial(trial_id)
def set_trial_state_values(
self, trial_id: int, state: TrialState, values: Sequence[float] | None = None
) -> bool:
return self._backend.set_trial_state_values(trial_id, state=state, values=values)
def set_trial_intermediate_value(
self, trial_id: int, step: int, intermediate_value: float
) -> None:
self._backend.set_trial_intermediate_value(trial_id, step, intermediate_value)
def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None:
self._backend.set_trial_user_attr(trial_id, key=key, value=value)
def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None:
self._backend.set_trial_system_attr(trial_id, key=key, value=value)
def _get_cached_trial(self, trial_id: int) -> FrozenTrial | None:
if trial_id not in self._trial_id_to_study_id_and_number:
return None
study_id, number = self._trial_id_to_study_id_and_number[trial_id]
study = self._studies[study_id]
trial = study.trials[number]
if not trial.state.is_finished():
return None
return trial
def get_trial(self, trial_id: int) -> FrozenTrial:
with self._lock:
trial = self._get_cached_trial(trial_id)
if trial is not None:
return trial
return self._backend.get_trial(trial_id)
def get_all_trials(
self,
study_id: int,
deepcopy: bool = True,
states: Container[TrialState] | None = None,
) -> list[FrozenTrial]:
self._read_trials_from_remote_storage(study_id)
with self._lock:
study = self._studies[study_id]
# We need to sort trials by their number because some samplers assume this behavior.
# The following two lines are latency-sensitive.
trials: dict[int, FrozenTrial] | list[FrozenTrial]
if states is not None:
trials = {number: t for number, t in study.trials.items() if t.state in states}
else:
trials = study.trials
trials = list(sorted(trials.values(), key=lambda t: t.number))
return copy.deepcopy(trials) if deepcopy else trials
def _read_trials_from_remote_storage(self, study_id: int) -> None:
with self._lock:
if study_id not in self._studies:
self._studies[study_id] = _StudyInfo()
study = self._studies[study_id]
trials = self._backend._get_trials(
study_id,
states=None,
included_trial_ids=study.unfinished_trial_ids,
trial_id_greater_than=study.last_finished_trial_id,
)
if not trials:
return
self._add_trials_to_cache(study_id, trials)
for trial in trials:
if not trial.state.is_finished():
study.unfinished_trial_ids.add(trial._trial_id)
continue
# Updates to last_finished_trial_id should only be performed here because they must
# be executed only when all trials have been considered.
study.last_finished_trial_id = max(study.last_finished_trial_id, trial._trial_id)
if trial._trial_id in study.unfinished_trial_ids:
study.unfinished_trial_ids.remove(trial._trial_id)
def _add_trials_to_cache(self, study_id: int, trials: list[FrozenTrial]) -> None:
study = self._studies[study_id]
for trial in trials:
self._trial_id_to_study_id_and_number[trial._trial_id] = (
study_id,
trial.number,
)
self._study_id_and_number_to_trial_id[(study_id, trial.number)] = trial._trial_id
study.trials[trial.number] = trial
def record_heartbeat(self, trial_id: int) -> None:
self._backend.record_heartbeat(trial_id)
def _get_stale_trial_ids(self, study_id: int) -> list[int]:
return self._backend._get_stale_trial_ids(study_id)
def get_heartbeat_interval(self) -> int | None:
return self._backend.get_heartbeat_interval()
def get_failed_trial_callback(self) -> Callable[["optuna.Study", FrozenTrial], None] | None:
return self._backend.get_failed_trial_callback()
| _CachedStorage |
python | readthedocs__readthedocs.org | readthedocs/builds/apps.py | {
"start": 211,
"end": 450
} | class ____(AppConfig):
name = "readthedocs.builds"
label = "builds"
verbose_name = _("Builds")
def ready(self):
import readthedocs.builds.tasks # noqa
import readthedocs.builds.signals_receivers # noqa
| Config |
python | numba__numba | numba/tests/test_objects.py | {
"start": 158,
"end": 265
} | class ____(object):
pass
def setattr_usecase(o, v):
o.x = v
def delattr_usecase(o):
del o.x
| C |
python | pytorch__pytorch | test/test_transformers.py | {
"start": 115101,
"end": 207107
} | class ____(NNTestCase):
""" Used to test CUDA only functionality of scaled_dot_product_attention
Quarks:
There is some trickiness with this function. Its runtime behavior
is dependent on the CUDA architecture you are testing it on. See
`PLATFORM_SUPPORTS_FUSED_ATTENTION` at the top of the file.
Summary:
Math: always supported
FlashAttention: Supported on sm80 or newer hardware
MemEfficientAttention: Supported on sm50 or newer hardware
"""
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
# TODO USED FOR TESTING THE SCORES, e.g. testing ALIBI we don't need this now
def normalize_flash_attn_S(
self,
attn_unnorm,
q,
k,
v,
query_padding_mask=None,
key_padding_mask=None,
attn_bias=None,
is_dropout=False,
causal=False,
window_size=(-1, -1), # -1 means infinite window size
scale=None,
):
"""
Arguments:
q: (batch_size, seqlen_q, nheads, head_dim)
k, v: (batch_size, seqlen_k, nheads, head_dim)
key_padding_mask: (batch_size, seqlen_q)
attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
Output:
softmax_lse: (batch_size, nheads, seqlen_q)
softmax_max: (batch_size, nheads, seqlen_q)
"""
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
if causal:
window_size = (window_size[0], 0)
q, k, v = q.float(), k.float(), v.float()
_, seqlen_q, _, head_dim = q.shape
seqlen_k = k.shape[1]
b = q.shape[0]
from torch.nn.attention.bias import _calculate_scale
scale = _calculate_scale(head_dim, scale)
scores = torch.matmul(q.transpose(1, 2) * scale, k.permute(0, 2, 3, 1))
if key_padding_mask is not None:
scores.masked_fill_(~key_padding_mask.view(b, 1, 1, -1), float("-inf"))
if window_size[0] >= 0 or window_size[1] >= 0:
local_mask = self.construct_local_mask(
seqlen_q,
seqlen_k,
window_size,
query_padding_mask,
key_padding_mask,
q.device,
)
scores.masked_fill_(local_mask, float("-inf"))
if attn_bias is not None:
scores = scores + attn_bias.to(dtype=scores.dtype)
block_size_n = _get_block_size_n(scores.device, head_dim, is_dropout, causal)
scores_block = scores.split(block_size_n, dim=-1)
lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1)
lse = torch.logsumexp(lse_block, dim=-1)
# lse could be -inf (i.e. all values in scores are -inf), and we want to set those to inf
# so that when we do torch.exp(m - lse), we get 0.0 instead of NaN.
lse[lse == float("-inf")] = float("inf")
scores_max_block = torch.stack([torch.amax(s, dim=-1) for s in scores_block], dim=-1)
cummax_block = torch.cummax(scores_max_block.flip(-1), dim=-1).values.flip(-1).unbind(dim=-1)
attn_unnorm_block = attn_unnorm.split(block_size_n, dim=-1)
attn_norm = torch.cat(
[
a * (torch.exp(m - lse)).unsqueeze(-1)
for a, m in zip(attn_unnorm_block, cummax_block)
],
dim=-1,
)
if query_padding_mask is not None:
attn_norm.masked_fill_(~query_padding_mask.view(b, 1, -1, 1), 0.0)
# attn_norm.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
return attn_norm.to(dtype=attn_unnorm.dtype)
def construct_local_mask(self, seqlen_q, seqlen_k, window_size, query_padding_mask, key_padding_mask, device):
# row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).view(-1, 1)
col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
sk = (
seqlen_k
if key_padding_mask is None
else key_padding_mask.sum(-1).view(-1, 1, 1, 1)
# else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
)
sq = (
seqlen_q
if query_padding_mask is None
else query_padding_mask.sum(-1).view(-1, 1, 1, 1)
# else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
)
if window_size[0] < 0:
return col_idx > row_idx + sk - sq + window_size[1]
else:
sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
return torch.logical_or(
col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
col_idx < row_idx + sk - sq - window_size[0],
)
def convert_flash_attn_S_to_softmax(
self,
S,
seqlen_q,
seqlen_k,
query_padding_mask,
key_padding_mask,
causal=False,
window_size=(-1, -1), # -1 means infinite window size
):
"""FlashAttention stores the S matrix in a different way.
Arguments:
S: (batch_size, nheads, seqlen_q, seqlen_k)
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
"""
if TEST_WITH_ROCM:
return S
b = S.shape[0]
if causal:
window_size = (window_size[0], 0)
seqlen_q_rounded, seqlen_k_rounded = S.shape[-2:]
S_converted = S
if window_size[0] >= 0 or window_size[1] >= 0:
local_mask = self.construct_local_mask(
seqlen_q,
seqlen_k,
window_size,
query_padding_mask,
key_padding_mask,
S.device,
)
local_mask = F.pad(
local_mask,
(0, seqlen_k_rounded - seqlen_k, 0, seqlen_q_rounded - seqlen_q),
value=True,
)
S_converted = S_converted.masked_fill(local_mask, 0.0)
# Need to zero out things not in attention_mask in case S was initialized with random values
# and some of those values aren't overwritten.
seqlen_q_og = (
query_padding_mask.shape[-1] if query_padding_mask is not None else seqlen_q_rounded
)
if query_padding_mask is not None:
query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q_rounded - seqlen_q_og))
# S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
S_converted = S_converted.masked_fill(~query_padding_mask.view(b, 1, -1, 1), 0.0)
seqlen_k_og = key_padding_mask.shape[-1] if key_padding_mask is not None else seqlen_k
if key_padding_mask is not None:
key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k_rounded - seqlen_k_og))
S_converted = S_converted.masked_fill(~key_padding_mask.view(b, 1, 1, -1), 0.0)
# S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q_rounded))
S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k_rounded))
return S_converted[:, :, :seqlen_q, :seqlen_k]
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_cudnn_attention_different_dk_dv(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim_k, head_dim_v = 32, 16, 128, 64
seq_len = 640
q_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
k_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
v_shape = SdpaShape(batch, num_heads, seq_len, head_dim_v)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_cudnn_attention_gqa(self, device):
batch = 4
seq_len_q = 512
seq_len_kv = 1024
D = 128
# Sample call to SDPA - GQ
query = torch.rand(batch, 32, seq_len_q, D, device='cuda', dtype=torch.bfloat16)
key = torch.rand(batch, 8, seq_len_kv, D, device='cuda', dtype=torch.bfloat16)
# cuDNN supports h_k != h_v
value = torch.rand(batch, 4, seq_len_kv, D, device='cuda', dtype=torch.bfloat16)
with sdpa_kernel([SDPBackend.MATH]):
output_math = scaled_dot_product_attention(query, key, value, is_causal=True, enable_gqa=True)
with self.assertRaisesRegex(RuntimeError, "No available kernel."):
with sdpa_kernel([SDPBackend.CUDNN_ATTENTION]):
output_cudnn = scaled_dot_product_attention(query, key, value, is_causal=True, enable_gqa=False)
with sdpa_kernel([SDPBackend.CUDNN_ATTENTION]):
output_cudnn = scaled_dot_product_attention(query, key, value, is_causal=True, enable_gqa=True)
self.assertEqual(output_math, output_cudnn)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_cudnn_attention_d256_heuristic(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim_k, head_dim_v = 32, 16, 256, 64
seq_len = 640
q_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
k_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
v_shape = SdpaShape(batch, num_heads, seq_len, head_dim_v)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
def test():
with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION], set_priority=True):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
actual.backward(torch.randn_like(actual))
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
if torch.cuda.get_device_capability() in [(9, 0)]:
test()
else:
with self.assertRaisesRegex(RuntimeError, "No available kernel."):
test()
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_fused_attention_different_dk_dv(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim_k, head_dim_v = 32, 16, 128, 64
q_shape = SdpaShape(batch, num_heads, 1, head_dim_k)
k_shape = SdpaShape(batch, num_heads, 2, head_dim_k)
v_shape = SdpaShape(batch, num_heads, 2, head_dim_v)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
# test that we do not dispatch to cuDNN for an unsupported case
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
@unittest.skipIf(True, "broken as of cuDNN 9.10")
def test_cudnn_attention_fail_d128(self, device):
# Test that cuDNN attention dispatching correctly bails out on d > 128
b, h = 1, 2
s_q, s_kv = 128, 128
d_qk, d_v = 128, 144
q = torch.randn(b, h, s_q, d_qk, device=device, dtype=torch.bfloat16)
k = torch.randn(b, h, s_kv, d_qk, device=device, dtype=torch.bfloat16)
v = torch.randn(b, h, s_kv, d_v, device=device, dtype=torch.bfloat16)
device_cap = torch.cuda.get_device_capability()
ISSM90 = device_cap == (9, 0)
ISSM100 = device_cap == (10, 0)
with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
if (ISSM90 or ISSM100) and torch.backends.cudnn.version() >= 90501:
torch.nn.functional.scaled_dot_product_attention(q, k, v)
else:
with self.assertRaisesRegex(RuntimeError, "No available kernel."):
torch.nn.functional.scaled_dot_product_attention(q, k, v)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_trivial_output_transpose(self, device):
# see also: https://github.com/pytorch/pytorch/issues/134001
x = torch.randn(2, 4, 1, 64, device='cuda', dtype=torch.float16, requires_grad=True)
x2 = x.transpose(1, 2)
with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION):
o = torch.nn.functional.scaled_dot_product_attention(x2, x2, x2).transpose(1, 2).reshape(2, 64, 4)
o.backward(o)
x_cpu = x.clone().cpu().detach()
x_cpu.requires_grad = True
x2_cpu = x_cpu.transpose(1, 2)
o = torch.nn.functional.scaled_dot_product_attention(x2_cpu, x2_cpu, x2_cpu).transpose(1, 2).reshape(2, 64, 4)
o.backward(o)
torch.testing.assert_close(x.grad, x_cpu.grad.cuda(), atol=7e-3, rtol=7e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_nonmodulo64seqlen(self, device):
# see also: https://github.com/pytorch/pytorch/issues/137347
mask = torch.randint(0, 2, (2, 1, 157, 6404)).to(device="cuda", dtype=torch.bool)
q = torch.randn(2, 32, 157, 128, device='cuda', dtype=torch.float16, requires_grad=True)
k = torch.randn(2, 32, 6404, 128, device='cuda', dtype=torch.float16, requires_grad=True)
v = torch.randn(2, 32, 6404, 128, device='cuda', dtype=torch.float16, requires_grad=True)
q_cpu = q.detach().clone().cpu()
k_cpu = k.detach().clone().cpu()
v_cpu = v.detach().clone().cpu()
q_cpu.requires_grad = True
k_cpu.requires_grad = True
v_cpu.requires_grad = True
mask_cpu = mask.detach().clone().cpu()
with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION):
out = nn.functional.scaled_dot_product_attention(
q,
k,
v,
attn_mask=mask,
dropout_p=0.0,
is_causal=False,
)
out_cpu = nn.functional.scaled_dot_product_attention(
q_cpu,
k_cpu,
v_cpu,
attn_mask=mask_cpu,
dropout_p=0.0,
is_causal=False,
)
out.sum().backward()
out_cpu.sum().backward()
torch.testing.assert_close(q.grad, q_cpu.grad.cuda(), atol=3e-3, rtol=2e-3)
torch.testing.assert_close(k.grad, k_cpu.grad.cuda(), atol=3e-3, rtol=2e-3)
torch.testing.assert_close(v.grad, v_cpu.grad.cuda(), atol=3e-3, rtol=2e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_preserves_query_layout(self, device):
def test_attention(backend: SDPBackend, permute_order: list[list[int]]):
BHSqD = [4, 16, 256, 64]
BHSkvD = [4, 16, 512, 64]
shape_q = [BHSqD[idx] for idx in permute_order]
shape_kv = [BHSkvD[idx] for idx in permute_order]
reverse = [permute_order.index(idx) for idx in range(4)]
q = torch.randn(*shape_q, dtype=torch.bfloat16, device='cuda', requires_grad=True).permute(reverse)
k = torch.randn(*shape_kv, dtype=torch.bfloat16, device='cuda', requires_grad=True).permute(reverse)
v = torch.randn(*shape_kv, dtype=torch.bfloat16, device='cuda', requires_grad=True).permute(reverse)
self.assertEqual(q.shape, BHSqD)
self.assertEqual(k.shape, BHSkvD)
self.assertEqual(v.shape, BHSkvD)
with sdpa_kernel(backend):
out = F.scaled_dot_product_attention(q, k, v)
self.assertTrue(out.permute(permute_order).is_contiguous())
out.sum().backward()
permute_orders = list()
permutable = [0, 1, 2]
permute_orders = itertools.permutations(permutable)
for permute_order in permute_orders:
test_attention(SDPBackend.CUDNN_ATTENTION, list(permute_order) + [3])
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_compiles(self):
q = torch.randn(2, 8, 1024, 128, dtype=torch.half, device='cuda', requires_grad=True)
grad = torch.randn_like(q)
@torch.compile()
def func():
with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION):
out = torch.nn.functional.scaled_dot_product_attention(q, q, q)
out.backward(grad)
return out
out = func()
q_cpu = q.float().cpu().detach().clone()
q_cpu.requires_grad = True
grad_cpu = grad.cpu().float()
out_cpu = torch.nn.functional.scaled_dot_product_attention(q_cpu, q_cpu, q_cpu)
out_cpu.backward(grad_cpu)
self.assertEqual(out, out_cpu.cuda().half(), atol=1e-3, rtol=1e-3)
self.assertEqual(q.grad, q_cpu.grad.cuda().half(), atol=7e-3, rtol=5e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_seqlen1_dropout_heuristic(self):
q = torch.randn(2, 8, 1, 128, dtype=torch.half, device='cuda', requires_grad=True)
grad = torch.randn_like(q)
with torch.nn.attention.sdpa_kernel([SDPBackend.CUDNN_ATTENTION, SDPBackend.FLASH_ATTENTION]):
out = torch.nn.functional.scaled_dot_product_attention(q, q, q, dropout_p=0.5)
out.backward(grad)
@skipIfRocm
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_broken_166211(self):
# https://github.com/pytorch/pytorch/issues/166211#issue-3551350377
shape = (20, 4, 4, 32)
scale = 10
for _ in range(100):
q = torch.randn(*shape, device='cuda', dtype=torch.bfloat16) * scale
k = torch.randn(*shape, device='cuda', dtype=torch.bfloat16) * scale
v = torch.randn(*shape, device='cuda', dtype=torch.bfloat16) * scale
q.requires_grad = True
k.requires_grad = True
v.requires_grad = True
grad_attn_output = torch.randn(*shape, device='cuda', dtype=torch.bfloat16) * scale
with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION):
attn_output = torch.nn.functional.scaled_dot_product_attention(q, k, v)
dq, dk, dv = torch.autograd.grad(outputs=attn_output, inputs=(q, k, v), grad_outputs=grad_attn_output)
self.assertFalse(dq.isnan().any())
self.assertFalse(dk.isnan().any())
self.assertFalse(dv.isnan().any())
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("mask_dim", [1, 2, 3, 4])
def test_mem_efficient_attention_mask_variants(self, device, mask_dim: list[int]):
dtype = torch.float16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 8, 8, 64
seq_len_q, seq_len_kv = 64, 15
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
if mask_dim == 1:
mask = torch.randn((seq_len_kv,), device=device, dtype=dtype)
elif mask_dim == 2:
mask = torch.randn((seq_len_q, seq_len_kv), device=device, dtype=dtype)
elif mask_dim == 3:
mask = torch.randn((num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
elif mask_dim == 4:
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("dtype", [torch.float, torch.float16])
def test_mem_eff_attention_non_contiguous_mask(self, device, dtype):
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 8, 8, 64
seq_len_q, seq_len_kv = 64, 16
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
mask = torch.as_strided(mask, (batch, num_heads, seq_len_q, seq_len_kv), (0, 0, 0, 1))
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("dtype", [torch.float, torch.float16])
def test_mem_eff_attention_long_sequence_mask(self, device, dtype):
if torch.cuda.get_device_properties('cuda').total_memory < 80 * 2**30:
unittest.skip("This test requires substatnial GPU memory.")
return
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 1, 32, 64
seq_len_q, seq_len_kv = 8192, 8192
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
def test_mem_eff_attention_non_contig_mask_bug(self, device):
# Without the fix this produces `AssertionError: assert 0.07352933287620544 < 1e-07`
# Shapes taken from repro
query_size = (3, 16, 1, 128)
query_strides = (2304, 128, 2048, 1)
key_size = (3, 16, 14, 128)
key_strides = (3584, 0, 256, 1)
value_size = (3, 16, 14, 128)
value_strides = (3584, 0, 256, 1)
attention_mask_size = (3, 1, 1, 14)
attn_mask_strides = (14, 14, 14, 1)
# Calculate the number of elements needed for each tensor
query_num_elements = max(size * stride for size, stride in zip(query_size, query_strides))
key_num_elements = max(size * stride for size, stride in zip(key_size, key_strides))
value_num_elements = max(size * stride for size, stride in zip(value_size, value_strides))
attention_mask_num_elements = max(size * stride for size, stride in zip(attention_mask_size, attn_mask_strides))
# Create the tensors with the specified sizes and strides
query = torch.randn(query_num_elements, device=device).as_strided(query_size, query_strides)
key = torch.randn(key_num_elements, device=device).as_strided(key_size, key_strides)
value = torch.randn(value_num_elements, device=device).as_strided(value_size, value_strides)
bias = torch.randn(attention_mask_num_elements, device=device).as_strided(attention_mask_size, attn_mask_strides)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, bias)
out_contig = F.scaled_dot_product_attention(query, key, value, bias.contiguous())
max_diff = (out - out_contig).abs().mean()
self.assertTrue(max_diff.item() < 1e-7)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Fused SDPA was not built for this system")
def test_singelton_head_dim_stride_ne_1(self, device):
    """Flash attention must accept a singleton head-dim whose stride != 1.

    The query is transposed so its last (head_dim) dimension has size 1 but a
    non-unit stride, which used to trip the kernel's stride checks.
    """
    query = torch.tensor([[[[1, 2]]]], dtype=torch.float16, device=device)
    query = query.transpose(-1, -2)
    key = torch.tensor([[[[1]]]], dtype=torch.float16, device=device)
    value = torch.tensor([[[[1]]]], dtype=torch.float16, device=device)
    # Use the modern `sdpa_kernel` context manager (as the rest of this file
    # does) instead of the deprecated `torch.backends.cuda.sdp_kernel`;
    # restricting backends to FLASH_ATTENTION is equivalent to the old
    # enable_flash=True / others-disabled configuration.
    with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
        scaled_dot_product_attention(query, key, value)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("is_contiguous", [True, False])
def test_scaled_dot_product_attention_fused_kernels_packed(self, device, type: str, is_contiguous: bool):
    """Mem-efficient kernel vs. math reference on packed (fused qkv) fp16
    inputs, exercised both contiguous and non-contiguous."""
    batch_size, seq_len, num_heads, head_dim = 32, 64, 16, 64
    # Test Packed: one tensor holds q, k and v concatenated on the last dim.
    packed_qkv = rand_sdpa_tensor(
        SdpaShape(batch_size, num_heads, seq_len, head_dim),
        type=type, device=device, dtype=torch.float16, packed=True)
    q, k, v = packed_qkv.chunk(3, dim=-1)

    def to_bhsd(t):
        # (B, S, H*D) -> (B, H, S, D)
        return t.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    q, v, k = to_bhsd(q), to_bhsd(v), to_bhsd(k)
    if is_contiguous:
        q, k, v = q.contiguous(), k.contiguous(), v.contiguous()
    with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
        fused_out = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    with sdpa_kernel(backends=[SDPBackend.MATH]):
        ref_out = torch.nn.functional.scaled_dot_product_attention(
            q.contiguous(), k.contiguous(), v.contiguous(),
            attn_mask=None, dropout_p=0.0, is_causal=False)
    self.assertEqual(fused_out.contiguous(), ref_out.contiguous(), atol=2e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "Fused SDPA was not built for this system")
@unittest.skipIf("TORCH_CUDNN_SDPA_NESTED_TENSOR_ENABLED" not in os.environ, "cuDNN Nested Tensor support not enabled")
@parametrize("type", ["nested"])
@parametrize("is_contiguous", [True, False])
def test_scaled_dot_product_attention_cudnn_nested(self, device, type: str, is_contiguous: bool):
    """cuDNN SDPA on packed nested-tensor fp16 inputs vs. the math reference."""
    if TEST_WITH_ROCM and type == 'nested':
        self.skipTest("ROCM does not support efficient attention on nested tensors, for now")
    batch_size, seq_len, num_heads, head_dim = 8, 64, 16, 64
    # Test Packed: q, k and v share one tensor, concatenated on the last dim.
    packed_qkv = rand_sdpa_tensor(
        SdpaShape(batch_size, num_heads, seq_len, head_dim),
        type=type, device=device, dtype=torch.float16, packed=True)
    q, k, v = packed_qkv.chunk(3, dim=-1)

    def to_bhsd(t):
        # (B, S, H*D) -> (B, H, S, D)
        return t.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    q, v, k = to_bhsd(q), to_bhsd(v), to_bhsd(k)
    if is_contiguous:
        q, k, v = q.contiguous(), k.contiguous(), v.contiguous()
    with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
        actual = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    with sdpa_kernel(backends=[SDPBackend.MATH]):
        math_ref = torch.nn.functional.scaled_dot_product_attention(
            q.contiguous(), k.contiguous(), v.contiguous(),
            attn_mask=None, dropout_p=0.0, is_causal=False)
    self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=2e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
             PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
def test_scaled_dot_product_attention_fused_kernels_packed_accuracy(self, device, type: str, fused_kernel: str):
    """Accuracy check for fused SDPA on packed qkv inputs.

    Builds value-identical fp32/fp16 packed tensors (dense or nested), runs
    the fused kernel on the fp16 copy, and compares it against both an fp16
    and an fp32 math-reference run, with everything cast up to fp32 for the
    final comparison.
    """
    def rand_nt(shape):
        # Returns an (fp32, fp16) pair of nested tensors built from the same
        # per-batch random values in [-3, 3).
        batch, seq_len, num_heads, head_dim = shape
        tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
                   for _ in range(batch)]
        return (torch.nested.nested_tensor(tensors, device=device, dtype=torch.float32),
                torch.nested.nested_tensor(tensors, device=device, dtype=torch.float16))

    def rand_tensor(shape):
        # Returns an (fp32, fp16) pair of dense tensors with identical values
        # in [-3, 3).
        batch, seq_len, num_heads, head_dim = shape
        tensor = 6 * torch.rand((batch, seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
        return tensor, tensor.to(dtype=torch.float16)

    batch_size, seq_len, num_heads, head_dim = 16, 8, 4, 64
    shape = (batch_size, seq_len, num_heads, head_dim)

    # Test Packed: split the packed projection into q/k/v for both precisions.
    qkv, qkv_low_precision = rand_tensor(shape) if type == "dense" else rand_nt(shape)
    query, key, value = qkv.chunk(3, dim=-1)
    query_lp, key_lp, value_lp = qkv_low_precision.chunk(3, dim=-1)
    # Reshape (B, S, H*D) -> (B, H, S, D) for SDPA.
    query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    # Fused kernel on the low-precision inputs.
    with sdpa_kernel(backends=[fused_kernel]):
        actual = torch.nn.functional.scaled_dot_product_attention(
            query_lp, key_lp, value_lp, attn_mask=None, dropout_p=0.0, is_causal=False)

    with sdpa_kernel(backends=[SDPBackend.MATH]):
        # fp16 math reference (same precision as the fused run).
        math_ref_lp = torch.nn.functional.scaled_dot_product_attention(
            query_lp.contiguous(), key_lp.contiguous(), value_lp.contiguous(),
            attn_mask=None, dropout_p=0.0, is_causal=False)

        math_query = query.contiguous()
        math_key = key.contiguous()
        math_value = value.contiguous()

        # fp32 math reference.
        math_ref = torch.nn.functional.scaled_dot_product_attention(
            math_query, math_key, math_value, attn_mask=None, dropout_p=0.0, is_causal=False)

    actual_test = actual
    math_ref_test = math_ref
    math_ref_lp_test = math_ref_lp

    # Nested tensors must be padded to dense before comparison.
    if actual_test.is_nested:
        actual_test = torch.nested.to_padded_tensor(actual_test.contiguous(), padding=0.0)
        math_ref_test = torch.nested.to_padded_tensor(math_ref_test, padding=0.0)
        math_ref_lp_test = torch.nested.to_padded_tensor(math_ref_lp_test, padding=0.0)

    # Compare everything in fp32.
    actual_test = actual_test.to(dtype=torch.float32).contiguous()
    math_ref_test = math_ref_test.to(dtype=torch.float32).contiguous()
    math_ref_lp_test = math_ref_lp_test.to(dtype=torch.float32).contiguous()

    self.assertEqual(math_ref_test, math_ref_lp_test, atol=8e-3, rtol=7e-3)
    self.assertEqual(actual_test, math_ref_test, atol=7e-3, rtol=7e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Efficient Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
def test_sdp_mem_efficient_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool):
    """Mem-efficient attention gradients (fp32) vs. an fp64 math reference on
    packed qkv inputs, contiguous and non-contiguous."""
    batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
    qkv = rand_sdpa_tensor(
        SdpaShape(batch_size, num_heads, seq_len, head_dim),
        type="dense", device=device, dtype=torch.float64,
        requires_grad=True, packed=True)
    # Lower-precision (fp32) copy that goes through the fused kernel.
    qkv_lp = qkv.detach().clone().to(torch.float32).requires_grad_()

    def split_heads(packed):
        # Split packed (B, S, 3*H*D) into q/k/v of shape (B, H, S, D).
        def reshape(t):
            return t.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        q, k, v = packed.chunk(3, dim=-1)
        return reshape(q), reshape(k), reshape(v)

    query, key, value = split_heads(qkv)
    query_lp, key_lp, value_lp = split_heads(qkv_lp)
    if contiguous_inputs:
        query, key, value = query.contiguous(), key.contiguous(), value.contiguous()
        query_lp, key_lp, value_lp = query_lp.contiguous(), key_lp.contiguous(), value_lp.contiguous()

    with sdpa_kernel(backends=[SDPBackend.MATH]):
        out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)
    with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
        out_lp = torch.nn.functional.scaled_dot_product_attention(
            query_lp, key_lp, value_lp, None, 0.0, is_causal)

    # Backprop the same random upstream gradient through both paths.
    rand_upward = torch.rand_like(out)
    out.backward(rand_upward)
    out_lp.backward(rand_upward.to(torch.float32))
    # Cast the fp32 grads up to fp64 and compare against the reference grads.
    self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=1e-5, rtol=1e-5)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Flash Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
@parametrize("dtype", [torch.float16, torch.bfloat16])
def test_sdp_flash_attention_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool, dtype: torch.dtype):
    """Flash attention gradients (fp16/bf16) vs. an fp64 math reference on
    packed qkv inputs, contiguous and non-contiguous."""
    batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
    make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
                          dtype=torch.float64, requires_grad=True, packed=True)

    qkv = make_tensor(SdpaShape(batch_size, num_heads, seq_len, head_dim))
    # Low-precision copy of the packed qkv that goes through the flash kernel.
    qkv_lp = qkv.detach().clone().to(dtype).requires_grad_()

    query, key, value = qkv.chunk(3, dim=-1)
    query_lp, key_lp, value_lp = qkv_lp.chunk(3, dim=-1)

    # Reshape (B, S, H*D) -> (B, H, S, D) for SDPA.
    query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    if contiguous_inputs:
        query = query.contiguous()
        key = key.contiguous()
        value = value.contiguous()

        query_lp = query_lp.contiguous()
        key_lp = key_lp.contiguous()
        value_lp = value_lp.contiguous()

    with sdpa_kernel(backends=[SDPBackend.MATH]):
        out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)

    with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
        out_lp = torch.nn.functional.scaled_dot_product_attention(
            query_lp, key_lp, value_lp, None, 0.0, is_causal)

    # Backprop the same random upstream gradient through both paths.
    rand_upward = torch.rand_like(out)
    rand_upward_lp = rand_upward.to(dtype)

    out.backward(rand_upward)
    out_lp.backward(rand_upward_lp)

    # Cast up and compare.
    # Since the compute is done in fp16/bf16 the tolerance is bumped;
    # bfloat16 gets looser tolerances than float16 (fewer mantissa bits).
    atol = 7e-4 if dtype == torch.float16 else 7e-3
    rtol = 7e-4 if dtype == torch.float16 else 7e-3
    if TEST_WITH_ROCM:
        atol = 9e-4 if dtype == torch.float16 else 9e-3
    self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=atol, rtol=rtol)
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Platform does not support fused SDPA")
@parametrize("type", ["dense", "nested"])
def test_fused_sdp_choice(self, device, type: str):
    """`torch._fused_sdp_choice` should select cuDNN / Flash / Efficient
    attention according to platform support, dtype, and the
    TORCH_CUDNN_SDPA_PREFERRED environment variable."""
    batch_size, seq_len, num_heads, head_dim = 2, 128, 8, 64
    shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
    make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float16, packed=True, requires_grad=True)

    qkv = make_tensor(shape, type=type)
    query, key, value = qkv.chunk(3, dim=-1)

    query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    device_capability = None
    if "cuda" in str(device):
        device_capability = torch.cuda.get_device_capability()
    # NOTE(review): `bool(os.environ[...])` is True for ANY non-empty value,
    # including "0" -- presumably "set at all means preferred"; confirm.
    prefer_cudnn = "TORCH_CUDNN_SDPA_PREFERRED" not in os.environ or bool(os.environ["TORCH_CUDNN_SDPA_PREFERRED"])
    prefer_cudnn = prefer_cudnn and device_capability and (device_capability == (9, 0) or device_capability == (10, 0))

    # TODO we are currently disabling this by default, lets assert that this returns
    # FlashAttention, we need to change when we make remove opt-in for cudnn
    if type != "nested" and PLATFORM_SUPPORTS_CUDNN_ATTENTION and prefer_cudnn:
        self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.CUDNN_ATTENTION.value)
    elif PLATFORM_SUPPORTS_FLASH_ATTENTION:
        self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.FLASH_ATTENTION.value)
    elif type != "nested" and PLATFORM_SUPPORTS_CUDNN_ATTENTION and not prefer_cudnn:  # e.g., we're on Windows
        self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.EFFICIENT_ATTENTION.value)
        with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
            self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.CUDNN_ATTENTION.value)
    else:
        self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.EFFICIENT_ATTENTION.value)

    # Change dtype to float32 so that efficient attention should get chosen
    make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float32, packed=True)

    qkv = make_tensor(shape, type=type)
    query, key, value = qkv.chunk(3, dim=-1)

    query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    # Fixed: was a bare `assert`, which is stripped under `python -O`; use the
    # unittest assertion for consistency with the checks above.
    self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.EFFICIENT_ATTENTION.value)
@skipIfRocm  # Missing triton.float32 ("triton" prefix is to locate skipped UTs), and deterministic algo
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Platform does not support fused SDPA")
@parametrize("warn_only", [True, False])
def test_sdp_choice_with_determinism(self, device, warn_only):
    """With deterministic algorithms enabled, the SDP chooser should still
    select efficient attention (in both warn-only and strict modes)."""
    batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
    shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
    make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, packed=False)
    query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)

    with use_deterministic_algorithims(True, warn_only=warn_only):
        with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH]):
            # Fixed: was a bare `assert`, which is stripped when Python runs
            # with optimizations (-O); use the unittest assertion instead.
            self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.EFFICIENT_ATTENTION.value)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Platform does not support fused SDPA")
@parametrize("use_compile", [True, False])
def test_fused_sdp_priority_order(self, device, use_compile):
    """Setting an SDPA backend priority order should actually change which
    backend runs (observed via relative wall-clock timings), and the default
    order should be restored when the context manager exits."""
    @torch.compile
    def compiled_func(order):
        with sdpa_kernel(order, set_priority=True):
            out = scaled_dot_product_attention(q, q, q)
        return out

    q = torch.randn(64, 8, 1024, 64, dtype=torch.half, device='cuda')
    default_order = torch._C._get_sdp_priority_order()
    # Each order puts a different backend first; timings below distinguish them.
    orders = [[SDPBackend.CUDNN_ATTENTION, SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION],
              [SDPBackend.MATH, SDPBackend.CUDNN_ATTENTION, SDPBackend.EFFICIENT_ATTENTION],
              [SDPBackend.EFFICIENT_ATTENTION, SDPBackend.CUDNN_ATTENTION, SDPBackend.MATH],
              [SDPBackend.FLASH_ATTENTION, SDPBackend.CUDNN_ATTENTION, SDPBackend.MATH]]
    import time
    times = list()
    for order in orders:
        # Warm-up run (also triggers compilation when use_compile is set).
        if use_compile:
            compiled_func(order)
        else:
            with sdpa_kernel(order, set_priority=True):
                scaled_dot_product_attention(q, q, q)
        torch.cuda.synchronize()
        t0 = time.perf_counter()
        if use_compile:
            compiled_func(order)
        else:
            with sdpa_kernel(order, set_priority=True):
                scaled_dot_product_attention(q, q, q)
        torch.cuda.synchronize()
        t1 = time.perf_counter()
        times.append(t1 - t0)
    # Fixed failure-message grammar below ("to faster" -> "to be faster").
    self.assertTrue(times[0] < times[1], "expected cuDNN SDPA to be faster than Math backend.")
    self.assertTrue(times[1] > times[2], "expected Eff Attn backend to be faster than Math backend.")
    self.assertTrue(times[3] < times[2], "expected Flash Attn backend to be faster than Math backend.")
    self.assertTrue(times[0] < times[2], "expected cuDNN Attn backend to be faster than Eff Attn backend.")
    reset_order = torch._C._get_sdp_priority_order()
    self.assertEqual(default_order, reset_order, "expected SDPA context manager to reset priority order.")
@skipIfRocm  # Missing deterministic algo
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", PLATFORM_SPECIFIC_SDPA)
@parametrize("warn_only", [True, False])
def test_fused_backwards_throws_determinism_warning(self, device, warn_only, fused_kernel):
    """Under deterministic algorithms, fused SDPA backward should warn in
    warn-only mode; in strict mode cuDNN (which has no deterministic
    fallback) raises instead."""
    batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
    shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
    make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float16,
                          packed=False, requires_grad=True)
    query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)

    # Human-readable kernel name expected in the warning text.
    kernel_names = {
        SDPBackend.EFFICIENT_ATTENTION: "Memory Efficient attention",
        SDPBackend.FLASH_ATTENTION: "Flash Attention",
    }
    kernel_name = kernel_names.get(fused_kernel, "cuDNN Attention")

    def _run_backward():
        torch.nn.functional.scaled_dot_product_attention(query, key, value).sum().backward()

    if warn_only:
        warning_context = self.assertWarnsRegex(
            UserWarning,
            f"{kernel_name} defaults to a non-deterministic algorithm.",
        )
    else:
        warning_context = contextlib.nullcontext()

    with use_deterministic_algorithims(True, warn_only=warn_only):
        with sdpa_kernel(backends=[fused_kernel]):
            with warning_context:
                if warn_only or fused_kernel != SDPBackend.CUDNN_ATTENTION:
                    _run_backward()
                else:
                    # cuDNN attention has no deterministic fallback
                    self.assertRaises(RuntimeError, _run_backward)
@unittest.skip("This test is not behaving deterministically on CI/CD")
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Platform does not support fused SDPA")
def test_mem_eff_backwards_determinism(self, device):
    """Mem-efficient attention backward should be non-deterministic by
    default, and deterministic once deterministic algorithms are enforced.

    Fixed here: the garbled skip-reason string, the misspelled local
    `diff_anwser_once`, and the duplicated probe loop (extracted into a
    helper).
    """
    # Need big seq_len to ensure that num_splits > 1
    dtype = torch.float32
    batch_size, seq_len, n_heads, head_dim = 1, 1024, 8, 64
    query = torch.rand(batch_size, n_heads, seq_len, head_dim,
                       device=device, dtype=dtype, requires_grad=True)
    key = torch.rand(batch_size, n_heads, seq_len, head_dim, device=device,
                     dtype=dtype, requires_grad=True)
    value = torch.rand(batch_size, n_heads, seq_len, head_dim,
                       device=device, dtype=dtype, requires_grad=True)

    def _grad_differs_across_reruns():
        # Run once to establish a baseline query gradient, then re-run the
        # op with the same upstream grad up to 100 times; return True as
        # soon as any rerun produces a different gradient.
        out = F.scaled_dot_product_attention(query, key, value)
        upward_grad = torch.rand_like(out)
        out.backward(upward_grad)
        initial_query_grad = query.grad
        for _ in range(100):
            query.grad = None
            out = F.scaled_dot_product_attention(query, key, value)
            out.backward(upward_grad)
            if not torch.equal(initial_query_grad, query.grad):
                return True
        return False

    with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
        # Default mode: the backward should differ at least once.
        self.assertTrue(_grad_differs_across_reruns())

        with use_deterministic_algorithims(True, warn_only=False):
            # Enforced determinism: every rerun must match the baseline.
            query.grad = None
            self.assertFalse(_grad_differs_across_reruns())
# verified passing successfully on H100
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize(
    "seq_len_q",
    [8, 103, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80 else [4, 8, 256, 512],
)
@parametrize(
    "seq_len_k",
    [8, 103, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80 else [4, 8, 256, 512],
)
@parametrize(
    "head_dim",
    [8, 16, 96, 128] if MEM_EFF_CAPABILITY_MATCHES_SM80 and not isSM120Device else [8, 16, 32, 64],
)
@parametrize("is_causal", [False, True])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize(
    "dtype",
    (
        [torch.float16, torch.bfloat16, torch.float32]
        if MEM_EFF_CAPABILITY_MATCHES_SM80
        else [torch.float16, torch.float32]
    ),
)
@parametrize("scale", [None, "l1"])
@tf32_enabled()
def test_mem_efficient_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
                                                   head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
                                                   scale: str):
    """Compare mem-efficient attention output and q/k/v gradients against an
    fp64 math reference and a same-precision math reference, with
    platform-specific fudge factors scaling the tolerances."""
    def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
        mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
        rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
        # On ROCM _fill_mem_eff_dropout_mask fills 0.5 if (prng > p) otherwise -0.5 to the tensor
        tester_p = p if not TEST_WITH_ROCM else 0.0
        mask = (rand_uniform > tester_p).to(torch.float32)
        return mask
    if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
        # Bug fix: `unittest.skip(...)` called as a plain function only builds
        # a decorator and skips nothing -- the old `return` made this case
        # silently PASS. `self.skipTest` reports it as skipped.
        self.skipTest("Reference implementation OOM")
    if TEST_WITH_ROCM and seq_len_q * seq_len_k * head_dim * batch_size > 1024 * 1024 * 128:
        torch.cuda.empty_cache()  # Prevent memory fragmentation
    seed = 42
    # "l1" scale means 1/head_dim instead of the default 1/sqrt(head_dim).
    scale = scale if scale is None else (1 / head_dim)
    n_heads = 4
    query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                       device=device, dtype=dtype, requires_grad=True)
    key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                     dtype=dtype, requires_grad=True)
    value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                       device=device, dtype=dtype, requires_grad=True)

    higher_precision_dtype = torch.float64
    query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

    # Create real output
    with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
        # Set the seed and run the kernel
        torch.manual_seed(seed)
        out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)

    if dropout_p == 0.0:
        with sdpa_kernel(backends=[SDPBackend.MATH]):
            # High Precision Math Reference
            out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
                                                     dropout_p=dropout_p, is_causal=is_causal, scale=scale)
            # Low Precision Math Reference
            out_lp_ref = F.scaled_dot_product_attention(query, key, value,
                                                        dropout_p=dropout_p, is_causal=is_causal, scale=scale)
    else:
        if seq_len_q > 1024:
            self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
        # Create the dropout_mask
        torch.manual_seed(seed)
        dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q, seq_len_k, dropout_p, seed, 0, device=device)
        # High Precision Math Reference
        out_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal, scale=scale, dropout_mask=dropout_mask)[0]
        # Low Precision Math Reference
        out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
            dropout_mask=dropout_mask)[0]

    # Backprop the same random upstream grad through all three paths.
    upstream_grad = torch.rand_like(out, requires_grad=False)

    grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
    grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
    grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)

    # Empirically-tuned multipliers on the base tolerances.
    fudge_factors = {
        'out': 3.0,
        'grad_query': 150.0,
        'grad_key': 25.0,
        'grad_value': 8.5,
    }
    if TEST_WITH_ROCM:
        fudge_factors['out'] = 5.0
        fudge_factors['grad_key'] = 45.0
        fudge_factors['grad_query'] = 360.0
        if seq_len_k >= 1024:
            fudge_factors['grad_key'] = 70.0
        if seq_len_k >= 2048:
            fudge_factors['grad_key'] = 160.0
            fudge_factors['grad_query'] = 670.0
        if dtype == torch.float32:
            fudge_factors['grad_key'] = 90.0
        if "gfx95" in torch.cuda.get_device_properties(0).gcnArchName:
            fudge_factors['grad_value'] = 16.0

    check_out_and_grad(
        (out_ref, out_lp_ref, out),
        *zip(grads_ref, grads_ref_lp, grads),
        fudge_factors=fudge_factors,
    )
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize(
    "seq_len_q",
    [8, 312, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80 else [8, 152, 512],
)
@parametrize(
    "seq_len_k",
    [8, 408, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80 else [8, 37, 512],
)
@parametrize(
    "head_dim",
    [8, 16, 96, 128] if MEM_EFF_CAPABILITY_MATCHES_SM80 and not isSM120Device else [8, 16, 32, 64],
)
@parametrize("is_causal", [False])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize(
    "dtype",
    (
        [torch.float16, torch.bfloat16, torch.float32]
        if MEM_EFF_CAPABILITY_MATCHES_SM80
        else [torch.float16, torch.float32]
    ),
)
@parametrize("scale", [None, "l1"])
@tf32_enabled()
def test_mem_efficient_attention_attn_mask_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int,
                                                             seq_len_k: int, head_dim: int, is_causal: bool,
                                                             dropout_p: float, dtype: torch.dtype,
                                                             scale: str):
    """Same as the no-mask variant, but with a differentiable attn_mask:
    compare mem-efficient attention output and q/k/v/mask gradients against
    high- and low-precision math references."""
    def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
        mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
        rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
        # On ROCM _fill_mem_eff_dropout_mask fills 0.5 if (prng > p) otherwise -0.5 to the tensor
        tester_p = p if not TEST_WITH_ROCM else 0.0
        mask = (rand_uniform > tester_p).to(torch.float32)
        return mask
    if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
        # Bug fix: `unittest.skip(...)` called as a plain function only builds
        # a decorator and skips nothing -- the old `return` made this case
        # silently PASS. `self.skipTest` reports it as skipped.
        self.skipTest("Reference implementation OOM")
    if TEST_WITH_ROCM and seq_len_q * seq_len_k * head_dim * batch_size > 1024 * 1024 * 128:
        torch.cuda.empty_cache()  # Prevent memory fragmentation
    seed = 42
    # "l1" scale means 1/head_dim instead of the default 1/sqrt(head_dim).
    scale = scale if scale is None else (1 / head_dim)
    n_heads = 4
    query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                       device=device, dtype=dtype, requires_grad=True)
    key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                     dtype=dtype, requires_grad=True)
    value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                       device=device, dtype=dtype, requires_grad=True)

    # Additive mask, broadcast over batch and heads; requires_grad so we can
    # check its gradient too.
    attn_mask = torch.rand(seq_len_q, seq_len_k, device=device, dtype=dtype, requires_grad=True)

    higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
    query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
    attn_mask_ref = attn_mask.detach().to(higher_precision_dtype).requires_grad_(True)

    # Create real output
    with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
        # Set the seed and run the kernel
        torch.manual_seed(seed)
        out = F.scaled_dot_product_attention(query, key, value, attn_mask, dropout_p=dropout_p,
                                             is_causal=is_causal, scale=scale)

    if dropout_p == 0.0:
        with sdpa_kernel(backends=[SDPBackend.MATH]):
            # High Precision Math Reference
            out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref, attn_mask_ref,
                                                     dropout_p=dropout_p, is_causal=is_causal, scale=scale)
            # Low Precision Math Reference
            out_lp_ref = F.scaled_dot_product_attention(query, key, value, attn_mask,
                                                        dropout_p=dropout_p, is_causal=is_causal, scale=scale)
    else:
        if seq_len_q > 1024:
            self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
        # Create the dropout_mask
        torch.manual_seed(seed)
        dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q,
                                              seq_len_k, dropout_p, seed, 0, device=device)
        # High Precision Math Reference
        out_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query_ref, key_ref, value_ref, attn_mask_ref, dropout_p=dropout_p, is_causal=is_causal,
            scale=scale, dropout_mask=dropout_mask)[0]
        # Low Precision Math Reference
        out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query, key, value, attn_mask,
            dropout_p=dropout_p, is_causal=is_causal, scale=scale,
            dropout_mask=dropout_mask)[0]

    # Backprop the same random upstream grad through all three paths.
    upstream_grad = torch.rand_like(out, requires_grad=False)

    grads = torch.autograd.grad(out, (query, key, value, attn_mask), upstream_grad)
    grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value, attn_mask), upstream_grad)
    grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref, attn_mask_ref), upstream_grad)

    # Empirically-tuned multipliers on the base tolerances.
    fudge_factors = {
        "out": 4,
        "grad_query": 160.0,
        "grad_key": 25.0,
        "grad_value": 8.0,
        "grad_attn_mask": 45.0,
    }
    if TEST_WITH_ROCM:
        fudge_factors['out'] = 6.0
        fudge_factors['grad_key'] = 45.0
        fudge_factors['grad_query'] = 360.0
        if seq_len_k >= 1024:
            fudge_factors['grad_key'] = 70.0
        if seq_len_k >= 2048:
            fudge_factors['grad_key'] = 160.0
            fudge_factors['grad_query'] = 670.0  # gfx90a
        if dtype == torch.float32:
            fudge_factors['grad_key'] = 90.0
        if "gfx95" in torch.cuda.get_device_properties(0).gcnArchName:
            fudge_factors['grad_value'] = 16.0

    check_out_and_grad(
        (out_ref, out_lp_ref, out),
        *zip(grads_ref, grads_ref_lp, grads),
        fudge_factors=fudge_factors,
    )
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION,
"Does not support SDPA or pre-SM80 hardware",
)
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [4, 143, 2048])
@parametrize("seq_len_k", [4, 127, 579, 2048])
@parametrize("head_dim", [8, 203, 256])
@parametrize("is_causal", [True, False])
@parametrize("dropout_p", [0.0, 0.22, 0.48])
@parametrize("dtype", [torch.float16, torch.bfloat16])
@parametrize("scale", [None, "l1"])
@parametrize("enable_gqa", [True, False])
@parametrize("n_heads", [[16, 8], [10, 2]])
@tf32_enabled()
def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
scale: str, enable_gqa: bool, n_heads: list[int]):
if isSM8XDevice or isSM120Device and head_dim in range(193, 256 + 1):
self.skipTest("Flash attention on sm86, sm87, and sm89 for headdim > 192 currently disabled")
if is_causal and seq_len_q != seq_len_k:
self.skipTest("Flash V2 does not accept is_casual when seq_len_q != seq_len_k")
if TEST_WITH_ROCM and seq_len_q >= 1024 and seq_len_k >= 1024 and batch_size > 1:
torch.cuda.empty_cache() # Prevent memory fragmentation
if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
unittest.skip("Reference implementation OOM")
return
if TEST_WITH_CK and dropout_p != 0:
self.skipTest("CK does not support tensor format dropout masks")
if TEST_WITH_CK and head_dim > 128:
self.skipTest("CK does not support head dims over 128")
scale = scale if scale is None else (1 / head_dim)
num_heads_q = num_heads_kv = 4
if enable_gqa:
num_heads_q = n_heads[0]
num_heads_kv = n_heads[1]
query = torch.rand(batch_size, num_heads_q, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, num_heads_kv, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, num_heads_kv, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
is_dropout = dropout_p > 0.0
if not is_dropout:
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
out = F.scaled_dot_product_attention(
query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(
query_ref, key_ref, value_ref, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(
query, key, value, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
else:
# Problem: We pad sizes in the composite region of the top level SDPA. But we need the
# Debug mask when have dropout. So I am going to manually pad up here when testing dropout
q_padded, q_og_size = pad_last_dim(query, 8)
k_padded, k_og_size = pad_last_dim(key, 8)
v_padded, v_og_size = pad_last_dim(value, 8)
# scale needs to be calculated on the og head_size
if scale is None:
scale = 1 / math.sqrt(q_og_size)
output_tuple = torch.ops.aten._scaled_dot_product_flash_attention(
q_padded, k_padded, v_padded, dropout_p=dropout_p, is_causal=is_causal, scale=scale, return_debug_mask=is_dropout)
out = output_tuple[0]
out = out[..., :v_og_size]
# Build dropout_mask
dbug_mask = output_tuple[-1]
query_padding_mask = torch.ones(
batch_size, seq_len_q, device=device, dtype=torch.bool)
key_padding_mask = torch.ones(
batch_size, seq_len_k, device=device, dtype=torch.bool)
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, seq_len_q, seq_len_k, query_padding_mask, key_padding_mask,
causal=is_causal)[:, :, :seq_len_q, :seq_len_k]
dropout_mask = softmax_mask >= 0
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
scale=scale, dropout_mask=dropout_mask, enable_gqa=enable_gqa)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=dropout_mask, enable_gqa=enable_gqa)[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
# backward for flash attention on sm86, sm87, and sm89 for headdim >= 193 currently disabled
if isSM8XDevice or isSM120Device and head_dim in range(193, 256):
self.assertRaises(RuntimeError, lambda: out.backward(upstream_grad))
return
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
fudge_factors = {
'out': 4,
'grad_query': 180.0,
'grad_key': 16,
'grad_value': 4,
}
if TEST_WITH_ROCM:
fudge_factors['grad_value'] = 6.0
if TEST_WITH_CK:
fudge_factors['out'] = 5.0
fudge_factors['grad_key'] = 145.0
fudge_factors['grad_query'] = 855.0 # ck min = 855.0
if seq_len_k >= 1024:
fudge_factors['grad_key'] = 70.0
if seq_len_k >= 2048:
fudge_factors['grad_key'] = 190.0
fudge_factors['grad_query'] = 1550.0 # NEW CK MIN
if seq_len_q >= 2048:
fudge_factors['grad_query'] = 1100.0
if dtype == torch.float32:
fudge_factors['grad_key'] = 90.0
else:
fudge_factors['out'] = 6.0
fudge_factors['grad_key'] = 45.0
fudge_factors['grad_query'] = 360.0
if seq_len_k >= 1024:
fudge_factors['grad_key'] = 70.0
if seq_len_k >= 2048:
fudge_factors['grad_key'] = 190.0
fudge_factors['grad_query'] = 650.0
if seq_len_q >= 2048:
fudge_factors['grad_query'] = 1100.0
if dtype == torch.float32:
fudge_factors['grad_key'] = 90.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors=fudge_factors,
)
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION,
"Does not support SDPA or pre-SM80 hardware",
)
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [256, 1024])
@parametrize("seq_len_k", [256, 1024])
@parametrize("head_dim", [32, 64])
@parametrize("is_causal", [True, False])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize("dtype", [torch.float16])
@parametrize("scale", [None, "l1"])
@parametrize("fused_kernel", PLATFORM_SPECIFIC_SDPA)
@tf32_enabled()
def test_fused_attention_vs_math_ref_grads_cudagraph(self, device, batch_size: int,
seq_len_q: int, seq_len_k: int,
head_dim: int,
is_causal: bool,
dropout_p: float,
dtype: torch.dtype,
scale: str,
fused_kernel: SDPBackend):
def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, dropout_p, seed, offset, device=device):
mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, dropout_p, seed, offset)
# On ROCM _fill_mem_eff_dropout_mask fills 0.5 if (prng > p) otherwise -0.5 to the tensor
tester_p = dropout_p if not TEST_WITH_ROCM else 0.0
mask = (rand_uniform > tester_p).to(torch.float32)
return mask
def get_dropout_mask(output, fused_kernel, batch_size, n_heads, q_len, kv_len, dropout_p, device=device):
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
output_seed, output_offset = output_tuple[2], output_tuple[3]
output_seed = output_seed.item()
output_offset = output_offset.item()
return _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len,
dropout_p, output_seed, output_offset, device=device)
else:
# Build dropout_mask
dbug_mask = output_tuple[-1]
query_padding_mask = torch.ones(
batch_size, seq_len_q, device=device, dtype=torch.bool)
key_padding_mask = torch.ones(
batch_size, seq_len_k, device=device, dtype=torch.bool)
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, seq_len_q, seq_len_k, query_padding_mask, key_padding_mask,
causal=is_causal)[:, :, :seq_len_q, :seq_len_k]
dropout_mask = softmax_mask >= 0
return dropout_mask
if fused_kernel == SDPBackend.FLASH_ATTENTION and is_causal and seq_len_q != seq_len_k:
self.skipTest("Flash V2 does not accept is_casual when seq_len_q != seq_len_k")
seed = 42
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
fused_op = (torch.ops.aten._scaled_dot_product_efficient_attention
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION else torch.ops.aten._scaled_dot_product_flash_attention
if fused_kernel == SDPBackend.FLASH_ATTENTION else torch.ops.aten._scaled_dot_product_cudnn_attention)
higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
# Set the global seed before capture
torch.manual_seed(seed)
kwargs = {"dropout_p": dropout_p, "is_causal": is_causal}
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
kwargs["compute_log_sumexp"] = True
kwargs["attn_bias"] = None
if fused_kernel == SDPBackend.FLASH_ATTENTION:
kwargs['return_debug_mask'] = dropout_p > 0.0
if fused_kernel == SDPBackend.CUDNN_ATTENTION:
kwargs["compute_log_sumexp"] = True
kwargs["attn_bias"] = None
if "return_debug_mask" in kwargs:
kwargs.pop("return_debug_mask")
with torch.cuda.stream(s):
# Create real output
output_tuple = fused_op(query, key, value, **kwargs)
torch.cuda.current_stream().wait_stream(s)
out = output_tuple[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
out.backward(upstream_grad)
for x in (query, key, value):
x.grad = None
g = torch.cuda.CUDAGraph()
# Create real output
with torch.cuda.graph(g):
torch.rand_like(query, device=query.device) # test non-zero intragraph offset
# Create real output
output_tuple = fused_op(query, key, value, **kwargs)
assert all(not isinstance(o, torch.Tensor) or o.is_cuda for o in output_tuple)
g.replay()
out_first = output_tuple[0].clone()
g.replay()
out = output_tuple[0]
if dropout_p == 0.0:
self.assertEqual(out_first, out, atol=0, rtol=0)
else:
# replays produce different results
self.assertNotEqual(out_first, out)
with sdpa_kernel(backends=[SDPBackend.MATH]):
if dropout_p == 0.0:
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
dropout_p=dropout_p, is_causal=is_causal)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(query, key, value,
dropout_p=dropout_p, is_causal=is_causal)
# cuDNN attention doesn't support returning dropout mask
elif fused_kernel != SDPBackend.CUDNN_ATTENTION:
# Create the dropout_mask
dropout_mask = get_dropout_mask(output_tuple, fused_kernel, batch_size,
n_heads, seq_len_q, seq_len_k, dropout_p, device)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
dropout_mask=dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, dropout_p=dropout_p, is_causal=is_causal,
dropout_mask=dropout_mask)[0]
g1 = torch.cuda.CUDAGraph()
with torch.cuda.graph(g1):
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
g1.replay()
if fused_kernel != SDPBackend.CUDNN_ATTENTION or dropout_p == 0.0:
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
fudge_factors = {
'out': 3.0,
'grad_query': 110.0,
'grad_key': 8.0,
'grad_value': 3.0,
}
if TEST_WITH_ROCM:
fudge_factors['out'] = 6.0
fudge_factors['grad_value'] = 6.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors=fudge_factors
)
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
def test_fused_kernels_seq_len_1_inputs(self, device, fused_kernel):
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float16)
batch, num_heads, head_dim = 32, 16, 64
seq_lens = torch.randint(low=1, high=32, size=(batch,))
# make sure some seq_lens are 1
num_ones = 10
indices = torch.randint(low=0, high=batch, size=(num_ones,))
seq_lens.scatter_(0, indices, 1)
shape = SdpaShape(batch, num_heads, seq_lens.tolist(), head_dim)
query = rand_nested_tensor(shape)
key = rand_nested_tensor(shape)
value = rand_nested_tensor(shape)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[fused_kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(torch.float16), atol=1e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
@parametrize("expand_q_batch", [True, False])
@parametrize("expand_k_batch", [True, False])
@parametrize("expand_v_batch", [True, False])
@parametrize("expand_q_num_heads", [True, False])
@parametrize("expand_k_num_heads", [True, False])
@parametrize("expand_v_num_heads", [True, False])
def test_fused_kernels_nested_broadcasting(
self,
device,
kernel,
expand_q_batch,
expand_k_batch,
expand_v_batch,
expand_q_num_heads,
expand_k_num_heads,
expand_v_num_heads,
):
is_efficient = kernel == SDPBackend.EFFICIENT_ATTENTION
dtype = torch.float32 if is_efficient else torch.float16
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=dtype)
batch, num_heads, head_dim = 32, 8, 64
head_dim_v = 32 if is_efficient else head_dim
if TEST_WITH_ROCM and head_dim != head_dim_v:
self.skipTest("head_dim != head_dim_v unsupported on ROCm for now")
return
seq_lens_q = (torch.randint(low=1, high=5, size=(1,)).item()
if expand_q_batch
else torch.randint(low=1, high=32, size=(batch,)).tolist())
seq_lens_kv = (torch.randint(low=1, high=5, size=(1,)).item()
if (expand_k_batch or expand_v_batch)
else torch.randint(low=1, high=32, size=(batch,)).tolist())
batch_q = 1 if expand_q_batch else batch
batch_k = 1 if expand_k_batch else batch
batch_v = 1 if expand_v_batch else batch
# handle case where all batch_sizes are 1
batch = max(batch_q, batch_k, batch_v)
num_heads_q = 1 if expand_q_num_heads else num_heads
num_heads_k = 1 if expand_k_num_heads else num_heads
num_heads_v = 1 if expand_v_num_heads else num_heads
# handle case where all num_heads are 1
num_heads = max(num_heads_q, num_heads_k, num_heads_v)
q_shape = SdpaShape(batch_q, num_heads_q, seq_lens_q, head_dim)
k_shape = SdpaShape(batch_k, num_heads_k, seq_lens_kv, head_dim)
v_shape = SdpaShape(batch_v, num_heads_v, seq_lens_kv, head_dim_v)
query = rand_nested_tensor(q_shape)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
def _broadcast(t, batch_broadcasted, num_heads_broadcasted):
if batch_broadcasted and num_heads_broadcasted:
# (1, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor(
[t[0].expand(-1, num_heads, t.size(-1)) for _ in range(batch)], dtype=torch.float32)
elif batch_broadcasted:
# (1, seq_len, num_heads, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([t[0] for _ in range(batch)], dtype=torch.float32)
elif num_heads_broadcasted:
# (batch, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([x.expand(-1, num_heads, t.size(-1))
for x in t.unbind()], dtype=torch.float32)
else:
result = t.to(torch.float32)
return result
query_expanded = _broadcast(query, expand_q_batch, expand_q_num_heads).transpose(1, 2)
key_expanded = _broadcast(key, expand_k_batch, expand_k_num_heads).transpose(1, 2)
value_expanded = _broadcast(value, expand_v_batch, expand_v_num_heads).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key_expanded.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1.5e-3, rtol=1e-2)
@skipIfRocm(msg="Efficient Attention on ROCM does not support head_dim != head_dim_v for now.")
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
def test_fused_kernels_nested_broadcasting_query_dense(self, device):
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float32)
batch, num_heads, head_dim, head_dim_v = 32, 16, 64, 96
seq_lens = torch.randint(low=1, high=32, size=(batch,)).tolist()
q_shape = (1, 1, num_heads, head_dim)
k_shape = SdpaShape(batch, num_heads, seq_lens, head_dim)
v_shape = SdpaShape(batch, 1, seq_lens, head_dim_v)
# create a dense query
query = torch.randn(q_shape, device=device, dtype=torch.float32)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
# (1, 1, num_heads, head_dim) -> (batch, 1, num_heads, head_dim)
query_expanded = torch.nested.nested_tensor([query.squeeze(0) for _ in range(batch)]).transpose(1, 2)
# (batch, seq_lens, 1, head_dim) -> (batch, seq_lens, num_heads, head_dim)
value_expanded = torch.nested.nested_tensor(
[t.expand(-1, num_heads, head_dim_v) for t in value.unbind()]).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=1e-3, rtol=1e-2)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@parametrize("batch_size", [8, 32])
@parametrize("max_seq_len_q", [32, 256])
@parametrize("max_seq_len_kv", [32, 256])
@parametrize("head_dim", [8, 64])
@parametrize("dropout_p", [0.0, 0.1])
@parametrize("dtype", [torch.float16])
@parametrize("scale", [None, "l1"])
@parametrize("is_causal", [True, False])
def test_flash_attention_vs_math_ref_grads_nestedtensor(self, device, batch_size: int, max_seq_len_q: int, max_seq_len_kv: int,
head_dim: int, dropout_p: float, dtype: torch.dtype,
scale: str, is_causal: bool):
if is_causal:
# TODO we should support this
self.assertRaisesRegex(RuntimeError, "Nested tensors for query / key are not supported when is_causal=True")
return
scale = scale if scale is None else (1 / head_dim)
n_heads = 4
seq_lens_q = torch.randint(low=1, high=max_seq_len_q, size=(batch_size,))
# Set one entry to max length
seq_lens_q[torch.randint(0, batch_size, size=(1,))] = max_seq_len_q
seq_lens_kv = torch.randint(low=1, high=max_seq_len_kv, size=(batch_size,))
seq_lens_kv[torch.randint(0, batch_size, size=(1,))] = max_seq_len_kv
def rand_nt(sequence_list, num_heads, head_dim):
tensors = [torch.rand((num_heads, seq_len, head_dim)) for seq_len in sequence_list]
return torch.nested.nested_tensor(tensors, requires_grad=True, device=device, dtype=dtype)
query = rand_nt(seq_lens_q, n_heads, head_dim)
key = rand_nt(seq_lens_kv, n_heads, head_dim)
value = rand_nt(seq_lens_kv, n_heads, head_dim)
# Run the math kernel on low precision references
query_ref_lp = query.detach().clone().requires_grad_(True)
key_ref_lp = key.detach().clone().requires_grad_(True)
value_ref_lp = value.detach().clone().requires_grad_(True)
query_ref = query.detach().clone().to(torch.float32).requires_grad_(True)
key_ref = key.detach().clone().to(torch.float32).requires_grad_(True)
value_ref = value.detach().clone().to(torch.float32).requires_grad_(True)
is_dropout = dropout_p > 0.0
if not is_dropout:
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(
query_ref, key_ref, value_ref, is_causal=is_causal, scale=scale)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(
query_ref_lp, key_ref_lp, value_ref_lp, is_causal=is_causal, scale=scale)
else:
# Create real output
output_tuple = torch.ops.aten._scaled_dot_product_flash_attention(
query, key, value, dropout_p=dropout_p, is_causal=is_causal,
scale=scale, return_debug_mask=is_dropout)
out = output_tuple[0]
dbug_mask = output_tuple[-1]
query_padding_mask = torch.arange(max_seq_len_q).unsqueeze(0).expand(
batch_size, max_seq_len_q
) < seq_lens_q.unsqueeze(-1)
query_padding_mask = query_padding_mask.to("cuda")
key_padding_mask = torch.arange(max_seq_len_kv).unsqueeze(0).expand(
batch_size, max_seq_len_kv
) < seq_lens_kv.unsqueeze(-1)
key_padding_mask = key_padding_mask.to("cuda")
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, max_seq_len_q, max_seq_len_kv, query_padding_mask, key_padding_mask, causal=is_causal)
dropout_mask = softmax_mask >= 0
nt_stack = []
for tensor_component in range(batch_size):
batch_stack = []
for head in range(n_heads):
batch_stack.append(dropout_mask[tensor_component, head,
0:seq_lens_q[tensor_component],
0:seq_lens_kv[tensor_component]].unsqueeze(0))
nt_stack.append(torch.cat(batch_stack))
nested_dropout_mask = torch.nested.nested_tensor(nt_stack)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p,
is_causal=is_causal, scale=scale, dropout_mask=nested_dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref_lp, key_ref_lp, value_ref_lp, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=nested_dropout_mask)[0]
upstream_grad = out.detach().clone().contiguous()
out.backward(upstream_grad)
out_ref.backward(upstream_grad.to(out_ref.dtype))
out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))
dropout_fudge_factor = 1.0 if dropout_p == 0.0 else 2.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
(query_ref, query_ref_lp, query),
(key_ref, key_ref_lp, key),
(value_ref, value_ref_lp, value),
fudge_factors={
'out': 1.5 * dropout_fudge_factor,
'grad_query': 12.0 * dropout_fudge_factor,
'grad_key': 1.5 * dropout_fudge_factor,
'grad_value': 2.0 * dropout_fudge_factor,
}
)
| TestSDPACudaOnly |
python | pyparsing__pyparsing | examples/statemachine/statemachine.py | {
"start": 8863,
"end": 11532
} | class ____:
"""An importer designed using the mechanism defined in :pep:`302`. I read
the PEP, and also used Doug Hellmann's PyMOTW article `Modules and
Imports`_, as a pattern.
.. _`Modules and Imports`: http://www.doughellmann.com/PyMOTW/sys/imports.html
Define a subclass that specifies a :attr:`suffix` attribute, and
implements a :meth:`process_filedata` method. Then call the classmethod
:meth:`register` on your class to actually install it in the appropriate
places in :mod:`sys`."""
scheme = "suffix"
suffix = None
path_entry = None
@classmethod
def trigger_url(cls):
if cls.suffix is None:
raise ValueError(f"{cls.__name__}.suffix is not set")
return f"suffix:{cls.suffix}"
@classmethod
def register(cls):
sys.path_hooks.append(cls)
sys.path.append(cls.trigger_url())
def __init__(self, path_entry):
pr = urlparse(str(path_entry))
if pr.scheme != self.scheme or pr.path != self.suffix:
raise ImportError()
self.path_entry = path_entry
self._found = {}
def checkpath_iter(self, fullname):
for dirpath in sys.path:
# if the value in sys.path_importer_cache is None, then this
# path *should* be imported by the builtin mechanism, and the
# entry is thus a path to a directory on the filesystem;
# if it's not None, then some other importer is in charge, and
# it probably isn't even a filesystem path
finder = sys.path_importer_cache.get(dirpath)
if isinstance(finder, (type(None), importlib.machinery.FileFinder)):
checkpath = os.path.join(dirpath, "{}.{}".format(fullname, self.suffix))
yield checkpath
def find_module(self, fullname, path=None):
for checkpath in self.checkpath_iter(fullname):
if os.path.isfile(checkpath):
self._found[fullname] = checkpath
return self
return None
def load_module(self, fullname):
assert fullname in self._found
if fullname in sys.modules:
module = sys.modules[fullname]
else:
sys.modules[fullname] = module = types.ModuleType(fullname)
data = None
with open(self._found[fullname]) as f:
data = f.read()
module.__dict__.clear()
module.__file__ = self._found[fullname]
module.__name__ = fullname
module.__loader__ = self
self.process_filedata(module, data)
return module
def process_filedata(self, module, data):
pass
| SuffixImporter |
python | apache__airflow | airflow-ctl/src/airflowctl/ctl/cli_config.py | {
"start": 6172,
"end": 9224
} | class ____(argparse.Action):
"""Custom action to prompt for password input."""
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
values = getpass.getpass()
setattr(namespace, self.dest, values)
# Common Positional Arguments
ARG_FILE = Arg(
flags=("file",),
metavar="FILEPATH",
help="File path to read from or write to. "
"For import commands, it is a file to read from. For export commands, it is a file to write to.",
)
ARG_OUTPUT = Arg(
(
"--output",
"-o",
),
help="Output format. Allowed values: json, yaml, plain, table (default: json)",
metavar="(table, json, yaml, plain)",
choices=("table", "json", "yaml", "plain"),
default="json",
type=str,
)
# Authentication arguments
ARG_AUTH_URL = Arg(
flags=("--api-url",),
type=str,
default="http://localhost:8080",
dest="api_url",
help="The URL of the metadata database API",
)
ARG_AUTH_TOKEN = Arg(
flags=("--api-token",),
type=str,
dest="api_token",
help="The token to use for authentication",
)
ARG_AUTH_ENVIRONMENT = Arg(
flags=("-e", "--env"),
type=str,
default="production",
help="The environment to run the command in",
)
ARG_AUTH_USERNAME = Arg(
flags=("--username",),
type=str,
dest="username",
help="The username to use for authentication",
)
ARG_AUTH_PASSWORD = Arg(
flags=("--password",),
type=str,
dest="password",
help="The password to use for authentication",
action=Password,
nargs="?",
)
# Dag Commands Args
ARG_DAG_ID = Arg(
flags=("--dag-id",),
type=str,
dest="dag_id",
help="The DAG ID of the DAG to pause or unpause",
)
# Variable Commands Args
ARG_VARIABLE_ACTION_ON_EXISTING_KEY = Arg(
flags=("-a", "--action-on-existing-key"),
type=str,
default="overwrite",
help="Action to take if we encounter a variable key that already exists.",
choices=("overwrite", "fail", "skip"),
)
# Config arguments
ARG_CONFIG_SECTION = Arg(
flags=("--section",),
type=str,
dest="section",
help="The section of the configuration",
)
ARG_CONFIG_OPTION = Arg(
flags=("--option",),
type=str,
dest="option",
help="The option of the configuration",
)
ARG_CONFIG_IGNORE_SECTION = Arg(
flags=("--ignore-section",),
type=str,
dest="ignore_section",
help="The configuration section being ignored",
)
ARG_CONFIG_IGNORE_OPTION = Arg(
flags=("--ignore-option",),
type=str,
dest="ignore_option",
help="The configuration option being ignored",
)
ARG_CONFIG_VERBOSE = Arg(
flags=(
"-v",
"--verbose",
),
help="Enables detailed output, including the list of ignored sections and options",
default=False,
action="store_true",
)
# Version Command Args
ARG_REMOTE = Arg(
flags=("--remote",),
help="Fetch the Airflow version in remote server, otherwise only shows the local airflowctl version",
default=False,
action="store_true",
)
| Password |
python | Textualize__textual | tests/test_content_switcher.py | {
"start": 214,
"end": 5202
} | class ____(App[None]):
def __init__(self, initial: str | None = None) -> None:
super().__init__()
self._initial = initial
def compose(self) -> ComposeResult:
with ContentSwitcher(initial=self._initial):
for n in range(5):
yield Widget(id=f"w{n}")
async def test_no_initial_display() -> None:
"""Test starting a content switcher with nothing shown."""
async with SwitcherApp().run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current is None
assert all(
not child.display for child in pilot.app.query_one(ContentSwitcher).children
)
assert pilot.app.query_one(ContentSwitcher).visible_content is None
async def test_initial_display() -> None:
"""Test starting a content switcher with a widget initially shown."""
async with SwitcherApp("w3").run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current == "w3"
for child in pilot.app.query_one(ContentSwitcher).children:
assert child.display is (child.id == "w3")
assert pilot.app.query_one(
ContentSwitcher
).visible_content is pilot.app.query_one("#w3")
async def test_no_initial_display_then_set() -> None:
"""Test starting a content switcher with nothing shown then setting the display."""
async with SwitcherApp().run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current is None
assert all(
not child.display for child in pilot.app.query_one(ContentSwitcher).children
)
assert pilot.app.query_one(ContentSwitcher).visible_content is None
pilot.app.query_one(ContentSwitcher).current = "w3"
assert pilot.app.query_one(ContentSwitcher).current == "w3"
for child in pilot.app.query_one(ContentSwitcher).children:
assert child.display is (child.id == "w3")
assert pilot.app.query_one(
ContentSwitcher
).visible_content is pilot.app.query_one("#w3")
async def test_initial_display_then_change() -> None:
"""Test starting a content switcher with a widget initially shown then changing it."""
async with SwitcherApp("w3").run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current == "w3"
for child in pilot.app.query_one(ContentSwitcher).children:
assert child.display is (child.id == "w3")
assert pilot.app.query_one(
ContentSwitcher
).visible_content is pilot.app.query_one("#w3")
pilot.app.query_one(ContentSwitcher).current = "w2"
assert pilot.app.query_one(ContentSwitcher).current == "w2"
for child in pilot.app.query_one(ContentSwitcher).children:
assert child.display is (child.id == "w2")
assert pilot.app.query_one(
ContentSwitcher
).visible_content is pilot.app.query_one("#w2")
async def test_initial_display_then_hide() -> None:
"""Test starting a content switcher with a widget initially shown then hide all."""
async with SwitcherApp("w3").run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current == "w3"
for child in pilot.app.query_one(ContentSwitcher).children:
assert child.display is (child.id == "w3")
pilot.app.query_one(ContentSwitcher).current = None
assert pilot.app.query_one(ContentSwitcher).current is None
assert all(
not child.display for child in pilot.app.query_one(ContentSwitcher).children
)
@pytest.mark.xfail(
reason="The expected exception doesn't appear to make it to pytest -- perhaps related to https://github.com/Textualize/textual/issues/1972"
)
async def test_initial_display_unknown_id() -> None:
"""Test setting an initial display to an unknown widget ID."""
with pytest.raises(NoMatches):
async with SwitcherApp("does-not-exist").run_test():
pass
async def test_set_current_to_unknown_id() -> None:
"""Test attempting to switch to an unknown widget ID."""
async with SwitcherApp().run_test() as pilot:
assert pilot.app.query_one(ContentSwitcher).current is None
assert all(
not child.display for child in pilot.app.query_one(ContentSwitcher).children
)
with pytest.raises(NoMatches):
pilot.app.query_one(ContentSwitcher).current = "does-not-exist"
async def test_add_content() -> None:
async with SwitcherApp().run_test() as pilot:
switcher = pilot.app.query_one(ContentSwitcher)
await switcher.add_content(Widget(id="foo"))
assert not switcher.query_one("#foo").display
await switcher.add_content(Widget(), id="bar", set_current=True)
assert not switcher.query_one("#foo").display
assert switcher.query_one("#bar").display
assert switcher.current == "bar"
with pytest.raises(ValueError):
switcher.add_content(Widget())
| SwitcherApp |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py | {
"start": 4949,
"end": 6038
} | class ____(Executor):
pass
def test_flow_yaml_override_with_protocol():
from jina.enums import ProtocolType
path = os.path.join(
cur_dir.parent.parent.parent, 'yaml/examples/faiss/flow-index.yml'
)
f1 = Flow.load_config(path)
assert f1.protocol == ProtocolType.GRPC
f2 = Flow.load_config(path, uses_with={'protocol': 'http'})
assert f2.protocol == ProtocolType.HTTP
f3 = Flow.load_config(path, uses_with={'protocol': 'websocket'})
assert f3.protocol == ProtocolType.WEBSOCKET
@pytest.mark.skipif('GITHUB_WORKFLOW' in os.environ, reason='no specific port test in CI')
@pytest.mark.parametrize(
'yaml_file',
['yaml/flow_with_gateway.yml', 'yaml/test-flow-custom-gateway-nested-config.yml'],
)
def test_load_flow_with_gateway(yaml_file):
path = os.path.join(cur_dir.parent.parent.parent, yaml_file)
flow = Flow.load_config(
path,
)
with flow:
# protocol and port are overridden by the gateway configuration
assert flow.protocol == ProtocolType.HTTP
assert flow.port == 12344
| DummyEncoder |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 140649,
"end": 150918
} | class ____:
"""
Implement the heuristic to select the tiling factors and tiling indices.
In the future, we can implement advanced heuristic in a subclass.
"""
def select_tiling(
self,
fn_list,
var_sizes_list,
) -> tuple[list[int], list[int]]:
# TODO(jgong5): support alternative tiling factors and data types
loop_bodies = _get_loop_body(fn_list)
all_dtypes = _get_dtype_from_loopbodies(loop_bodies)
assert all_dtypes
if any(dtype not in VECTORIZABLE_DTYPES for dtype in all_dtypes):
return [], []
dtype = torch.float
_lowp_fp_dtype = get_loop_body_lowp_fp(loop_bodies[0])[0]
if _lowp_fp_dtype and all(
(get_loop_body_lowp_fp(loop_body)[0] == _lowp_fp_dtype)
for loop_body in loop_bodies[1:]
):
dtype = _lowp_fp_dtype
tiling_factor = cpu_vec_isa.pick_vec_isa().nelements(dtype=dtype)
tiling_indices = self._select_tiling_indices(
fn_list, var_sizes_list, tiling_factor
)
if tiling_indices:
group, reduction_group = max(
var_sizes_list, key=lambda sizes: len(sizes[1])
)
call_ranges = tuple(group) + tuple(reduction_group)
if config.cpp.enable_tiling_heuristics:
def _try_get_stride(
index,
itervars,
tiling_factor,
tiling_indices,
):
itervar = itervars[tiling_indices[0]]
stride = stride_at_vec_range(index, itervar, tiling_factor)
return stride if stride.is_number else None
def _update_negative_op_count(
node_name, non_contig_indexing_op_counter
):
if node_name not in non_contig_indexing_op_counter:
non_contig_indexing_op_counter[node_name] = 1
else:
non_contig_indexing_op_counter[node_name] += 1
def _is_valid_indices(
itervars,
tiling_indices,
):
return (
len(tiling_indices) == 1
and len(itervars) > 0
and (
tiling_indices[0]
if tiling_indices[0] >= 0
else tiling_indices[0] + len(itervars)
)
< len(itervars)
)
itervars = [
sympy_index_symbol_with_prefix(SymT.XBLOCK, n)
for n in range(len(call_ranges))
]
reduction_depth = len(group)
vars, reduction_vars = (
itervars[:reduction_depth],
itervars[reduction_depth:],
)
op_counter: dict[str, int] = {}
# ops may cause overhead with vectorization, like non-contiguous
# index_expr, load, store
non_contig_indexing_op_counter: dict[str, int] = {}
for _body in loop_bodies:
sub_blocks = [_body.root_block] + list(_body.subblocks.values())
for sub_block in sub_blocks:
for _node in sub_block.graph.nodes:
if _node.target in ["index_expr", "load", "store"]:
# get the index and replace prefix from z to x
arg_idx = 1 if _node.target == "index_expr" else 2
index = sub_block.body.indexing_from_args(
(vars, reduction_vars)
)[_node.args[arg_idx].args[0]]
if _is_valid_indices(itervars, tiling_indices):
stride = _try_get_stride(
index, itervars, tiling_factor, tiling_indices
)
if (
stride is None
if _node.target == "index_expr"
else stride not in [0, 1]
):
_update_negative_op_count(
_node.target, non_contig_indexing_op_counter
)
if isinstance(_node.target, str) and not (
_node.target.startswith("masked_subblock")
or _node.target
in ["ops", "output", "constant", "get_index"]
):
if _node.target not in op_counter:
op_counter[_node.target] = 1
else:
op_counter[_node.target] += 1
op_num = sum(op_counter.values())
non_contig_indexing_op_num = sum(
non_contig_indexing_op_counter.values()
)
ratio_threshold = 0.12
quantity_threshold = 35
if non_contig_indexing_op_num >= quantity_threshold or (
op_num > 0
and non_contig_indexing_op_num / op_num >= ratio_threshold
):
# Too many non-contiguous load/store/index_expr which hurts the
# vectorization performance. Disable vectorization when exceeding
# the thresholds.
return [], []
if (
not reduction_group
and group
and len(tiling_indices) == 1
and not has_free_symbols(
[
group[tiling_indices[0]],
]
)
and group[tiling_indices[0]] < tiling_factor / 4
and op_num < 10
):
# We found that when the number of elements in the inner loop range is
# relatively small(< tiling_factor / 4) and the number of operations is
# not large(< 10), vectorization is not efficient.
# And found that `#pragma GCC ivdep` has better performance than
# `#pragma omp simd simdlen(8)` for these cases.
return [], []
if dtype in DTYPE_LOWP_FP:
# For lower precision data type, if the call_range is not long enough,
# use tiling_factor // 2 for better performance
factor_lowp = cpu_vec_isa.pick_vec_isa().nelements(dtype=dtype)
for tiling_indice in tiling_indices:
if tiling_indice < 0:
tiling_indice = tiling_indice + len(call_ranges)
if tiling_indice < 0 or tiling_indice >= len(call_ranges):
continue
if has_free_symbols(call_ranges):
call_range = V.graph.sizevars.size_hint(
call_ranges[tiling_indice], fallback=0
)
if call_range < factor_lowp:
V.graph.sizevars.check_lt(call_range, factor_lowp) # type: ignore[arg-type]
tiling_factor = factor_lowp // 2
break
elif call_ranges[tiling_indice] < factor_lowp:
tiling_factor = factor_lowp // 2
break
if len(tiling_indices) == 1:
return [tiling_factor], tiling_indices
if len(tiling_indices) == 2:
return [tiling_factor, tiling_factor], tiling_indices
return [], []
def _select_tiling_indices(
self,
fn_list,
var_sizes_list,
tiling_factor,
):
all_index = []
for fn, var_sizes in zip(fn_list, var_sizes_list):
rw = dependencies.extract_read_writes(fn, *var_sizes)
all_index += [dep.index for dep in itertools.chain(rw.reads, rw.writes)]
contig_vars = OrderedSet[int]()
contig_vars_list = []
non_contig_stride_const = OrderedSet[int]()
non_contig_stride_other = OrderedSet[int]()
for index in all_index:
for var in index.free_symbols:
if not re.search(r"^d\d+$", var.name):
continue
stride = stride_at_vec_range(index, var, tiling_factor)
if stride == 0:
continue
elif stride == 1:
contig_vars.add(int(var.name[1:]))
contig_vars_list.append(int(var.name[1:]))
elif all(symbol_is_type(s, SymT.SIZE) for s in stride.free_symbols):
non_contig_stride_const.add(int(var.name[1:]))
else:
non_contig_stride_other.add(int(var.name[1:]))
contig_only = contig_vars - non_contig_stride_const - non_contig_stride_other
group, reduction_group = max(var_sizes_list, key=lambda sizes: len(sizes[1]))
num_itervars = len(group) + len(reduction_group)
if len(contig_vars) == 0:
# no contiguous vars
return [num_itervars - 1]
if contig_only:
return sorted(contig_only)[-1:]
contig_and_const_stride = (
contig_vars & non_contig_stride_const
) - non_contig_stride_other
contig_vars_sorted = sorted(contig_vars)
if (
len(contig_vars_sorted) == 2
and contig_vars_sorted[-1] in contig_and_const_stride
and contig_vars_sorted[-1] == num_itervars - 1
):
return contig_vars_sorted
return sorted(contig_vars_sorted, key=contig_vars_list.count)[-1:]
| TilingSelect |
python | kamyu104__LeetCode-Solutions | Python/regular-expression-matching.py | {
"start": 853,
"end": 1553
} | class ____(object):
# @return a boolean
def isMatch(self, s, p):
result = [[False for j in xrange(len(p) + 1)] for i in xrange(len(s) + 1)]
result[0][0] = True
for i in xrange(2, len(p) + 1):
if p[i-1] == '*':
result[0][i] = result[0][i-2]
for i in xrange(1,len(s) + 1):
for j in xrange(1, len(p) + 1):
if p[j-1] != '*':
result[i][j] = result[i-1][j-1] and (s[i-1] == p[j-1] or p[j-1] == '.')
else:
result[i][j] = result[i][j-2] or (result[i-1][j] and (s[i-1] == p[j-2] or p[j-2] == '.'))
return result[len(s)][len(p)]
# iteration
| Solution2 |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 17310,
"end": 18716
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
super().setUp()
self.handler = OpsgenieIssueAlertHandler()
self.detector = self.create_detector(project=self.project)
self.action = self.create_action(
type=Action.Type.OPSGENIE,
integration_id="1234567890",
config={"target_identifier": "team789", "target_type": ActionTarget.SPECIFIC},
data={"priority": "P1"},
)
def test_build_rule_action_blob(self) -> None:
"""Test that build_rule_action_blob creates correct Opsgenie action data"""
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": "sentry.integrations.opsgenie.notify_action.OpsgenieNotifyTeamAction",
"account": "1234567890",
"team": "team789",
"priority": "P1",
}
def test_build_rule_action_blob_no_priority(self) -> None:
"""Test that build_rule_action_blob handles missing priority"""
self.action.data = {}
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": "sentry.integrations.opsgenie.notify_action.OpsgenieNotifyTeamAction",
"account": "1234567890",
"team": "team789",
"priority": "",
}
| TestOpsgenieIssueAlertHandler |
python | spyder-ide__spyder | spyder/plugins/profiler/widgets/main_widget.py | {
"start": 1966,
"end": 24025
} | class ____(ShellConnectMainWidget):
"""Profiler widget."""
# PluginMainWidget API
ENABLE_SPINNER = True
SHOW_MESSAGE_WHEN_EMPTY = True
IMAGE_WHEN_EMPTY = "code-profiler"
MESSAGE_WHEN_EMPTY = _("Code not profiled yet")
DESCRIPTION_WHEN_EMPTY = _(
"Profile your code to explore which functions and methods took the "
"longest to run and were called the most, and find out where to "
"optimize it."
)
# Other
TIP_CALLERS = _("Show functions or modules that call the root item")
TIP_CALLEES = _("Show functions or modules called by the root item")
TIP_CALLERS_OR_CALLEES = _(
"Show functions or modules that call an item or are called by it"
)
# --- Signals
# ------------------------------------------------------------------------
sig_edit_goto_requested = Signal(str, int, str)
"""
This signal will request to open a file in a given row and column
using a code editor.
Parameters
----------
path: str
Path to file.
row: int
Cursor starting row position.
word: str
Word to select on given row.
"""
def __init__(self, name=None, plugin=None, parent=None):
super().__init__(name, plugin, parent)
# ---- PluginMainWidget API
# -------------------------------------------------------------------------
def get_title(self):
return _('Profiler')
def setup(self):
# ---- Toolbar actions
collapse_action = self.create_action(
ProfilerWidgetActions.Collapse,
text=_('Collapse'),
tip=_('Collapse one level up'),
icon=self.create_icon('collapse'),
triggered=self._collapse_tree,
)
expand_action = self.create_action(
ProfilerWidgetActions.Expand,
text=_('Expand'),
tip=_('Expand one level down'),
icon=self.create_icon('expand'),
triggered=self._expand_tree,
)
callers_or_callees_action = self.create_action(
ProfilerWidgetActions.CallersOrCallees,
text=self.TIP_CALLERS_OR_CALLEES,
tip=self.TIP_CALLERS_OR_CALLEES,
icon=self.create_icon("callers_or_callees"),
toggled=self._toggle_callers_or_callees,
)
slow_local_action = self.create_action(
ProfilerWidgetActions.SlowLocal,
text=_("Show items with large local time"),
tip=_('Show items with large local time'),
icon=self.create_icon('slow'),
toggled=self._slow_local_tree,
)
toggle_builtins_action = self.create_action(
ProfilerWidgetActions.ToggleBuiltins,
text=_("Hide calls to external libraries"),
tip=_('Hide calls to external libraries'),
icon=self.create_icon('hide'),
toggled=self._toggle_builtins,
)
save_action = self.create_action(
ProfilerWidgetActions.SaveData,
text=_("Save data"),
tip=_('Save profiling data'),
icon=self.create_icon('filesave'),
triggered=self._save_data,
)
load_action = self.create_action(
ProfilerWidgetActions.LoadData,
text=_("Load data"),
tip=_('Load profiling data for comparison'),
icon=self.create_icon('fileimport'),
triggered=self._load_data,
)
clear_action = self.create_action(
ProfilerWidgetActions.Clear,
text=_("Clear comparison"),
tip=_("Clear comparison"),
icon=self.create_icon('editdelete'),
triggered=self._clear,
)
search_action = self.create_action(
ProfilerWidgetActions.Search,
text=_("Search"),
icon=self.create_icon('find'),
toggled=self._toggle_finder,
register_shortcut=True
)
stop_action = self.create_action(
ProfilerWidgetActions.Stop,
text=_("Stop profiling"),
icon=self.create_icon('stop_profile'),
triggered=self._stop_profiling,
)
# This needs to be workedd out better because right now is confusing
# and kind of unnecessary
# undo_action = self.create_action(
# ProfilerWidgetActions.Undo,
# text=_("Previous View"),
# icon=self.create_icon('previous'),
# triggered=self._undo,
# register_shortcut=True
# )
# redo_action = self.create_action(
# ProfilerWidgetActions.Redo,
# text=_("Next View"),
# icon=self.create_icon('next'),
# triggered=self._redo,
# register_shortcut=True
# )
# home_action = self.create_action(
# ProfilerWidgetActions.Home,
# text=_("Reset tree"),
# tip=_('Go back to full tree'),
# icon=self.create_icon('home'),
# triggered=self._home_tree,
# )
# ---- Main Toolbar
main_toolbar = self.get_main_toolbar()
# To be added later
# for action in [undo_action, redo_action, home_action]:
# self.add_item_to_toolbar(
# action,
# toolbar=main_toolbar,
# section=ProfilerWidgetMainToolbarSections.BrowseView,
# )
for action in [collapse_action, expand_action]:
self.add_item_to_toolbar(
action,
toolbar=main_toolbar,
section=ProfilerWidgetMainToolbarSections.ExpandCollapse,
)
for action in [
slow_local_action,
toggle_builtins_action,
callers_or_callees_action,
search_action
]:
self.add_item_to_toolbar(
action,
toolbar=main_toolbar,
section=ProfilerWidgetMainToolbarSections.ChangeView,
)
self.add_item_to_toolbar(
stop_action,
toolbar=main_toolbar,
section=ProfilerWidgetMainToolbarSections.Stop,
)
# ---- Corner widget
for action in [save_action, load_action, clear_action]:
self.add_corner_widget(action, before=self._options_button)
# ---- Context menu actions
show_callees_action = self.create_action(
ProfilerWidgetContextMenuActions.ShowCallees,
_("Show functions or modules called by this item"),
icon=self.create_icon('callees'),
triggered=self._show_callees
)
show_callers_action = self.create_action(
ProfilerWidgetContextMenuActions.ShowCallers,
_("Show functions or modules that call this item"),
icon=self.create_icon('callers'),
triggered=self._show_callers
)
goto_definition_action = self.create_action(
ProfilerWidgetContextMenuActions.GotoDefinition,
_("Go to definition"),
icon=self.create_icon("transparent"),
triggered=self._goto_definition
)
self._context_menu = self.create_menu(
ProfilerWidgetMenus.PopulatedContextMenu
)
for item in [show_callers_action, show_callees_action]:
self.add_item_to_menu(
item,
menu=self._context_menu,
section=ProfilerContextMenuSections.Locals,
)
self.add_item_to_menu(
goto_definition_action,
menu=self._context_menu,
section=ProfilerContextMenuSections.Other,
)
def update_actions(self):
"""Update actions."""
widget = self.current_widget()
search_action = self.get_action(ProfilerWidgetActions.Search)
callers_or_callees_action = self.get_action(
ProfilerWidgetActions.CallersOrCallees
)
toggle_builtins_action = self.get_action(
ProfilerWidgetActions.ToggleBuiltins
)
slow_local_action = self.get_action(ProfilerWidgetActions.SlowLocal)
stop_action = self.get_action(ProfilerWidgetActions.Stop)
widget_inactive = (
widget is None or self.is_current_widget_error_message()
)
if widget_inactive:
search = False
callers_or_callees_enabled = False
ignore_builtins = False
show_slow = False
stop = False
self.stop_spinner()
else:
search = widget.finder_is_visible()
callers_or_callees_enabled = widget.callers_or_callees_enabled
ignore_builtins = widget.ignore_builtins
show_slow = widget.show_slow
stop = widget.is_profiling
toggle_builtins_action.setChecked(ignore_builtins)
stop_action.setEnabled(stop)
# Showing callers/callees can't be combined with slow locals and search
# because they give different views, so we need to disable them.
if callers_or_callees_enabled:
# Automatically toggle the action
callers_or_callees_action.setEnabled(True)
callers_or_callees_action.setChecked(True)
# This prevents an additional call to update_actions because
# refresh_tree emits at the end sig_refresh
with signals_blocked(widget):
widget.refresh_tree()
# Adjust button's tooltip and icon
show_callers = widget.inverted_tree
callers_or_callees_action.setToolTip(
self.TIP_CALLERS if show_callers else self.TIP_CALLEES
)
callers_or_callees_action.setIcon(
ima.icon("callers" if show_callers else "callees")
)
# Disable slow locals
widget.show_slow = False
with signals_blocked(slow_local_action):
slow_local_action.setChecked(False)
slow_local_action.setEnabled(False)
# Disable search and hide finder widget
with signals_blocked(search_action):
search_action.setChecked(False)
search_action.setEnabled(False)
with signals_blocked(widget.finder):
widget.finder.set_visible(False)
# We expand the tree so that users can easily inspect callers or
# callees
self._expand_tree()
else:
search_action.setChecked(search)
slow_local_action.setChecked(show_slow)
# Recreate custom view if we're showing slow locals or searching
# for something
if not widget_inactive and widget.recreate_custom_view:
# Reset state for next time
widget.recreate_custom_view = False
if search:
search_text = widget.finder_text()
if search_text:
widget.do_find(search_text)
elif show_slow:
self._slow_local_tree(True)
if callers_or_callees_action.isEnabled():
# Change icon and tooltip when the button is inactive
callers_or_callees_action.setToolTip(
self.TIP_CALLERS_OR_CALLEES
)
callers_or_callees_action.setIcon(
ima.icon("callers_or_callees")
)
# Untoggle the button
with signals_blocked(callers_or_callees_action):
callers_or_callees_action.setChecked(False)
callers_or_callees_action.setEnabled(False)
if not widget_inactive:
if widget.is_profiling:
self.start_spinner()
else:
self.stop_spinner()
# Home, undo and redo are disabled for now because they are confusing
# and kind of unnecessary
# can_redo = False
# can_undo = False
tree_empty = True
can_clear = False
if not widget_inactive:
tree_empty = widget.profdata is None
# can_undo = len(widget.data_tree.history) > 1
# can_redo = len(widget.data_tree.redo_history) > 0
can_clear = widget.compare_data is not None
for action_name in [
ProfilerWidgetActions.Collapse,
ProfilerWidgetActions.Expand,
ProfilerWidgetActions.ToggleBuiltins,
# ProfilerWidgetActions.Home,
ProfilerWidgetActions.SlowLocal,
ProfilerWidgetActions.SaveData,
ProfilerWidgetActions.LoadData,
ProfilerWidgetActions.Search,
]:
action = self.get_action(action_name)
if action_name in [
ProfilerWidgetActions.SlowLocal,
ProfilerWidgetActions.Search,
]:
action.setEnabled(
not tree_empty and not callers_or_callees_enabled
)
elif action_name == ProfilerWidgetActions.LoadData:
action.setEnabled(not widget_inactive)
else:
action.setEnabled(not tree_empty)
# undo_action = self.get_action(ProfilerWidgetActions.Undo)
# redo_action = self.get_action(ProfilerWidgetActions.Redo)
# undo_action.setEnabled(can_undo)
# redo_action.setEnabled(can_redo)
clear_action = self.get_action(ProfilerWidgetActions.Clear)
clear_action.setEnabled(can_clear)
# ---- ShellConnectPluginMixin API
# -------------------------------------------------------------------------
def create_new_widget(self, shellwidget):
"""Create new profiler widget."""
widget = ProfilerSubWidget(self)
widget.sig_display_requested.connect(self._display_request)
widget.sig_refresh.connect(self.update_actions)
widget.set_context_menu(self._context_menu)
widget.sig_hide_finder_requested.connect(self._hide_finder)
widget.sig_show_empty_message_requested.connect(
self.switch_empty_message
)
shellwidget.register_kernel_call_handler(
"show_profile_file", widget.show_profile_buffer
)
shellwidget.register_kernel_call_handler(
"start_profiling", self._start_profiling
)
widget.on_kernel_ready_callback = functools.partial(
self._on_kernel_ready, widget
)
shellwidget.sig_kernel_is_ready.connect(
widget.on_kernel_ready_callback
)
widget.shellwidget = shellwidget
return widget
def close_widget(self, widget):
"""Close profiler widget."""
widget.sig_refresh.disconnect(self.update_actions)
widget.sig_display_requested.disconnect(self._display_request)
widget.sig_hide_finder_requested.disconnect(self._hide_finder)
# Unregister
widget.shellwidget.unregister_kernel_call_handler("show_profile_file")
widget.shellwidget.unregister_kernel_call_handler("start_profiling")
widget.shellwidget.sig_kernel_is_ready.disconnect(
widget.on_kernel_ready_callback
)
widget.setParent(None)
widget.close()
def switch_widget(self, widget, old_widget):
"""Switch widget."""
pass
def switch_empty_message(self, value: bool):
"""
Override this method to prevent hiding the empty message if profiling
finishes in another console but the current one has no content to show.
"""
widget = self.current_widget()
if widget is None:
return
if value:
self.show_empty_message()
else:
if widget.profdata is not None:
self.show_content_widget()
def current_widget(self) -> ProfilerSubWidget:
"""Override to add typing."""
return super().current_widget()
# ---- Private API
# -------------------------------------------------------------------------
def _start_profiling(self):
self.start_spinner()
stop_action = self.get_action(ProfilerWidgetActions.Stop)
stop_action.setEnabled(True)
widget = self.current_widget()
if widget is None:
return
widget.is_profiling = True
# Check if we're showing slow locals or searching for something to
# recreate the custom view after new results arrive.
if widget.show_slow or (
widget.finder_is_visible() and widget.finder_text()
):
widget.recreate_custom_view = True
# Reset state of callers/callees view because the new results couldn't
# contain the selected item.
widget.callers_or_callees_enabled = False
widget.inverted_tree = False
def _stop_profiling(self):
widget = self.current_widget()
if widget is None or self.is_current_widget_error_message():
return
if widget.is_profiling:
self.stop_spinner()
stop_action = self.get_action(ProfilerWidgetActions.Stop)
stop_action.setEnabled(False)
widget.shellwidget.request_interrupt_kernel()
widget.is_profiling = False
def _home_tree(self):
"""Show home tree."""
widget = self.current_widget()
if widget is None:
return
widget.home_tree()
def _toggle_callers_or_callees(self, state):
"""
Toggle filter for callers or callees.
Notes
-----
* The toogle state is handled automatically by update_actions.
* After users untoggle the button, they'll return to the initial view.
"""
widget = self.current_widget()
if widget is None:
return
if not state:
widget.callers_or_callees_enabled = False
widget.inverted_tree = False
self._home_tree()
def _collapse_tree(self):
self.current_widget().change_view(-1)
def _expand_tree(self):
self.current_widget().change_view(1)
def _toggle_builtins(self, state):
"""Toggle builtins."""
widget = self.current_widget()
if widget is None:
return
widget.ignore_builtins = state
widget.refresh_tree()
def _slow_local_tree(self, state):
"""Show items with large local times"""
widget = self.current_widget()
if widget is None:
return
widget.show_slow = state
if state:
widget.show_slow_items()
else:
self._home_tree()
def _undo(self):
"""Undo change."""
widget = self.current_widget()
if widget is None:
return
widget.undo()
def _redo(self):
"""Redo changes."""
widget = self.current_widget()
if widget is None:
return
widget.redo()
def _show_callers(self):
"""Show callers."""
widget = self.current_widget()
if widget is None:
return
widget.inverted_tree = True
widget.show_selected()
def _show_callees(self):
"""Show callees."""
widget = self.current_widget()
if widget is None:
return
widget.inverted_tree = False
widget.show_selected()
def _goto_definition(self):
widget = self.current_widget()
if widget is None:
return
item = widget.currentItem()
if osp.isfile(item.filename):
self.sig_edit_goto_requested.emit(
item.filename, item.line_number, ""
)
def _save_data(self):
"""Save data."""
widget = self.current_widget()
if widget is None:
return
title = _("Save profiler result")
filename, _selfilter = getsavefilename(
self,
title,
getcwd_or_home(),
_("Profiler result") + " (*.prof)",
)
extension = osp.splitext(filename)[1].lower()
if not extension:
# Needed to prevent trying to save a data file without extension
# See spyder-ide/spyder#19633
filename = filename + '.prof'
if filename:
widget.save_data(filename)
def _load_data(self):
"""Compare previous saved run with last run."""
widget = self.current_widget()
if widget is None:
return
filename, _selfilter = getopenfilename(
self,
_("Select script to compare"),
getcwd_or_home(),
_("Profiler result") + " (*.prof)",
)
if filename:
widget.compare(filename)
widget.home_tree()
self.update_actions()
def _clear(self):
"""Clear data in tree."""
widget = self.current_widget()
if widget is None:
return
widget.compare(None)
widget.home_tree()
self.update_actions()
def _display_request(self, widget):
"""
Display request from ProfilerDataTree.
Only display if this is the current widget.
"""
self.update_actions()
self._stop_profiling()
if (
self.current_widget() is widget
and self.get_conf("switch_to_plugin")
):
self.get_plugin().switch_to_plugin()
def _toggle_finder(self, checked):
"""Show or hide finder."""
widget = self.current_widget()
if widget is None:
return
widget.toggle_finder(checked)
def _hide_finder(self):
"""Hide finder."""
action = self.get_action(ProfilerWidgetActions.Search)
action.setChecked(False)
def _on_kernel_ready(self, widget: ProfilerSubWidget):
self._stop_profiling()
widget.set_pane_empty(True)
| ProfilerWidget |
python | sympy__sympy | sympy/physics/quantum/tests/test_anticommutator.py | {
"start": 947,
"end": 1304
} | class ____(Operator):
def _eval_anticommutator_Foo(self, foo):
return Integer(1)
def test_eval_commutator():
F = Foo('F')
B = Bar('B')
T = Tam('T')
assert AComm(F, B).doit() == 0
assert AComm(B, F).doit() == 0
assert AComm(F, T).doit() == 1
assert AComm(T, F).doit() == 1
assert AComm(B, T).doit() == B*T + T*B
| Tam |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py | {
"start": 1025,
"end": 4569
} | class ____:
"""Tests for the PrefixTree class initialization and basic tenant management."""
def test_initial_state(self, tree: PrefixTree) -> None:
"""Test the initial state of a new PrefixTree."""
assert tree.tenant_to_char_count == {}
assert tree.tenant_to_lru_tail == {}
assert tree.root is not None
assert tree.root.text == ""
assert tree.root.parent is None
assert tree.root.tenant_to_last_access_time == {}
assert tree.root.edge_label_to_child == {}
def test_add_tenant(self, tree: PrefixTree) -> None:
"""Test adding a new tenant via add_tenants."""
tree.add_tenants(["tenant_1"], 0)
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
def test_add_existing_tenant_noop(self, tree: PrefixTree) -> None:
"""Test that adding an existing tenant via add_tenants is a no-op."""
tree.add_tenants(["tenant_1"], 0)
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
tree.add_tenants(["tenant_1"], 0) # Add again
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root
assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
def test_add_multiple_tenants(self, tree: PrefixTree) -> None:
"""Test adding multiple tenants at once."""
tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
assert tree.tenant_to_char_count == {
"tenant_1": 0,
"tenant_2": 0,
"tenant_3": 0,
}
for tenant in ["tenant_1", "tenant_2", "tenant_3"]:
assert tree.tenant_to_lru_tail.get(tenant) == tree.root
assert tree.root.tenant_to_newer_node.get(tenant) is None
assert tree.root.tenant_to_older_node.get(tenant) is None
assert tree.root.tenant_to_last_access_time == {
"tenant_1": 0,
"tenant_2": 0,
"tenant_3": 0,
}
assert get_lru_texts_from_tree(tree, tenant) == [""]
def test_add_multiple_tenants_with_existing(self, tree: PrefixTree) -> None:
"""Test adding multiple tenants when some already exist."""
tree.add_tenants(["tenant_1"], 0)
assert tree.root.tenant_to_last_access_time == {"tenant_1": 0}
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert "tenant_1" in tree.tenant_to_lru_tail
# Add a mix of new and existing tenants
tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
# Existing tenants should remain unchanged
assert tree.root.tenant_to_last_access_time == {
"tenant_1": 0,
"tenant_2": 0,
"tenant_3": 0,
}
assert tree.tenant_to_char_count == {
"tenant_1": 0,
"tenant_2": 0,
"tenant_3": 0,
}
assert all(
tenant in tree.tenant_to_lru_tail
for tenant in ["tenant_1", "tenant_2", "tenant_3"]
)
| TestPrefixTreeInitialization |
python | kamyu104__LeetCode-Solutions | Python/reconstruct-itinerary.py | {
"start": 960,
"end": 1927
} | class ____(object):
def findItinerary(self, tickets):
"""
:type tickets: List[List[str]]
:rtype: List[str]
"""
def route_helper(origin, ticket_cnt, graph, ans):
if ticket_cnt == 0:
return True
for i, (dest, valid) in enumerate(graph[origin]):
if valid:
graph[origin][i][1] = False
ans.append(dest)
if route_helper(dest, ticket_cnt - 1, graph, ans):
return ans
ans.pop()
graph[origin][i][1] = True
return False
graph = collections.defaultdict(list)
for ticket in tickets:
graph[ticket[0]].append([ticket[1], True])
for k in graph.keys():
graph[k].sort()
origin = "JFK"
ans = [origin]
route_helper(origin, len(tickets), graph, ans)
return ans
| Solution2 |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 3538,
"end": 6839
} | class ____:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
self._M = np.asarray(M)
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
msg = "The input matrix must be symmetric positive semidefinite."
raise ValueError(msg)
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
msg = ("When `allow_singular is False`, the input matrix must be "
"symmetric positive definite.")
raise np.linalg.LinAlgError(msg)
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Save the eigenvector basis, and tolerance for testing support
self.eps = 1e3*eps
self.V = u[:, s <= eps]
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize attributes to be lazily computed.
self._pinv = None
def _support_mask(self, x):
"""
Check whether x lies in the support of the distribution.
"""
residual = np.linalg.norm(x @ self.V, axis=-1)
in_support = residual < self.eps
return in_support
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
| _PSD |
python | Textualize__textual | src/textual/scrollbar.py | {
"start": 7234,
"end": 13249
} | class ____(Widget):
renderer: ClassVar[Type[ScrollBarRender]] = ScrollBarRender
"""The class used for rendering scrollbars.
This can be overridden and set to a ScrollBarRender-derived class
in order to delegate all scrollbar rendering to that class. E.g.:
```
class MyScrollBarRender(ScrollBarRender): ...
app = MyApp()
ScrollBar.renderer = MyScrollBarRender
app.run()
```
Because this variable is accessed through specific instances
(rather than through the class ScrollBar itself) it is also possible
to set this on specific scrollbar instance to change only that
instance:
```
my_widget.horizontal_scrollbar.renderer = MyScrollBarRender
```
"""
DEFAULT_CLASSES = "-textual-system"
# Nothing to select in scrollbars
ALLOW_SELECT = False
def __init__(
self, vertical: bool = True, name: str | None = None, *, thickness: int = 1
) -> None:
self.vertical = vertical
self.thickness = thickness
self.grabbed_position: float = 0
super().__init__(name=name)
self.auto_links = False
window_virtual_size: Reactive[int] = Reactive(100)
window_size: Reactive[int] = Reactive(0)
position: Reactive[float] = Reactive(0)
mouse_over: Reactive[bool] = Reactive(False)
grabbed: Reactive[Offset | None] = Reactive(None)
def __rich_repr__(self) -> rich.repr.Result:
yield from super().__rich_repr__()
yield "window_virtual_size", self.window_virtual_size
yield "window_size", self.window_size
yield "position", self.position
if self.thickness > 1:
yield "thickness", self.thickness
def render(self) -> RenderableType:
assert self.parent is not None
styles = self.parent.styles
if self.grabbed:
background = styles.scrollbar_background_active
color = styles.scrollbar_color_active
elif self.mouse_over:
background = styles.scrollbar_background_hover
color = styles.scrollbar_color_hover
else:
background = styles.scrollbar_background
color = styles.scrollbar_color
if background.a < 1:
base_background, _ = self.parent.background_colors
background = base_background + background
color = background + color
scrollbar_style = Style.from_color(color.rich_color, background.rich_color)
if self.screen.styles.scrollbar_color.a == 0:
return self.renderer(vertical=self.vertical, style=scrollbar_style)
return self._render_bar(scrollbar_style)
def _render_bar(self, scrollbar_style: Style) -> RenderableType:
"""Get a renderable for the scrollbar with given style.
Args:
scrollbar_style: Scrollbar style.
Returns:
Scrollbar renderable.
"""
window_size = (
self.window_size if self.window_size < self.window_virtual_size else 0
)
virtual_size = self.window_virtual_size
return self.renderer(
virtual_size=ceil(virtual_size),
window_size=ceil(window_size),
position=self.position,
thickness=self.thickness,
vertical=self.vertical,
style=scrollbar_style,
)
def _on_hide(self, event: events.Hide) -> None:
if self.grabbed:
self.release_mouse()
self.grabbed = None
def _on_enter(self, event: events.Enter) -> None:
if event.node is self:
self.mouse_over = True
def _on_leave(self, event: events.Leave) -> None:
if event.node is self:
self.mouse_over = False
def action_scroll_down(self) -> None:
"""Scroll vertical scrollbars down, horizontal scrollbars right."""
if not self.grabbed:
self.post_message(ScrollDown() if self.vertical else ScrollRight())
def action_scroll_up(self) -> None:
"""Scroll vertical scrollbars up, horizontal scrollbars left."""
if not self.grabbed:
self.post_message(ScrollUp() if self.vertical else ScrollLeft())
def action_grab(self) -> None:
"""Begin capturing the mouse cursor."""
self.capture_mouse()
async def _on_mouse_down(self, event: events.MouseDown) -> None:
# We don't want mouse events on the scrollbar bubbling
event.stop()
async def _on_mouse_up(self, event: events.MouseUp) -> None:
if self.grabbed:
self.release_mouse()
self.grabbed = None
event.stop()
def _on_mouse_capture(self, event: events.MouseCapture) -> None:
if isinstance(self._parent, Widget):
self._parent.release_anchor()
self.grabbed = event.mouse_position
self.grabbed_position = self.position
def _on_mouse_release(self, event: events.MouseRelease) -> None:
self.grabbed = None
if self.vertical and isinstance(self.parent, Widget):
self.parent._check_anchor()
event.stop()
async def _on_mouse_move(self, event: events.MouseMove) -> None:
if self.grabbed and self.window_size:
x: float | None = None
y: float | None = None
if self.vertical:
virtual_size = self.window_virtual_size
y = self.grabbed_position + (
(event._screen_y - self.grabbed.y)
* (virtual_size / self.window_size)
)
else:
virtual_size = self.window_virtual_size
x = self.grabbed_position + (
(event._screen_x - self.grabbed.x)
* (virtual_size / self.window_size)
)
self.post_message(
ScrollTo(x=x, y=y, animate=not self.app.supports_smooth_scrolling)
)
event.stop()
async def _on_click(self, event: events.Click) -> None:
event.stop()
| ScrollBar |
python | facebook__pyre-check | api/connection.py | {
"start": 973,
"end": 1106
} | class ____(NamedTuple):
exit_code: int
errors: Optional[List[str]]
# pyre-ignore[33]: We don't have GADT's yet.
| PyreCheckResult |
python | neetcode-gh__leetcode | python/0347-top-k-frequent-elements.py | {
"start": 0,
"end": 464
} | class ____:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
count = {}
freq = [[] for i in range(len(nums) + 1)]
for n in nums:
count[n] = 1 + count.get(n, 0)
for n, c in count.items():
freq[c].append(n)
res = []
for i in range(len(freq) - 1, 0, -1):
res += freq[i]
if len(res) == k:
return res
# O(n)
| Solution |
python | getsentry__sentry | src/flagpole/conditions.py | {
"start": 6162,
"end": 7453
} | class ____(ConditionBase):
value: EqualsOperatorValueTypes
operator: str = dataclasses.field(default="not_equals")
def _operator_match(self, condition_property: Any, segment_name: str):
return not self._evaluate_equals(
condition_property=condition_property,
segment_name=segment_name,
)
OPERATOR_LOOKUP: Mapping[ConditionOperatorKind, type[ConditionBase]] = {
ConditionOperatorKind.IN: InCondition,
ConditionOperatorKind.NOT_IN: NotInCondition,
ConditionOperatorKind.CONTAINS: ContainsCondition,
ConditionOperatorKind.NOT_CONTAINS: NotContainsCondition,
ConditionOperatorKind.EQUALS: EqualsCondition,
ConditionOperatorKind.NOT_EQUALS: NotEqualsCondition,
}
def condition_from_dict(data: Mapping[str, Any]) -> ConditionBase:
operator_kind = ConditionOperatorKind(data.get("operator", "invalid"))
if operator_kind not in OPERATOR_LOOKUP:
valid = ", ".join(OPERATOR_LOOKUP.keys())
raise ValueError(f"The {operator_kind} is not a known operator. Choose from {valid}")
condition_cls = OPERATOR_LOOKUP[operator_kind]
return condition_cls(
property=str(data.get("property")), operator=operator_kind.value, value=data.get("value")
)
@dataclasses.dataclass
| NotEqualsCondition |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py | {
"start": 29593,
"end": 29675
} | class ____(Qwen2VLCausalLMOutputWithPast):
pass
| Qwen2_5_VLCausalLMOutputWithPast |
python | getsentry__sentry | src/sentry/data_secrecy/types.py | {
"start": 381,
"end": 567
} | class ____(StrEnum):
CACHE_MISS = "cache_miss"
NEGATIVE_CACHE = "negative_cache"
VALID_WINDOW = "valid_window"
EXPIRED_WINDOW = "expired_window"
@dataclass
| GrantCacheStatus |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 165831,
"end": 167052
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
token: str,
key: str,
start_date: str,
board_ids: Optional[list[str]] = None,
):
"""Airbyte Source for Trello.
Documentation can be found at https://docs.airbyte.com/integrations/sources/trello
Args:
name (str): The name of the destination.
token (str): Trello v API token. See the docs for instructions on how to generate it.
key (str): Trello API key. See the docs for instructions on how to generate it.
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
board_ids (Optional[List[str]]): IDs of the boards to replicate data from. If left empty, data from all boards to which you have access will be replicated.
"""
self.token = check.str_param(token, "token")
self.key = check.str_param(key, "key")
self.start_date = check.str_param(start_date, "start_date")
self.board_ids = check.opt_nullable_list_param(board_ids, "board_ids", str)
super().__init__("Trello", name)
| TrelloSource |
python | python__mypy | mypy/test/data.py | {
"start": 26955,
"end": 27091
} | class ____(NamedTuple):
lineno: int # 1-offset, inclusive
end_lineno: int # 1-offset, exclusive
lines: list[str]
| DataFileFix |
python | getsentry__sentry | src/sentry/tasks/llm_issue_detection/detection.py | {
"start": 1601,
"end": 1676
} | class ____(BaseModel):
issues: list[DetectedIssue]
| IssueDetectionResponse |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_input.py | {
"start": 895,
"end": 5807
} | class ____(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
# functions in the resulting MLIR output match the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for arguments.
# The outer layer of the index path indexes into the arguments.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf._user_specified_name = "y", tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_function_arity"]
@tf.function(input_signature=[
tf.TensorSpec([1], tf.float32),
tf.TensorSpec([2], tf.float32)
])
def f0000_function_arity(self, x, y):
return
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf._user_specified_name = "l", tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: %arg1: tensor<f32> {tf._user_specified_name = "l", tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_list_2_elements"]
@tf.function(input_signature=[[
tf.TensorSpec([], tf.float32),
tf.TensorSpec([], tf.float32),
]])
def f0001_list_2_elements(self, l):
return
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_dict_2_keys"]
@tf.function(input_signature=[{
'x': tf.TensorSpec([1], tf.float32),
'y': tf.TensorSpec([2], tf.float32),
}])
def f0002_dict_2_keys(self, d):
return
# Check index paths for dicts, where the keys are not in sorted order.
# The index path should be insensitive to the key order.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_dict_2_keys_out_of_order"]
@tf.function(input_signature=[{
'y': tf.TensorSpec([2], tf.float32),
'x': tf.TensorSpec([1], tf.float32),
}])
def f0003_dict_2_keys_out_of_order(self, d):
return
# Slightly stronger stress test of multiple dict keys.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "a"]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "b"]},
# CHECK-SAME: %arg2: tensor<3xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "c"]},
# CHECK-SAME: %arg3: tensor<4xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "x"]},
# CHECK-SAME: %arg4: tensor<5xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "y"]},
# CHECK-SAME: %arg5: tensor<6xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "z"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_dict_many_keys"]
@tf.function(input_signature=[{
'x': tf.TensorSpec([4], tf.float32),
'y': tf.TensorSpec([5], tf.float32),
'z': tf.TensorSpec([6], tf.float32),
'a': tf.TensorSpec([1], tf.float32),
'b': tf.TensorSpec([2], tf.float32),
'c': tf.TensorSpec([3], tf.float32),
}])
def f0004_dict_many_keys(self, d):
return
# Check a slightly more complex recursive structure.
# Note that list elements can have heterogenous types.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<1xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "x", 0]},
# CHECK-SAME: %arg1: tensor<2xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "x", 1]},
# CHECK-SAME: %arg2: tensor<3xf32> {tf._user_specified_name = "d", tf_saved_model.index_path = [0, "y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_more_complex_recursive_structure"]
@tf.function(input_signature=[{
'x': [tf.TensorSpec([1], tf.float32),
tf.TensorSpec([2], tf.float32)],
'y': tf.TensorSpec([3], tf.float32),
}])
def f0005_more_complex_recursive_structure(self, d):
return
if __name__ == '__main__':
common.do_test(TestModule)
| TestModule |
python | google__pytype | pytype/rewrite/overlays/enum_overlay_test.py | {
"start": 182,
"end": 997
} | class ____(test_utils.ContextfulTestBase):
def test_call(self):
# Simulate:
# class E(enum.Enum):
# X = 42
metaclass = cast(abstract.SimpleClass,
self.ctx.abstract_loader.load_value('enum', 'EnumMeta'))
base = cast(abstract.SimpleClass,
self.ctx.abstract_loader.load_value('enum', 'Enum'))
enum_cls = abstract.SimpleClass(
ctx=self.ctx,
name='E',
members={'X': self.ctx.consts[42]},
bases=(base,),
keywords={'metaclass': metaclass},
)
enum_overlay.transform_enum_class(self.ctx, enum_cls)
enum_member = enum_cls.members['X']
self.assertIsInstance(enum_member, abstract.BaseInstance)
self.assertEqual(enum_member.cls.name, 'E')
if __name__ == '__main__':
unittest.main()
| EnumMetaNewTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes11.py | {
"start": 648,
"end": 735
} | class ____(Mapping[int, int], Sequence[float]): ...
T = TypeVar("T")
S = TypeVar("S")
| F |
python | python__mypy | mypy/metastore.py | {
"start": 4492,
"end": 6598
} | class ____(MetadataStore):
def __init__(self, cache_dir_prefix: str) -> None:
# We check startswith instead of equality because the version
# will have already been appended by the time the cache dir is
# passed here.
if cache_dir_prefix.startswith(os.devnull):
self.db = None
return
os.makedirs(cache_dir_prefix, exist_ok=True)
self.db = connect_db(os.path.join(cache_dir_prefix, "cache.db"))
def _query(self, name: str, field: str) -> Any:
# Raises FileNotFound for consistency with the file system version
if not self.db:
raise FileNotFoundError()
cur = self.db.execute(f"SELECT {field} FROM files2 WHERE path = ?", (name,))
results = cur.fetchall()
if not results:
raise FileNotFoundError()
assert len(results) == 1
return results[0][0]
def getmtime(self, name: str) -> float:
mtime = self._query(name, "mtime")
assert isinstance(mtime, float)
return mtime
def read(self, name: str) -> bytes:
data = self._query(name, "data")
assert isinstance(data, bytes)
return data
def write(self, name: str, data: bytes, mtime: float | None = None) -> bool:
import sqlite3
if not self.db:
return False
try:
if mtime is None:
mtime = time.time()
self.db.execute(
"INSERT OR REPLACE INTO files2(path, mtime, data) VALUES(?, ?, ?)",
(name, mtime, data),
)
except sqlite3.OperationalError:
return False
return True
def remove(self, name: str) -> None:
if not self.db:
raise FileNotFoundError()
self.db.execute("DELETE FROM files2 WHERE path = ?", (name,))
def commit(self) -> None:
if self.db:
self.db.commit()
def list_all(self) -> Iterable[str]:
if self.db:
for row in self.db.execute("SELECT path FROM files2"):
yield row[0]
| SqliteMetadataStore |
python | scipy__scipy | scipy/special/tests/test_spherical_bessel.py | {
"start": 5434,
"end": 7313
} | class ____:
def test_spherical_in_exact(self):
# https://dlmf.nist.gov/10.49.E9
x = np.array([0.12, 1.23, 12.34, 123.45])
assert_allclose(spherical_in(2, x),
(1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))
def test_spherical_in_recurrence_real(self):
# https://dlmf.nist.gov/10.51.E4
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
(2*n + 1)/x*spherical_in(n, x))
def test_spherical_in_recurrence_complex(self):
# https://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
(2*n + 1)/x*spherical_in(n, x))
def test_spherical_in_inf_real(self):
# https://dlmf.nist.gov/10.52.E3
n = 5
x = np.array([-inf, inf])
assert_allclose(spherical_in(n, x), np.array([-inf, inf]))
def test_spherical_in_inf_complex(self):
# https://dlmf.nist.gov/10.52.E5
# Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
# this appears impossible to achieve because C99 regards any complex
# value with at least one infinite part as a complex infinity, so
# 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is
# the correct return value.
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))
def test_spherical_in_at_zero(self):
# https://dlmf.nist.gov/10.52.E1
# But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
| TestSphericalIn |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/exceptions.py | {
"start": 705,
"end": 1112
} | class ____(BuildBaseException):
default_message = _("Build application exception")
GENERIC_WITH_BUILD_ID = "build:app:generic-with-build-id"
UPLOAD_FAILED = "build:app:upload-failed"
BUILDS_DISABLED = "build:app:project-builds-disabled"
BUILD_DOCKER_UNKNOWN_ERROR = "build:app:docker:unknown-error"
BUILD_TERMINATED_DUE_INACTIVITY = "build:app:terminated-due-inactivity"
| BuildAppError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict23.py | {
"start": 128,
"end": 197
} | class ____(TypedDict):
a: Required[int]
b: NotRequired[str]
| TD1 |
python | walkccc__LeetCode | solutions/1191. K-Concatenation Maximum Sum/1191.py | {
"start": 0,
"end": 695
} | class ____:
def kConcatenationMaxSum(self, arr: list[int], k: int) -> int:
MOD = 1_000_000_007
sz = len(arr) * (1 if k == 1 else 2)
summ = sum(arr)
# The concatenated array will be [arr1, arr2, ..., arrk].
# If sum(arr) > 0 and k > 2, then arr2, ..., arr(k - 1) should be included.
# Equivalently, maxSubarraySum is from arr1 and arrk.
if summ > 0 and k > 2:
return (self._kadane(arr, sz) + summ * (k - 2)) % MOD
return self._kadane(arr, sz) % MOD
def _kadane(self, arr: list[int], sz: int) -> int:
ans = 0
summ = 0
for i in range(sz):
a = arr[i % len(arr)]
summ = max(a, summ + a)
ans = max(ans, summ)
return ans
| Solution |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/types.py | {
"start": 3509,
"end": 4018
} | class ____(TypedDict):
optimizer: Optimizer
lr_scheduler: Union[LRSchedulerTypeUnion, LRSchedulerConfigType]
monitor: NotRequired[str]
OptimizerLRScheduler = Optional[
Union[
Optimizer,
Sequence[Optimizer],
tuple[Sequence[Optimizer], Sequence[Union[LRSchedulerTypeUnion, LRSchedulerConfig]]],
OptimizerConfig,
OptimizerLRSchedulerConfig,
Sequence[OptimizerConfig],
Sequence[OptimizerLRSchedulerConfig],
]
]
| OptimizerLRSchedulerConfig |
python | wandb__wandb | tests/unit_tests/test_step_prepare.py | {
"start": 3928,
"end": 7170
} | class ____:
def test_smoke(self):
q = Mock(
get=Mock(
side_effect=[
simple_request_prepare("a"),
simple_request_prepare("b"),
simple_request_prepare("c"),
RequestFinish(),
]
)
)
done, batch = gather_batch(q, 0.1, 0.1, 100)
assert done
assert [f.file_spec["name"] for f in batch] == ["a", "b", "c"]
def test_returns_empty_if_first_request_is_finish(self):
q = Mock(
get=Mock(
side_effect=[
RequestFinish(),
]
)
)
done, batch = gather_batch(q, 0.1, 0.1, 100)
assert done
assert len(batch) == 0
def test_respects_batch_size(self):
q = Mock(
get=Mock(
side_effect=[
simple_request_prepare("a"),
simple_request_prepare("b"),
simple_request_prepare("c"),
]
)
)
_, batch = gather_batch(q, 0.1, 0.1, 2)
assert len(batch) == 2
assert q.get.call_count == 2
def test_respects_batch_time(self):
clock = MockClock()
q = MockRequestQueue(
clock=clock,
schedule=[(t, simple_request_prepare(f"req-{t}")) for t in [5, 15, 25, 35]],
)
_, batch = gather_batch(
q,
batch_time=33,
inter_event_time=12,
max_batch_size=100,
clock=clock,
)
assert q.get.call_args_list == [
call(), # finishes at t=5; 28s left in batch
call(timeout=12), # finishes at t=15; 18s left in batch
call(timeout=12), # finishes at t=25; 8s left in batch
call(timeout=8),
]
assert len(batch) == 3
def test_respects_inter_event_time(self):
clock = MockClock()
q = MockRequestQueue(
clock=clock,
schedule=[
(t, simple_request_prepare(f"req-{t}"))
for t in [10, 30, 60, 100, 150, 210, 280]
# diffs: 20 30 40 50 60 70
],
)
_, batch = gather_batch(
q,
batch_time=1000,
inter_event_time=33,
max_batch_size=100,
clock=clock,
)
assert q.get.call_args_list == [
call(), # waited 10s, next wait is 20s
call(timeout=33), # waited 20s, next wait is 30s
call(timeout=33), # waited 30s, next wait is 40s
call(timeout=33), # waited 33s, then raised Empty
]
assert len(batch) == 3
def test_ends_early_if_request_finish(self):
q = Mock(
get=Mock(
side_effect=[
simple_request_prepare("a"),
RequestFinish(),
simple_request_prepare("b"),
]
)
)
done, batch = gather_batch(q, 0.1, 0.1, 100)
assert done
assert [f.file_spec["name"] for f in batch] == ["a"]
assert q.get.call_count == 2
| TestGatherBatch |
python | django-import-export__django-import-export | tests/core/tests/test_results.py | {
"start": 1159,
"end": 1574
} | class ____(SimpleTestCase):
def test_repr(self):
try:
raise ValidationError(message="invalid row")
except ValidationError as exc:
error = InvalidRow(validation_error=exc, number=1, values={})
self.assertEqual(
repr(error),
"<InvalidRow(row=1, error=ValidationError(['invalid row']),"
" error_count=1)>",
)
| InvalidRowTest |
python | getsentry__sentry | tests/sentry/migrations/test_0002_backfill_insights_team_starred_segments.py | {
"start": 378,
"end": 2087
} | class ____(TestMigrations):
migrate_from = "0001_squashed_0001_add_starred_transactions_model"
migrate_to = "0002_backfill_team_starred"
app = "insights"
def setup_before_migration(self, apps):
self.organization: Organization = self.create_organization(name="test", slug="test")
self.project = self.create_project(organization=self.organization)
self.user = self.create_user()
self.environment = self.create_environment(
organization=self.organization, project=self.project
)
self.team: Team = self.create_team(
organization=self.organization, members=[self.user, None]
)
self.transaction_name = "my-transaction"
self.project_team = ProjectTeam.objects.create(
project_id=self.project.id, team_id=self.team.id
)
self.team_key_transaction = TeamKeyTransaction.objects.create(
organization=self.organization,
project_team=self.project_team,
transaction=self.transaction_name,
)
def test_migrates_single_entry(self) -> None:
self.team_key_transaction.refresh_from_db()
starred_segment_results = InsightsStarredSegment.objects.filter(
organization_id=self.organization.id,
project_id=self.project.id,
user_id=self.user.id,
)
assert len(starred_segment_results) == 1
first_starred_segment = starred_segment_results[0]
assert first_starred_segment.segment_name == "my-transaction"
assert first_starred_segment.user_id == self.user.id
assert first_starred_segment.project.id == self.project.id
| BackfillUserStarredSegmentsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqlconnector.py | {
"start": 9855,
"end": 10154
} | class ____(
MariaDBDialect, MySQLDialect_mysqlconnector
):
supports_statement_cache = True
_allows_uuid_binds = False
preparer = MariaDBIdentifierPreparer_mysqlconnector
dialect = MySQLDialect_mysqlconnector
mariadb_dialect = MariaDBDialect_mysqlconnector
| MariaDBDialect_mysqlconnector |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 41253,
"end": 41440
} | class ____(Callback):
def on_train_epoch_end(self, trainer, pl_module):
if trainer.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledCallbackOnTrainEpochEnd |
python | walkccc__LeetCode | solutions/2387. Median of a Row Wise Sorted Matrix/2387.py | {
"start": 0,
"end": 367
} | class ____:
def matrixMedian(self, grid: list[list[int]]) -> int:
noGreaterThanMedianCount = len(grid) * len(grid[0]) // 2 + 1
l = 1
r = 1_000_000
while l < r:
m = (l + r) // 2
if (sum(bisect.bisect_right(row, m) for row in grid) >=
noGreaterThanMedianCount):
r = m
else:
l = m + 1
return l
| Solution |
python | google__pytype | pytype/ast/visitor_test.py | {
"start": 365,
"end": 674
} | class ____(visitor.BaseVisitor):
"""Tests visit()'s node replacement functionality."""
def visit_Name(self, node):
if node.id == "x":
return True # should replace
elif node.id == "y":
return False # should replace
else:
return None # should not replace
| _VisitReplaceVisitor |
python | doocs__leetcode | solution/1500-1599/1579.Remove Max Number of Edges to Keep Graph Fully Traversable/Solution.py | {
"start": 0,
"end": 614
} | class ____:
def __init__(self, n):
self.p = list(range(n))
self.size = [1] * n
self.cnt = n
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, a, b):
pa, pb = self.find(a - 1), self.find(b - 1)
if pa == pb:
return False
if self.size[pa] > self.size[pb]:
self.p[pb] = pa
self.size[pa] += self.size[pb]
else:
self.p[pa] = pb
self.size[pb] += self.size[pa]
self.cnt -= 1
return True
| UnionFind |
python | pytorch__pytorch | torch/utils/_cxx_pytree.py | {
"start": 33942,
"end": 34207
} | class ____:
def __repr__(self) -> str:
return "*"
def treespec_pprint(treespec: TreeSpec) -> str:
dummy_tree = tree_unflatten(
[_DummyLeaf() for _ in range(treespec.num_leaves)],
treespec,
)
return repr(dummy_tree)
| _DummyLeaf |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 133152,
"end": 137000
} | class ____(Response):
"""
Response of models.update_for_task endpoint.
:param id: ID of the model
:type id: str
:param created: Was the model created
:type created: bool
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "update_for_task"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"created": {
"description": "Was the model created",
"type": ["boolean", "null"],
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"id": {"description": "ID of the model", "type": ["string", "null"]},
"updated": {
"description": "Number of models updated (0 or 1)",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
created: Optional[bool] = None,
updated: Optional[int] = None,
fields: Optional[dict] = None,
**kwargs: Any
) -> None:
super(UpdateForTaskResponse, self).__init__(**kwargs)
self.id = id
self.created = created
self.updated = updated
self.fields = fields
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("created")
def created(self) -> Optional[bool]:
return self._property_created
@created.setter
def created(self, value: Optional[bool]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", (bool,))
self._property_created = value
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
response_mapping = {
GetByIdRequest: GetByIdResponse,
GetByTaskIdRequest: GetByTaskIdResponse,
GetAllRequest: GetAllResponse,
GetFrameworksRequest: GetFrameworksResponse,
UpdateForTaskRequest: UpdateForTaskResponse,
CreateRequest: CreateResponse,
EditRequest: EditResponse,
UpdateRequest: UpdateResponse,
PublishManyRequest: PublishManyResponse,
SetReadyRequest: SetReadyResponse,
ArchiveManyRequest: ArchiveManyResponse,
DeleteManyRequest: DeleteManyResponse,
DeleteRequest: DeleteResponse,
MakePublicRequest: MakePublicResponse,
MakePrivateRequest: MakePrivateResponse,
MoveRequest: MoveResponse,
AddOrUpdateMetadataRequest: AddOrUpdateMetadataResponse,
DeleteMetadataRequest: DeleteMetadataResponse,
}
| UpdateForTaskResponse |
python | cython__cython | Cython/Compiler/TypeSlots.py | {
"start": 21630,
"end": 21940
} | class ____(SlotDescriptor):
# Descriptor for the docstring slot.
def slot_code(self, scope):
doc = scope.doc
if doc is None:
return "0"
if doc.is_unicode:
doc = doc.as_utf8_string()
return "PyDoc_STR(%s)" % doc.as_c_string_literal()
| DocStringSlot |
python | huggingface__transformers | src/transformers/models/vit/image_processing_vit.py | {
"start": 1389,
"end": 14196
} | class ____(BaseImageProcessor):
r"""
Constructs a ViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 224, "width": 224}
size = get_size_dict(size)
self.do_resize = do_resize
self.do_rescale = do_rescale
self.do_normalize = do_normalize
self.size = size
self.resample = resample
self.rescale_factor = rescale_factor
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_convert_rgb = do_convert_rgb
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
do_convert_rgb: Optional[bool] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
resample = resample if resample is not None else self.resample
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size_dict = get_size_dict(size)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["ViTImageProcessor"]
| ViTImageProcessor |
python | numba__numba | numba/tests/npyufunc/test_gufunc.py | {
"start": 11613,
"end": 14666
} | class ____(MemoryLeakMixin, TestCase):
"""
Nothing keeps user from out-of-bound memory access
"""
target = 'cpu'
def test_scalar_output(self):
"""
Note that scalar output is a 0-dimension array that acts as
a pointer to the output location.
"""
@guvectorize(['void(int32[:], int32[:])'], '(n)->()',
target=self.target, nopython=True)
def sum_row(inp, out):
tmp = 0.
for i in range(inp.shape[0]):
tmp += inp[i]
out[()] = tmp
# inp is (10000, 3)
# out is (10000)
# The outer (leftmost) dimension must match or numpy broadcasting is performed.
inp = np.arange(30000, dtype=np.int32).reshape(10000, 3)
out = sum_row(inp)
# verify result
for i in range(inp.shape[0]):
self.assertEqual(out[i], inp[i].sum())
def test_scalar_input(self):
@guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)',
target=self.target, nopython=True)
def foo(inp, n, out):
for i in range(inp.shape[0]):
out[i] = inp[i] * n[0]
inp = np.arange(3 * 10, dtype=np.int32).reshape(10, 3)
# out = np.empty_like(inp)
out = foo(inp, 2)
# verify result
self.assertPreciseEqual(inp * 2, out)
def test_scalar_input_core_type(self):
def pyfunc(inp, n, out):
for i in range(inp.size):
out[i] = n * (inp[i] + 1)
my_gufunc = guvectorize(['int32[:], int32, int32[:]'],
'(n),()->(n)',
target=self.target)(pyfunc)
# test single core loop execution
arr = np.arange(10).astype(np.int32)
got = my_gufunc(arr, 2)
expected = np.zeros_like(got)
pyfunc(arr, 2, expected)
np.testing.assert_equal(got, expected)
# test multiple core loop execution
arr = np.arange(20).astype(np.int32).reshape(10, 2)
got = my_gufunc(arr, 2)
expected = np.zeros_like(got)
for ax in range(expected.shape[0]):
pyfunc(arr[ax], 2, expected[ax])
np.testing.assert_equal(got, expected)
def test_scalar_input_core_type_error(self):
with self.assertRaises(TypeError) as raises:
@guvectorize(['int32[:], int32, int32[:]'], '(n),(n)->(n)',
target=self.target)
def pyfunc(a, b, c):
pass
self.assertEqual("scalar type int32 given for non scalar argument #2",
str(raises.exception))
def test_ndim_mismatch(self):
with self.assertRaises(TypeError) as raises:
@guvectorize(['int32[:], int32[:]'], '(m,n)->(n)',
target=self.target)
def pyfunc(a, b):
pass
self.assertEqual("type and shape signature mismatch for arg #1",
str(raises.exception))
| TestGUVectorizeScalar |
python | doocs__leetcode | lcof2/剑指 Offer II 003. 前 n 个数字二进制中 1 的个数/Solution.py | {
"start": 0,
"end": 177
} | class ____:
def countBits(self, n: int) -> List[int]:
f = [0] * (n + 1)
for i in range(1, n + 1):
f[i] = f[i & (i - 1)] + 1
return f
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/lisp.py | {
"start": 122449,
"end": 129602
} | class ____(RegexLexer):
"""
Lexer for `Shen <http://shenlanguage.org/>`_ source code.
.. versionadded:: 2.1
"""
name = 'Shen'
aliases = ['shen']
filenames = ['*.shen']
mimetypes = ['text/x-shen', 'application/x-shen']
DECLARATIONS = (
'datatype', 'define', 'defmacro', 'defprolog', 'defcc',
'synonyms', 'declare', 'package', 'type', 'function',
)
SPECIAL_FORMS = (
'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze',
'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output',
'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p',
'@s', '@v',
)
BUILTINS = (
'==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address',
'<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin',
'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd',
'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy',
'difference', 'element?', 'empty?', 'enable-type-theory',
'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external',
'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym',
'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical',
'implementation', 'in', 'include', 'include-all-but', 'inferences',
'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill',
'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand',
'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null',
'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port',
'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile',
'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file',
'read-file-as-bytelist', 'read-file-as-string', 'read-from-string',
'release', 'remove', 'return', 'reverse', 'run', 'save', 'set',
'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput',
'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?',
'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track',
'tuple?', 'undefmacro', 'unify', 'unify!', 'union', 'unprofile',
'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?',
'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file',
'y-or-n?',
)
BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>')
MAPPINGS = dict((s, Keyword) for s in DECLARATIONS)
MAPPINGS.update((s, Name.Builtin) for s in BUILTINS)
MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS)
valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]'
valid_name = '%s+' % valid_symbol_chars
symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars
variable = r'[A-Z]%s*' % valid_symbol_chars
tokens = {
'string': [
(r'"', String, '#pop'),
(r'c#\d{1,3};', String.Escape),
(r'~[ARS%]', String.Interpol),
(r'(?s).', String),
],
'root': [
(r'(?s)\\\*.*?\*\\', Comment.Multiline), # \* ... *\
(r'\\\\.*', Comment.Single), # \\ ...
(r'\s+', Text),
(r'_{5,}', Punctuation),
(r'={5,}', Punctuation),
(r'(;|:=|\||--?>|<--?)', Punctuation),
(r'(:-|:|\{|\})', Literal),
(r'[+-]*\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'[+-]*\d+', Number.Integer),
(r'"', String, 'string'),
(variable, Name.Variable),
(r'(true|false|<>|\[\])', Keyword.Pseudo),
(symbol_name, Literal),
(r'(\[|\]|\(|\))', Punctuation),
],
}
def get_tokens_unprocessed(self, text):
tokens = RegexLexer.get_tokens_unprocessed(self, text)
tokens = self._process_symbols(tokens)
tokens = self._process_declarations(tokens)
return tokens
def _relevant(self, token):
return token not in (Text, Comment.Single, Comment.Multiline)
def _process_declarations(self, tokens):
opening_paren = False
for index, token, value in tokens:
yield index, token, value
if self._relevant(token):
if opening_paren and token == Keyword and value in self.DECLARATIONS:
declaration = value
for index, token, value in \
self._process_declaration(declaration, tokens):
yield index, token, value
opening_paren = value == '(' and token == Punctuation
def _process_symbols(self, tokens):
opening_paren = False
for index, token, value in tokens:
if opening_paren and token in (Literal, Name.Variable):
token = self.MAPPINGS.get(value, Name.Function)
elif token == Literal and value in self.BUILTINS_ANYWHERE:
token = Name.Builtin
opening_paren = value == '(' and token == Punctuation
yield index, token, value
def _process_declaration(self, declaration, tokens):
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if declaration == 'datatype':
prev_was_colon = False
token = Keyword.Type if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if prev_was_colon and token == Literal:
token = Keyword.Type
yield index, token, value
if self._relevant(token):
prev_was_colon = token == Literal and value == ':'
elif declaration == 'package':
token = Name.Namespace if token == Literal else token
yield index, token, value
elif declaration == 'define':
token = Name.Function if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if value == '{' and token == Literal:
yield index, Punctuation, value
for index, token, value in self._process_signature(tokens):
yield index, token, value
else:
yield index, token, value
else:
token = Name.Function if token == Literal else token
yield index, token, value
raise StopIteration
def _process_signature(self, tokens):
for index, token, value in tokens:
if token == Literal and value == '}':
yield index, Punctuation, value
raise StopIteration
elif token in (Literal, Name.Function):
token = Name.Variable if value.istitle() else Keyword.Type
yield index, token, value
| ShenLexer |
python | getsentry__sentry | tests/sentry/web/frontend/test_project_event.py | {
"start": 210,
"end": 2320
} | class ____(SnubaTestCase, TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.login_as(self.user)
self.org = self.create_organization()
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.project = self.create_project(organization=self.org, teams=[self.team])
min_ago = before_now(minutes=1).isoformat()
self.event = self.store_event(
data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id
)
def test_redirect_to_event(self) -> None:
resp = self.client.get(
reverse(
"sentry-project-event-redirect",
args=[self.org.slug, self.project.slug, self.event.event_id],
)
)
self.assertRedirects(
resp,
f"http://testserver/organizations/{self.org.slug}/issues/{self.event.group_id}/events/{self.event.event_id}/",
)
def test_event_not_found(self) -> None:
resp = self.client.get(
reverse(
"sentry-project-event-redirect", args=[self.org.slug, self.project.slug, "event1"]
)
)
assert resp.status_code == 404
def test_event_not_found__event_no_group(self) -> None:
min_ago = before_now(minutes=1).isoformat()
event = self.store_event(
data={
"type": "transaction",
"transaction": "api.test",
"timestamp": min_ago,
"start_timestamp": min_ago,
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "b" * 16}},
},
project_id=self.project.id,
)
url = reverse(
"sentry-project-event-redirect", args=[self.org.slug, self.project.slug, event.event_id]
)
resp = self.client.get(url)
assert resp.status_code == 404
| ProjectEventTest |
python | doocs__leetcode | solution/3400-3499/3406.Find the Lexicographically Largest String From the Box II/Solution.py | {
"start": 0,
"end": 608
} | class ____:
def answerString(self, word: str, numFriends: int) -> str:
if numFriends == 1:
return word
s = self.lastSubstring(word)
return s[: len(word) - numFriends + 1]
def lastSubstring(self, s: str) -> str:
i, j, k = 0, 1, 0
while j + k < len(s):
if s[i + k] == s[j + k]:
k += 1
elif s[i + k] < s[j + k]:
i += k + 1
k = 0
if i >= j:
j = i + 1
else:
j += k + 1
k = 0
return s[i:]
| Solution |
python | realpython__materials | python-serialize/executable-code/digital-signature/safe_unpickler.py | {
"start": 43,
"end": 630
} | class ____(pickle.Unpickler):
ALLOWED = {
"builtins": ["print"],
"sysconfig": ["get_python_version"],
}
@classmethod
def safe_loads(cls, serialized_data):
file = io.BytesIO(serialized_data)
return cls(file).load()
def find_class(self, module_name, name):
if module_name in self.ALLOWED:
if name in self.ALLOWED[module_name]:
module = importlib.import_module(module_name)
return getattr(module, name)
raise pickle.UnpicklingError(f"{module_name}.{name} is unsafe")
| SafeUnpickler |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 11069,
"end": 12924
} | class ____(
spack.phase_callbacks.PhaseCallbacksMeta,
DetectablePackageMeta,
spack.directives_meta.DirectiveMeta,
spack.multimethod.MultiMethodMeta,
):
"""
Package metaclass for supporting directives (e.g., depends_on) and phases
"""
def __new__(cls, name, bases, attr_dict):
"""
FIXME: REWRITE
Instance creation is preceded by phase attribute transformations.
Conveniently transforms attributes to permit extensible phases by
iterating over the attribute 'phases' and creating / updating private
InstallPhase attributes in the class that will be initialized in
__init__.
"""
attr_dict["_name"] = None
return super(PackageMeta, cls).__new__(cls, name, bases, attr_dict)
def on_package_attributes(**attr_dict):
"""Decorator: executes instance function only if object has attr valuses.
Executes the decorated method only if at the moment of calling the
instance has attributes that are equal to certain values.
Args:
attr_dict (dict): dictionary mapping attribute names to their
required values
"""
def _execute_under_condition(func):
@functools.wraps(func)
def _wrapper(instance, *args, **kwargs):
# If all the attributes have the value we require, then execute
has_all_attributes = all([hasattr(instance, key) for key in attr_dict])
if has_all_attributes:
has_the_right_values = all(
[
getattr(instance, key) == value for key, value in attr_dict.items()
] # NOQA: ignore=E501
)
if has_the_right_values:
func(instance, *args, **kwargs)
return _wrapper
return _execute_under_condition
| PackageMeta |
python | huggingface__transformers | src/transformers/models/diffllama/modeling_diffllama.py | {
"start": 32087,
"end": 35061
} | class ____(DiffLlamaPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = DiffLlamaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, DiffLlamaForCausalLM
>>> model = DiffLlamaForCausalLM.from_pretrained("google/diffllama-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/diffllama-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| DiffLlamaForCausalLM |
python | walkccc__LeetCode | solutions/1244. Design A Leaderboard/1244.py | {
"start": 0,
"end": 357
} | class ____:
def __init__(self):
self.idToScore = collections.Counter()
def addScore(self, playerId: int, score: int) -> None:
self.idToScore[playerId] += score
def top(self, K: int) -> int:
return sum(score for _, score in self.idToScore.most_common(K))
def reset(self, playerId: int) -> None:
del self.idToScore[playerId]
| Leaderboard |
python | sqlalchemy__sqlalchemy | examples/adjacency_list/adjacency_list.py | {
"start": 570,
"end": 3338
} | class ____(MappedAsDataclass, Base):
__tablename__ = "tree"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
parent_id: Mapped[Optional[int]] = mapped_column(
ForeignKey("tree.id"), init=False
)
name: Mapped[str]
children: Mapped[Dict[str, TreeNode]] = relationship(
cascade="all, delete-orphan",
back_populates="parent",
collection_class=attribute_keyed_dict("name"),
init=False,
repr=False,
)
parent: Mapped[Optional[TreeNode]] = relationship(
back_populates="children", remote_side=id, default=None
)
def dump(self, _indent: int = 0) -> str:
return (
" " * _indent
+ repr(self)
+ "\n"
+ "".join([c.dump(_indent + 1) for c in self.children.values()])
)
if __name__ == "__main__":
engine = create_engine("sqlite://", echo=True)
print("Creating Tree Table:")
Base.metadata.create_all(engine)
with Session(engine) as session:
node = TreeNode("rootnode")
TreeNode("node1", parent=node)
TreeNode("node3", parent=node)
node2 = TreeNode("node2")
TreeNode("subnode1", parent=node2)
node.children["node2"] = node2
TreeNode("subnode2", parent=node.children["node2"])
print(f"Created new tree structure:\n{node.dump()}")
print("flush + commit:")
session.add(node)
session.commit()
print(f"Tree after save:\n{node.dump()}")
session.add_all(
[
TreeNode("node4", parent=node),
TreeNode("subnode3", parent=node.children["node4"]),
TreeNode("subnode4", parent=node.children["node4"]),
TreeNode(
"subsubnode1",
parent=node.children["node4"].children["subnode3"],
),
]
)
# remove node1 from the parent, which will trigger a delete
# via the delete-orphan cascade.
del node.children["node1"]
print("Removed node1. flush + commit:")
session.commit()
print("Tree after save, will unexpire all nodes:\n")
print(f"{node.dump()}")
with Session(engine) as session:
print(
"Perform a full select of the root node, eagerly loading "
"up to a depth of four"
)
node = session.scalars(
select(TreeNode)
.options(selectinload(TreeNode.children, recursion_depth=4))
.filter(TreeNode.name == "rootnode")
).one()
print(f"Full Tree:\n{node.dump()}")
print("Marking root node as deleted, flush + commit:")
session.delete(node)
session.commit()
| TreeNode |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-colbert-rerank/llama_index/postprocessor/colbert_rerank/base.py | {
"start": 675,
"end": 4796
} | class ____(BaseNodePostprocessor):
model: str = Field(description="Colbert model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
_model: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
def __init__(
self,
top_n: int = 5,
model: str = "colbert-ir/colbertv2.0",
tokenizer: str = "colbert-ir/colbertv2.0",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
):
device = infer_torch_device() if device is None else device
super().__init__(
top_n=top_n,
device=device,
keep_retrieval_score=keep_retrieval_score,
model=model,
)
self._tokenizer = AutoTokenizer.from_pretrained(tokenizer)
self._model = AutoModel.from_pretrained(model)
@classmethod
def class_name(cls) -> str:
return "ColbertRerank"
def _calculate_sim(self, query: str, documents_text_list: List[str]) -> List[float]:
# Query: [batch_size, query_length, embedding_size] -> [batch_size, query_length, 1, embedding_size]
# Document: [batch_size, doc_length, embedding_size] -> [batch_size, 1, doc_length, embedding_size]
query_encoding = self._tokenizer(query, return_tensors="pt")
query_embedding = self._model(**query_encoding).last_hidden_state
rerank_score_list = []
for document_text in documents_text_list:
document_encoding = self._tokenizer(
document_text, return_tensors="pt", truncation=True, max_length=512
)
document_embedding = self._model(**document_encoding).last_hidden_state
sim_matrix = torch.nn.functional.cosine_similarity(
query_embedding.unsqueeze(2), document_embedding.unsqueeze(1), dim=-1
)
# Take the maximum similarity for each query token (across all document tokens)
# sim_matrix shape: [batch_size, query_length, doc_length]
max_sim_scores, _ = torch.max(sim_matrix, dim=2)
rerank_score_list.append(torch.mean(max_sim_scores, dim=1))
return rerank_score_list
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
nodes_text_list = [
str(node.node.get_content(metadata_mode=MetadataMode.EMBED))
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._calculate_sim(query_bundle.query_str, nodes_text_list)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = float(score)
reranked_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: reranked_nodes})
dispatcher.event(ReRankEndEvent(nodes=reranked_nodes))
return reranked_nodes
| ColbertRerank |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_misc.py | {
"start": 654,
"end": 1672
} | class ____(fixtures.TestBase):
__requires__ = ("cpython", "python_profiling_backend")
def setup_test(self):
class SomeEnum:
# Implements PEP 435 in the minimal fashion needed by SQLAlchemy
_members = {}
@classproperty
def __members__(cls):
"""simulate a very expensive ``__members__`` getter"""
for i in range(10):
x = {}
x.update({k: v for k, v in cls._members.items()}.copy())
return x.copy()
def __init__(self, name, value):
self.name = name
self.value = value
self._members[name] = self
setattr(self.__class__, name, self)
for i in range(400):
SomeEnum("some%d" % i, i)
self.SomeEnum = SomeEnum
@profiling.function_call_count()
def test_create_enum_from_pep_435_w_expensive_members(self):
Enum(self.SomeEnum, omit_aliases=False)
| EnumTest |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 29351,
"end": 29656
} | class ____(generics.CreateAPIView):
queryset = ForeignKeySource.objects.all()
serializer_class = ForeignKeySourceSerializer
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| ForeignKeySourceView |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 22660,
"end": 24211
} | class ____:
def __init__(self):
from ..memory import estimate_peak_memory, get_freeable_input_buf
scheduler_nodes = V.graph.scheduler.nodes
graph_inputs = OrderedSet(V.graph.graph_inputs.keys())
graph_outputs = OrderedSet(V.graph.get_output_names())
names_to_freeable_bufs = get_freeable_input_buf(scheduler_nodes, graph_inputs)
self.overall_peak_memory, peak_by_scheduler_node = estimate_peak_memory(
scheduler_nodes,
names_to_freeable_bufs,
graph_outputs,
)
from .segmented_tree import SegmentedTree
self.segmented_tree = SegmentedTree(
peak_by_scheduler_node, operator.add, max, 0
)
def _get_size(self, node: BufferLike) -> int:
return V.graph.sizevars.size_hint(
V.graph.get_allocation_storage_size(node), fallback=0
) * get_dtype_size(node.get_dtype())
def peak_between(self, line_a: FreeIfNotReusedLine, line_b: AllocateLine):
return self.segmented_tree.summarize_range(
line_a.scheduler_node_index + 1, line_b.scheduler_node_index - 1
)
def update_peak_between(self, line_a: FreeIfNotReusedLine, line_b: AllocateLine):
if line_a.scheduler_node_index + 1 == line_b.scheduler_node_index:
return
self.segmented_tree.update_range(
line_a.scheduler_node_index + 1,
line_b.scheduler_node_index - 1,
self._get_size(line_b.node),
)
@dataclasses.dataclass
| EfficientPeakEstimate |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 952413,
"end": 952893
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RetireSponsorsTier"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "sponsors_tier")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
sponsors_tier = sgqlc.types.Field("SponsorsTier", graphql_name="sponsorsTier")
"""The tier that was retired."""
| RetireSponsorsTierPayload |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/composite_dc.py | {
"start": 268,
"end": 331
} | class ____(DeclarativeBase):
pass
@dataclasses.dataclass
| Base |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.