language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pandas-dev__pandas | pandas/tests/frame/methods/test_reorder_levels.py | {
"start": 119,
"end": 2729
} | class ____:
def test_reorder_levels(self, frame_or_series):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
obj = tm.get_obj(df, frame_or_series)
# no change, position
result = obj.reorder_levels([0, 1, 2])
tm.assert_equal(obj, result)
# no change, labels
result = obj.reorder_levels(["L0", "L1", "L2"])
tm.assert_equal(obj, result)
# rotate, position
result = obj.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = tm.get_obj(expected, frame_or_series)
tm.assert_equal(result, expected)
result = obj.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = tm.get_obj(expected, frame_or_series)
tm.assert_equal(result, expected)
result = obj.reorder_levels(["L0", "L0", "L0"])
tm.assert_equal(result, expected)
def test_reorder_levels_swaplevel_equivalence(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.reorder_levels(["month", "day", "year"])
expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = ymd["A"].reorder_levels(["month", "day", "year"])
expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError, match="hierarchical axis"):
ymd.reorder_levels([1, 2], axis=1)
with pytest.raises(IndexError, match="Too many levels"):
ymd.index.reorder_levels([1, 2, 3])
| TestReorderLevels |
python | ansible__ansible | lib/ansible/plugins/cliconf/__init__.py | {
"start": 1406,
"end": 22719
} | class ____(AnsiblePlugin):
"""
A base class for implementing cli connections
.. note:: String inputs to :meth:`send_command` will be cast to byte strings
within this method and as such are not required to be made byte strings
beforehand. Please avoid using literal byte strings (``b'string'``) in
:class:`CliConfBase` plugins as this can lead to unexpected errors when
running on Python 3
List of supported rpc's:
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:get: Execute specified command on remote device
:get_capabilities: Retrieves device information and supported rpc methods
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
Note: List of supported rpc's for remote device can be extracted from
output of get_capabilities()
:returns: Returns output received from remote device as byte string
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
conn.get('show lldp neighbors detail')
conn.get_config('running')
conn.edit_config(['hostname test', 'netconf ssh'])
"""
__rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
def __init__(self, connection):
super(CliconfBase, self).__init__()
self._connection = connection
self.history = list()
self.response_logging = False
def _alarm_handler(self, signum, frame):
"""Alarm handler raised in case of command timeout """
self._connection.queue_message('log', 'closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout)
self.close()
def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
"""Executes a command over the device connection
This method will execute a command over the device connection and
return the results to the caller. This method will also perform
logging of any commands based on the `nolog` argument.
:param command: The command to send over the connection to the device
:param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
:param answer: The answer to respond with if the prompt is matched.
:param sendonly: Bool value that will send the command but not wait for a result.
:param newline: Bool value that will append the newline character to the command
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:returns: The output from the device after executing the command
"""
kwargs = {
'command': to_bytes(command),
'sendonly': sendonly,
'newline': newline,
'prompt_retry_check': prompt_retry_check,
'check_all': check_all
}
if prompt is not None:
if isinstance(prompt, list):
kwargs['prompt'] = [to_bytes(p) for p in prompt]
else:
kwargs['prompt'] = to_bytes(prompt)
if answer is not None:
if isinstance(answer, list):
kwargs['answer'] = [to_bytes(p) for p in answer]
else:
kwargs['answer'] = to_bytes(answer)
resp = self._connection.send(**kwargs)
if not self.response_logging:
self.history.append(('*****', '*****'))
else:
self.history.append((kwargs['command'], resp))
return resp
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return self.__rpc__
def get_history(self):
""" Returns the history file for all commands
This will return a log of all the commands that have been sent to
the device and all of the output received. By default, all commands
and output will be redacted unless explicitly configured otherwise.
:return: An ordered list of command, output pairs
"""
return self.history
def reset_history(self):
""" Resets the history of run commands
:return: None
"""
self.history = list()
def enable_response_logging(self):
"""Enable logging command response"""
self.response_logging = True
def disable_response_logging(self):
"""Disable logging command response"""
self.response_logging = False
@abstractmethod
def get_config(self, source='running', flags=None, format=None):
"""Retrieves the specified configuration from the device
This method will retrieve the configuration specified by source and
return it to the caller as a string. Subsequent calls to this method
will retrieve a new configuration from the device
:param source: The configuration source to return from the device.
This argument accepts either `running` or `startup` as valid values.
:param flags: For devices that support configuration filtering, this
keyword argument is used to filter the returned configuration.
The use of this keyword argument is device dependent and will be
silently ignored on devices that do not support it.
:param format: For devices that support fetching different configuration
format, this keyword argument is used to specify the format in which
configuration is to be retrieved.
:return: The device configuration as specified by the source argument.
"""
pass
@abstractmethod
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
"""Loads the candidate configuration into the network device
This method will load the specified candidate config into the device
and merge with the current configuration unless replace is set to
True. If the device does not support config replace an errors
is returned.
:param candidate: The configuration to load into the device and merge
with the current running configuration
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:param replace: If the value is True/False it indicates if running configuration should be completely
replace by candidate configuration. If can also take configuration file path as value,
the file in this case should be present on the remote host in the mentioned path as a
prerequisite.
:param comment: Commit comment provided it is supported by remote host
:return: Returns a json string with contains configuration applied on remote host, the returned
response on executing configuration commands and platform relevant data.
{
"diff": "",
"response": [],
"request": []
}
"""
pass
@abstractmethod
def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
"""Execute specified command on remote device
This method will retrieve the specified data and
return it to the caller as a string.
:param command: command in string format to be executed on remote device
:param prompt: the expected prompt generated by executing command, this can
be a string or a list of strings
:param answer: the string to respond to the prompt with
:param sendonly: bool to disable waiting for response, default is false
:param newline: bool to indicate if newline should be added at end of answer or not
:param output: For devices that support fetching command output in different
format, this keyword argument is used to specify the output in which
response is to be retrieved.
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:return: The output from the device after executing the command
"""
pass
@abstractmethod
def get_capabilities(self):
"""Returns the basic capabilities of the network device
This method will provide some basic facts about the device and
what capabilities it has to modify the configuration. The minimum
return from this method takes the following format.
eg:
{
'rpc': [list of supported rpcs],
'network_api': <str>, # the name of the transport
'device_info': {
'network_os': <str>,
'network_os_version': <str>,
'network_os_model': <str>,
'network_os_hostname': <str>,
'network_os_image': <str>,
'network_os_platform': <str>,
},
'device_operations': {
'supports_diff_replace': <bool>, # identify if config should be merged or replaced is supported
'supports_commit': <bool>, # identify if commit is supported by device or not
'supports_rollback': <bool>, # identify if rollback is supported or not
'supports_defaults': <bool>, # identify if fetching running config with default is supported
'supports_commit_comment': <bool>, # identify if adding comment to commit is supported of not
'supports_onbox_diff': <bool>, # identify if on box diff capability is supported or not
'supports_generate_diff': <bool>, # identify if diff capability is supported within plugin
'supports_multiline_delimiter': <bool>, # identify if multiline delimiter is supported within config
'supports_diff_match': <bool>, # identify if match is supported
'supports_diff_ignore_lines': <bool>, # identify if ignore line in diff is supported
'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
'supports_admin': <bool>, # identify if admin configure mode is supported or not
'supports_commit_label': <bool>, # identify if commit label is supported or not
}
'format': [list of supported configuration format],
'diff_match': [list of supported match values],
'diff_replace': [list of supported replace values],
'output': [list of supported command output format]
}
:return: capability as dict
"""
result = {}
result['rpc'] = self.get_base_rpc()
result['device_info'] = self.get_device_info()
result['network_api'] = 'cliconf'
return result
@abstractmethod
def get_device_info(self):
"""Returns basic information about the network device.
This method will provide basic information about the device such as OS version and model
name. This data is expected to be used to fill the 'device_info' key in get_capabilities()
above.
:return: dictionary of device information
"""
pass
def commit(self, comment=None):
"""Commit configuration changes
This method will perform the commit operation on a previously loaded
candidate configuration that was loaded using `edit_config()`. If
there is a candidate configuration, it will be committed to the
active configuration. If there is not a candidate configuration, this
method should just silently return.
:return: None
"""
return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
def discard_changes(self):
"""Discard candidate configuration
This method will discard the current candidate configuration if one
is present. If there is no candidate configuration currently loaded,
then this method should just silently return
:returns: None
"""
return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
def rollback(self, rollback_id, commit=True):
"""
:param rollback_id: The commit id to which configuration should be rollbacked
:param commit: Flag to indicate if changes should be committed or not
:return: Returns diff between before and after change.
"""
pass
def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Copies file over scp/sftp to remote device
:param source: Source file path
:param destination: Destination file path on remote device
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
out = scp.put(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.put(source, destination)
def get_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Fetch file over scp/sftp from remote device
:param source: Source file path
:param destination: Destination file path
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
try:
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
scp.get(source, destination)
except EOFError:
# This appears to be benign.
pass
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.get(source, destination)
def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities ie. supports_onbox_diff in that case
candidate and running configurations are not required to be passed as argument.
In case if onbox diff capability is not supported candidate argument is mandatory
and running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration and/or banner diff in json format.
{
'config_diff': ''
}
"""
pass
def run_commands(self, commands=None, check_rc=True):
"""
Execute a list of commands on remote host and return the list of response
:param commands: The list of command that needs to be executed on remote host.
The individual command in list can either be a command string or command dict.
If the command is dict the valid keys are
{
'command': <command to be executed>
'prompt': <expected prompt on executing the command>,
'answer': <answer for the prompt>,
'output': <the format in which command output should be rendered eg: 'json', 'text'>,
'sendonly': <Boolean flag to indicate if it command execution response should be ignored or not>
}
:param check_rc: Boolean flag to check if returned response should be checked for error or not.
If check_rc is False the error output is appended in return response list, else if the
value is True an exception is raised.
:return: List of returned response
"""
pass
def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
if not candidate and not replace:
raise ValueError("must provide a candidate or replace to load configuration")
if commit not in (True, False):
raise ValueError("'commit' must be a bool, got %s" % commit)
if replace and not operations['supports_replace']:
raise ValueError("configuration replace is not supported")
if comment and not operations.get('supports_commit_comment', False):
raise ValueError("commit comment is not supported")
if replace and not operations.get('supports_replace', False):
raise ValueError("configuration replace is not supported")
def set_cli_prompt_context(self):
"""
Ensure the command prompt on device is in right mode
:return: None
"""
pass
def _update_cli_prompt_context(self, config_context=None, exit_command='exit'):
"""
Update the cli prompt context to ensure it is in operational mode
:param config_context: It is string value to identify if the current cli prompt ends with config mode prompt
:param exit_command: Command to execute to exit the config mode
:return: None
"""
out = self._connection.get_prompt()
if out is None:
raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
u' response window: %s' % self._connection._last_recv_window)
while True:
out = to_text(out, errors='surrogate_then_replace').strip()
if config_context and out.endswith(config_context):
self._connection.queue_message('vvvv', 'wrong context, sending exit to device')
self.send_command(exit_command)
out = self._connection.get_prompt()
else:
break
| CliconfBase |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/live.py | {
"start": 1044,
"end": 14271
} | class ____(JupyterMixin, RenderHook):
"""Renders an auto-updating live display of any given renderable.
Args:
renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
console (Console, optional): Optional Console instance. Default will an internal Console instance writing to stdout.
screen (bool, optional): Enable alternate screen mode. Defaults to False.
auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True
refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
"""
def __init__(
self,
renderable: Optional[RenderableType] = None,
*,
console: Optional[Console] = None,
screen: bool = False,
auto_refresh: bool = True,
refresh_per_second: float = 4,
transient: bool = False,
redirect_stdout: bool = True,
redirect_stderr: bool = True,
vertical_overflow: VerticalOverflowMethod = "ellipsis",
get_renderable: Optional[Callable[[], RenderableType]] = None,
) -> None:
assert refresh_per_second > 0, "refresh_per_second must be > 0"
self._renderable = renderable
self.console = console if console is not None else get_console()
self._screen = screen
self._alt_screen = False
self._redirect_stdout = redirect_stdout
self._redirect_stderr = redirect_stderr
self._restore_stdout: Optional[IO[str]] = None
self._restore_stderr: Optional[IO[str]] = None
self._lock = RLock()
self.ipy_widget: Optional[Any] = None
self.auto_refresh = auto_refresh
self._started: bool = False
self.transient = True if screen else transient
self._refresh_thread: Optional[_RefreshThread] = None
self.refresh_per_second = refresh_per_second
self.vertical_overflow = vertical_overflow
self._get_renderable = get_renderable
self._live_render = LiveRender(
self.get_renderable(), vertical_overflow=vertical_overflow
)
@property
def is_started(self) -> bool:
"""Check if live display has been started."""
return self._started
def get_renderable(self) -> RenderableType:
renderable = (
self._get_renderable()
if self._get_renderable is not None
else self._renderable
)
return renderable or ""
def start(self, refresh: bool = False) -> None:
"""Start live rendering display.
Args:
refresh (bool, optional): Also refresh. Defaults to False.
"""
with self._lock:
if self._started:
return
self.console.set_live(self)
self._started = True
if self._screen:
self._alt_screen = self.console.set_alt_screen(True)
self.console.show_cursor(False)
self._enable_redirect_io()
self.console.push_render_hook(self)
if refresh:
try:
self.refresh()
except Exception:
# If refresh fails, we want to stop the redirection of sys.stderr,
# so the error stacktrace is properly displayed in the terminal.
# (or, if the code that calls Rich captures the exception and wants to display something,
# let this be displayed in the terminal).
self.stop()
raise
if self.auto_refresh:
self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
self._refresh_thread.start()
def stop(self) -> None:
"""Stop live rendering display."""
with self._lock:
if not self._started:
return
self.console.clear_live()
self._started = False
if self.auto_refresh and self._refresh_thread is not None:
self._refresh_thread.stop()
self._refresh_thread = None
# allow it to fully render on the last even if overflow
self.vertical_overflow = "visible"
with self.console:
try:
if not self._alt_screen and not self.console.is_jupyter:
self.refresh()
finally:
self._disable_redirect_io()
self.console.pop_render_hook()
if not self._alt_screen and self.console.is_terminal:
self.console.line()
self.console.show_cursor(True)
if self._alt_screen:
self.console.set_alt_screen(False)
if self.transient and not self._alt_screen:
self.console.control(self._live_render.restore_cursor())
if self.ipy_widget is not None and self.transient:
self.ipy_widget.close() # pragma: no cover
def __enter__(self) -> "Live":
self.start(refresh=self._renderable is not None)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
def _enable_redirect_io(self) -> None:
"""Enable redirecting of stdout / stderr."""
if self.console.is_terminal or self.console.is_jupyter:
if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
self._restore_stdout = sys.stdout
sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
self._restore_stderr = sys.stderr
sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
def _disable_redirect_io(self) -> None:
"""Disable redirecting of stdout / stderr."""
if self._restore_stdout:
sys.stdout = cast("TextIO", self._restore_stdout)
self._restore_stdout = None
if self._restore_stderr:
sys.stderr = cast("TextIO", self._restore_stderr)
self._restore_stderr = None
@property
def renderable(self) -> RenderableType:
"""Get the renderable that is being displayed
Returns:
RenderableType: Displayed renderable.
"""
renderable = self.get_renderable()
return Screen(renderable) if self._alt_screen else renderable
def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
"""Update the renderable that is being displayed
Args:
renderable (RenderableType): New renderable to use.
refresh (bool, optional): Refresh the display. Defaults to False.
"""
if isinstance(renderable, str):
renderable = self.console.render_str(renderable)
with self._lock:
self._renderable = renderable
if refresh:
self.refresh()
def refresh(self) -> None:
"""Update the display of the Live Render."""
with self._lock:
self._live_render.set_renderable(self.renderable)
if self.console.is_jupyter: # pragma: no cover
try:
from IPython.display import display
from ipywidgets import Output
except ImportError:
import warnings
warnings.warn('install "ipywidgets" for Jupyter support')
else:
if self.ipy_widget is None:
self.ipy_widget = Output()
display(self.ipy_widget)
with self.ipy_widget:
self.ipy_widget.clear_output(wait=True)
self.console.print(self._live_render.renderable)
elif self.console.is_terminal and not self.console.is_dumb_terminal:
with self.console:
self.console.print(Control())
elif (
not self._started and not self.transient
): # if it is finished allow files or dumb-terminals to see final result
with self.console:
self.console.print(Control())
def process_renderables(
self, renderables: List[ConsoleRenderable]
) -> List[ConsoleRenderable]:
"""Process renderables to restore cursor and display progress."""
self._live_render.vertical_overflow = self.vertical_overflow
if self.console.is_interactive:
# lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
with self._lock:
reset = (
Control.home()
if self._alt_screen
else self._live_render.position_cursor()
)
renderables = [reset, *renderables, self._live_render]
elif (
not self._started and not self.transient
): # if it is finished render the final output for files or dumb_terminals
renderables = [*renderables, self._live_render]
return renderables
if __name__ == "__main__": # pragma: no cover
import random
import time
from itertools import cycle
from typing import Dict, List, Tuple
from .align import Align
from .console import Console
from .live import Live as Live
from .panel import Panel
from .rule import Rule
from .syntax import Syntax
from .table import Table
console = Console()
syntax = Syntax(
'''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value''',
"python",
line_numbers=True,
)
table = Table("foo", "bar", "baz")
table.add_row("1", "2", "3")
progress_renderables = [
"You can make the terminal shorter and taller to see the live table hide"
"Text may be printed while the progress bars are rendering.",
Panel("In fact, [i]any[/i] renderable will work"),
"Such as [magenta]tables[/]...",
table,
"Pretty printed structures...",
{"type": "example", "text": "Pretty printed"},
"Syntax...",
syntax,
Rule("Give it a try!"),
]
examples = cycle(progress_renderables)
exchanges = [
"SGD",
"MYR",
"EUR",
"USD",
"AUD",
"JPY",
"CNH",
"HKD",
"CAD",
"INR",
"DKK",
"GBP",
"RUB",
"NZD",
"MXN",
"IDR",
"TWD",
"THB",
"VND",
]
with Live(console=console) as live_table:
exchange_rate_dict: Dict[Tuple[str, str], float] = {}
for index in range(100):
select_exchange = exchanges[index % len(exchanges)]
for exchange in exchanges:
if exchange == select_exchange:
continue
time.sleep(0.4)
if random.randint(0, 10) < 1:
console.log(next(examples))
exchange_rate_dict[(select_exchange, exchange)] = 200 / (
(random.random() * 320) + 1
)
if len(exchange_rate_dict) > len(exchanges) - 1:
exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
table = Table(title="Exchange Rates")
table.add_column("Source Currency")
table.add_column("Destination Currency")
table.add_column("Exchange Rate")
for (source, dest), exchange_rate in exchange_rate_dict.items():
table.add_row(
source,
dest,
Text(
f"{exchange_rate:.4f}",
style="red" if exchange_rate < 1.0 else "green",
),
)
live_table.update(Align.center(table))
| Live |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py | {
"start": 12384,
"end": 12954
} | class ____(PreTrainedModel):
config: HunYuanDenseV1Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["HunYuanDenseV1DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": HunYuanDenseV1DecoderLayer,
"attentions": HunYuanDenseV1Attention,
}
| HunYuanDenseV1PreTrainedModel |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 43838,
"end": 46781
} | class ____(ContextWrappingVariable):
"""represents torch.nn.attention.sdpa_kernel"""
@staticmethod
def create(
tx: "InstructionTranslator",
backends: Any,
set_priority: bool = False,
**kwargs: Any,
) -> "SDPAKernelVariable":
if isinstance(backends, torch.nn.attention.SDPBackend):
backends = [backends]
var = SDPAKernelVariable(
target_values=backends,
initial_values=None,
set_priority=set_priority,
**kwargs,
)
return var
def __init__(
self,
target_values: list[torch.nn.attention.SDPBackend],
initial_values: Any = None,
set_priority: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
target_values=target_values, initial_values=initial_values, **kwargs
)
self.set_priority = set_priority
@staticmethod
def _backends_to_nodes(
tx: "InstructionTranslator",
backends: list[Any],
) -> list[Any]:
# convert to/from string in order to bake the backend into FX graph
nodes = [
tx.output.create_node(
"call_function",
torch.nn.attention._backend_from_string,
(backend.name,),
{},
)
for backend in backends
]
return nodes
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
self.prev_backends = torch.nn.attention._cur_sdpa_kernel_backends(
with_priority=self.set_priority
)
self.set_cleanup_hook(
tx,
lambda: torch.nn.attention._sdpa_kernel(
self.prev_backends, set_priority=self.set_priority
),
)
torch.nn.attention._sdpa_kernel(
self.target_values, set_priority=self.set_priority
)
arg = self._backends_to_nodes(tx, self.target_values)
tx.output.create_node(
"call_function",
torch.nn.attention._sdpa_kernel,
(arg, bool(self.set_priority)),
{},
)
return variables.ConstantVariable.create(None)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup_assert()
arg = self._backends_to_nodes(tx, self.prev_backends)
tx.output.create_node(
"call_function",
torch.nn.attention._sdpa_kernel,
(arg, bool(self.set_priority)),
{},
)
return variables.ConstantVariable.create(None)
def module_name(self) -> str:
return "torch.nn.attention"
# use a private version of sdpa_kernel that accepts variadic arguments
# since dynamo reconstructs the contents of target_values one-by-one
def fn_name(self) -> str:
return "_sdpa_kernel_variadic"
| SDPAKernelVariable |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/debug.py | {
"start": 318,
"end": 479
} | class ____:
"""Dataclass describing a Python package."""
name: str
"""Package name."""
version: str
"""Package version."""
@dataclass
| _Package |
python | keon__algorithms | tests/test_strings.py | {
"start": 16929,
"end": 17951
} | class ____(unittest.TestCase):
def test_longest_common_prefix(self):
# Test first solution
self.assertEqual("fl", longest_common_prefix_v1(["flower", "flow",
"flight"]))
self.assertEqual("", longest_common_prefix_v1(["dog", "racecar",
"car"]))
# Test second solution
self.assertEqual("fl", longest_common_prefix_v2(["flower", "flow",
"flight"]))
self.assertEqual("", longest_common_prefix_v2(["dog", "racecar",
"car"]))
# Test third solution
self.assertEqual("fl", longest_common_prefix_v3(["flower", "flow",
"flight"]))
self.assertEqual("", longest_common_prefix_v3(["dog", "racecar",
"car"]))
| TestLongestCommonPrefix |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/pydantic/types.py | {
"start": 1539,
"end": 2654
} | class ____(_fields.PydanticMetadata):
strict: bool = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BOOLEAN TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
StrictBool = Annotated[bool, Strict()]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTEGER TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def conint(
*,
strict: bool | None = None,
gt: int | None = None,
ge: int | None = None,
lt: int | None = None,
le: int | None = None,
multiple_of: int | None = None,
) -> type[int]:
return Annotated[ # type: ignore[return-value]
int,
Strict(strict) if strict is not None else None,
annotated_types.Interval(gt=gt, ge=ge, lt=lt, le=le),
annotated_types.MultipleOf(multiple_of) if multiple_of is not None else None,
]
PositiveInt = Annotated[int, annotated_types.Gt(0)]
NegativeInt = Annotated[int, annotated_types.Lt(0)]
NonPositiveInt = Annotated[int, annotated_types.Le(0)]
NonNegativeInt = Annotated[int, annotated_types.Ge(0)]
StrictInt = Annotated[int, Strict()]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLOAT TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@_dataclasses.dataclass
| Strict |
python | django__django | django/forms/utils.py | {
"start": 4344,
"end": 7972
} | class ____(UserList, list, RenderableErrorMixin):
"""
A collection of errors that knows how to display itself in various formats.
"""
template_name = "django/forms/errors/list/default.html"
template_name_text = "django/forms/errors/list/text.txt"
template_name_ul = "django/forms/errors/list/ul.html"
def __init__(self, initlist=None, error_class=None, renderer=None, field_id=None):
super().__init__(initlist)
if error_class is None:
self.error_class = "errorlist"
else:
self.error_class = "errorlist {}".format(error_class)
self.renderer = renderer or get_default_renderer()
self.field_id = field_id
def as_data(self):
return ValidationError(self.data).error_list
def copy(self):
copy = super().copy()
copy.error_class = self.error_class
copy.renderer = self.renderer
return copy
def get_json_data(self, escape_html=False):
errors = []
for error in self.as_data():
message = next(iter(error))
errors.append(
{
"message": escape(message) if escape_html else message,
"code": error.code or "",
}
)
return errors
def get_context(self):
return {
"errors": self,
"error_class": self.error_class,
}
def __repr__(self):
return repr(list(self))
def __contains__(self, item):
return item in list(self)
def __eq__(self, other):
return list(self) == other
def __getitem__(self, i):
error = self.data[i]
if isinstance(error, ValidationError):
return next(iter(error))
return error
def __reduce_ex__(self, *args, **kwargs):
# The `list` reduce function returns an iterator as the fourth element
# that is normally used for repopulating. Since we only inherit from
# `list` for `isinstance` backward compatibility (Refs #17413) we
# nullify this iterator as it would otherwise result in duplicate
# entries. (Refs #23594)
info = super(UserList, self).__reduce_ex__(*args, **kwargs)
return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
"""
When time zone support is enabled, convert naive datetimes
entered in the current time zone to aware datetimes.
"""
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
if timezone._datetime_ambiguous_or_imaginary(value, current_timezone):
raise ValueError("Ambiguous or non-existent time.")
return timezone.make_aware(value, current_timezone)
except Exception as exc:
raise ValidationError(
_(
"%(datetime)s couldn’t be interpreted "
"in time zone %(current_timezone)s; it "
"may be ambiguous or it may not exist."
),
code="ambiguous_timezone",
params={"datetime": value, "current_timezone": current_timezone},
) from exc
return value
def to_current_timezone(value):
"""
When time zone support is enabled, convert aware datetimes
to naive datetimes in the current time zone for display.
"""
if settings.USE_TZ and value is not None and timezone.is_aware(value):
return timezone.make_naive(value)
return value
| ErrorList |
python | pytorch__pytorch | test/test_mkldnn.py | {
"start": 1256,
"end": 77488
} | class ____(TestCase):
def test_conversion(self):
for cpu_tensor in [torch.randn((1, 2, 3, 4),
dtype=torch.float, device=torch.device('cpu')),
torch.randn((1, 2, 3, 4, 5),
dtype=torch.float, device=torch.device('cpu'))[:, :, :, :, 1]]:
cpu_tensor.requires_grad_()
convert_dtypes = {torch.half: [torch.half, torch.float],
torch.bfloat16: [torch.bfloat16, torch.float],
torch.float: [torch.bfloat16, torch.half]}
# float/bfloat16/half cpu tensor to mkldnn tensortensor.
for dtype1 in types:
mkldnn_tensor = cpu_tensor.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat tensor to cpu float or bfloat tensor
for dtype2 in convert_dtypes[dtype1]:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
atol = 1e-5 if dtype1 == torch.float and dtype2 == torch.float else 1e-2
self.assertEqual(cpu_tensor, cpu_tensor_2.float(), atol=atol, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 == torch.float:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size() / 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
# bfloat cpu tensor to mkldnn float tensor or bfloat tensor.
for orig_dtype in [torch.half, torch.bfloat16]:
cpu_tensor_lower = cpu_tensor.to(dtype=orig_dtype)
for dtype1 in convert_dtypes[orig_dtype]:
mkldnn_tensor = cpu_tensor_lower.to_mkldnn(dtype1)
self.assertEqual(mkldnn_tensor.dtype, dtype1)
cpu_tensor_1 = mkldnn_tensor.to_dense()
# not given dtype for to_dense, mkldnn tensor has same dtype with cpu tensor
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
# mkldnn float/bfloat/half tensor to cpu float/bfloat/half tensor
for dtype2 in convert_dtypes[cpu_tensor_lower.dtype]:
cpu_tensor_2 = mkldnn_tensor.to_dense(dtype2)
self.assertEqual(cpu_tensor_2.dtype, dtype2)
self.assertEqual(cpu_tensor_lower,
cpu_tensor_2.to(dtype=cpu_tensor_lower.dtype), atol=1e-5, rtol=0)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), torch.Size([1, 2, 3, 4]))
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
if dtype1 in [torch.bfloat16, torch.half]:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_lower.element_size())
else:
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor_lower.element_size() * 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
def test_conversion_byte_char(self):
int8_types = [torch.int8, torch.uint8]
for int8_type in int8_types:
low = -100 if int8_type is torch.int8 else 0
high = 100
for cpu_tensor in [torch.randint(
low=low,
high=high,
size=(1, 2, 3, 4),
dtype=torch.int64,
device=torch.device('cpu')),
torch.randint(
low=low,
high=high,
size=(1, 2, 3, 4, 5),
dtype=torch.int64,
device=torch.device('cpu'))[:, :, :, :, :]]:
cpu_tensor = cpu_tensor.to(dtype=int8_type)
mkldnn_tensor = cpu_tensor.to_mkldnn(int8_type)
self.assertEqual(mkldnn_tensor.dtype, int8_type)
cpu_tensor_1 = mkldnn_tensor.to_dense()
self.assertEqual(mkldnn_tensor.dtype, cpu_tensor_1.dtype)
self.assertEqual(cpu_tensor, cpu_tensor_1)
self.assertEqual(mkldnn_tensor.device, torch.device('cpu'))
self.assertEqual(mkldnn_tensor.size(), cpu_tensor.size())
self.assertEqual(mkldnn_tensor.numel(), cpu_tensor.numel())
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size())
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
def test_copy(self):
x = torch.randn(4, 5, dtype=torch.float32)
mkldnn_x = x.to_mkldnn()
mkldnn_y = torch.randn(4, 5, dtype=torch.float32).to_mkldnn()
mkldnn_z = torch.randn(4, 10, dtype=torch.float32).to_mkldnn()
mkldnn_y.copy_(mkldnn_x)
self.assertEqual(x, mkldnn_y.to_dense())
self.assertRaisesRegex(RuntimeError,
"copy_mkldnn_: only support same size tensor.",
lambda: mkldnn_z.copy_(mkldnn_x))
self.assertRaisesRegex(RuntimeError,
"copy_mkldnn_: between mkldnn layout and dense Tensors is not implemented! "
"Found self type = torch.FloatTensor and src type = Mkldnntorch.FloatTensor",
lambda: x.copy_(mkldnn_x))
self.assertRaisesRegex(RuntimeError,
"copy_mkldnn_: between mkldnn layout and dense Tensors is not implemented! "
"Found self type = Mkldnntorch.FloatTensor and src type = torch.FloatTensor",
lambda: mkldnn_x.copy_(x))
def test_unsupported(self):
# unsupported types and unsupported types with gpu
for dtype in [torch.double, torch.uint8, torch.int8,
torch.short, torch.int, torch.long]:
with self.assertRaises(RuntimeError):
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn()
if torch.cuda.is_available():
with self.assertRaises(RuntimeError):
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn()
# supported type with gpu
if torch.cuda.is_available():
with self.assertRaises(RuntimeError):
torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn()
# some factory functions
for creator in [torch.ones, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError):
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)
def test_mkldnn_conv_shapecheck(self):
input = torch.full((1, 1, 1, 24,), 1, dtype=torch.float32)
w1 = torch.full((1, 1, 1, 24,), 1, dtype=torch.float32)
b1 = torch.full((1,), 1, dtype=torch.float32)
w2 = torch.full((1, 1, 2, 24,), 1, dtype=torch.float32)
b2 = torch.full((2,), 1, dtype=torch.float32)
options = zip([-1, 0, 0, 0, 0, 0, 0], # padding
[1, 0, 1, 1, 1, 1, 1], # stride
[1, 1, 0, 1, 1, 1, 1], # dilation
[1, 1, 1, 0, 2, 1, 1], # groups
[w1, w1, w1, w1, w1, w1, w2], # weight
[b1, b1, b1, b1, b1, b2, b1]) # bias
for pad, st, dil, gr, w, b in options:
with self.assertRaises(RuntimeError) as _:
torch.mkldnn_convolution(input, w, b, [pad] * 2, [st] * 2, [dil] * 2, gr)
def test_autograd_to_mkldnn(self):
# MKLDNN only supports float32
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
def func(root):
return root.to_mkldnn().to_dense()
# because MKLDNN only supports float32, we need to lessen the precision.
# these numbers are just empirical results that seem to work.
self.assertWarnsRegex(UserWarning,
'double precision floating point',
lambda: gradcheck(func, [root], atol=4e-2, rtol=1e-2))
self.assertWarnsRegex(UserWarning,
'double precision floating point',
lambda: gradgradcheck(func, [root], atol=4e-2, rtol=1e-2))
def test_autograd_from_mkldnn(self):
# MKLDNN only supports float32
root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
def func(root):
return root.to_dense()
# because MKLDNN only supports float32, we need to lessen the precision.
# these numbers are just empirical results that seem to work.
self.assertWarnsRegex(UserWarning,
'double precision floating point',
lambda: gradcheck(func, [root], atol=4e-2, rtol=1e-2))
def test_detach(self):
root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
detach = root.detach()
self.assertEqual((4, 5), detach.size())
self.assertFalse(detach.requires_grad)
self.assertTrue(root.requires_grad)
detach_ = root.detach_()
self.assertEqual((4, 5), detach_.size())
self.assertFalse(detach_.requires_grad)
self.assertFalse(root.requires_grad)
def test_repr(self):
self.assertTrue("layout=torch._mkldnn" in str(torch.randn((1, 2, 3, 4),
dtype=torch.float, device=torch.device('cpu')).to_mkldnn()))
def _test_conv_base(self, dim):
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
for train, bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32)
conv = conv_module[dim](in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).float()
x1 = x.clone()
x2 = x.clone().to_mkldnn()
if not train:
mkldnn_conv = mkldnn_utils.to_mkldnn(copy.deepcopy(conv))
elif train and dim != 1:
# TODO: enable conv1d training.
x1.requires_grad_()
x2.requires_grad_()
mkldnn_conv = copy.deepcopy(conv)
with torch.backends.mkldnn.flags(enabled=False):
y_aten = conv(x1)
if train and dim != 1:
loss1 = y_aten.sum()
loss1.backward()
if not train or (train and dim != 1):
y_mkldnn = mkldnn_conv(x2).to_dense()
if self.precision != 0:
self.assertEqual(y_aten, y_mkldnn, atol=self.precision, rtol=self.precision)
else:
self.assertEqual(y_aten, y_mkldnn)
if not train:
self._test_serialization(mkldnn_conv, (x.to_mkldnn(),))
self._test_tracing(mkldnn_conv, (x.to_mkldnn(),))
elif dim != 1:
loss2 = y_mkldnn.sum()
loss2.backward()
self.assertTrue(x2.grad.is_mkldnn)
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(conv.weight.grad,
mkldnn_conv.weight.grad,
atol=1e-3,
rtol=1e-3)
if bias:
self.assertEqual(conv.bias.grad, mkldnn_conv.bias.grad)
@reduced_f32_on_and_off()
def test_conv1d(self):
self._test_conv_base(dim=1)
@reduced_f32_on_and_off()
def test_conv2d(self):
self._test_conv_base(dim=2)
@reduced_f32_on_and_off()
def test_conv3d(self):
self._test_conv_base(dim=3)
def _test_conv_deconv_lower_precision_base(self, dim, conv_module, dtype):
input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
options = itertools.product([True, False], [1, 2], [1, 4])
for bias, dilation, groups in options:
N = torch.randint(1, 3, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32)
# TODO: remove this when group depthwise is supported:
if conv_module in [torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d] and groups > 1 and C == groups:
continue
conv = conv_module(in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).float()
x_lower = x.to(dtype=dtype)
if (dtype == torch.bfloat16 and torch.ops.mkldnn._is_mkldnn_bf16_supported()) or \
(dtype == torch.half and torch.ops.mkldnn._is_mkldnn_fp16_supported()):
mkldnn_conv = mkldnn_utils.to_mkldnn(copy.deepcopy(conv))
mkldnn_conv_lower = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), dtype)
y = mkldnn_conv(x.to_mkldnn()).to_dense()
y_lower = mkldnn_conv_lower(x_lower.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_lower, atol=1e-1, rtol=1e-3)
else:
msg = {
torch.bfloat16: r"bf16 path needs the cpu support avx_ne_convert or avx512bw, avx512vl and avx512dq",
torch.half: r"fp16 path needs the cpu support avx_ne_convert or avx512_fp16",
}
with self.assertRaisesRegex(RuntimeError, msg[dtype]):
mkldnn_conv_lower = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), dtype)
y_lower = mkldnn_conv_lower(x_lower.to_mkldnn()).to_dense(torch.float32)
# test thnn impl
conv_lower = copy.deepcopy(conv).to(dtype=dtype)
conv_ref = copy.deepcopy(conv_lower).float()
with torch.backends.mkldnn.flags(enabled=False):
x_ref = x_lower.clone().float().detach().requires_grad_()
x_lower.requires_grad_()
y = conv_ref(x_ref)
y_lower = conv_lower(x_lower).float()
self.assertEqual(y, y_lower, atol=5e-2, rtol=5e-3)
@dtypes(torch.float16, torch.bfloat16)
def test_conv_deconv_1d_lower_precision(self, dtype):
self._test_conv_deconv_lower_precision_base(1, torch.nn.Conv1d, dtype=dtype)
self._test_conv_deconv_lower_precision_base(1, torch.nn.ConvTranspose1d, dtype=dtype)
@dtypes(torch.float16, torch.bfloat16)
def test_conv_deconv_2d_lower_precision(self, dtype):
self._test_conv_deconv_lower_precision_base(2, torch.nn.Conv2d, dtype=dtype)
self._test_conv_deconv_lower_precision_base(2, torch.nn.ConvTranspose2d, dtype=dtype)
@dtypes(torch.float16, torch.bfloat16)
def test_conv_deconv_3d_lower_precision(self, dtype):
self._test_conv_deconv_lower_precision_base(3, torch.nn.Conv3d, dtype=dtype)
self._test_conv_deconv_lower_precision_base(3, torch.nn.ConvTranspose3d, dtype=dtype)
def _test_conv_deconv_nhwc_base(self, conv_module, weight_memory_format, dtype, prec=None):
input_shapes = {2: (55, 55), 3: (14, 14, 14)}
options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
if conv_module in [torch.nn.Conv2d, torch.nn.ConvTranspose2d]:
cl_format = torch.channels_last
input_shape = input_shapes[2]
elif conv_module in [torch.nn.Conv3d, torch.nn.ConvTranspose3d]:
cl_format = torch.channels_last_3d
input_shape = input_shapes[3]
for train, bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shape
x = torch.randn(x_shape, dtype=dtype)
# conv1: mkldnn conv/deconv in contiguous memory format (nchw)
# conv2: mkldnn conv/deconv in channels last memory format (nhwc)
conv1 = conv_module(in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).to(dtype=dtype)
conv2 = copy.deepcopy(conv1).to(memory_format=weight_memory_format)
x1 = x.clone()
x2 = x.clone().to(memory_format=cl_format)
if train:
x1.requires_grad_()
x2.requires_grad_()
y1 = conv1(x1)
y2 = conv2(x2)
self.assertEqual(y1, y2, atol=prec, rtol=prec)
if train:
y1.sum().backward()
y2.sum().backward()
self.assertTrue(x2.grad.is_contiguous(memory_format=cl_format))
self.assertEqual(conv1.weight.grad,
conv2.weight.grad,
atol=1e-3,
rtol=1e-3)
if bias:
self.assertEqual(conv1.bias.grad, conv2.bias.grad, atol=prec, rtol=prec)
self.assertEqual(x1.grad, x2.grad, atol=prec, rtol=prec)
@reduced_f32_on_and_off()
def test_conv_nhwc_fp32(self):
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.contiguous_format, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.channels_last_3d, dtype=torch.float32)
@dtypes(torch.float16, torch.bfloat16)
def test_conv_nhwc_lower_precision(self, dtype):
# when torch.ops.mkldnn._is_mkldnn_bf16_supported() or torch.ops.mkldnn._is_mkldnn_fp16_supported()
# returns false, bf16/fp16 CPU conv will fall back to thnn impl
support_checks = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported
}
if support_checks[dtype]():
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.channels_last_3d, dtype=dtype)
# BF16/FP16 fallback implementations are divided into two parts im2col+gemm,
# and the number of data type conversions in the middle is more than that of onednn's direct conv,
# resulting in additional accuracy loss.
precisions = {
torch.bfloat16: 1e-2,
torch.float16: 2e-3,
}
prec = precisions[dtype]
with torch.backends.mkldnn.flags(enabled=False):
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.channels_last_3d, dtype=dtype, prec=prec)
@reduced_f32_on_and_off()
def test_conv_transpose_nhwc_fp32(self):
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.contiguous_format, dtype=torch.float32)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.channels_last_3d, dtype=torch.float32)
@dtypes(torch.float16, torch.bfloat16)
def test_conv_transpose_nhwc_lower_precision(self, dtype):
# when torch.ops.mkldnn._is_mkldnn_bf16_supported() or torch.ops.mkldnn._is_mkldnn_fp16_supported()
# returns false, bf16/fp16 CPU conv will fall back to thnn impl
support_checks = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported
}
if support_checks[dtype]():
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.channels_last_3d, dtype=dtype)
# BF16/FP16 fallback implementations are divided into two parts col2im+gemm,
# and the number of data type conversions in the middle is more than that of onednn's direct conv,
# resulting in additional accuracy loss.
precisions = {
torch.bfloat16: 2e-2,
torch.float16: 3e-3,
}
prec = precisions[dtype]
with torch.backends.mkldnn.flags(enabled=False):
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.channels_last_3d, dtype=dtype, prec=prec)
def _test_conv_transpose_base(self, dim):
conv_module = {
1: torch.nn.ConvTranspose1d,
2: torch.nn.ConvTranspose2d,
3: torch.nn.ConvTranspose3d
}
input_shapes = {1: (55,), 2: (28, 28), 3: (14, 14, 14)}
options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
for train, bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shapes[dim]
data = torch.randn(x_shape, dtype=torch.float32)
# conv: mkldnn transpose conv fp32
# conv_ref: thnn transpose conv fp32
conv = conv_module[dim](in_channels=C,
out_channels=M,
kernel_size=3,
stride=1,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).to(dtype=torch.float32)
x = data.clone()
x_ref = x.clone()
if train:
x.requires_grad_()
x_ref.requires_grad_()
conv_ref = copy.deepcopy(conv)
with torch.backends.mkldnn.flags(enabled=False):
y_ref = conv_ref(x_ref)
if train:
y_ref.sum().backward()
y = conv(x)
if train:
y.sum().backward()
if self.precision != 0:
self.assertEqual(y, y_ref, atol=self.precision, rtol=self.precision)
else:
self.assertEqual(y, y_ref)
if train:
self.assertEqual(x.grad, x_ref.grad)
self.assertEqual(conv.weight.grad,
conv_ref.weight.grad,
atol=1e-3,
rtol=1e-3)
if bias:
self.assertEqual(conv.bias.grad, conv_ref.bias.grad)
@reduced_f32_on_and_off()
def test_conv_transpose1d(self):
self._test_conv_transpose_base(dim=1)
@reduced_f32_on_and_off()
def test_conv_transpose2d(self):
self._test_conv_transpose_base(dim=2)
@reduced_f32_on_and_off()
def test_conv_transpose3d(self):
self._test_conv_transpose_base(dim=3)
def test_conv2d_legacy_jit_model(self):
"""
MKLDNN integration used to serialize models with 5d weight for grouped
convolutions, we'd like to preserve this behavior
"""
g = 4
conv2d = torch.nn.Conv2d(16, 16, 3, groups=g)
conv2d_mkldnn = torch.utils.mkldnn.to_mkldnn(conv2d)
# contrive legacy conv2d module with a 5-d weight
o, i, h, w = conv2d.weight.shape
weight_5d = conv2d.weight.reshape((g, o // g, i, h, w))
conv2d_mkldnn.weight = weight_5d.to_mkldnn()
x = torch.randn(1, 16, 8, 8)
with TemporaryFileName() as fname:
torch.jit.save(conv2d_mkldnn, fname)
conv2d_loaded = torch.jit.load(fname)
self.assertEqual(conv2d_mkldnn.weight.ndimension(), 5)
self.assertEqual(conv2d_loaded.weight.ndimension(), 4)
self.assertEqual(
conv2d(x),
conv2d_loaded(x.to_mkldnn()).to_dense())
# This test is to check whether 1D conv is supported for mkldnn tensor,
# which is exposed by Issue https://github.com/pytorch/pytorch/issues/68034.
def test_conv1d_functional(self):
input = torch.randn(2, 3, 10).to_mkldnn()
weight = torch.randn(3, 3, 3).to_mkldnn()
bias = torch.randn(3).to_mkldnn()
output = torch.nn.functional.conv1d(input, weight, bias)
self.assertEqual(output.size(), torch.Size([2, 3, 8]))
def test_relu(self):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = torch.relu(x1)
y2 = torch.relu(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
def test_relu_(self):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = torch.relu_(x1.clone())
y2 = torch.relu_(x2.clone()).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def _test_relu_bf16_base(self, name):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x_bf16 = x.bfloat16()
fn = getattr(torch, name)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = fn(x.to_mkldnn()).to_dense()
y_bf16 = fn(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: fn(x_bf16.to_mkldnn()))
def test_relu_bf16(self):
self._test_relu_bf16_base("relu")
def test_relu_inplace_bf16(self):
self._test_relu_bf16_base("relu_")
def test_gelu(self):
m = torch.nn.GELU()
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = m(x1)
y2 = m(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def test_gelu_bf16(self):
m = torch.nn.GELU()
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().to_mkldnn().requires_grad_()
x2 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y1 = m(x1).to_dense()
y2 = m(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2.to(torch.float32), atol=1e-1, rtol=0)
self.assertEqual(x1.grad.to_dense(), x2.grad.to_dense(torch.float32), atol=1e-2, rtol=0)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m(x2))
def _test_prelu_base(self, size, num_channels):
x = torch.randn(size, dtype=torch.float32)
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
x3 = x.clone().to_mkldnn().requires_grad_()
m1 = torch.nn.PReLU(num_channels)
m2 = mkldnn_utils.to_mkldnn(copy.deepcopy(m1))
m3 = copy.deepcopy(m1)
y1 = m1(x1)
y2 = m2(x2).to_dense()
y3 = m3(x3).to_dense() # Only convert data to mkldnn, weight is Aten tensor
loss1 = y1.sum()
loss1.backward()
loss2 = y2.sum()
loss2.backward()
loss3 = y3.sum()
loss3.backward()
self.assertEqual(y1, y2)
self.assertEqual(y1, y3)
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(x1.grad, x3.grad.to_dense())
def test_prelu(self):
self._test_prelu_base(torch.Size([16]), 1)
self._test_prelu_base(torch.Size([16, 64]), 1)
self._test_prelu_base(torch.Size([16, 64]), 64)
self._test_prelu_base(torch.Size([16, 64, 112]), 1)
self._test_prelu_base(torch.Size([16, 64, 112]), 64)
self._test_prelu_base(torch.Size([16, 64, 112, 112]), 1)
self._test_prelu_base(torch.Size([16, 64, 112, 112]), 64)
self._test_prelu_base(torch.Size([16, 64, 112, 112, 1]), 1)
self._test_prelu_base(torch.Size([16, 64, 112, 112, 1]), 64)
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def _test_prelu_bf16_base(self, size, num_channels):
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
x = torch.randn(size, dtype=torch.float32)
x_fp32 = x.clone().to_mkldnn().requires_grad_()
x_bf16 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
m = mkldnn_utils.to_mkldnn(torch.nn.PReLU())
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
y = m(x_fp32).to_dense()
y_bf16 = m_bf16(x_bf16).to_dense()
self.assertEqual(y, y_bf16.to(torch.float32), atol=1e-1, rtol=1e-3)
loss = y.sum()
loss.backward()
loss_bf16 = y_bf16.sum()
loss_bf16.backward()
self.assertEqual(x_fp32.grad.to_dense(), x_bf16.grad.to_dense(torch.float32))
else:
x_bf16 = torch.randn(size, dtype=torch.bfloat16).requires_grad_()
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m_bf16(x_bf16))
def test_prelu_bf16(self):
self._test_prelu_bf16_base(torch.Size([16]), 1)
self._test_prelu_bf16_base(torch.Size([16, 64]), 1)
self._test_prelu_bf16_base(torch.Size([16, 64]), 64)
self._test_prelu_bf16_base(torch.Size([16, 64, 112]), 1)
self._test_prelu_bf16_base(torch.Size([16, 64, 112]), 64)
self._test_prelu_bf16_base(torch.Size([16, 64, 112, 112, 1]), 1)
self._test_prelu_bf16_base(torch.Size([16, 64, 112, 112, 1]), 64)
def _test_max_pool_base(self, dim, input):
pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
for stride in [1, 2, 3]:
for ceil_mode in [False, True]:
max_pool = pool_module[dim](
kernel_size=3 if not ceil_mode else 7,
stride=stride,
padding=1,
ceil_mode=ceil_mode)
x1 = input.clone().requires_grad_()
x2 = input.clone().to_mkldnn().requires_grad_()
y1 = max_pool(x1)
y2 = max_pool(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
def test_max_pool2d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
self._test_max_pool_base(dim=2, input=x)
def test_max_pool3d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for D, H, W in [(64, 64, 64), (35, 39, 35), (16, 19, 20), [7, 8, 9]]:
x = torch.randn(N, C, D, H, W, dtype=torch.float32) * 10
self._test_max_pool_base(dim=3, input=x)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_max_pool_bf16_base(self, dim, input):
        """Compare bf16 mkldnn max-pool against fp32 mkldnn (forward only).

        On CPUs without bf16 support, only check that the bf16 path raises.
        """
        pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
        x_bf16 = input.bfloat16()
        for stride in [1, 2, 3]:
            for ceil_mode in [False, True]:
                max_pool = pool_module[dim](
                    kernel_size=3 if not ceil_mode else 7,
                    stride=stride,
                    padding=1,
                    ceil_mode=ceil_mode)
                if torch.ops.mkldnn._is_mkldnn_bf16_supported():
                    y = max_pool(input.to_mkldnn()).to_dense()
                    y_bf16 = max_pool(x_bf16.to_mkldnn()).to_dense(torch.float32)
                    # Loose tolerance: bf16 has ~3 decimal digits of precision.
                    self.assertEqual(y, y_bf16, atol=0.1, rtol=1e-3)
                else:
                    msg = f"mkldnn_max_pool{dim:d}d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
                    self.assertRaisesRegex(RuntimeError,
                                           msg,
                                           lambda: max_pool(x_bf16.to_mkldnn()))
def test_max_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
self._test_max_pool_bf16_base(dim=2, input=x)
def test_max_pool3d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for D, H, W in [(64, 64, 64), (35, 39, 35), (16, 19, 20), [7, 8, 9]]:
x = torch.randn(N, C, D, H, W, dtype=torch.float32) * 10
self._test_max_pool_bf16_base(dim=3, input=x)
def test_max_pool2d_stride_none(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
for H, W in [(64, 64), (35, 39), (16, 19), [7, 8]]:
x = torch.randn(N, C, H, W, dtype=torch.float32) * 10
for ceil_mode in [False, True]:
y1 = F.max_pool2d(
x,
kernel_size=3 if not ceil_mode else 7,
stride=None,
padding=1,
ceil_mode=ceil_mode)
y2 = F.max_pool2d(
x.to_mkldnn(),
kernel_size=3 if not ceil_mode else 7,
stride=None,
padding=1,
ceil_mode=ceil_mode)
self.assertEqual(y1, y2.to_dense())
# https://github.com/pytorch/pytorch/issues/127111
@xfailIfTorchDynamo
def test_max_pool_unsupported(self):
# OneDNN not support dilation max_pooling, will be avilabled in v2.0.
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
# 2d dilation case
x = torch.randn(N, C, 7, 7, dtype=torch.float32).to_mkldnn()
max_pool2d = torch.nn.MaxPool2d(
kernel_size=3,
stride=3,
padding=1,
dilation=2)
self.assertRaisesRegex(RuntimeError,
'mkldnn_max_pool2d does not support dilation case',
lambda: max_pool2d(x))
# 3d dilation case
x = torch.randn(N, C, 7, 7, 7, dtype=torch.float32).to_mkldnn()
max_pool3d = torch.nn.MaxPool3d(
kernel_size=3,
stride=3,
padding=1,
dilation=2)
self.assertRaisesRegex(RuntimeError,
'mkldnn_max_pool3d does not support dilation case',
lambda: max_pool3d(x))
def _test_avg_pool_base(self, dim, input):
avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
for count_include_pad in [True, False]:
avg_pool = avg_module[dim](
kernel_size=3,
stride=2,
padding=1,
count_include_pad=count_include_pad)
x1 = input.clone().requires_grad_()
x2 = input.clone().to_mkldnn().requires_grad_()
y1 = avg_pool(x1)
y2 = avg_pool(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
def test_avg_pool2d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
self._test_avg_pool_base(dim=2, input=x)
def test_avg_pool3d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, 64, dtype=torch.float32) * 10
self._test_avg_pool_base(dim=3, input=x)
    @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
    def _test_avg_pool_bf16_base(self, dim, input):
        """Compare bf16 mkldnn avg-pool against fp32 mkldnn (forward only).

        On CPUs without bf16 support, only check that the bf16 path raises.
        """
        avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
        x_bf16 = input.bfloat16()
        for count_include_pad in [True, False]:
            avg_pool = avg_module[dim](
                kernel_size=3,
                stride=2,
                padding=1,
                count_include_pad=count_include_pad)
            if torch.ops.mkldnn._is_mkldnn_bf16_supported():
                y = avg_pool(input.to_mkldnn()).to_dense()
                y_bf16 = avg_pool(x_bf16.to_mkldnn()).to_dense(torch.float)
                # Loose tolerance: bf16 has ~3 decimal digits of precision.
                self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
            else:
                msg = f"mkldnn_avg_pool{dim:d}d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
                self.assertRaisesRegex(RuntimeError,
                                       msg,
                                       lambda: avg_pool(x_bf16.to_mkldnn()))
def test_avg_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
self._test_avg_pool_bf16_base(dim=2, input=x)
def test_avg_pool3d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, 64, dtype=torch.float32) * 10
self._test_avg_pool_bf16_base(dim=3, input=x)
def test_avg_pool2d_stride_none(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 64, 64, dtype=torch.float32) * 10
for count_include_pad in [True, False]:
y1 = F.avg_pool2d(
x,
kernel_size=3,
stride=None,
padding=1,
count_include_pad=count_include_pad)
y2 = F.avg_pool2d(
x.to_mkldnn(),
kernel_size=3,
stride=None,
padding=1,
count_include_pad=count_include_pad)
self.assertEqual(y1, y2.to_dense())
def test_adaptive_avg_pool2d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
y1 = adaptive_avg_pool2d(x1)
y2 = adaptive_avg_pool2d(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad.to_dense())
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def test_adaptive_avg_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
x_bf16 = x.bfloat16()
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = adaptive_avg_pool2d(x.to_mkldnn()).to_dense()
y_bf16 = adaptive_avg_pool2d(x.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_adaptive_avg_pool2d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: adaptive_avg_pool2d(x_bf16.to_mkldnn()))
def _test_batch_norm_base(self, dim, channels, input):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
bn = bn_module[dim](channels).float().train(False)
mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn))
self.assertEqual(
bn(input),
mkldnn_bn(input.to_mkldnn()).to_dense())
self._test_serialization(mkldnn_bn, (input.to_mkldnn(),))
self._test_tracing(mkldnn_bn, (input.to_mkldnn(),))
    def _test_batch_norm_train_base(self, dim, channels, input):
        """Training-mode batch norm parity: outputs, grads and running stats.

        The "mkldnn" module here is a plain deepcopy fed an mkldnn tensor,
        exercising the mkldnn dispatch through autograd.
        """
        # TODO: support 3d batchnorm training.
        bn_module = {2 : torch.nn.BatchNorm2d}
        # TODO: support none affine.
        options = itertools.product([True], [True, False])
        for affine, track_running_stats in options:
            bn = bn_module[dim](
                num_features=channels,
                affine=affine,
                track_running_stats=track_running_stats).float().train(True)
            mkldnn_bn = copy.deepcopy(bn)
            x1 = input.clone().requires_grad_()
            x2 = input.clone().to_mkldnn().requires_grad_()
            y1 = bn(x1)
            y2 = mkldnn_bn(x2).to_dense()
            loss1 = y1.sum()
            loss2 = y2.sum()
            loss1.backward()
            loss2.backward()
            self.assertEqual(y1, y2)
            self.assertEqual(x1.grad, x2.grad.to_dense())
            self.assertEqual(bn.weight.grad, mkldnn_bn.weight.grad, rtol=1e-3, atol=1e-3)
            if track_running_stats:
                self.assertEqual(bn.running_mean, mkldnn_bn.running_mean)
                self.assertEqual(bn.running_var, mkldnn_bn.running_var, rtol=1e-5, atol=1e-5)
def test_batch_norm_2d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 100, (1,)).item()
x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
self._test_batch_norm_base(dim=2, channels=C, input=x)
self._test_batch_norm_train_base(dim=2, channels=C, input=x)
def test_batch_norm_3d(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 100, (1,)).item()
x = torch.randn(N, C, 30, 30, 30, dtype=torch.float32) * 10
self._test_batch_norm_base(dim=3, channels=C, input=x)
@unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path")
def _test_batch_norm_bf16_base(self, dim, channels, input):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
x_bf16 = input.bfloat16()
# TODO: support training
for train in [False]:
bn = bn_module[dim](channels).float().train(train)
mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn)) # noqa: F841
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = bn(input.to_mkldnn().to_dense())
y_bf16 = bn(input.to_mkldnn().to_dense(torch.float))
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_batch_norm: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: bn(x_bf16.to_mkldnn()))
def test_batch_norm_2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 100, (1,)).item()
x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
self._test_batch_norm_bf16_base(dim=2, channels=C, input=x)
def test_batch_norm_3d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 100, (1,)).item()
x = torch.randn(N, C, 30, 30, 30, dtype=torch.float32) * 10
self._test_batch_norm_bf16_base(dim=3, channels=C, input=x)
    def test_add(self):
        """add / add_ / add-with-out parity between dense and mkldnn tensors,
        including in-place `out=` aliasing of either input."""
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        alpha = torch.randn(1, dtype=torch.float32).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        y = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        mx = x.to_mkldnn()
        my = y.to_mkldnn()
        # add
        self.assertEqual(
            x + y,
            (mx + my).to_dense())
        self.assertEqual(
            torch.add(x, y, alpha=alpha),
            torch.add(mx, my, alpha=alpha).to_dense())
        # add_
        x += y
        mx += my
        self.assertEqual(x, mx.to_dense())
        # add_out
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.add(x, y, alpha=alpha, out=out)
        torch.add(mx, my, alpha=alpha, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
        # add_out inplace case: first input
        torch.add(x, y, alpha=alpha, out=x)
        torch.add(mx, my, alpha=alpha, out=mx)
        self.assertEqual(x, mx.to_dense())
        # add_out inplace case: second input
        torch.add(x, y, alpha=alpha, out=y)
        torch.add(mx, my, alpha=alpha, out=my)
        self.assertEqual(y, my.to_dense())
    def test_mul(self):
        """mul / mul_ / mul-with-out parity between dense and mkldnn tensors,
        for both tensor-tensor and tensor-scalar operands."""
        N = torch.randint(3, 10, (1,)).item()
        C = torch.randint(3, 100, (1,)).item()
        value = torch.randn(1, dtype=torch.float32).item()
        x = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        y = torch.randn(N, C, 35, 45, dtype=torch.float32) * 10
        mx = x.to_mkldnn()
        my = y.to_mkldnn()
        # mul
        self.assertEqual(
            x * y,
            (mx * my).to_dense())
        self.assertEqual(
            x * value,
            (mx * value).to_dense())
        self.assertEqual(
            torch.mul(x, y),
            torch.mul(mx, my).to_dense())
        self.assertEqual(
            torch.mul(x, value),
            torch.mul(mx, value).to_dense())
        # mul_
        x *= y
        mx *= my
        self.assertEqual(x, mx.to_dense())
        x *= value
        mx *= value
        self.assertEqual(x, mx.to_dense())
        # mul_out
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.mul(x, y, out=out)
        torch.mul(mx, my, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
        out = x.clone()
        mkldnn_out = out.to_mkldnn()
        torch.mul(x, value, out=out)
        torch.mul(mx, value, out=mkldnn_out)
        self.assertEqual(out, mkldnn_out.to_dense())
    def test_0_dimension_tensor(self):
        """Zero-sized mkldnn tensors: unary/binary ops work in inference,
        training with a 0-dim operand raises, and conv accepts empty batches."""
        x = torch.rand([20, 20, 1, 1], dtype=torch.float)
        y = torch.rand([20, 20, 0, 1], dtype=torch.float)
        # unary ops work without modification
        out_relu = torch.relu(y)
        out_relu_mkldnn = torch.relu(y.to_mkldnn()).to_dense()
        self.assertEqual(out_relu, out_relu_mkldnn)
        out_mul = x * y
        out_mul_mkldnn = (x.to_mkldnn() * y.to_mkldnn()).to_dense()
        self.assertEqual(out_mul, out_mul_mkldnn)
        out_add = x + y
        out_add_mkldnn = (x.to_mkldnn() + y.to_mkldnn()).to_dense()
        self.assertEqual(out_add, out_add_mkldnn)
        # With autograd enabled, zero-sized operands are rejected.
        x.requires_grad_(True)
        y.requires_grad_(True)
        with self.assertRaisesRegex(RuntimeError, "0-dimension Tensor in training"):
            x.to_mkldnn() + y.to_mkldnn()
        # Mismatched shapes still raise the usual error.
        with self.assertRaisesRegex(RuntimeError, "must match"):
            torch.rand([5]).to_mkldnn() + torch.rand([0]).to_mkldnn()
        # Empty batch through conv matches the eager result.
        C = 7
        m = torch.nn.Conv2d(C, C, 3)
        x = torch.randn(0, C, C, 8, dtype=torch.float)
        out_eager = m(x)
        out_mkldnn = mkldnn_utils.to_mkldnn(m)(x)
        self.assertEqual(out_eager, out_mkldnn)
    # https://github.com/pytorch/pytorch/issues/127111
    @xfailIfTorchDynamo
    def test_view(self):
        """view() is unsupported on mkldnn tensors; the error points to reshape()."""
        x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
        self.assertRaisesRegex(RuntimeError,
                               "Change to use reshape",
                               lambda: x.view(x.size(0), -1))
def test_reshape(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
size = (x.size(0), -1)
self.assertEqual(
x.reshape(size),
x.to_mkldnn().reshape(size).to_dense(),
)
# test whether share same memory for plain format tensor
y = x.to_mkldnn()
z = y.reshape(size).add_(y.reshape(size))
self.assertEqual(
y.reshape(size).to_dense(),
z.to_dense(),
)
def test_reshape_blocked_format(self):
# construct an mkldnn blocked tensor with mkldnn conv2d
C = 7
m = mkldnn_utils.to_mkldnn(torch.nn.Conv2d(C, C, 3))
x = torch.randn(1, C, 8, 8).to_mkldnn()
# mkldnn tensor w/ blocked format
y_block = m(x)
# aten tensor w/ plain format
y_plain = y_block.to_dense()
y_block_reshape = y_block.reshape(C, -1)
y_plain_reshape = y_plain.reshape(C, -1)
self.assertEqual(y_plain_reshape, y_block_reshape.to_dense())
def test_reshape_backward(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
size = (x.size(0), -1)
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
in_features = 20
out_features = torch.randint(3, 100, (1,)).item()
linear = torch.nn.Linear(in_features, out_features).float()
y1 = linear(x1.reshape(size)).sum()
y2 = linear(x2.reshape(size).to_dense()).sum()
y1.backward()
y2.backward()
self.assertEqual(x1.grad, x2.grad.to_dense())
def test_clone(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
self.assertEqual(
x.clone(),
x.to_mkldnn().clone().to_dense(),
)
# test whether share same memory
y = x.to_mkldnn()
z = y.clone().add_(y)
self.assertNotEqual(
y.to_dense(),
z.to_dense(),
)
def test_transpose(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
for dim1 in range(x.ndim):
for dim2 in range(x.ndim):
self.assertEqual(
x.transpose(dim1, dim2),
x.to_mkldnn().transpose(dim1, dim2).to_dense(),
)
    # NOTE(review): "dime" looks like a typo for "dim"; renaming would change
    # the reported test ID, so it is only flagged here.
    def test_transpose_invalid_dime(self):
        """An out-of-range dim passed to _mkldnn_transpose raises IndexError."""
        x = torch.randn(3, 4, 5, dtype=torch.float32).to_mkldnn()
        with self.assertRaisesRegex(IndexError, "Dimension out of range"):
            torch._mkldnn_transpose(x, 0, 12)
def test_linear_non_contiguous_weight(self):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
w = torch.randn(in_features, out_features, dtype=torch.float32)
for bias in [True, False]:
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
linear = torch.nn.Linear(in_features, out_features).float()
linear.weight = torch.nn.Parameter(w.t())
mkldnn_linear = copy.deepcopy(linear)
y1 = linear(x1).sum()
y2 = mkldnn_linear(x2).to_dense().sum()
y1.backward()
y2.backward()
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(linear.weight.grad, mkldnn_linear.weight.grad)
if bias:
self.assertEqual(linear.bias.grad, mkldnn_linear.bias.grad)
def test_linear(self):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
for bias in [True, False]:
linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
self.assertEqual(
linear(x),
mkldnn_linear(x.to_mkldnn()).to_dense())
self._test_serialization(mkldnn_linear, (x.to_mkldnn(),))
self._test_tracing(mkldnn_linear, (x.to_mkldnn(),))
def test_linear_backward(self):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
for bias in [True, False]:
x1 = x.clone().requires_grad_()
x2 = x.clone().to_mkldnn().requires_grad_()
linear = torch.nn.Linear(in_features, out_features).float()
mkldnn_linear = copy.deepcopy(linear)
y1 = linear(x1).sum()
y2 = mkldnn_linear(x2).to_dense().sum()
y1.backward()
y2.backward()
self.assertEqual(x1.grad, x2.grad.to_dense())
self.assertEqual(linear.weight.grad, mkldnn_linear.weight.grad)
if bias:
self.assertEqual(linear.bias.grad, mkldnn_linear.bias.grad)
    @dtypes(torch.float16, torch.bfloat16)
    def test_linear_lowp(self, dtype):
        """Low-precision (fp16/bf16) mkldnn Linear should match fp32 within
        a dtype-dependent tolerance; without CPU support it must raise."""
        in_features = torch.randint(3, 10, (1,)).item()
        out_features = torch.randint(3, 100, (1,)).item()
        x = torch.randn(3, in_features, dtype=torch.float32) * 10
        x_lowp = x.to(dtype=dtype)
        for bias in [True, False]:
            linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
            mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
            mkldnn_linear_lowp = mkldnn_utils.to_mkldnn(
                copy.deepcopy(linear), dtype
            )
            lowp_support = {
                torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
                torch.half: torch.ops.mkldnn._is_mkldnn_fp16_supported,
            }
            if lowp_support[dtype]():
                y = mkldnn_linear(x.to_mkldnn()).to_dense()
                y_lowp = mkldnn_linear_lowp(x_lowp.to_mkldnn()).to_dense(
                    torch.float32
                )
                # fp16 carries more mantissa bits than bf16, hence tighter atol.
                if dtype == torch.bfloat16:
                    self.assertEqual(y, y_lowp, atol=1e-1, rtol=1e-3)
                else:
                    self.assertEqual(y, y_lowp, atol=5e-3, rtol=1e-3)
            else:
                msg = {
                    torch.bfloat16: r"bf16 path needs the cpu support avx_ne_convert or avx512bw, avx512vl and avx512dq",
                    torch.half: r"fp16 path needs the cpu support avx_ne_convert or avx512_fp16",
                }
                self.assertRaisesRegex(
                    RuntimeError,
                    msg[dtype],
                    lambda: mkldnn_linear_lowp(x_lowp.to_mkldnn()),
                )
def test_softmax(self):
x = torch.randn(3, 4, 5, dtype=torch.float32) * 10
for dim in range(x.ndim):
softmax = torch.nn.Softmax(dim=dim)
self.assertEqual(
softmax(x),
softmax(x.to_mkldnn()).to_dense())
def test_sigmoid(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
mkldnn_x = x.to_mkldnn()
self.assertEqual(
torch.sigmoid(x),
torch.sigmoid(mkldnn_x).to_dense(),
)
# inplace
torch.sigmoid_(x)
torch.sigmoid_(mkldnn_x)
self.assertEqual(x, mkldnn_x.to_dense())
def test_tanh(self):
x = torch.randn(4, 5, dtype=torch.float32) * 10
mkldnn_x = x.to_mkldnn()
self.assertEqual(
torch.tanh(x),
torch.tanh(mkldnn_x).to_dense(),
)
# inplace
torch.tanh_(x)
torch.tanh_(mkldnn_x)
self.assertEqual(x, mkldnn_x.to_dense())
def _test_serialization(self, module, inputs):
with TemporaryFileName() as fname:
torch.jit.save(module, fname)
loaded = torch.jit.load(fname)
self.assertEqual(
module(*inputs).to_dense(),
loaded(*inputs).to_dense())
def _test_tracing(self, module, inputs):
traced = torch.jit.trace(module, inputs)
self.assertEqual(
module(*inputs).to_dense(),
traced(*inputs).to_dense())
    def test_set_data_tensorimpl_type(self):
        """Assigning an mkldnn tensor to a dense tensor's .data must fail."""
        # Dense tensor has impl of type `TensorImpl`, while MKL-DNN tensor has impl
        # of type `OpaqueTensorImpl<IDeepTensorWrapperPtr>`.
        x = torch.randn((1, 2), dtype=torch.float, device=torch.device('cpu'))
        x_mkldnn = x.to_mkldnn()
        with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
            x.data = x_mkldnn
def test_empty(self):
x1 = torch.empty(4, 5, 2, 3, dtype=torch.float32)
x2 = torch.empty(4, 5, 2, 3, dtype=torch.float32, layout=torch._mkldnn)
self.assertEqual(x1.size(), x2.to_dense().size())
self.assertEqual(x1.dtype, x2.to_dense().dtype)
def test_zero_(self):
x1 = torch.randn(4, 5, dtype=torch.float32) * 10
x2 = x1.clone().to_mkldnn()
self.assertEqual(
x1.zero_(),
x2.zero_().to_dense(),
)
def test_is_mkldnn(self):
x = torch.randn(1, dtype=torch.float32)
self.assertFalse(x.is_mkldnn)
self.assertTrue(x.to_mkldnn().is_mkldnn)
    # legacy constructor/new doesn't support mkldnn tensors
    @skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1992")
    def test_legacy_new_failure(self):
        """Every overload of the legacy Tensor.new() raises on mkldnn tensors."""
        x = torch.randn(1, dtype=torch.float32)
        x_mkldnn = x.to_mkldnn()
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(device='cpu'))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(x.storage()))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(x))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new(torch.Size([2, 3])))
        self.assertRaises(RuntimeError, lambda: x_mkldnn.new([6]))
    def test_is_mkldnn_jit(self):
        """Tensor.is_mkldnn and to_mkldnn() are scriptable in TorchScript."""
        class EnsureMkldnn(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                # Convert only when needed; already-mkldnn inputs pass through.
                if not x.is_mkldnn:
                    x = x.to_mkldnn()
                return x
        m = EnsureMkldnn()
        x = torch.randn(1, dtype=torch.float32)
        self.assertTrue(m(x).is_mkldnn)
        self.assertTrue(m(x.to_mkldnn()).is_mkldnn)
def _test_imagenet_model(self, model):
model = model.train(False).float()
mkldnn_model = mkldnn_utils.to_mkldnn(copy.deepcopy(model))
x = torch.randn(1, 3, 224, 224, dtype=torch.float32)
with torch.no_grad():
self.assertEqual(
model(x),
mkldnn_model(x.to_mkldnn()).to_dense(),
)
    @skipIfNoTorchVision
    def test_resnet18(self):
        """End-to-end mkldnn parity for ResNet-18 (random weights)."""
        model = torchvision.models.resnet.resnet18(weights=None)
        self._test_imagenet_model(model)
    @skipIfNoTorchVision
    def test_resnext50_32x4d(self):
        """End-to-end mkldnn parity for ResNeXt-50 32x4d (random weights)."""
        model = torchvision.models.resnet.resnext50_32x4d(weights=None)
        self._test_imagenet_model(model)
def _lstm_params_list(self):
params_dict = {
"input_size": [1, 5],
"hidden_size": [5, 16],
"num_layers": [1, 3],
"bidirectional": [False, True],
"bias": [False, True],
"batch_first": [False, True],
"dropout": [0, 0.4, 0.7, 1],
"batch_size": [1, 2],
"seq_len": [1, 3],
"training": [False, True]
}
params_list = list(params_dict.values())
return params_list
def _cast_dtype(self, input, dtype):
if dtype == torch.bfloat16:
input = input.to(torch.bfloat16)
elif dtype == torch.half:
input = input.to(torch.half)
return input
    def test_lstm(self):
        """LSTM parity between the mkldnn path and the reference (mkldnn
        disabled) path over a large hyper-parameter grid, in fp32/bf16/fp16,
        covering forward, input/hidden/cell gradients and parameter gradients.
        Seeds are re-set before each run so dropout draws identical masks.
        """
        seed = 2023
        torch.manual_seed(seed)
        params_list = self._lstm_params_list()
        for dtype in types:
            bf16 = dtype == torch.bfloat16
            fp16 = dtype == torch.half
            # Tolerances widen with the precision of the dtype under test.
            rtol = 1.3e-6
            atol = 1e-5
            if bf16:
                rtol = 0.02
                atol = 0.02
            if fp16:
                rtol = 1e-3
                atol = 1e-3
            for input_size, hidden_size, num_layers, bidirectional, bias, batch_first, dropout, batch_size, seq_len, training \
                    in itertools.product(*params_list):
                num_directions = 2 if bidirectional else 1
                if batch_first:
                    input = torch.randn(batch_size, seq_len, input_size, dtype=torch.float32)
                else:
                    input = torch.randn(seq_len, batch_size, input_size, dtype=torch.float32)
                h = torch.randn(num_layers * num_directions, batch_size, hidden_size, dtype=torch.float32)
                c = torch.randn(num_layers * num_directions, batch_size, hidden_size, dtype=torch.float32)
                if fp16:
                    # TODO add training support when oneDNN support lstm FP16 training
                    training = False
                model = torch.nn.LSTM(input_size, hidden_size, num_layers, bidirectional=bidirectional,
                                      bias=bias, dropout=dropout, batch_first=batch_first).float()
                model.train() if training else model.eval()
                input1 = input.clone().requires_grad_(training)
                input2 = input.clone().requires_grad_(training)
                h1 = h.clone().requires_grad_(training)
                h2 = h.clone().requires_grad_(training)
                c1 = c.clone().requires_grad_(training)
                c2 = c.clone().requires_grad_(training)
                model1 = copy.deepcopy(model)
                model2 = copy.deepcopy(model)
                with torch.no_grad() if not training else nullcontext():
                    with torch.backends.mkldnn.flags(enabled=False):
                        torch.manual_seed(seed)
                        output1, (hn1, cn1) = self._cast_dtype(model1, dtype)(
                            self._cast_dtype(input1, dtype),
                            (
                                self._cast_dtype(h1, dtype),
                                self._cast_dtype(c1, dtype),
                            ),
                        )
                    torch.manual_seed(seed)
                    output2, (hn2, cn2) = self._cast_dtype(model2, dtype)(
                        self._cast_dtype(input2, dtype),
                        (
                            self._cast_dtype(h2, dtype),
                            self._cast_dtype(c2, dtype),
                        ),
                    )
                    self.assertEqual(output1, output2, rtol=rtol, atol=atol)
                    self.assertEqual(hn1, hn2, rtol=rtol, atol=atol)
                    self.assertEqual(cn1, cn2, rtol=rtol, atol=atol)
                    if training:
                        # Backward through output, then hn, then cn, re-seeding
                        # each time; retain_graph allows the repeated passes.
                        with torch.backends.mkldnn.flags(enabled=False):
                            torch.manual_seed(seed)
                            output1.sum().backward(retain_graph=True)
                        torch.manual_seed(seed)
                        output2.sum().backward(retain_graph=True)
                        self.assertEqual(input1.grad, input2.grad, rtol=rtol, atol=atol)
                        for name, para in model1.named_parameters():
                            self.assertEqual(para, getattr(model2, name))
                            self.assertEqual(
                                para.grad,
                                getattr(model2, name).grad,
                                rtol=rtol,
                                atol=atol,
                            )
                        with torch.backends.mkldnn.flags(enabled=False):
                            torch.manual_seed(seed)
                            hn1.sum().backward(retain_graph=True)
                        torch.manual_seed(seed)
                        hn2.sum().backward(retain_graph=True)
                        self.assertEqual(h1.grad, h2.grad, rtol=rtol, atol=atol)
                        with torch.backends.mkldnn.flags(enabled=False):
                            torch.manual_seed(seed)
                            cn1.sum().backward(retain_graph=True)
                        torch.manual_seed(seed)
                        cn2.sum().backward(retain_graph=True)
                        self.assertEqual(c1.grad, c2.grad, rtol=rtol, atol=atol)
    @dtypes(torch.float16, torch.bfloat16)
    def test_matmul_lower_precision(self, dtype):
        """matmul/bmm in fp16/bf16 match the fp32 reference, including a
        contiguous tensor whose strides are not the default contiguous ones."""
        support_check = {
            torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
            torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported,
        }
        def common(self, shape1, shape2, op, dtype):
            # Low-precision result vs. the same op run in fp32.
            a = torch.randn(shape1, dtype=dtype)
            a_ref = a.float()
            b = torch.randn(shape2, dtype=dtype)
            b_ref = b.float()
            y = op(a, b)
            y_ref = op(a_ref, b_ref)
            self.assertEqual(y, y_ref, exact_dtype=False)
        if support_check[dtype]():
            a1 = torch.randn([64, 1, 33], dtype=dtype)
            # a2 is contiguous tensor but it's strides
            # is not default contiguous strides.
            a2 = torch.as_strided(a1.clone(), [64, 1, 33], [33, 3, 1])
            self.assertTrue(a2.is_contiguous())
            b = torch.randn(64, 33, 256).to(dtype=dtype)
            y1 = torch.ops.aten.bmm(a1, b)
            y2 = torch.bmm(a2, b)
            self.assertEqual(y1, y2)
        for shape1, shape2, op in [
            ((33, 77), (77, 22), torch.matmul),
            ((128, 256), (256, 10), torch.matmul),
            ((7, 300), (300, 3), torch.matmul),
            ((1, 100), (100, 60), torch.matmul),
            ((100, 1), (1, 100), torch.matmul),
            ((20, 54, 78), (20, 78, 10), torch.bmm),
            ((1, 300, 1), (1, 1, 300), torch.bmm),
        ]:
            common(self, shape1, shape2, op, dtype)
    def test_mkldnn_setflags_nowarn(self, device):
        """Round-tripping set_flags() must not emit warnings."""
        # Regression test for https://github.com/pytorch/pytorch/issues/149829
        with warnings.catch_warnings(record=True) as w:
            rc = torch.backends.mkldnn.set_flags()
            # torch.backends.mkldnn.set_flags() returns the previously set flags.
            # One should be able to set them back without causing a warning.
            torch.backends.mkldnn.set_flags(*rc)
        # Above should trigger no warnings regardless of configuration
        self.assertEqual(len(w), 0)
    def test_mkldnn_error_on_zero_stride(self, device):
        """mkldnn_max_pool2d must reject stride=0 instead of crashing."""
        # Regression test for https://github.com/pytorch/pytorch/issues/149274
        x = torch.rand(1, 2, 3, 3).to_mkldnn()
        with self.assertRaises(ValueError):
            torch.mkldnn_max_pool2d(x, kernel_size=3, stride=0)
    def test_mkldnn_scaled_mm(self, device) -> None:
        """_scaled_mm on fp8 inputs matches an emulated fp32 computation for
        all combinations of fp8 input dtypes and output dtypes."""
        # test with input scale, weight scale and output_scale
        M, N, K = 2, 13, 16
        x = torch.randn((M, K), device=device) / K
        y = torch.randn((N, K), device=device).t() / K
        options = itertools.product(
            [torch.float8_e4m3fn, torch.float8_e5m2],
            [torch.float8_e4m3fn, torch.float8_e5m2],
            [torch.float8_e4m3fn, torch.float8_e5m2, torch.bfloat16, torch.float16, torch.float32])
        for x_dtype, y_dtype, out_dtype in options:
            # fp8 output is only supported when it matches the input dtype.
            if out_dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
                if x_dtype != out_dtype:
                    continue
            x_fp8 = x.to(x_dtype)
            y_fp8 = y.to(y_dtype)
            scale_a = torch.randn(1, device=device)
            scale_b = torch.randn(1, device=device)
            scale_out = torch.randn(1, device=device)
            # Reference: de-quantize, multiply in fp32, re-quantize.
            out_fp32 = torch.mm(x_fp8.to(torch.float) * scale_a, y_fp8.to(torch.float) * scale_b)
            if out_dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
                out_emulated = (out_fp32 / scale_out).to(out_dtype)
            else:
                out_emulated = out_fp32.to(out_dtype)
            out = torch._scaled_mm(x_fp8, y_fp8, scale_a, scale_b, scale_result=scale_out, out_dtype=out_dtype)
            if out_dtype is not None:
                self.assertEqual(out_dtype, out.dtype)
            self.assertEqual(out_emulated.float(), out.float(), atol=5e-2, rtol=5e-2)
@recover_orig_fp32_precision
def test_mlkdnn_get_set(self):
# get/set mkldnn ops
with torch.backends.mkldnn.flags(enabled=None, fp32_precision="bf16"):
self.assertEqual(torch.backends.mkldnn.fp32_precision, "bf16")
with torch.backends.mkldnn.flags(enabled=None, fp32_precision="tf32"):
self.assertEqual(torch.backends.mkldnn.fp32_precision, "tf32")
with torch.backends.mkldnn.flags(enabled=None, fp32_precision="none"):
self.assertEqual(torch.backends.mkldnn.fp32_precision, "none")
# get/set matmul
torch.backends.mkldnn.matmul.fp32_precision = "bf16"
self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "bf16")
torch.backends.mkldnn.matmul.fp32_precision = "tf32"
self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "tf32")
torch.backends.mkldnn.matmul.fp32_precision = "none"
self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "none")
# get/set conv
torch.backends.mkldnn.conv.fp32_precision = "bf16"
self.assertEqual(torch.backends.mkldnn.conv.fp32_precision, "bf16")
torch.backends.mkldnn.conv.fp32_precision = "tf32"
self.assertEqual(torch.backends.mkldnn.conv.fp32_precision, "tf32")
torch.backends.mkldnn.conv.fp32_precision = "none"
self.assertEqual(torch.backends.mkldnn.conv.fp32_precision, "none")
# get/set rnn
torch.backends.mkldnn.rnn.fp32_precision = "bf16"
self.assertEqual(torch.backends.mkldnn.rnn.fp32_precision, "bf16")
torch.backends.mkldnn.rnn.fp32_precision = "tf32"
self.assertEqual(torch.backends.mkldnn.rnn.fp32_precision, "tf32")
torch.backends.mkldnn.rnn.fp32_precision = "none"
self.assertEqual(torch.backends.mkldnn.rnn.fp32_precision, "none")
@recover_orig_fp32_precision
def test_generic_precision(self):
with torch.backends.flags(fp32_precision="none"):
self.assertEqual(torch.backends.fp32_precision, "none")
with torch.backends.flags(fp32_precision="tf32"):
self.assertEqual(torch.backends.fp32_precision, "tf32")
    @recover_orig_fp32_precision
    def test_default_use_parent(self):
        """A sub-backend set to "none" inherits fp32_precision from its parent:
        first from torch.backends.mkldnn, then from the generic torch.backends."""
        torch.backends.mkldnn.matmul.fp32_precision = "none"
        with torch.backends.mkldnn.flags(enabled=None, fp32_precision="bf16"):
            self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "bf16")
        with torch.backends.mkldnn.flags(enabled=None, fp32_precision="tf32"):
            self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "tf32")
        with torch.backends.mkldnn.flags(enabled=None, fp32_precision="none"):
            # With the mkldnn level also "none", fall through to the generic level.
            with torch.backends.flags(fp32_precision="bf16"):
                self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "bf16")
            with torch.backends.flags(fp32_precision="tf32"):
                self.assertEqual(torch.backends.mkldnn.matmul.fp32_precision, "tf32")
# Register the device-parameterized tests for the CPU device type only
# (mkldnn is a CPU backend).
instantiate_device_type_tests(TestMkldnn, globals(), only_for=('cpu',))
if __name__ == '__main__':
    run_tests()
| TestMkldnn |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 3110,
"end": 3230
} | class ____(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ("name",)
| Person |
python | pytest-dev__pytest | src/_pytest/compat.py | {
"start": 1067,
"end": 9433
} | class ____(enum.Enum):
token = 0
NOTSET: Final = NotSetType.token
# fmt: on
def iscoroutinefunction(func: object) -> bool:
    """Return True if *func* is an ``async def`` function (without ``yield``)
    or carries the legacy ``@asyncio.coroutine`` marker.

    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
    importing asyncio directly, which in turns also initializes the "logging"
    module as a side-effect (see issue #8).
    """
    if inspect.iscoroutinefunction(func):
        return True
    # The legacy decorator tags functions with a truthy `_is_coroutine` attr;
    # mirror the original `or` expression by returning the raw attribute.
    return getattr(func, "_is_coroutine", False)
def is_async_function(func: object) -> bool:
    """Return True if *func* looks like a coroutine function or an async
    generator function."""
    for check in (iscoroutinefunction, inspect.isasyncgenfunction):
        result = check(func)
        if result:
            return result
    return False
def signature(obj: Callable[..., Any]) -> Signature:
    """Return signature without evaluating annotations."""
    if sys.version_info >= (3, 14):
        # PEP 649/749 (3.14): request string-format annotations so they are
        # never evaluated. `Format` presumably comes from annotationlib via
        # the module imports — TODO confirm.
        return inspect.signature(obj, annotation_format=Format.STRING)
    return inspect.signature(obj)
def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str:
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
try:
relfn = fn.relative_to(curdir)
except ValueError:
pass
else:
return f"{relfn}:{lineno + 1}"
return f"{fn}:{lineno + 1}"
def num_mock_patch_args(function) -> int:
"""Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(
function: Callable[..., object],
*,
name: str = "",
cls: type | None = None,
) -> tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
Should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The cls arguments indicate that the function should be treated as a bound
method even though it's not unless the function is a static method.
The name parameter should be the original name in which the function was collected.
"""
# TODO(RonnyPfannschmidt): This function should be refactored when we
# revisit fixtures. The fixture mechanism should ask the node for
# the fixture names, and not try to obtain directly from the
# function object well after collection has occurred.
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters.values()
except (ValueError, TypeError) as e:
from _pytest.outcomes import fail
fail(
f"Could not determine arguments of {function!r}: {e}",
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, and its first parameter
# wasn't defined as positional only, remove the first parameter name.
if not any(p.kind is Parameter.POSITIONAL_ONLY for p in parameters) and (
# Not using `getattr` because we don't want to resolve the staticmethod.
# Not using `cls.__dict__` because we want to check the entire MRO.
cls
and not isinstance(
inspect.getattr_static(cls, name, default=None), staticmethod
)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]:
# Note: this code intentionally mirrors the code at the beginning of
# getfuncargnames, to get the arguments which were excluded from its result
# because they had default values.
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def ascii_escaped(val: bytes | str) -> str:
r"""If val is pure ASCII, return it as an str, otherwise, escape
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
and escapes strings into a sequence of escaped unicode ids, e.g.:
r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
Note:
The obvious "v.decode('unicode-escape')" will return
valid UTF-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a UTF-8 string.
"""
if isinstance(val, bytes):
ret = val.decode("ascii", "backslashreplace")
else:
ret = val.encode("unicode_escape").decode("ascii")
return ret.translate(_non_printable_ascii_translate_table)
def get_real_func(obj):
"""Get the real function object of the (possibly) wrapped object by
:func:`functools.wraps`, or :func:`functools.partial`."""
obj = inspect.unwrap(obj)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object: Any, name: str, default: Any) -> Any:
"""Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes
are derived from BaseException instead of Exception (for more details
check #2707).
"""
from _pytest.outcomes import TEST_OUTCOME
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj: object) -> bool:
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
def get_user_id() -> int | None:
"""Return the current process's real user id or None if it could not be
determined.
:return: The user id or None if it could not be determined.
"""
# mypy follows the version and platform checking expectation of PEP 484:
# https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks
# Containment checks are too complex for mypy v1.5.0 and cause failure.
if sys.platform == "win32" or sys.platform == "emscripten":
# win32 does not have a getuid() function.
# Emscripten has a return 0 stub.
return None
else:
# On other platforms, a return value of -1 is assumed to indicate that
# the current process's real user id could not be determined.
ERROR = -1
uid = os.getuid()
return uid if uid != ERROR else None
if sys.version_info >= (3, 11):
from typing import assert_never
else:
def assert_never(value: NoReturn) -> NoReturn:
assert False, f"Unhandled value: {value} ({type(value).__name__})"
| NotSetType |
python | zostera__django-bootstrap4 | src/bootstrap4/renderers.py | {
"start": 20928,
"end": 21542
} | class ____(FieldRenderer):
"""Inline field renderer."""
def add_error_attrs(self):
field_title = self.widget.attrs.get("title", "")
field_title += " " + " ".join([strip_tags(e) for e in self.field_errors])
self.widget.attrs["title"] = field_title.strip()
def add_widget_attrs(self):
super().add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, "sr-only")
| InlineFieldRenderer |
python | django__django | django/tasks/__init__.py | {
"start": 585,
"end": 1178
} | class ____(BaseConnectionHandler):
settings_name = "TASKS"
exception_class = InvalidTaskBackend
def create_connection(self, alias):
params = self.settings[alias]
backend = params["BACKEND"]
try:
backend_cls = import_string(backend)
except ImportError as e:
raise InvalidTaskBackend(f"Could not find backend '{backend}': {e}") from e
return backend_cls(alias=alias, params=params)
task_backends = TaskBackendHandler()
default_task_backend = ConnectionProxy(task_backends, DEFAULT_TASK_BACKEND_ALIAS)
| TaskBackendHandler |
python | pandas-dev__pandas | pandas/tests/arrays/timedeltas/test_constructors.py | {
"start": 82,
"end": 2268
} | class ____:
def test_other_type_raises(self):
msg = r"dtype bool cannot be converted to timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
TimedeltaArray._from_sequence(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
msg = "dtype 'category' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype="category"
)
msg = "dtype 'int64' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64")
)
msg = r"dtype 'datetime64\[ns\]' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("M8[ns]")
)
msg = (
r"dtype 'datetime64\[us, UTC\]' is invalid, should be np.timedelta64 dtype"
)
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype="M8[us, UTC]"
)
msg = "Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence(
np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("m8[Y]")
)
def test_copy(self):
data = np.array([1, 2, 3], dtype="m8[ns]")
arr = TimedeltaArray._from_sequence(data, copy=False)
assert arr._ndarray is data
arr = TimedeltaArray._from_sequence(data, copy=True)
assert arr._ndarray is not data
assert arr._ndarray.base is not data
def test_from_sequence_dtype(self):
msg = "dtype 'object' is invalid, should be np.timedelta64 dtype"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence([], dtype=object)
| TestTimedeltaArrayConstructor |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/shell_tool.py | {
"start": 3106,
"end": 3353
} | class ____:
"""Structured result from command execution."""
output: str
exit_code: int | None
timed_out: bool
truncated_by_lines: bool
truncated_by_bytes: bool
total_lines: int
total_bytes: int
| CommandExecutionResult |
python | redis__redis-py | tests/test_asyncio/test_commands.py | {
"start": 179002,
"end": 183075
} | class ____:
async def test_binary_get_set(self, r: redis.Redis):
assert await r.set(" foo bar ", "123")
assert await r.get(" foo bar ") == b"123"
assert await r.set(" foo\r\nbar\r\n ", "456")
assert await r.get(" foo\r\nbar\r\n ") == b"456"
assert await r.set(" \r\n\t\x07\x13 ", "789")
assert await r.get(" \r\n\t\x07\x13 ") == b"789"
assert sorted(await r.keys("*")) == [
b" \r\n\t\x07\x13 ",
b" foo\r\nbar\r\n ",
b" foo bar ",
]
assert await r.delete(" foo bar ")
assert await r.delete(" foo\r\nbar\r\n ")
assert await r.delete(" \r\n\t\x07\x13 ")
async def test_binary_lists(self, r: redis.Redis):
mapping = {
b"foo bar": [b"1", b"2", b"3"],
b"foo\r\nbar\r\n": [b"4", b"5", b"6"],
b"foo\tbar\x07": [b"7", b"8", b"9"],
}
# fill in lists
for key, value in mapping.items():
await r.rpush(key, *value)
# check that KEYS returns all the keys as they are
assert sorted(await r.keys("*")) == sorted(mapping.keys())
# check that it is possible to get list content by key name
for key, value in mapping.items():
assert await r.lrange(key, 0, -1) == value
async def test_22_info(self, r: redis.Redis):
"""
Older Redis versions contained 'allocation_stats' in INFO that
was the cause of a number of bugs when parsing.
"""
info = (
"allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330,"
"13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020,"
"20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303,"
"27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160,"
"34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523,"
"41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171,"
"49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332,"
"58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30,"
"67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25,"
"76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46,"
"85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20,"
"94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15,"
"103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52,"
"111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54,"
"119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52,"
"127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62,"
"135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7,"
"144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1,"
"155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2,"
"172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3,"
"187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1,"
"207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2,"
"220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1,"
">=256=203"
)
parsed = parse_info(info)
assert "allocation_stats" in parsed
assert "6" in parsed["allocation_stats"]
assert ">=256" in parsed["allocation_stats"]
async def test_large_responses(self, r: redis.Redis):
"""The PythonParser has some special cases for return values > 1MB"""
# load up 5MB of data into a key
data = "".join([ascii_letters] * (5000000 // len(ascii_letters)))
await r.set("a", data)
assert await r.get("a") == data.encode()
async def test_floating_point_encoding(self, r: redis.Redis):
"""
High precision floating point values sent to the server should keep
precision.
"""
timestamp = 1349673917.939762
await r.zadd("a", {"a1": timestamp})
assert await r.zscore("a", "a1") == timestamp
| TestBinarySave |
python | conda__conda | tests/models/test_enums.py | {
"start": 421,
"end": 3152
} | class ____:
type: NoarchType
@pytest.mark.parametrize(
"value,expected",
[
# NoarchType instances should return themselves
(NoarchType.python, NoarchType.python),
(NoarchType.generic, NoarchType.generic),
# ObjectWithType
(ObjectWithType(NoarchType.python), NoarchType.python),
(ObjectWithType(NoarchType.generic), NoarchType.generic),
# Boolean
(True, NoarchType.generic),
(False, None),
# Truthy strings
("true", NoarchType.generic),
("True", NoarchType.generic),
("TRUE", NoarchType.generic),
("yes", NoarchType.generic),
("Yes", NoarchType.generic),
("YES", NoarchType.generic),
("on", NoarchType.generic),
("On", NoarchType.generic),
("ON", NoarchType.generic),
("y", NoarchType.generic),
("Y", NoarchType.generic),
("1", NoarchType.generic),
("42", NoarchType.generic),
("1.0", NoarchType.generic),
# Falsy strings
("false", None),
("False", None),
("FALSE", None),
("off", None),
("Off", None),
("OFF", None),
("no", None),
("No", None),
("NO", None),
("n", None),
("N", None),
("non", None),
("Non", None),
("0", None),
("0.0", None),
# Python strings
("python", NoarchType.python),
("Python", NoarchType.python),
("PYTHON", NoarchType.python),
# Generic strings
("generic", NoarchType.generic),
("Generic", NoarchType.generic),
("GENERIC", NoarchType.generic),
# This is the fix for PR #14179 - handling 'null' values from malformed repodata
("null", None),
("NULL", None),
("Null", None),
# Other null-like values
("none", None),
("None", None),
("NONE", None),
# YAML null representation
("~", None),
# Null byte
("\0", None),
# Empty string
("", None),
# Invalid
("invalid", CondaUpgradeError),
("foobar", CondaUpgradeError),
("unknown", CondaUpgradeError),
("other", CondaUpgradeError),
],
)
def test_noarch_type_coercion(
value: Any,
expected: NoarchType | None | CondaUpgradeError,
):
"""Test that NoarchType.coerce returns the expected value for given input."""
with (
pytest.raises(
CondaUpgradeError,
match=f"The noarch type for this package is set to '{value}'",
)
if expected == CondaUpgradeError
else nullcontext()
):
assert NoarchType.coerce(value) == expected
| ObjectWithType |
python | django__django | django/core/paginator.py | {
"start": 361,
"end": 422
} | class ____(RuntimeWarning):
pass
| UnorderedObjectListWarning |
python | sympy__sympy | sympy/physics/optics/gaussopt.py | {
"start": 10058,
"end": 20898
} | class ____(Expr):
"""
Representation for a gaussian ray in the Ray Transfer Matrix formalism.
Parameters
==========
wavelen : the wavelength,
z : the distance to waist, and
w : the waist, or
z_r : the rayleigh range.
n : the refractive index of medium.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
>>> p.q.n()
1.0 + 5.92753330865999*I
>>> p.w_0.n()
0.00100000000000000
>>> p.z_r.n()
5.92753330865999
>>> from sympy.physics.optics import FreeSpace
>>> fs = FreeSpace(10)
>>> p1 = fs*p
>>> p.w.n()
0.00101413072159615
>>> p1.w.n()
0.00210803120913829
See Also
========
RayTransferMatrix
References
==========
.. [1] https://en.wikipedia.org/wiki/Complex_beam_parameter
.. [2] https://en.wikipedia.org/wiki/Gaussian_beam
"""
#TODO A class Complex may be implemented. The BeamParameter may
# subclass it. See:
# https://groups.google.com/d/topic/sympy/7XkU07NRBEs/discussion
def __new__(cls, wavelen, z, z_r=None, w=None, n=1):
wavelen = sympify(wavelen)
z = sympify(z)
n = sympify(n)
if z_r is not None and w is None:
z_r = sympify(z_r)
elif w is not None and z_r is None:
z_r = waist2rayleigh(sympify(w), wavelen, n)
elif z_r is None and w is None:
raise ValueError('Must specify one of w and z_r.')
return Expr.__new__(cls, wavelen, z, z_r, n)
@property
def wavelen(self):
return self.args[0]
@property
def z(self):
return self.args[1]
@property
def z_r(self):
return self.args[2]
@property
def n(self):
return self.args[3]
@property
def q(self):
"""
The complex parameter representing the beam.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
"""
return self.z + I*self.z_r
@property
def radius(self):
"""
The radius of curvature of the phase front.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.radius
1 + 3.55998576005696*pi**2
"""
return self.z*(1 + (self.z_r/self.z)**2)
@property
def w(self):
"""
The radius of the beam w(z), at any position z along the beam.
The beam radius at `1/e^2` intensity (axial value).
See Also
========
w_0 :
The minimal radius of beam.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w
0.001*sqrt(0.2809/pi**2 + 1)
"""
return self.w_0*sqrt(1 + (self.z/self.z_r)**2)
@property
def w_0(self):
"""
The minimal radius of beam at `1/e^2` intensity (peak value).
See Also
========
w : the beam radius at `1/e^2` intensity (axial value).
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w_0
0.00100000000000000
"""
return sqrt(self.z_r/(pi*self.n)*self.wavelen)
@property
def divergence(self):
"""
Half of the total angular spread.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.divergence
0.00053/pi
"""
return self.wavelen/pi/self.w_0
@property
def gouy(self):
"""
The Gouy phase.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.gouy
atan(0.53/pi)
"""
return atan2(self.z, self.z_r)
@property
def waist_approximation_limit(self):
"""
The minimal waist for which the gauss beam approximation is valid.
Explanation
===========
The gauss beam is a solution to the paraxial equation. For curvatures
that are too great it is not a valid approximation.
Examples
========
>>> from sympy.physics.optics import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.waist_approximation_limit
1.06e-6/pi
"""
return 2*self.wavelen/pi
###
# Utilities
###
def waist2rayleigh(w, wavelen, n=1):
"""
Calculate the rayleigh range from the waist of a gaussian beam.
See Also
========
rayleigh2waist, BeamParameter
Examples
========
>>> from sympy.physics.optics import waist2rayleigh
>>> from sympy import symbols
>>> w, wavelen = symbols('w wavelen')
>>> waist2rayleigh(w, wavelen)
pi*w**2/wavelen
"""
w, wavelen = map(sympify, (w, wavelen))
return w**2*n*pi/wavelen
def rayleigh2waist(z_r, wavelen):
"""Calculate the waist from the rayleigh range of a gaussian beam.
See Also
========
waist2rayleigh, BeamParameter
Examples
========
>>> from sympy.physics.optics import rayleigh2waist
>>> from sympy import symbols
>>> z_r, wavelen = symbols('z_r wavelen')
>>> rayleigh2waist(z_r, wavelen)
sqrt(wavelen*z_r)/sqrt(pi)
"""
z_r, wavelen = map(sympify, (z_r, wavelen))
return sqrt(z_r/pi*wavelen)
def geometric_conj_ab(a, b):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Explanation
===========
Takes the distances to the optical element and returns the needed
focal distance.
See Also
========
geometric_conj_af, geometric_conj_bf
Examples
========
>>> from sympy.physics.optics import geometric_conj_ab
>>> from sympy import symbols
>>> a, b = symbols('a b')
>>> geometric_conj_ab(a, b)
a*b/(a + b)
"""
a, b = map(sympify, (a, b))
if a.is_infinite or b.is_infinite:
return a if b.is_infinite else b
else:
return a*b/(a + b)
def geometric_conj_af(a, f):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Explanation
===========
Takes the object distance (for geometric_conj_af) or the image distance
(for geometric_conj_bf) to the optical element and the focal distance.
Then it returns the other distance needed for conjugation.
See Also
========
geometric_conj_ab
Examples
========
>>> from sympy.physics.optics.gaussopt import geometric_conj_af, geometric_conj_bf
>>> from sympy import symbols
>>> a, b, f = symbols('a b f')
>>> geometric_conj_af(a, f)
a*f/(a - f)
>>> geometric_conj_bf(b, f)
b*f/(b - f)
"""
a, f = map(sympify, (a, f))
return -geometric_conj_ab(a, -f)
geometric_conj_bf = geometric_conj_af
def gaussian_conj(s_in, z_r_in, f):
"""
Conjugation relation for gaussian beams.
Parameters
==========
s_in :
The distance to optical element from the waist.
z_r_in :
The rayleigh range of the incident beam.
f :
The focal length of the optical element.
Returns
=======
a tuple containing (s_out, z_r_out, m)
s_out :
The distance between the new waist and the optical element.
z_r_out :
The rayleigh range of the emergent beam.
m :
The ration between the new and the old waists.
Examples
========
>>> from sympy.physics.optics import gaussian_conj
>>> from sympy import symbols
>>> s_in, z_r_in, f = symbols('s_in z_r_in f')
>>> gaussian_conj(s_in, z_r_in, f)[0]
1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
>>> gaussian_conj(s_in, z_r_in, f)[1]
z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
>>> gaussian_conj(s_in, z_r_in, f)[2]
1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
"""
s_in, z_r_in, f = map(sympify, (s_in, z_r_in, f))
s_out = 1 / ( -1/(s_in + z_r_in**2/(s_in - f)) + 1/f )
m = 1/sqrt((1 - (s_in/f)**2) + (z_r_in/f)**2)
z_r_out = z_r_in / ((1 - (s_in/f)**2) + (z_r_in/f)**2)
return (s_out, z_r_out, m)
def conjugate_gauss_beams(wavelen, waist_in, waist_out, **kwargs):
"""
Find the optical setup conjugating the object/image waists.
Parameters
==========
wavelen :
The wavelength of the beam.
waist_in and waist_out :
The waists to be conjugated.
f :
The focal distance of the element used in the conjugation.
Returns
=======
a tuple containing (s_in, s_out, f)
s_in :
The distance before the optical element.
s_out :
The distance after the optical element.
f :
The focal distance of the optical element.
Examples
========
>>> from sympy.physics.optics import conjugate_gauss_beams
>>> from sympy import symbols, factor
>>> l, w_i, w_o, f = symbols('l w_i w_o f')
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[0]
f*(1 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))
>>> factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1])
f*w_o**2*(w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 -
pi**2*w_i**4/(f**2*l**2)))/w_i**2
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[2]
f
"""
#TODO add the other possible arguments
wavelen, waist_in, waist_out = map(sympify, (wavelen, waist_in, waist_out))
m = waist_out / waist_in
z = waist2rayleigh(waist_in, wavelen)
if len(kwargs) != 1:
raise ValueError("The function expects only one named argument")
elif 'dist' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
elif 'f' in kwargs:
f = sympify(kwargs['f'])
s_in = f * (1 - sqrt(1/m**2 - z**2/f**2))
s_out = gaussian_conj(s_in, z, f)[0]
elif 's_in' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
else:
raise ValueError(filldedent('''
The functions expects the focal length as a named argument'''))
return (s_in, s_out, f)
#TODO
#def plot_beam():
# """Plot the beam radius as it propagates in space."""
# pass
#TODO
#def plot_beam_conjugation():
# """
# Plot the intersection of two beams.
#
# Represents the conjugation relation.
#
# See Also
# ========
#
# conjugate_gauss_beams
# """
# pass
| BeamParameter |
python | Netflix__metaflow | metaflow/runner/nbdeploy.py | {
"start": 161,
"end": 293
} | class ____(Exception):
"""Custom exception for errors during NBDeployer initialization."""
pass
| NBDeployerInitializationError |
python | django__django | tests/admin_inlines/tests.py | {
"start": 1525,
"end": 32995
} | class ____(TestDataMixin, TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.holder = Holder.objects.create(dummy=13)
Inner.objects.create(dummy=42, holder=cls.holder)
cls.parent = SomeParentModel.objects.create(name="a")
SomeChildModel.objects.create(name="b", position="0", parent=cls.parent)
SomeChildModel.objects.create(name="c", position="1", parent=cls.parent)
cls.view_only_user = User.objects.create_user(
username="user",
password="pwd",
is_staff=True,
)
parent_ct = ContentType.objects.get_for_model(SomeParentModel)
child_ct = ContentType.objects.get_for_model(SomeChildModel)
permission = Permission.objects.get(
codename="view_someparentmodel",
content_type=parent_ct,
)
cls.view_only_user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="view_somechildmodel",
content_type=child_ct,
)
cls.view_only_user.user_permissions.add(permission)
def setUp(self):
self.client.force_login(self.superuser)
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(self.holder.id,))
)
inner_formset = response.context["inline_admin_formsets"][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, "can_delete must be equal")
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly="")
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.id,))
)
self.assertContains(response, "<label>Inner readonly label:</label>")
def test_excluded_id_for_inlines_uses_hidden_field(self):
parent = UUIDParent.objects.create()
child = UUIDChild.objects.create(title="foo", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_uuidparent_change", args=(parent.id,))
)
self.assertContains(
response,
f'<input type="hidden" name="uuidchild_set-0-id" value="{child.id}" '
'id="id_uuidchild_set-0-id">',
html=True,
)
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# The heading for the m2m inline block uses the right text
self.assertContains(
response,
(
'<h2 id="Author_books-heading" class="inline-heading">'
"Author-book relationships</h2>"
),
html=True,
)
# The "add another" label is correct
self.assertContains(response, "Add another Author-book relationship")
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname="Imelda")
item = OutfitItem.objects.create(name="Shoes")
# Imelda likes shoes, but can't carry her own bags.
data = {
"shoppingweakness_set-TOTAL_FORMS": 1,
"shoppingweakness_set-INITIAL_FORMS": 0,
"shoppingweakness_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"person": person.id,
"max_weight": 0,
"shoppingweakness_set-0-item": item.id,
}
response = self.client.post(
reverse("admin:admin_inlines_fashionista_add"), data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname="Imelda")), 1)
def test_tabular_inline_column_css_class(self):
"""
Field names are included in the context to output a field-specific
CSS class name in the column headers.
"""
response = self.client.get(reverse("admin:admin_inlines_poll_add"))
text_field, call_me_field = list(
response.context["inline_admin_formset"].fields()
)
# Editable field.
self.assertEqual(text_field["name"], "text")
self.assertContains(response, '<th class="column-text required">')
# Read-only field.
self.assertEqual(call_me_field["name"], "call_me")
self.assertContains(response, '<th class="column-call_me">')
def test_custom_form_tabular_inline_label(self):
"""
A model form with a form field specified (TitleForm.title1) should have
its label rendered in the tabular inline.
"""
response = self.client.get(reverse("admin:admin_inlines_titlecollection_add"))
self.assertContains(
response, '<th class="column-title1 required">Title1</th>', html=True
)
def test_custom_form_tabular_inline_extra_field_label(self):
response = self.client.get(reverse("admin:admin_inlines_outfititem_add"))
_, extra_field = list(response.context["inline_admin_formset"].fields())
self.assertEqual(extra_field["label"], "Extra field")
def test_non_editable_custom_form_tabular_inline_extra_field_label(self):
response = self.client.get(reverse("admin:admin_inlines_chapter_add"))
_, extra_field = list(response.context["inline_admin_formset"].fields())
self.assertEqual(extra_field["label"], "Extra field")
def test_custom_form_tabular_inline_overridden_label(self):
"""
SomeChildModelForm.__init__() overrides the label of a form field.
That label is displayed in the TabularInline.
"""
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
field = list(response.context["inline_admin_formset"].fields())[0]
self.assertEqual(field["label"], "new label")
self.assertContains(
response, '<th class="column-name required">New label</th>', html=True
)
def test_tabular_non_field_errors(self):
"""
non_field_errors are displayed correctly, including the correct value
for colspan.
"""
data = {
"title_set-TOTAL_FORMS": 1,
"title_set-INITIAL_FORMS": 0,
"title_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"title_set-0-title1": "a title",
"title_set-0-title2": "a different title",
}
response = self.client.post(
reverse("admin:admin_inlines_titlecollection_add"), data
)
# Here colspan is "4": two fields (title1 and title2), one hidden field
# and the delete checkbox.
self.assertContains(
response,
'<tr class="row-form-errors"><td colspan="4">'
'<ul class="errorlist nonfield">'
"<li>The two titles must be the same</li></ul></td></tr>",
)
def test_no_parent_callable_lookup(self):
"""
Admin inline `readonly_field` shouldn't invoke parent ModelAdmin
callable
"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get(reverse("admin:admin_inlines_novel_add"))
# View should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="chapter_set-group"',
)
def test_callable_lookup(self):
"""
Admin inline should invoke local callable when its name is listed in
readonly_fields.
"""
response = self.client.get(reverse("admin:admin_inlines_poll_add"))
# Add parent object view should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="question_set-group"',
)
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, "<p>Callable in QuestionInline</p>")
def test_model_error_inline_with_readonly_field(self):
poll = Poll.objects.create(name="Test poll")
data = {
"question_set-TOTAL_FORMS": 1,
"question_set-INITIAL_FORMS": 0,
"question_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"question_set-0-text": "Question",
"question_set-0-poll": poll.pk,
}
response = self.client.post(
reverse("admin:admin_inlines_poll_change", args=(poll.pk,)),
data,
)
self.assertContains(response, "Always invalid model.")
def test_help_text(self):
"""
The inlines' model field help texts are displayed when using both the
stacked and tabular layouts.
"""
response = self.client.get(reverse("admin:admin_inlines_holder4_add"))
self.assertContains(response, "Awesome stacked help text is awesome.", 4)
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Awesome tabular help text is awesome.)" '
'title="Awesome tabular help text is awesome.">',
1,
)
# ReadOnly fields
response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add"))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text for ReadOnlyInline)" '
'title="Help text for ReadOnlyInline">',
1,
)
def test_tabular_model_form_meta_readonly_field(self):
"""
Tabular inlines use ModelForm.Meta.help_texts and labels for read-only
fields.
"""
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text from ModelForm.Meta)" '
'title="Help text from ModelForm.Meta">',
)
self.assertContains(response, "Label from ModelForm.Meta")
def test_inline_hidden_field_no_column(self):
"""
#18263 -- Make sure hidden fields don't get a column in tabular inlines
"""
parent = SomeParentModel.objects.create(name="a")
SomeChildModel.objects.create(name="b", position="0", parent=parent)
SomeChildModel.objects.create(name="c", position="1", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_someparentmodel_change", args=(parent.pk,))
)
self.assertNotContains(response, '<td class="field-position">')
self.assertInHTML(
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1">',
response.rendered_content,
)
def test_tabular_inline_hidden_field_with_view_only_permissions(self):
"""
Content of hidden field is not visible in tabular inline when user has
view-only permission.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
self.assertInHTML(
'<th class="column-position hidden">Position'
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Position help_text.)" '
'title="Position help_text.">'
"</th>",
response.rendered_content,
)
self.assertInHTML(
'<td class="field-position hidden"><p>0</p></td>', response.rendered_content
)
self.assertInHTML(
'<td class="field-position hidden"><p>1</p></td>', response.rendered_content
)
def test_stacked_inline_hidden_field_with_view_only_permissions(self):
"""
Content of hidden field is not visible in stacked inline when user has
view-only permission.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"stacked_inline_hidden_field_in_group_admin:"
"admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
# The whole line containing name + position fields is not hidden.
self.assertContains(
response, '<div class="form-row field-name field-position">'
)
# The div containing the position field is hidden.
self.assertInHTML(
'<div class="flex-container fieldBox field-position hidden">'
'<label class="inline">Position:</label>'
'<div class="readonly">0</div></div>'
'<div class="help hidden"><div>Position help_text.</div></div>',
response.rendered_content,
)
self.assertInHTML(
'<div class="flex-container fieldBox field-position hidden">'
'<label class="inline">Position:</label>'
'<div class="readonly">1</div></div>'
'<div class="help hidden"><div>Position help_text.</div></div>',
response.rendered_content,
)
def test_stacked_inline_single_hidden_field_in_line_with_view_only_permissions(
self,
):
"""
Content of hidden field is not visible in stacked inline when user has
view-only permission and the field is grouped on a separate line.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"stacked_inline_hidden_field_on_single_line_admin:"
"admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
# The whole line containing position field is hidden.
self.assertInHTML(
'<div class="form-row hidden field-position">'
'<div><div class="flex-container"><label>Position:</label>'
'<div class="readonly">0</div></div>'
'<div class="help hidden"><div>Position help_text.</div></div>'
"</div></div>",
response.rendered_content,
)
self.assertInHTML(
'<div class="form-row hidden field-position">'
'<div><div class="flex-container"><label>Position:</label>'
'<div class="readonly">1</div></div>'
'<div class="help hidden"><div>Position help_text.</div></div>'
"</div></div>",
response.rendered_content,
)
def test_tabular_inline_with_hidden_field_non_field_errors_has_correct_colspan(
self,
):
"""
In tabular inlines, when a form has non-field errors, those errors
are rendered in a table line with a single cell spanning the whole
table width. Colspan must be equal to the number of visible columns.
"""
parent = SomeParentModel.objects.create(name="a")
child = SomeChildModel.objects.create(name="b", position="0", parent=parent)
url = reverse(
"tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change",
args=(parent.id,),
)
data = {
"name": parent.name,
"somechildmodel_set-TOTAL_FORMS": 1,
"somechildmodel_set-INITIAL_FORMS": 1,
"somechildmodel_set-MIN_NUM_FORMS": 0,
"somechildmodel_set-MAX_NUM_FORMS": 1000,
"_save": "Save",
"somechildmodel_set-0-id": child.id,
"somechildmodel_set-0-parent": parent.id,
"somechildmodel_set-0-name": child.name,
"somechildmodel_set-0-position": 1,
}
response = self.client.post(url, data)
# Form has 3 visible columns and 1 hidden column.
self.assertInHTML(
'<thead><tr><th class="original"></th>'
'<th class="column-name required">Name</th>'
'<th class="column-position required hidden">Position'
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Position help_text.)" '
'title="Position help_text.">'
"</th>"
"<th>Delete?</th></tr></thead>",
response.rendered_content,
)
# The non-field error must be spanned on 3 (visible) columns.
self.assertInHTML(
'<tr class="row-form-errors"><td colspan="3">'
'<ul class="errorlist nonfield"><li>A non-field error</li></ul></td></tr>',
response.rendered_content,
)
def test_non_related_name_inline(self):
"""
Multiple inlines with related_name='+' have correct form prefixes.
"""
response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add"))
self.assertContains(
response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True
)
self.assertContains(
response,
'<input type="hidden" name="-1-0-capo_famiglia" '
'id="id_-1-0-capo_famiglia">',
html=True,
)
self.assertContains(
response,
'<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" '
'maxlength="100" aria-describedby="id_-1-0-name_helptext">',
html=True,
)
self.assertContains(
response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True
)
self.assertContains(
response,
'<input type="hidden" name="-2-0-capo_famiglia" '
'id="id_-2-0-capo_famiglia">',
html=True,
)
self.assertContains(
response,
'<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" '
'maxlength="100">',
html=True,
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
The "View on Site" link is correct for locales that use thousand
separators.
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly="")
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.id,))
)
inner_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(inner).pk,
inner.pk,
)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
The "View on Site" link is correct for models with a custom primary key
field.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",))
)
child1_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(child1).pk,
child1.pk,
)
child2_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(child2).pk,
child2.pk,
)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
An object can be created with inlines when it inherits another class.
"""
data = {
"name": "Martian",
"sighting_set-TOTAL_FORMS": 1,
"sighting_set-INITIAL_FORMS": 0,
"sighting_set-MAX_NUM_FORMS": 0,
"sighting_set-0-place": "Zone 51",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_inlines_extraterrestrial_add"), data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name="Martian").count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = (
'<input id="id_binarytree_set-MAX_NUM_FORMS" '
'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">'
)
# The total number of forms will remain the same in either case
total_forms_hidden = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">'
)
response = self.client.get(reverse("admin:admin_inlines_binarytree_add"))
self.assertInHTML(max_forms_input % 3, response.rendered_content)
self.assertInHTML(total_forms_hidden, response.rendered_content)
response = self.client.get(
reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,))
)
self.assertInHTML(max_forms_input % 2, response.rendered_content)
self.assertInHTML(total_forms_hidden, response.rendered_content)
def test_min_num(self):
"""
min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">'
)
request = self.factory.get(reverse("admin:admin_inlines_binarytree_add"))
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertInHTML(min_forms, response.rendered_content)
self.assertInHTML(total_forms, response.rendered_content)
def test_custom_min_num(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">'
)
request = self.factory.get(reverse("admin:admin_inlines_binarytree_add"))
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertInHTML(min_forms % 2, response.rendered_content)
self.assertInHTML(total_forms % 5, response.rendered_content)
request = self.factory.get(
reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,))
)
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertInHTML(min_forms % 5, response.rendered_content)
self.assertInHTML(total_forms % 8, response.rendered_content)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input id="id_nonautopkbook_set-0-rand_pk" '
'name="nonautopkbook_set-0-rand_pk" type="hidden">',
html=True,
)
self.assertContains(
response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" '
'name="nonautopkbook_set-2-0-rand_pk" type="hidden">',
html=True,
)
def test_inline_nonauto_noneditable_inherited_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">',
html=True,
)
self.assertContains(
response,
'<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">',
html=True,
)
def test_inline_editable_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
'name="editablepkbook_set-0-manual_pk" type="number">',
html=True,
count=1,
)
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
'name="editablepkbook_set-2-0-manual_pk" type="number">',
html=True,
count=1,
)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.pk,))
)
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1,
)
self.assertContains(
response, '<div class="inline-related" id="inner_set-1">', count=1
)
def test_inlines_show_change_link_registered(self):
"Inlines `show_change_link` for registered models when enabled."
holder = Holder4.objects.create(dummy=1)
item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
items = (
("inner4stacked", item1.pk),
("inner4tabular", item2.pk),
)
response = self.client.get(
reverse("admin:admin_inlines_holder4_change", args=(holder.pk,))
)
self.assertTrue(
response.context["inline_admin_formset"].opts.has_registered_model
)
for model, pk in items:
url = reverse("admin:admin_inlines_%s_change" % model, args=(pk,))
self.assertContains(
response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML)
)
def test_inlines_show_change_link_unregistered(self):
"Inlines `show_change_link` disabled for unregistered models."
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",))
)
self.assertFalse(
response.context["inline_admin_formset"].opts.has_registered_model
)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
"Inlines `show_change_link` disabled by default."
poll = Poll.objects.create(name="New poll")
Question.objects.create(poll=poll)
response = self.client.get(
reverse("admin:admin_inlines_poll_change", args=(poll.pk,))
)
self.assertTrue(
response.context["inline_admin_formset"].opts.has_registered_model
)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_noneditable_inline_has_field_inputs(self):
"""Inlines without change permission shows field inputs on add form."""
response = self.client.get(
reverse("admin:admin_inlines_novelreadonlychapter_add")
)
self.assertContains(
response,
'<input type="text" name="chapter_set-0-name" '
'class="vTextField" maxlength="40" id="id_chapter_set-0-name">',
html=True,
)
def test_inlines_plural_heading_foreign_key(self):
response = self.client.get(reverse("admin:admin_inlines_holder4_add"))
self.assertContains(
response,
(
'<h2 id="inner4stacked_set-heading" class="inline-heading">'
"Inner4 stackeds</h2>"
),
html=True,
)
self.assertContains(
response,
(
'<h2 id="inner4tabular_set-heading" class="inline-heading">'
"Inner4 tabulars</h2>"
),
html=True,
)
def test_inlines_singular_heading_one_to_one(self):
response = self.client.get(reverse("admin:admin_inlines_person_add"))
self.assertContains(
response,
'<h2 id="author-heading" class="inline-heading">Author</h2>',
html=True,
) # Tabular.
self.assertContains(
response,
'<h2 id="fashionista-heading" class="inline-heading">Fashionista</h2>',
html=True,
) # Stacked.
def test_inlines_based_on_model_state(self):
parent = ShowInlineParent.objects.create(show_inlines=False)
data = {
"show_inlines": "on",
"_save": "Save",
}
change_url = reverse(
"admin:admin_inlines_showinlineparent_change",
args=(parent.id,),
)
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 302)
parent.refresh_from_db()
self.assertIs(parent.show_inlines, True)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
| TestInline |
python | kamyu104__LeetCode-Solutions | Python/24-game.py | {
"start": 137,
"end": 1042
} | class ____(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) == 1:
return abs(nums[0]-24) < 1e-6
ops = [add, sub, mul, truediv]
for i in xrange(len(nums)):
for j in xrange(len(nums)):
if i == j:
continue
next_nums = [nums[k] for k in xrange(len(nums)) if i != k != j]
for op in ops:
if ((op is add or op is mul) and j > i) or \
(op == truediv and nums[j] == 0):
continue
next_nums.append(op(nums[i], nums[j]))
if self.judgePoint24(next_nums):
return True
next_nums.pop()
return False
# Time: O(n^3 * 4^n) = O(1), n = 4
# Space: O(n^2) = O(1)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 132394,
"end": 132835
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryMigrationOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryMigrationOrderDirection),
graphql_name="direction",
)
| RepositoryMigrationOrder |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/config_type.py | {
"start": 4890,
"end": 5029
} | class ____(BuiltinConfigScalar):
def __init__(self):
super().__init__(scalar_kind=ConfigScalarKind.STRING, description="")
| String |
python | mlflow__mlflow | dev/clint/tests/rules/test_invalid_abstract_method.py | {
"start": 269,
"end": 1190
} | class ____(abc.ABC):
@abc.abstractmethod
def bad_abstract_method_has_implementation(self) -> None:
return "This should not be here"
@abc.abstractmethod
def bad_abstract_method_multiple_statements(self) -> None:
pass
...
@abc.abstractmethod
def good_abstract_method_pass(self) -> None:
pass
@abc.abstractmethod
def good_abstract_method_ellipsis(self) -> None:
...
@abc.abstractmethod
def good_abstract_method_docstring(self) -> None:
'''This is a valid docstring'''
"""
config = Config(select={InvalidAbstractMethod.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 2
assert all(isinstance(v.rule, InvalidAbstractMethod) for v in violations)
assert violations[0].range == Range(Position(5, 4))
assert violations[1].range == Range(Position(9, 4))
| AbstractExample |
python | pytorch__pytorch | torch/utils/_sympy/reference.py | {
"start": 4555,
"end": 7336
} | class ____(ReferenceAnalysis):
@staticmethod
def constant(c, dtype):
if dtype is torch.int64:
return int(c)
elif dtype is torch.double:
return float(c)
elif dtype is torch.bool:
return bool(c)
else:
raise AssertionError(f"unrecognized dtype {dtype}")
@staticmethod
def not_(a):
return torch.sym_not(a)
@classmethod
def sym_sum(cls, args):
if len(args) == 0:
return 0
if len(args) == 1:
return args[0]
acc = cls.add(args[0], args[1])
for i in range(2, len(args)):
acc = cls.add(acc, args[i])
return acc
@staticmethod
def floordiv(a, b):
return a // b
@staticmethod
def mod(x, y):
return x % y
@staticmethod
def truncdiv(a, b):
return a / b
@staticmethod
def to_dtype(x, dtype):
if dtype == torch.float64:
return torch.sym_float(x)
raise NotImplementedError(f"to_dtype {dtype} NYI")
@staticmethod
def exp(x) -> NoReturn:
raise AssertionError("exp is not valid shape sympy expr")
@staticmethod
def log(x) -> NoReturn:
raise AssertionError("log is not valid shape sympy expr")
@staticmethod
def log2(x):
return torch._sym_log2(x) # type: ignore[attr-defined]
@staticmethod
def sqrt(x):
return torch._sym_sqrt(x) # type: ignore[attr-defined]
@staticmethod
def minimum(a, b):
return torch.sym_min(a, b)
@staticmethod
def maximum(a, b):
return torch.sym_max(a, b)
@staticmethod
def floor_to_int(x, dtype):
return math.floor(x)
@staticmethod
def ceil_to_int(x, dtype):
return math.ceil(x)
@staticmethod
def floor(x):
return float(math.floor(x))
@staticmethod
def ceil(x):
return float(math.ceil(x))
@staticmethod
def truediv(a, b):
return a / b
@staticmethod
def pow(a, b):
return a**b
@staticmethod
def pow_by_natural(a, b):
# Pray that safe_pow is not needed here lol. In particular, this
# never participates in VR low/high ranges, so overflow should be
# unlikely
return a**b
@staticmethod
def round_to_int(a, dtype):
return round(a)
@staticmethod
def round_decimal(a, b):
return round(a, ndigits=b)
@staticmethod
def bitwise_and(a, b):
return a & b
@staticmethod
def bitwise_or(a, b):
return a | b
@staticmethod
def bitwise_xor(a, b):
return a ^ b
# Like PythonReferenceAnalysis, but some export-unfriendly choices of
# operators to make things faster
| PythonReferenceAnalysis |
python | ansible__ansible | lib/ansible/_internal/_task.py | {
"start": 611,
"end": 954
} | class ____(AmbientContextBase):
"""Ambient context that wraps task execution on workers. It provides access to the currently executing task."""
task: Task
TaskArgsFinalizerCallback = t.Callable[[str, t.Any, _engine.TemplateEngine, t.Any], t.Any]
"""Type alias for the shape of the `ActionBase.finalize_task_arg` method."""
| TaskContext |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 14643,
"end": 19076
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup useful constants
cls.jan1 = datetime.datetime(2013, 1, 1, tzinfo=datetime.timezone.utc)
cls.jan3 = datetime.datetime(2013, 1, 3, tzinfo=datetime.timezone.utc)
cls.jan31 = datetime.datetime(2013, 1, 31, tzinfo=datetime.timezone.utc)
def test_accurate_definition(self):
"""Tests explicit definition of a FuzzyDateTime."""
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan31)
def test_partial_definition(self):
"""Test defining a FuzzyDateTime without passing an end date."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDateTime(self.jan1)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan3)
def test_invalid_definition(self):
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan31, self.jan1)
def test_invalid_partial_definition(self):
with utils.mocked_datetime_now(self.jan1, fuzzy):
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan31)
def test_naive_start(self):
"""Tests that a timezone-naive start datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan1.replace(tzinfo=None), self.jan31)
def test_naive_end(self):
"""Tests that a timezone-naive end datetime is rejected."""
with self.assertRaises(ValueError):
fuzzy.FuzzyDateTime(self.jan1, self.jan31.replace(tzinfo=None))
def test_force_year(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_year=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.year)
def test_force_month(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_month=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.month)
def test_force_day(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_day=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.day)
def test_force_hour(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_hour=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.hour)
def test_force_minute(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_minute=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.minute)
def test_force_second(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_second=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.second)
def test_force_microsecond(self):
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31, force_microsecond=4)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(4, res.microsecond)
def test_biased(self):
"""Tests a FuzzyDate with a biased random.randint."""
fake_randint = lambda low, high: (low + high) // 2
fuzz = fuzzy.FuzzyDateTime(self.jan1, self.jan31)
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 16, tzinfo=datetime.timezone.utc), res)
def test_biased_partial(self):
"""Tests a FuzzyDate with a biased random and implicit upper bound."""
with utils.mocked_datetime_now(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDateTime(self.jan1)
fake_randint = lambda low, high: (low + high) // 2
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.datetime(2013, 1, 2, tzinfo=datetime.timezone.utc), res)
| FuzzyDateTimeTestCase |
python | pytest-dev__pytest | src/_pytest/_code/source.py | {
"start": 286,
"end": 7859
} | class ____:
"""An immutable object holding a source code fragment.
When using Source(...), the source lines are deindented.
"""
def __init__(self, obj: object = None) -> None:
if not obj:
self.lines: list[str] = []
self.raw_lines: list[str] = []
elif isinstance(obj, Source):
self.lines = obj.lines
self.raw_lines = obj.raw_lines
elif isinstance(obj, tuple | list):
self.lines = deindent(x.rstrip("\n") for x in obj)
self.raw_lines = list(x.rstrip("\n") for x in obj)
elif isinstance(obj, str):
self.lines = deindent(obj.split("\n"))
self.raw_lines = obj.split("\n")
else:
try:
rawcode = getrawcode(obj)
src = inspect.getsource(rawcode)
except TypeError:
src = inspect.getsource(obj) # type: ignore[arg-type]
self.lines = deindent(src.split("\n"))
self.raw_lines = src.split("\n")
def __eq__(self, other: object) -> bool:
if not isinstance(other, Source):
return NotImplemented
return self.lines == other.lines
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
@overload
def __getitem__(self, key: int) -> str: ...
@overload
def __getitem__(self, key: slice) -> Source: ...
def __getitem__(self, key: int | slice) -> str | Source:
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start : key.stop]
newsource.raw_lines = self.raw_lines[key.start : key.stop]
return newsource
def __iter__(self) -> Iterator[str]:
return iter(self.lines)
def __len__(self) -> int:
return len(self.lines)
def strip(self) -> Source:
"""Return new Source object with trailing and leading blank lines removed."""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.raw_lines = self.raw_lines
source.lines[:] = self.lines[start:end]
return source
def indent(self, indent: str = " " * 4) -> Source:
"""Return a copy of the source object with all lines indented by the
given indent-string."""
newsource = Source()
newsource.raw_lines = self.raw_lines
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno: int) -> Source:
"""Return Source statement which contains the given linenumber
(counted from 0)."""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno: int) -> tuple[int, int]:
"""Return (start, end) tuple which spans the minimal statement region
which containing the given lineno."""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
_ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self) -> Source:
"""Return a new Source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
newsource.raw_lines = self.raw_lines
return newsource
def __str__(self) -> str:
return "\n".join(self.lines)
#
# helper functions
#
def findsource(obj) -> tuple[Source | None, int]:
try:
sourcelines, lineno = inspect.findsource(obj)
except Exception:
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
source.raw_lines = sourcelines
return source, lineno
def getrawcode(obj: object, trycall: bool = True) -> types.CodeType:
"""Return code object for given function."""
try:
return obj.__code__ # type: ignore[attr-defined,no-any-return]
except AttributeError:
pass
if trycall:
call = getattr(obj, "__call__", None)
if call and not isinstance(obj, type):
return getrawcode(call, trycall=False)
raise TypeError(f"could not get code object for {obj!r}")
def deindent(lines: Iterable[str]) -> list[str]:
return textwrap.dedent("\n".join(lines)).splitlines()
def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None]:
# Flatten all statements and except handlers into one lineno-list.
# AST's line numbers start indexing at 1.
values: list[int] = []
for x in ast.walk(node):
if isinstance(x, ast.stmt | ast.ExceptHandler):
# The lineno points to the class/def, so need to include the decorators.
if isinstance(x, ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef):
for d in x.decorator_list:
values.append(d.lineno - 1)
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
val: list[ast.stmt] | None = getattr(x, name, None)
if val:
# Treat the finally/orelse part as its own statement.
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
if insert_index == 0:
return 0, None
start = values[insert_index - 1]
if insert_index >= len(values):
end = None
else:
end = values[insert_index]
return start, end
def getstatementrange_ast(
    lineno: int,
    source: Source,
    assertion: bool = False,
    astnode: ast.AST | None = None,
) -> tuple[ast.AST, int, int]:
    """Return ``(astnode, start, end)`` for the statement containing *lineno*.

    ``start``/``end`` are 0-based indices into ``source.lines``.  *astnode*
    is parsed from *source* when the caller does not supply one.  The
    ``assertion`` parameter is not used here — presumably kept for API
    compatibility with callers; confirm before removing.
    """
    if astnode is None:
        content = str(source)
        # See #4260:
        # Don't produce duplicate warnings when compiling source to find AST.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            astnode = ast.parse(content, "source", "exec")
    start, end = get_statement_startend2(lineno, astnode)
    # We need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        # Last known statement: tentatively extend to the end of the source.
        end = len(source.lines)
    if end > start + 1:
        # Make sure we don't span differently indented code blocks
        # by using the BlockFinder helper used which inspect.getsource() uses itself.
        block_finder = inspect.BlockFinder()
        # If we start with an indented line, put blockfinder to "started" mode.
        block_finder.started = (
            bool(source.lines[start]) and source.lines[start][0].isspace()
        )
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            # BlockFinder raises EndOfBlock to signal where the block stops;
            # its ``last`` is relative to the slice, so re-anchor at ``start``.
            end = block_finder.last + start
        except Exception:
            # Tokenizing a partial source can fail in many ways; in that
            # case keep the AST-derived ``end`` as a best effort.
            pass
    # The end might still point to a comment or empty line, correct it.
    end = min(end, len(source.lines))
    while end:
        line = source.lines[end - 1].lstrip()
        if line.startswith("#") or not line:
            end -= 1
        else:
            break
    return astnode, start, end
| Source |
python | scrapy__scrapy | tests/test_utils_datatypes.py | {
"start": 6181,
"end": 6833
} | class ____(TestCaseInsensitiveDictBase):
dict_class = CaselessDict
def test_deprecation_message(self):
with warnings.catch_warnings(record=True) as caught:
warnings.filterwarnings("always", category=ScrapyDeprecationWarning)
self.dict_class({"foo": "bar"})
assert len(caught) == 1
assert issubclass(caught[0].category, ScrapyDeprecationWarning)
assert (
str(caught[0].message)
== "scrapy.utils.datatypes.CaselessDict is deprecated,"
" please use scrapy.utils.datatypes.CaseInsensitiveDict instead"
)
| TestCaselessDict |
python | getsentry__sentry | src/sentry/integrations/api/serializers/models/integration.py | {
"start": 7351,
"end": 7552
} | class ____(TypedDict):
key: str
slug: str
name: str
metadata: Any
canAdd: bool
canDisable: bool
features: list[str]
setupDialog: dict[str, Any]
| IntegrationProviderResponse |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 32910,
"end": 57211
} | class ____(BasePostProgressGroupMixin):
def make_ownership(self, extra_rules=None):
self.user_2 = self.create_user()
self.create_team_membership(team=self.team, user=self.user_2)
rules = [
Rule(Matcher("path", "src/app/*"), [Owner("team", self.team.name)]),
Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)]),
Rule(Matcher("path", "tests/*"), [Owner("user", self.user_2.email)]),
]
if extra_rules:
rules.extend(extra_rules)
self.prj_ownership = ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema(rules),
fallthrough=True,
auto_assignment=True,
)
def test_owner_assignment_order_precedence(self) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id == self.user.id
assert assignee.team is None
owners = list(GroupOwner.objects.filter(group=event.group))
assert len(owners) == 2
assert {(self.user.id, None), (None, self.team.id)} == {
(o.user_id, o.team_id) for o in owners
}
activity = Activity.objects.get(group=event.group)
assert activity.data == {
"assignee": str(self.user.id),
"assigneeEmail": self.user.email,
"assigneeName": self.user.name,
"assigneeType": "user",
"integration": ActivityIntegration.PROJECT_OWNERSHIP.value,
"rule": str(Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])),
}
def test_owner_assignment_extra_groups(self) -> None:
extra_user = self.create_user()
self.create_team_membership(self.team, user=extra_user)
self.make_ownership(
[Rule(Matcher("path", "src/app/things/in/*"), [Owner("user", extra_user.email)])],
)
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/things/in/a/path/example2.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id == extra_user.id
assert assignee.team is None
owners = list(GroupOwner.objects.filter(group=event.group))
assert len(owners) == 2
assert {(extra_user.id, None), (self.user.id, None)} == {
(o.user_id, o.team_id) for o in owners
}
def test_owner_assignment_existing_owners(self) -> None:
extra_team = self.create_team()
ProjectTeam.objects.create(team=extra_team, project=self.project)
self.make_ownership(
[Rule(Matcher("path", "src/app/things/in/*"), [Owner("team", extra_team.slug)])],
)
GroupOwner.objects.create(
group=self.group,
project=self.project,
organization=self.organization,
user_id=self.user.id,
type=GroupOwnerType.OWNERSHIP_RULE.value,
)
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/things/in/a/path/example2.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id is None
assert assignee.team == extra_team
owners = list(GroupOwner.objects.filter(group=event.group))
assert {(None, extra_team.id), (self.user.id, None)} == {
(o.user_id, o.team_id) for o in owners
}
def test_owner_assignment_existing_assignee_preserved(self):
"""
Tests that if a group already has an assignee, post-processing won't reassign it
even if ownership rules change in the interim.
"""
other_team = self.create_team()
ProjectTeam.objects.create(team=other_team, project=self.project)
rules = [
Rule(Matcher("path", "src/*"), [Owner("team", self.team.slug)]),
Rule(Matcher("path", "src/app/*"), [Owner("team", other_team.slug)]),
]
self.prj_ownership = ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema(rules),
fallthrough=True,
auto_assignment=True,
)
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
# No assignee should exist prior to post processing
assert not event.group.assignee_set.exists()
# First post-processing - should assign to other_team (last matching rule)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.team == other_team
new_rules = [
Rule(Matcher("path", "src/app/*"), [Owner("team", other_team.slug)]),
Rule(Matcher("path", "src/*"), [Owner("team", self.team.slug)]),
]
self.prj_ownership.schema = dump_schema(new_rules)
self.prj_ownership.save()
# Run post-processing again - assignee should NOT change
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.team == other_team
# If we had a completely new group, it would get assigned to self.team (new last matching rule)
fresh_event = self.create_event(
data={
"message": "fresh event",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/fresh.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=fresh_event,
)
fresh_assignee = fresh_event.group.assignee_set.first()
assert fresh_assignee.team == self.team
def test_owner_assignment_assign_user(self) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id == self.user.id
assert assignee.team is None
def test_owner_assignment_ownership_no_matching_owners(self) -> None:
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assert not event.group.assignee_set.exists()
def test_owner_assignment_existing_assignment(self) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
event.group.assignee_set.create(team=self.team, project=self.project)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id is None
assert assignee.team == self.team
def test_only_first_assignment_works(self) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
"fingerprint": ["group1"],
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id == self.user.id
assert assignee.team is None
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "tests/src/app/test_example.py"}]},
"fingerprint": ["group1"],
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
# Assignment shouldn't change.
assert assignee.user_id == self.user.id
assert assignee.team is None
def test_owner_assignment_owner_is_gone(self) -> None:
self.make_ownership()
# Remove the team so the rule match will fail to resolve
self.team.delete()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee is None
def test_suspect_committer_affect_cache_debouncing_issue_owners_calculations(self) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/things/in/a/path/example2.py"}]},
},
project_id=self.project.id,
)
committer = GroupOwner(
group=event.group,
project=event.project,
organization=event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
committer.save()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
event.group.assignee_set.create(team=self.team, project=self.project)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id is None
assert assignee.team == self.team
def test_owner_assignment_when_owners_have_been_unassigned(self) -> None:
"""
Test that ensures that if certain assignees get unassigned, and project rules are changed
then the new group assignees should be re-calculated and re-assigned
"""
# Create rules and check assignees
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py", "in_app": True}]},
},
project_id=self.project.id,
)
event_2 = self.create_event(
data={
"message": "Exception",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/integration.py", "in_app": True}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event_2,
)
assignee = event.group.assignee_set.first()
assert assignee.user_id == self.user.id
user_3 = self.create_user()
self.create_team_membership(self.team, user=user_3)
# De-assign group assignees
GroupAssignee.objects.deassign(event.group, self.user)
assert event.group.assignee_set.first() is None
# Change ProjectOwnership rules
rules = [
Rule(Matcher("path", "src/*"), [Owner("user", user_3.email)]),
]
self.prj_ownership.schema = dump_schema(rules)
self.prj_ownership.save()
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event_2,
)
# Group should be re-assigned to the new group owner
assignee = event.group.assignee_set.first()
assert assignee.user_id == user_3.id
# De-assign group assignees
GroupAssignee.objects.deassign(event.group, user_service.get_user(user_id=assignee.user_id))
assert event.group.assignee_set.first() is None
user_4 = self.create_user()
self.create_team_membership(self.team, user=user_4)
self.prj_ownership.schema = dump_schema([])
self.prj_ownership.save()
code_owners_rule = Rule(
Matcher("codeowners", "*.py"),
[Owner("user", user_4.email)],
)
self.code_mapping = self.create_code_mapping(project=self.project)
self.code_owners = self.create_codeowners(
self.project,
self.code_mapping,
schema=dump_schema([code_owners_rule]),
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event_2,
)
# Group should be re-assigned to the new group owner
assignee = event.group.assignee_set.first()
assert assignee.user_id == user_4.id
def test_auto_assignment_when_owners_have_been_unassigned(self) -> None:
"""
Test that ensures that if assignee gets unassigned and project rules are changed,
then the new group assignees should be re-calculated and re-assigned
"""
# Create rules and check assignees
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = (
GroupOwner.objects.filter()
.exclude(user_id__isnull=True, team_id__isnull=True)
.order_by("type")[0]
)
assert assignee.user_id == self.user.id
user_3 = self.create_user()
self.create_team_membership(self.team, user=user_3)
# Set assignee_exists cache to self.user
cache.set(ASSIGNEE_EXISTS_KEY(event.group_id), self.user, ASSIGNEE_EXISTS_DURATION)
# De-assign group assignees
GroupAssignee.objects.deassign(event.group, self.user)
assert event.group.assignee_set.first() is None
# Change ProjectOwnership rules
rules = [
Rule(Matcher("path", "src/*"), [Owner("user", user_3.email)]),
]
self.prj_ownership.schema = dump_schema(rules)
self.prj_ownership.save()
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
# Mimic filter used in get_autoassigned_owner_cached to get the issue owner to be
# auto-assigned
assignee = (
GroupOwner.objects.filter().exclude(user_id__isnull=True, team_id__isnull=True).get()
)
# Group should be re-assigned to the new group owner
assert assignee.user_id == user_3.id
def test_ensure_when_assignees_and_owners_are_cached_does_not_cause_unbound_errors(
self,
) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app.py"}]},
},
project_id=self.project.id,
)
assignee_cache_key = "assignee_exists:1:%s" % event.group.id
owner_cache_key = "owner_exists:1:%s" % event.group.id
for key in [assignee_cache_key, owner_cache_key]:
cache.set(key, True)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
def test_auto_assignment_when_owners_are_invalid(self) -> None:
"""
Test that invalid group owners (that exist due to bugs) are deleted and not assigned
when no valid issue owner exists
"""
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
},
project_id=self.project.id,
)
# Hard code an invalid group owner
invalid_codeowner = GroupOwner(
group=event.group,
project=event.project,
organization=event.project.organization,
type=GroupOwnerType.CODEOWNERS.value,
context={"rule": "codeowners:/**/*.css " + self.user.email},
user_id=self.user.id,
)
invalid_codeowner.save()
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
assignee = event.group.assignee_set.first()
assert assignee is None
assert len(GroupOwner.objects.filter(group_id=event.group)) == 0
@patch("sentry.utils.metrics.incr")
def test_debounces_handle_owner_assignments(self, mock_incr: MagicMock) -> None:
self.make_ownership()
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app.py"}]},
},
project_id=self.project.id,
)
cache.set(ISSUE_OWNERS_DEBOUNCE_KEY(event.group_id), True, ISSUE_OWNERS_DEBOUNCE_DURATION)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
mock_incr.assert_any_call("sentry.tasks.post_process.handle_owner_assignment.debounce")
@patch("sentry.utils.metrics.incr")
def test_issue_owners_should_ratelimit(self, mock_incr: MagicMock) -> None:
cache.set(
f"issue_owner_assignment_ratelimiter:{self.project.id}",
(set(range(0, ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT * 10, 10)), datetime.now()),
)
event = self.create_event(
data={
"message": "oh no",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event,
)
mock_incr.assert_any_call("sentry.task.post_process.handle_owner_assignment.ratelimited")
mock_incr.reset_mock()
# Raise this organization's ratelimit
with self.feature("organizations:increased-issue-owners-rate-limit"):
# Create a new event to avoid debouncing
event2 = self.create_event(
data={
"message": "oh no again",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app2.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event2,
)
with pytest.raises(AssertionError):
mock_incr.assert_any_call(
"sentry.task.post_process.handle_owner_assignment.ratelimited"
)
mock_incr.reset_mock()
cache.set(
f"issue_owner_assignment_ratelimiter:{self.project.id}",
(
set(range(0, HIGHER_ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT * 10, 10)),
datetime.now(),
),
)
with self.feature("organizations:increased-issue-owners-rate-limit"):
# Create a new event to avoid debouncing
event3 = self.create_event(
data={
"message": "oh no yet again",
"platform": "python",
"stacktrace": {"frames": [{"filename": "src/app3.py"}]},
},
project_id=self.project.id,
)
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=False,
event=event3,
)
mock_incr.assert_any_call(
"sentry.task.post_process.handle_owner_assignment.ratelimited"
)
| AssignmentTestMixin |
python | walkccc__LeetCode | solutions/99. Recover Binary Search Tree/99-2.py | {
"start": 0,
"end": 571
} | class ____:
def recoverTree(self, root: TreeNode | None) -> None:
pred = None
x = None # the first wrong node
y = None # the second wrong node
stack = []
while root or stack:
while root:
stack.append(root)
root = root.left
root = stack.pop()
if pred and root.val < pred.val:
y = root
if not x:
x = pred
pred = root
root = root.right
def swap(x: TreeNode | None, y: TreeNode | None) -> None:
temp = x.val
x.val = y.val
y.val = temp
swap(x, y)
| Solution |
python | astropy__astropy | astropy/samp/hub.py | {
"start": 768,
"end": 54486
} | class ____:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
The secret code to use for the SAMP lockfile. If none is is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
automatically unregisters the clients which result inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
runs using the standard ``.samp`` lock-file, having a single instance
for user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
then the Hub runs using a non-standard lock-file, placed in
``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
"""
def __init__(
self,
secret=None,
addr=None,
port=0,
lockfile=None,
timeout=0,
client_timeout=0,
mode="single",
label="",
web_profile=True,
web_profile_dialog=None,
web_port=21012,
pool_size=20,
):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, "samp.hub.ping")
server.register_function(
self._set_xmlrpc_callback, "samp.hub.setXmlrpcCallback"
)
# Standard API operations
server.register_function(self._register, "samp.hub.register")
server.register_function(self._unregister, "samp.hub.unregister")
server.register_function(self._declare_metadata, "samp.hub.declareMetadata")
server.register_function(self._get_metadata, "samp.hub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.hub.declareSubscriptions"
)
server.register_function(self._get_subscriptions, "samp.hub.getSubscriptions")
server.register_function(
self._get_registered_clients, "samp.hub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.hub.getSubscribedClients"
)
server.register_function(self._notify, "samp.hub.notify")
server.register_function(self._notify_all, "samp.hub.notifyAll")
server.register_function(self._call, "samp.hub.call")
server.register_function(self._call_all, "samp.hub.callAll")
server.register_function(self._call_and_wait, "samp.hub.callAndWait")
server.register_function(self._reply, "samp.hub.reply")
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, "samp.webhub.ping")
server.register_function(self._unregister, "samp.webhub.unregister")
server.register_function(self._declare_metadata, "samp.webhub.declareMetadata")
server.register_function(self._get_metadata, "samp.webhub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.webhub.declareSubscriptions"
)
server.register_function(
self._get_subscriptions, "samp.webhub.getSubscriptions"
)
server.register_function(
self._get_registered_clients, "samp.webhub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.webhub.getSubscribedClients"
)
server.register_function(self._notify, "samp.webhub.notify")
server.register_function(self._notify_all, "samp.webhub.notifyAll")
server.register_function(self._call, "samp.webhub.call")
server.register_function(self._call_all, "samp.webhub.callAll")
server.register_function(self._call_and_wait, "samp.webhub.callAndWait")
server.register_function(self._reply, "samp.webhub.reply")
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, "samp.webhub.register")
server.register_function(
self._web_profile_allowReverseCallbacks, "samp.webhub.allowReverseCallbacks"
)
server.register_function(
self._web_profile_pullCallbacks, "samp.webhub.pullCallbacks"
)
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log,
logRequests=False,
allow_none=True,
)
prot = "http"
self._port = self._server.socket.getsockname()[1]
addr = f"{self._addr or self._host_name}:{self._port}"
self._url = urlunparse((prot, addr, "", "", "", ""))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = self._web_profile_requests_queue
self._web_profile_dialog.queue_result = self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
("localhost", self._web_port), log, logRequests=False, allow_none=True
)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except OSError:
log.warning(
f"Port {self._web_port} already in use. Impossible to run the "
"Hub with Web Profile support.",
SAMPWarning,
)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn(
"Timeout expired, Hub is shutting down!", SAMPWarning
)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
for private_key in self._client_activity_time.keys():
if (
now - self._client_activity_time[private_key]
> self._client_timeout
and private_key != self._hub_private_key
):
warnings.warn(
f"Client {private_key} timeout expired!", SAMPWarning
)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == "samp.client.receiveCall":
return self._receive_call(*args)
elif method == "samp.client.receiveNotification":
return self._receive_notification(*args)
elif method == "samp.client.receiveResponse":
return self._receive_response(*args)
elif method == "samp.app.ping":
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {
"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "https://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon",
}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(
self._hub_private_key, {"samp.app.ping": {}, "x-samp.query.by-meta": {}}
)
def start(self, wait=False):
    """
    Start the current SAMP Hub instance and create the lock file. Hub
    start-up can be blocking or non blocking depending on the ``wait``
    parameter.

    Parameters
    ----------
    wait : bool
        If `True` then the Hub process is joined with the caller, blocking
        the code flow. Usually `True` option is used to run a stand-alone
        Hub in an executable script. If `False` (default), then the Hub
        process runs in a separated thread. `False` is usually used in a
        Python shell.
    """
    if self._is_running:
        raise SAMPHubError("Hub is already running")
    if self._lockfile is not None:
        raise SAMPHubError("Hub is not running but lockfile is set")
    if self._web_profile:
        self._start_web_profile_server()
    self._start_standard_server()
    # Advertise this hub to other processes via the SAMP lock file.
    self._lockfile = create_lock_file(
        lockfilename=self._customlockfilename,
        mode=self._mode,
        hub_id=self.id,
        hub_params=self.params,
    )
    self._update_last_activity_time()
    self._setup_hub_as_client()
    self._start_threads()
    log.info("Hub started")
    if wait and self._is_running:
        # Block until the serving thread terminates (i.e. until stop()).
        self._thread_run.join()
        self._thread_run = None
@property
def params(self):
    """
    The hub parameters (which are written to the logfile).
    """
    return {
        # Keys required by the standard profile
        "samp.secret": self._hub_secret,
        "samp.hub.xmlrpc.url": self._url,
        "samp.profile.version": __profile_version__,
        # Custom keys
        "hub.id": self.id,
        "hub.label": self._label or f"Hub {self.id}",
    }
def _start_threads(self):
    """Create and start the serving thread plus the optional hub and
    client timeout watchdog threads.

    All threads are daemonized so they never block interpreter exit.
    """
    self._thread_run = threading.Thread(target=self._serve_forever)
    self._thread_run.daemon = True
    if self._timeout > 0:
        self._thread_hub_timeout = threading.Thread(
            target=self._timeout_test_hub, name="Hub timeout test"
        )
        self._thread_hub_timeout.daemon = True
    else:
        self._thread_hub_timeout = None
    if self._client_timeout > 0:
        self._thread_client_timeout = threading.Thread(
            target=self._timeout_test_client, name="Client timeout test"
        )
        self._thread_client_timeout.daemon = True
    else:
        self._thread_client_timeout = None
    # The flag must be set before starting: the threads' loops test it.
    self._is_running = True
    self._thread_run.start()
    if self._thread_hub_timeout is not None:
        self._thread_hub_timeout.start()
    if self._thread_client_timeout is not None:
        self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
    """
    Stop the current SAMP Hub instance and delete the lock file.
    """
    if not self._is_running:
        return
    log.info("Hub is stopping...")
    # Broadcast samp.hub.event.shutdown to subscribed clients first.
    self._notify_shutdown()
    # Clearing the flag makes _serve_forever and the watchdogs exit.
    self._is_running = False
    # Only delete the lock file if it is still ours (secret matches).
    if self._lockfile and os.path.isfile(self._lockfile):
        lockfiledict = read_lockfile(self._lockfile)
        if lockfiledict["samp.secret"] == self._hub_secret:
            os.remove(self._lockfile)
        self._lockfile = None
    # Reset variables
    # TODO: What happens if not all threads are stopped after timeout?
    self._join_all_threads(timeout=10.0)
    self._hub_msg_id_counter = 0
    self._hub_secret = self._create_secret_code()
    self._hub_public_id = ""
    self._metadata = {}
    self._private_keys = {}
    self._mtype2ids = {}
    self._id2mtypes = {}
    self._xmlrpc_endpoints = {}
    self._last_activity_time = None
    log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
    """Join the serving, watchdog and launched worker threads, skipping
    whichever thread is doing the calling.

    Each thread reference is cleared only if the join actually
    succeeded (the thread is no longer alive).
    """
    # In some cases, ``stop`` may be called from some of the sub-threads,
    # so we just need to make sure that we don't try and shut down the
    # calling thread.
    current_thread = threading.current_thread()
    if self._thread_run is not current_thread:
        self._thread_run.join(timeout=timeout)
        if not self._thread_run.is_alive():
            self._thread_run = None
    if (
        self._thread_hub_timeout is not None
        and self._thread_hub_timeout is not current_thread
    ):
        self._thread_hub_timeout.join(timeout=timeout)
        if not self._thread_hub_timeout.is_alive():
            self._thread_hub_timeout = None
    if (
        self._thread_client_timeout is not None
        and self._thread_client_timeout is not current_thread
    ):
        self._thread_client_timeout.join(timeout=timeout)
        if not self._thread_client_timeout.is_alive():
            self._thread_client_timeout = None
    self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
    """Whether this hub instance is currently running.

    Returns
    -------
    running : bool
        Is the hub running?
    """
    return self._is_running
def _serve_forever(self):
    """Main serving loop: poll the standard-profile XML-RPC socket (and,
    when enabled, the web-profile socket) and handle one request per
    ready socket per iteration. Runs until ``self._is_running`` is
    cleared, then closes the server sockets.
    """
    while self._is_running:
        try:
            # Short select timeout so the loop re-checks _is_running often.
            read_ready = select.select([self._server.socket], [], [], 0.01)[0]
        except OSError as exc:
            warnings.warn(
                f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
            )
        else:
            if read_ready:
                self._server.handle_request()
        if self._web_profile:
            # We now check if there are any connection requests from the
            # web profile, and if so, we initialize the pop-up.
            if self._web_profile_dialog is None:
                try:
                    request = self._web_profile_requests_queue.get_nowait()
                except queue.Empty:
                    pass
                else:
                    web_profile_text_dialog(
                        request, self._web_profile_requests_result
                    )
            # We now check for requests over the web profile socket, and we
            # also update the pop-up in case there are any changes.
            try:
                read_ready = select.select(
                    [self._web_profile_server.socket], [], [], 0.01
                )[0]
            except OSError as exc:
                warnings.warn(
                    f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
                )
            else:
                if read_ready:
                    self._web_profile_server.handle_request()
    self._server.server_close()
    if self._web_profile_server is not None:
        self._web_profile_server.server_close()
def _notify_shutdown(self):
    """Send ``samp.hub.event.shutdown`` to every subscribed client.

    Delivery is synchronous (calls ``_notify_`` directly) since the hub
    is about to stop and launching worker threads would be pointless.
    """
    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
    for mtype in msubs:
        if mtype in self._mtype2ids:
            for key in self._mtype2ids[mtype]:
                self._notify_(
                    self._hub_private_key,
                    self._private_keys[key][0],
                    {"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}},
                )
def _notify_register(self, private_key):
    """Broadcast ``samp.hub.event.register`` announcing the client owning
    *private_key* to every client subscribed to that event (including
    the newly registered client itself)."""
    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
    for mtype in msubs:
        if mtype in self._mtype2ids:
            public_id = self._private_keys[private_key][0]
            for key in self._mtype2ids[mtype]:
                # Deliberately not skipping the registering client itself.
                # if key != private_key:
                self._notify(
                    self._hub_private_key,
                    self._private_keys[key][0],
                    {
                        "samp.mtype": "samp.hub.event.register",
                        "samp.params": {"id": public_id},
                    },
                )
def _notify_unregister(self, private_key):
    """Broadcast ``samp.hub.event.unregister`` announcing the departure
    of the client owning *private_key*, skipping that client itself."""
    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
    for mtype in msubs:
        if mtype in self._mtype2ids:
            public_id = self._private_keys[private_key][0]
            for key in self._mtype2ids[mtype]:
                if key != private_key:
                    self._notify(
                        self._hub_private_key,
                        self._private_keys[key][0],
                        {
                            "samp.mtype": "samp.hub.event.unregister",
                            "samp.params": {"id": public_id},
                        },
                    )
def _notify_metadata(self, private_key):
    """Broadcast ``samp.hub.event.metadata`` with the (new) metadata of
    the client owning *private_key* to all subscribed clients."""
    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
    for mtype in msubs:
        if mtype in self._mtype2ids:
            public_id = self._private_keys[private_key][0]
            for key in self._mtype2ids[mtype]:
                # Deliberately not skipping the declaring client itself.
                # if key != private_key:
                self._notify(
                    self._hub_private_key,
                    self._private_keys[key][0],
                    {
                        "samp.mtype": "samp.hub.event.metadata",
                        "samp.params": {
                            "id": public_id,
                            "metadata": self._metadata[private_key],
                        },
                    },
                )
def _notify_subscriptions(self, private_key):
    """Broadcast ``samp.hub.event.subscriptions`` with the current MType
    subscriptions of the client owning *private_key*."""
    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
    for mtype in msubs:
        if mtype in self._mtype2ids:
            public_id = self._private_keys[private_key][0]
            for key in self._mtype2ids[mtype]:
                self._notify(
                    self._hub_private_key,
                    self._private_keys[key][0],
                    {
                        "samp.mtype": "samp.hub.event.subscriptions",
                        "samp.params": {
                            "id": public_id,
                            "subscriptions": self._id2mtypes[private_key],
                        },
                    },
                )
def _notify_disconnection(self, private_key):
    """Tell the client owning *private_key* that the hub is forcibly
    disconnecting it (e.g. its inactivity timeout expired).

    The ``samp.hub.disconnect`` notification is only sent if the client
    is itself subscribed to that MType; delivery happens on a worker
    thread via the client's XML-RPC endpoint.
    """

    def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
        # Worker: direct receiveNotification call on the client endpoint.
        endpoint.samp.client.receiveNotification(
            private_key, hub_public_id, message
        )

    msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
    public_id = self._private_keys[private_key][0]
    endpoint = self._xmlrpc_endpoints[public_id][1]
    for mtype in msubs:
        if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
            log.debug(f"notify disconnection to {public_id}")
            self._launch_thread(
                target=_xmlrpc_call_disconnect,
                args=(
                    endpoint,
                    private_key,
                    self._hub_public_id,
                    {
                        "samp.mtype": "samp.hub.disconnect",
                        "samp.params": {"reason": "Timeout expired!"},
                    },
                ),
            )
def _ping(self):
    """Answer a ``samp.app.ping``: refresh the hub activity timestamp
    and return the string ``"1"``."""
    log.debug("ping")
    self._update_last_activity_time()
    return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
    """Record the XML-RPC callback endpoint of a registered client.

    For the hub's own internal client an in-process ``_HubAsClient``
    shim is used; for any other client a pool of ``ServerProxy``
    connections to *xmlrpc_addr* is created.

    Raises
    ------
    SAMPProxyError
        If *private_key* does not belong to a registered client.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        if private_key == self._hub_private_key:
            public_id = self._private_keys[private_key][0]
            self._xmlrpc_endpoints[public_id] = (
                xmlrpc_addr,
                _HubAsClient(self._hub_as_client_request_handler),
            )
            return ""
        # Dictionary stored with the public id
        log.debug(f"set_xmlrpc_callback: {private_key} {xmlrpc_addr}")
        # (Removed a redundant ``server_proxy_pool = None`` assignment that
        # was immediately overwritten.)
        server_proxy_pool = ServerProxyPool(
            self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1
        )
        public_id = self._private_keys[private_key][0]
        self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool)
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
    return ""
def _perform_standard_register(self):
    """Allocate a new (private key, public ID) pair for a registering
    client and broadcast the registration event.

    Returns the standard-profile registration map with ``samp.self-id``,
    ``samp.private-key`` and ``samp.hub-id``.
    """
    with self._thread_lock:
        private_key, public_id = self._get_new_ids()
    # The value stored is (public_id, registration timestamp).
    self._private_keys[private_key] = (public_id, time.time())
    self._update_last_activity_time(private_key)
    self._notify_register(private_key)
    log.debug(f"register: private-key = {private_key} and self-id = {public_id}")
    return {
        "samp.self-id": public_id,
        "samp.private-key": private_key,
        "samp.hub-id": self._hub_public_id,
    }
def _register(self, secret):
    """Standard-profile ``samp.hub.register``: register a new client if
    *secret* matches the hub secret.

    Raises
    ------
    SAMPProxyError
        Code 7 when the supplied secret is wrong.
    """
    self._update_last_activity_time()
    if secret == self._hub_secret:
        return self._perform_standard_register()
    else:
        # return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
        raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = "cli#hub"
if self._client_id_counter > 0:
public_id = f"cli#{self._client_id_counter}"
return private_key, public_id
def _unregister(self, private_key):
    """Remove every trace of the client owning *private_key* from the
    hub's bookkeeping structures, after broadcasting the unregister
    event. Always returns the empty string (XML-RPC void)."""
    self._update_last_activity_time()
    public_key = ""
    self._notify_unregister(private_key)
    with self._thread_lock:
        if private_key in self._private_keys:
            public_key = self._private_keys[private_key][0]
            del self._private_keys[private_key]
        else:
            # Unknown (or already removed) client: nothing to do.
            return ""
        if private_key in self._metadata:
            del self._metadata[private_key]
        if private_key in self._id2mtypes:
            del self._id2mtypes[private_key]
        # Drop the client from every MType subscriber list.
        for mtype in self._mtype2ids.keys():
            if private_key in self._mtype2ids[mtype]:
                self._mtype2ids[mtype].remove(private_key)
        if public_key in self._xmlrpc_endpoints:
            del self._xmlrpc_endpoints[public_key]
        if private_key in self._client_activity_time:
            del self._client_activity_time[private_key]
        if self._web_profile:
            if private_key in self._web_profile_callbacks:
                del self._web_profile_callbacks[private_key]
            self._web_profile_server.remove_client(private_key)
    log.debug(f"unregister {public_key} ({private_key})")
    return ""
def _declare_metadata(self, private_key, metadata):
    """Store *metadata* for the client owning *private_key* and broadcast
    the metadata event.

    Raises
    ------
    SAMPProxyError
        Code 5 when the private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        log.debug(f"declare_metadata: private-key = {private_key} {metadata = !s}")
        self._metadata[private_key] = metadata
        self._notify_metadata(private_key)
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
    return ""
def _get_metadata(self, private_key, client_id):
    """Return the metadata declared by the client with public ID
    *client_id* ({} if it declared none).

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid caller key, code 6 for an unknown
        *client_id*.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        client_private_key = self._public_id_to_private_key(client_id)
        log.debug(
            f"get_metadata: private-key = {private_key} client-id = {client_id}"
        )
        if client_private_key is not None:
            if client_private_key in self._metadata:
                log.debug(f"--> metadata = {self._metadata[client_private_key]}")
                return self._metadata[client_private_key]
            else:
                return {}
        else:
            raise SAMPProxyError(6, "Invalid client ID")
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _declare_subscriptions(self, private_key, mtypes):
    """Replace the MType subscriptions of the client owning
    *private_key* with *mtypes* (a dict of MType -> annotation map),
    then broadcast the subscriptions event.

    Specific MTypes already covered by a wildcard entry in *mtypes* are
    dropped as duplicates.

    Raises
    ------
    SAMPProxyError
        Code 5 when the private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        log.debug(
            f"declare_subscriptions: private-key = {private_key} {mtypes = !s}"
        )
        # remove subscription to previous mtypes
        if private_key in self._id2mtypes:
            prev_mtypes = self._id2mtypes[private_key]
            for mtype in prev_mtypes:
                try:
                    self._mtype2ids[mtype].remove(private_key)
                except ValueError:  # private_key is not in list
                    pass
        self._id2mtypes[private_key] = copy.deepcopy(mtypes)
        # remove duplicated MType for wildcard overwriting
        original_mtypes = copy.deepcopy(mtypes)
        for mtype in original_mtypes:
            if mtype.endswith("*"):
                for mtype2 in original_mtypes:
                    # e.g. "samp.app.*" makes "samp.app.ping" redundant.
                    if mtype2.startswith(mtype[:-1]) and mtype2 != mtype:
                        if mtype2 in mtypes:
                            del mtypes[mtype2]
        log.debug(
            "declare_subscriptions: subscriptions accepted from "
            f"{private_key} => {str(mtypes)}"
        )
        # Register the client under each surviving MType.
        for mtype in mtypes:
            if mtype in self._mtype2ids:
                if private_key not in self._mtype2ids[mtype]:
                    self._mtype2ids[mtype].append(private_key)
            else:
                self._mtype2ids[mtype] = [private_key]
        self._notify_subscriptions(private_key)
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
    return ""
def _get_subscriptions(self, private_key, client_id):
    """Return the MType subscriptions declared by the client with public
    ID *client_id* ({} if it declared none).

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid caller key, code 6 for an unknown
        *client_id*.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        client_private_key = self._public_id_to_private_key(client_id)
        if client_private_key is not None:
            if client_private_key in self._id2mtypes:
                log.debug(
                    f"get_subscriptions: client-id = {client_id} "
                    f"mtypes = {self._id2mtypes[client_private_key]!s}"
                )
                return self._id2mtypes[client_private_key]
            else:
                log.debug(
                    f"get_subscriptions: client-id = {client_id} mtypes = missing"
                )
                return {}
        else:
            raise SAMPProxyError(6, "Invalid client ID")
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_registered_clients(self, private_key):
    """Return the public IDs of all registered clients except the caller.

    Raises
    ------
    SAMPProxyError
        Code 5 when the caller's private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        reg_clients = []
        for pkey in self._private_keys.keys():
            if pkey != private_key:
                reg_clients.append(self._private_keys[pkey][0])
        log.debug(
            f"get_registered_clients: {private_key = !s} clients = {reg_clients}"
        )
        return reg_clients
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_subscribed_clients(self, private_key, mtype):
    """Return a map ``{public_id: {}}`` of every client (other than the
    caller) subscribed to *mtype*, wildcards included.

    Raises
    ------
    SAMPProxyError
        Code 5 when the caller's private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        sub_clients = {}
        for pkey in self._private_keys.keys():
            if pkey != private_key and self._is_subscribed(pkey, mtype):
                sub_clients[self._private_keys[pkey][0]] = {}
        log.debug(
            f"get_subscribed_clients: private_key = {private_key} mtype = {mtype} "
            f"clients = {sub_clients}"
        )
        return sub_clients
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[: i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
    """Return True when *private_key* is subscribed to *mtype*, either
    directly or through any wildcard pattern covering it."""
    for pattern in SAMPHubServer.get_mtype_subtypes(mtype):
        if private_key in self._mtype2ids.get(pattern, ()):
            return True
    return False
def _notify(self, private_key, recipient_id, message):
    """Standard-profile ``samp.hub.notify``: validate the request and
    deliver the notification asynchronously via ``_notify_``.

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid sender key, code 2 when the recipient is
        not subscribed to the message's MType.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        if not (
            self._is_subscribed(
                self._public_id_to_private_key(recipient_id), message["samp.mtype"]
            )
        ):
            raise SAMPProxyError(
                2,
                "Client {} not subscribed to MType {}".format(
                    recipient_id, message["samp.mtype"]
                ),
            )
        # Actual delivery happens on a worker thread.
        self._launch_thread(
            target=self._notify_, args=(private_key, recipient_id, message)
        )
        return {}
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_(self, sender_private_key, recipient_public_id, message):
    """Delivery worker for a notification: invoke the recipient's
    ``receiveNotification`` endpoint (with retries), swallowing any
    failure into a warning since this runs detached on a thread."""
    if sender_private_key not in self._private_keys:
        # Sender unregistered in the meantime: drop the message.
        return
    sender_public_id = self._private_keys[sender_private_key][0]
    try:
        log.debug(
            "notify {} from {} to {}".format(
                message["samp.mtype"], sender_public_id, recipient_public_id
            )
        )
        recipient_private_key = self._public_id_to_private_key(recipient_public_id)
        arg_params = (sender_public_id, message)
        samp_method_name = "receiveNotification"
        self._retry_method(
            recipient_private_key, recipient_public_id, samp_method_name, arg_params
        )
    except Exception as exc:
        warnings.warn(
            "{} notification from client {} to client {} failed [{}]".format(
                message["samp.mtype"], sender_public_id, recipient_public_id, exc
            ),
            SAMPWarning,
        )
def _notify_all(self, private_key, message):
    """Standard-profile ``samp.hub.notifyAll``: broadcast *message* to
    every subscribed client and return the recipients' public IDs.

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid sender key, code 3 when the message lacks
        ``samp.mtype``.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        if "samp.mtype" not in message:
            raise SAMPProxyError(3, "samp.mtype keyword is missing")
        recipient_ids = self._notify_all_(private_key, message)
        return recipient_ids
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_all_(self, sender_private_key, message):
    """Broadcast worker for ``notifyAll``: launch one delivery thread per
    subscribed recipient (excluding the sender) and return their public
    IDs. Warns when nobody is subscribed to the message's MType."""
    recipient_ids = []
    msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
    for mtype in msubs:
        if mtype in self._mtype2ids:
            for key in self._mtype2ids[mtype]:
                if key != sender_private_key:
                    _recipient_id = self._private_keys[key][0]
                    recipient_ids.append(_recipient_id)
                    # Launch the delivery worker _notify_ directly:
                    # recipients come from _mtype2ids so they are already
                    # known to be subscribed, and going through the
                    # validating front-end _notify would redundantly
                    # re-check and spawn a second thread per recipient
                    # (mirrors _call_all_, which launches _call_).
                    self._launch_thread(
                        target=self._notify_,
                        args=(sender_private_key, _recipient_id, message),
                    )
    if not recipient_ids:
        warnings.warn(
            "No client was able to receive this message",
            SAMPWarning,
        )
    return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
    """Standard-profile ``samp.hub.call``: validate the request, allocate
    a hub message ID and deliver the call asynchronously. Returns the
    message ID that will tag the eventual reply.

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid sender key, code 2 when the recipient is
        not subscribed to the message's MType.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        if not (
            self._is_subscribed(
                self._public_id_to_private_key(recipient_id), message["samp.mtype"]
            )
        ):
            raise SAMPProxyError(
                2,
                "Client {} not subscribed to MType {}".format(
                    recipient_id, message["samp.mtype"]
                ),
            )
        public_id = self._private_keys[private_key][0]
        msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
        # Delivery happens on a worker thread; callers wait for the reply.
        self._launch_thread(
            target=self._call_,
            args=(private_key, public_id, recipient_id, msg_id, message),
        )
        return msg_id
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_(
    self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message
):
    """Delivery worker for a call: invoke the recipient's ``receiveCall``
    endpoint (with retries), turning any failure into a warning since
    this runs detached on a thread."""
    if sender_private_key not in self._private_keys:
        # Sender unregistered in the meantime: drop the call.
        return
    try:
        log.debug(
            "call {} from {} to {} ({})".format(
                msg_id.split(";;")[0],
                sender_public_id,
                recipient_public_id,
                message["samp.mtype"],
            )
        )
        recipient_private_key = self._public_id_to_private_key(recipient_public_id)
        arg_params = (sender_public_id, msg_id, message)
        samp_methodName = "receiveCall"
        self._retry_method(
            recipient_private_key, recipient_public_id, samp_methodName, arg_params
        )
    except Exception as exc:
        warnings.warn(
            "{} call {} from client {} to client {} failed [{},{}]".format(
                message["samp.mtype"],
                msg_id.split(";;")[0],
                sender_public_id,
                recipient_public_id,
                type(exc),
                exc,
            ),
            SAMPWarning,
        )
def _call_all(self, private_key, msg_tag, message):
    """Standard-profile ``samp.hub.callAll``: deliver *message* to every
    subscribed client and return a map ``{public_id: msg_id}``.

    Raises
    ------
    SAMPProxyError
        Code 5 for an invalid sender key, code 3 when the message lacks
        ``samp.mtype``.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        if "samp.mtype" not in message:
            raise SAMPProxyError(
                3,
                f"samp.mtype keyword is missing in message tagged as {msg_tag}",
            )
        public_id = self._private_keys[private_key][0]
        msg_id = self._call_all_(private_key, public_id, msg_tag, message)
        return msg_id
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message):
    """Broadcast worker for ``callAll``: launch one ``_call_`` delivery
    thread per subscribed recipient (excluding the sender), giving each
    its own hub message ID. Returns ``{recipient_public_id: msg_id}``."""
    msg_id = {}
    msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
    for mtype in msubs:
        if mtype in self._mtype2ids:
            for key in self._mtype2ids[mtype]:
                if key != sender_private_key:
                    _msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag)
                    receiver_public_id = self._private_keys[key][0]
                    msg_id[receiver_public_id] = _msg_id
                    self._launch_thread(
                        target=self._call_,
                        args=(
                            sender_private_key,
                            sender_public_id,
                            receiver_public_id,
                            _msg_id,
                            message,
                        ),
                    )
    return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
    """Standard-profile ``samp.hub.callAndWait``: deliver a call and
    busy-wait (10 ms polls) for the recipient's response, up to
    *timeout* seconds (<= 0 means wait indefinitely).

    Raises
    ------
    SAMPProxyTimeoutError
        When *timeout* expires before a response arrives.
    SAMPProxyError
        Code 5 when the caller's private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        timeout = int(timeout)
        now = time.time()
        response = {}
        msg_id = self._call(private_key, recipient_id, "samp::sync::call", message)
        # _reply_ fills this slot in when the response comes back.
        self._sync_msg_ids_heap[msg_id] = None
        while self._is_running:
            if 0 < timeout <= time.time() - now:
                del self._sync_msg_ids_heap[msg_id]
                raise SAMPProxyTimeoutError(1, "Timeout expired!")
            if self._sync_msg_ids_heap[msg_id] is not None:
                response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
                del self._sync_msg_ids_heap[msg_id]
                break
            time.sleep(0.01)
        return response
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _reply(self, private_key, msg_id, response):
    """
    The main method that gets called for replying. This starts up an
    asynchronous reply thread and returns.

    Raises
    ------
    SAMPProxyError
        Code 5 when the responder's private key is unknown.
    """
    self._update_last_activity_time(private_key)
    if private_key in self._private_keys:
        self._launch_thread(
            target=self._reply_, args=(private_key, msg_id, response)
        )
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
    return {}
def _reply_(self, responder_private_key, msg_id, response):
    """Delivery worker for a reply. The hub message ID encodes
    ``counter;;hub_id;;recipient_id;;msg_tag``; synchronous calls
    (tag ``samp::sync::call``) are completed in-process by filling the
    waiting slot, all others are forwarded via ``receiveResponse``."""
    if responder_private_key not in self._private_keys or not msg_id:
        return
    responder_public_id = self._private_keys[responder_private_key][0]
    counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(
        ";;", 3
    )
    try:
        log.debug(
            f"reply {counter} from {responder_public_id} to {recipient_public_id}"
        )
        if recipient_msg_tag == "samp::sync::call":
            # Wake up the _call_and_wait poller with the response.
            if msg_id in self._sync_msg_ids_heap.keys():
                self._sync_msg_ids_heap[msg_id] = response
        else:
            recipient_private_key = self._public_id_to_private_key(
                recipient_public_id
            )
            arg_params = (responder_public_id, recipient_msg_tag, response)
            samp_method_name = "receiveResponse"
            self._retry_method(
                recipient_private_key,
                recipient_public_id,
                samp_method_name,
                arg_params,
            )
    except Exception as exc:
        warnings.warn(
            (
                f"{recipient_msg_tag} reply from client {responder_public_id} "
                f"to client {recipient_public_id} failed [{exc}]"
            ),
            SAMPWarning,
        )
def _retry_method(
    self, recipient_private_key, recipient_public_id, samp_method_name, arg_params
):
    """
    This method is used to retry a SAMP call several times.

    Parameters
    ----------
    recipient_private_key
        The private key of the receiver of the call
    recipient_public_key
        The public key of the receiver of the call
    samp_method_name : str
        The name of the SAMP method to call
    arg_params : tuple
        Any additional arguments to be passed to the SAMP method

    Raises
    ------
    SAMPHubError
        If the recipient key is None, or if all retry attempts fail.
    """
    if recipient_private_key is None:
        raise SAMPHubError("Invalid client ID")

    from . import conf

    for attempt in range(conf.n_retries):
        if not self._is_running:
            # Hub shutting down: burn the remaining attempts quietly.
            time.sleep(0.01)
            continue
        try:
            if (
                self._web_profile
                and recipient_private_key in self._web_profile_callbacks
            ):
                # Web Profile
                callback = {
                    "samp.methodName": samp_method_name,
                    "samp.params": arg_params,
                }
                self._web_profile_callbacks[recipient_private_key].put(callback)
            else:
                # Standard Profile
                hub = self._xmlrpc_endpoints[recipient_public_id][1]
                getattr(hub.samp.client, samp_method_name)(
                    recipient_private_key, *arg_params
                )
        except xmlrpc.Fault as exc:
            log.debug(
                f"{recipient_public_id} XML-RPC endpoint error "
                f"(attempt {attempt + 1}): {exc.faultString}"
            )
            time.sleep(0.01)
        else:
            return

    # If we are here, then the above attempts failed
    error_message = (
        samp_method_name + " failed after " + str(conf.n_retries) + " attempts"
    )
    raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
return (
f"msg#{self._hub_msg_id_counter};;{self._hub_public_id};;"
f"{sender_public_id};;{sender_msg_id}"
)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
    """Hub-as-client handler for incoming calls: answers pings and
    metadata queries addressed to the hub itself; everything else is
    silently acknowledged with the XML-RPC void result."""
    if private_key == self._hub_private_key:
        if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
            self._reply(
                self._hub_private_key,
                msg_id,
                {"samp.status": SAMP_STATUS_OK, "samp.result": {}},
            )
        elif "samp.mtype" in message and (
            message["samp.mtype"] == "x-samp.query.by-meta"
            or message["samp.mtype"] == "samp.query.by-meta"
        ):
            # Answer with the public IDs of clients matching the
            # requested metadata key/value pair.
            ids_list = self._query_by_metadata(
                message["samp.params"]["key"], message["samp.params"]["value"]
            )
            self._reply(
                self._hub_private_key,
                msg_id,
                {"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}},
            )
        return ""
    else:
        return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(
    self, identity_info, client_address=("unknown", 0), origin="unknown"
):
    """Web-profile registration: only local connections are accepted,
    and the user must approve the request through the pop-up dialogue
    serviced by ``_serve_forever``.

    Raises
    ------
    SAMPProxyError
        Code 403 when the connection is non-local, the application name
        is missing, or the user rejects the request.
    """
    self._update_last_activity_time()
    if client_address[0] not in ["localhost", "127.0.0.1"]:
        raise SAMPProxyError(403, "Request of registration rejected by the Hub.")
    if not origin:
        origin = "unknown"
    if isinstance(identity_info, dict):
        # an old version of the protocol provided just a string with the app name
        if "samp.name" not in identity_info:
            raise SAMPProxyError(
                403,
                "Request of registration rejected "
                "by the Hub (application name not "
                "provided).",
            )
    # Red semaphore for the other threads
    self._web_profile_requests_semaphore.put("wait")
    # Set the request to be displayed for the current thread
    self._web_profile_requests_queue.put((identity_info, client_address, origin))
    # Get the popup dialogue response
    response = self._web_profile_requests_result.get()
    # OK, semaphore green
    self._web_profile_requests_semaphore.get()
    if response:
        register_map = self._perform_standard_register()
        # URL prefix the client uses to fetch cross-origin resources
        # through the hub's translator endpoint.
        translator_url = "http://localhost:{}/translator/{}?ref=".format(
            self._web_port, register_map["samp.private-key"]
        )
        register_map["samp.url-translator"] = translator_url
        self._web_profile_server.add_client(register_map["samp.private-key"])
        return register_map
    else:
        raise SAMPProxyError(403, "Request of registration rejected by the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
    """Web-profile ``allowReverseCallbacks``: enable (*allow* != "0") or
    disable reverse callbacks for the client by creating or removing its
    callback queue.

    Raises
    ------
    SAMPProxyError
        Code 5 when the private key is unknown.
    """
    self._update_last_activity_time()
    if private_key in self._private_keys:
        # The web profile transmits the flag as the string "0"/"1".
        if allow == "0":
            if private_key in self._web_profile_callbacks:
                del self._web_profile_callbacks[private_key]
        else:
            self._web_profile_callbacks[private_key] = queue.Queue()
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
    return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
    """Web-profile ``pullCallbacks``: drain and return every callback
    queued for the client since the last pull.

    NOTE(review): *timeout_secs* is accepted but never used — this
    returns immediately instead of long-polling; confirm intended.
    NOTE(review): if reverse callbacks were never allowed for this key,
    the direct ``self._web_profile_callbacks[private_key]`` lookup will
    raise KeyError — presumably callers always enable callbacks first.

    Raises
    ------
    SAMPProxyError
        Code 5 when the private key is unknown.
    """
    self._update_last_activity_time()
    if private_key in self._private_keys:
        callback = []
        callback_queue = self._web_profile_callbacks[private_key]
        try:
            # Drain without blocking; queue.Empty terminates the loop.
            while self._is_running:
                item_queued = callback_queue.get_nowait()
                callback.append(item_queued)
        except queue.Empty:
            pass
        return callback
    else:
        raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
| SAMPHubServer |
python | openai__openai-python | tests/api_resources/fine_tuning/checkpoints/test_permissions.py | {
"start": 7302,
"end": 14545
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
)
assert_matches_type(AsyncPage[PermissionCreateResponse], permission, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(AsyncPage[PermissionCreateResponse], permission, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = await response.parse()
assert_matches_type(AsyncPage[PermissionCreateResponse], permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create(
fine_tuned_model_checkpoint="",
project_ids=["string"],
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="after",
limit=0,
order="ascending",
project_id="project_id",
)
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = await response.parse()
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
fine_tuned_model_checkpoint="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.delete(
permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        """Raw-response variant of delete: HTTP metadata exposed, body still parses."""
        response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        permission = response.parse()
        assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        """Streaming-response variant of delete: open inside the context, closed after."""
        async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            permission = await response.parse()
            assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
        # leaving the context must close the underlying HTTP response
        assert cast(Any, response.is_closed) is True
    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        """Empty checkpoint id and empty permission id must each be rejected client-side."""
        with pytest.raises(
            ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
        ):
            await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
                permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
                fine_tuned_model_checkpoint="",
            )
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
            await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
                permission_id="",
                fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
            )
| TestAsyncPermissions |
python | jina-ai__jina | jina/orchestrate/deployments/__init__.py | {
"start": 5059,
"end": 83394
} | class ____(JAMLCompatible, PostMixin, BaseOrchestrator, metaclass=DeploymentType):
"""A Deployment is an immutable set of pods, which run in replicas. They share the same input and output socket.
Internally, the pods can run with the process/thread backend. They can also be run in their own containers
:param args: arguments parsed from the CLI
:param needs: deployments names of preceding deployments, the output of these deployments go into the input of this deployment
"""
    class _ReplicaSet:
        """Set of replica Pods for one shard: starts them together, closes them
        together and, for stateful Deployments, registers the non-leader
        replicas as RAFT voters with the leader once all Pods are ready."""
        def __init__(
            self,
            deployment_args: Namespace,
            args: List[Namespace],
            head_pod,
            name,
        ):
            # shallow copy: popping 'name' below must not mutate the caller's namespace
            self.deployment_args = copy.copy(deployment_args)
            self.args = args
            self.shard_id = args[0].shard_id
            self._pods = []
            self.head_pod = head_pod
            self.name = name
            logger_kwargs = vars(self.deployment_args)
            # 'name' is passed explicitly to JinaLogger, so drop it from the kwargs
            logger_kwargs.pop('name')
            self.logger = JinaLogger(context=self.name, name=self.name, **logger_kwargs)
        def _add_voter_to_leader(self):
            # Pod 0 acts as the RAFT leader; all remaining replicas join as voters.
            leader_address = f'{self._pods[0].runtime_ctrl_address}'
            voter_addresses = [pod.runtime_ctrl_address for pod in self._pods[1:]]
            replica_ids = [pod.args.replica_id for pod in self._pods[1:]]
            self.logger.debug('Starting process to call Add Voters')
            res = _call_add_voters(
                leader=leader_address,
                voters=voter_addresses,
                replica_ids=replica_ids,
                logger=self.logger,
            )
            if res:
                self.logger.debug('Finished adding voters')
            else:
                self.logger.error('Adding Voters did not finish successfully')
        async def _async_add_voter_to_leader(self):
            # Async counterpart of `_add_voter_to_leader`.
            leader_address = f'{self._pods[0].runtime_ctrl_address}'
            voter_addresses = [pod.runtime_ctrl_address for pod in self._pods[1:]]
            replica_ids = [pod.args.replica_id for pod in self._pods[1:]]
            self.logger.debug('Starting process to call Add Voters')
            res = await _async_call_add_voters(
                leader=leader_address,
                voters=voter_addresses,
                replica_ids=replica_ids,
                logger=self.logger,
            )
            if res:
                self.logger.debug('Finished adding voters')
            else:
                self.logger.error('Adding Voters did not finish successfully')
        @property
        def is_ready(self):
            # True only when every Pod has signalled readiness.
            return all(p.is_ready.is_set() for p in self._pods)
        def clear_pods(self):
            # Drops the Pod references without closing them.
            self._pods.clear()
        @property
        def num_pods(self):
            # Number of Pods currently tracked by this replica set.
            return len(self._pods)
        def join(self):
            # Block until every Pod process/thread has terminated.
            for pod in self._pods:
                pod.join()
        def wait_start_success(self):
            self.logger.debug('Waiting for ReplicaSet to start successfully')
            for pod in self._pods:
                pod.wait_start_success()
            # should this be done only when the cluster is started ?
            if self._pods[0].args.stateful:
                self._add_voter_to_leader()
            self.logger.debug('ReplicaSet started successfully')
        async def async_wait_start_success(self):
            self.logger.debug('Waiting for ReplicaSet to start successfully')
            await asyncio.gather(
                *[pod.async_wait_start_success() for pod in self._pods]
            )
            # should this be done only when the cluster is started ?
            if self._pods[0].args.stateful:
                await self._async_add_voter_to_leader()
            self.logger.debug('ReplicaSet started successfully')
        def __enter__(self):
            # Start Pods without blocking; readiness is awaited separately via
            # (async_)wait_start_success.
            for _args in self.args:
                _args.noblock_on_start = True
                pod = PodFactory.build_pod(_args).start()
                self._pods.append(pod)
            return self
        def __exit__(self, exc_type, exc_val, exc_tb):
            # Close every Pod even if some raise; re-raise the first close error
            # only when the managed body itself exited cleanly.
            closing_exception = None
            for pod in self._pods:
                try:
                    pod.close()
                except Exception as exc:
                    if closing_exception is None:
                        closing_exception = exc
            if exc_val is None and closing_exception is not None:
                raise closing_exception
# overload_inject_start_deployment
    @overload
    # NOTE: typing-only stub inside the auto-generated `overload_inject` region;
    # it is overwritten on regeneration — do not hand-edit the signature here.
    def __init__(
        self,
        *,
        allow_concurrent: Optional[bool] = False,
        compression: Optional[str] = None,
        connection_list: Optional[str] = None,
        cors: Optional[bool] = False,
        description: Optional[str] = None,
        disable_auto_volume: Optional[bool] = False,
        docker_kwargs: Optional[dict] = None,
        entrypoint: Optional[str] = None,
        env: Optional[dict] = None,
        exit_on_exceptions: Optional[List] = [],
        external: Optional[bool] = False,
        floating: Optional[bool] = False,
        force_update: Optional[bool] = False,
        gpus: Optional[str] = None,
        grpc_channel_options: Optional[dict] = None,
        grpc_metadata: Optional[dict] = None,
        grpc_server_options: Optional[dict] = None,
        host: Optional[List] = ['0.0.0.0'],
        install_requirements: Optional[bool] = False,
        log_config: Optional[str] = None,
        metrics: Optional[bool] = False,
        metrics_exporter_host: Optional[str] = None,
        metrics_exporter_port: Optional[int] = None,
        monitoring: Optional[bool] = False,
        name: Optional[str] = 'executor',
        native: Optional[bool] = False,
        no_reduce: Optional[bool] = False,
        output_array_type: Optional[str] = None,
        polling: Optional[str] = 'ANY',
        port: Optional[int] = None,
        port_monitoring: Optional[int] = None,
        prefer_platform: Optional[str] = None,
        protocol: Optional[Union[str, List[str]]] = ['GRPC'],
        provider: Optional[str] = ['NONE'],
        provider_endpoint: Optional[str] = None,
        py_modules: Optional[List] = None,
        quiet: Optional[bool] = False,
        quiet_error: Optional[bool] = False,
        raft_configuration: Optional[dict] = None,
        reload: Optional[bool] = False,
        replicas: Optional[int] = 1,
        retries: Optional[int] = -1,
        runtime_cls: Optional[str] = 'WorkerRuntime',
        shards: Optional[int] = 1,
        ssl_certfile: Optional[str] = None,
        ssl_keyfile: Optional[str] = None,
        stateful: Optional[bool] = False,
        timeout_ctrl: Optional[int] = 60,
        timeout_ready: Optional[int] = 600000,
        timeout_send: Optional[int] = None,
        title: Optional[str] = None,
        tls: Optional[bool] = False,
        traces_exporter_host: Optional[str] = None,
        traces_exporter_port: Optional[int] = None,
        tracing: Optional[bool] = False,
        uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
        uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
        uses_after_address: Optional[str] = None,
        uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
        uses_before_address: Optional[str] = None,
        uses_dynamic_batching: Optional[dict] = None,
        uses_metas: Optional[dict] = None,
        uses_requests: Optional[dict] = None,
        uses_with: Optional[dict] = None,
        uvicorn_kwargs: Optional[dict] = None,
        volumes: Optional[List] = None,
        when: Optional[dict] = None,
        workspace: Optional[str] = None,
        **kwargs,
    ):
        """Create a Deployment to serve or deploy and Executor or Gateway
        :param allow_concurrent: Allow concurrent requests to be processed by the Executor. This is only recommended if the Executor is thread-safe.
        :param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
        :param connection_list: dictionary JSON with a list of connections to configure
        :param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
        :param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
        :param disable_auto_volume: Do not automatically mount a volume for dockerized Executors.
        :param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
          container.
          More details can be found in the Docker SDK docs:  https://docker-py.readthedocs.io/en/stable/
        :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
        :param env: The map of environment variables that are available inside runtime
        :param exit_on_exceptions: List of exceptions that will cause the Executor to shut down.
        :param external: The Deployment will be considered an external Deployment that has been started independently from the Flow.This Deployment will not be context managed by the Flow.
        :param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
        :param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
        :param gpus: This argument allows dockerized Jina Executors to discover local gpu devices.
        
            Note, 
            - To access all gpus, use `--gpus all`.
            - To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
            - To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
            - To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
            - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display
        :param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
        :param grpc_metadata: The metadata to be passed to the gRPC request.
        :param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
        :param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0. In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts.  Then, every resulting address will be considered as one replica of the Executor.
        :param install_requirements: If set, try to install `requirements.txt` from the local Executor if exists in the Executor folder. If using Hub, install `requirements.txt` in the Hub Executor bundle to local.
        :param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
        :param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
        :param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
        :param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
        :param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
        :param name: The name of this object.
        
            This will be used in the following places:
            - how you refer to this object in Python/YAML/CLI
            - visualization
            - log message header
            - ...
        
            When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.
        :param no_reduce: Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`
        :param output_array_type: The type of array `tensor` and `embedding` will be serialized to.
        
          Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found 
          `here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
          Defaults to retaining whatever type is returned by the Executor.
        :param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
            Can be defined for all endpoints of a Deployment or by endpoint.
            Define per Deployment:
            - ANY: only one (whoever is idle) Pod polls the message
            - ALL: all Pods poll the message (like a broadcast)
            Define per Endpoint:
            JSON dict, {endpoint: PollingType}
            {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
        :param port: The port for input data to bind to, default is a random port between [49152, 65535]. In the case of an external Executor (`--external` or `external=True`) this can be a list of ports. Then, every resulting address will be considered as one replica of the Executor.
        :param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
        :param prefer_platform: The preferred target Docker platform. (e.g. "linux/amd64", "linux/arm64")
        :param protocol: Communication protocol of the server exposed by the Executor. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
        :param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
        :param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
        :param py_modules: The customized python modules need to be imported before loading the executor
        
          Note that the recommended way is to only import a single module - a simple python file, if your
          executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
          which should be structured as a python package. For more details, please see the
          `Executor cookbook <https://jina.ai/serve/concepts/executor/executor-files/>`__
        :param quiet: If set, then no log will be emitted from this object.
        :param quiet_error: If set, then exception stack information will not be added to the log
        :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node.
        :param reload: If set, the Executor will restart while serving if YAML configuration source or Executor modules are changed. If YAML configuration is changed, the whole deployment is reloaded and new processes will be restarted. If only Python modules of the Executor have changed, they will be reloaded to the interpreter without restarting process.
        :param replicas: The number of replicas in the deployment
        :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
        :param runtime_cls: The runtime class to run inside the Pod
        :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies
        :param ssl_certfile: the path to the certificate file
        :param ssl_keyfile: the path to the key file
        :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
        :param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
        :param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
        :param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
        :param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
        :param tls: If set, connect to deployment using tls encryption
        :param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
        :param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
        :param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
        :param uses: The config of the executor, it could be one of the followings:
                * the string literal of an Executor class name
                * an Executor YAML file (.yml, .yaml, .jaml)
                * a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
                * a docker image (must start with `docker://`)
                * the string literal of a YAML config (must start with `!` or `jtype: `)
                * the string literal of a JSON config
        
                When use it under Python, one can use the following values additionally:
                - a Python dict that represents the config
                - a text file stream has `.read()` interface
        :param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
        :param uses_after_address: The address of the uses-before runtime
        :param uses_before: The executor attached before the Pods described by --uses, typically before sending to all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
        :param uses_before_address: The address of the uses-before runtime
        :param uses_dynamic_batching: Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
        :param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
        :param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
        :param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
        :param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        
          More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
        :param volumes: The path on the host to be mounted inside the container.
        
          Note,
          - If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
          - If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
          - All volumes are mounted with read-write mode.
        :param when: The condition that the documents need to fulfill before reaching the Executor.The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`
        :param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
        
        .. # noqa: DAR202
        .. # noqa: DAR101
        .. # noqa: DAR003
        """
# overload_inject_end_deployment
    def __init__(
        self,
        args: Union['Namespace', Dict, None] = None,
        needs: Optional[Set[str]] = None,
        include_gateway: bool = True,
        **kwargs,
    ):
        """Create a Deployment from a parsed namespace or keyword arguments.

        :param args: pre-parsed namespace (or a dict in a remote context); when
            None, ``kwargs`` are parsed with the deployment parser
        :param needs: names of preceding deployments whose output feeds this one
        :param include_gateway: whether to also configure a Gateway pod so the
            Deployment can be served standalone
        :param kwargs: CLI-style deployment (and gateway) arguments
        """
        super().__init__()
        self._gateway_kwargs = {}
        self._include_gateway = include_gateway
        if self._include_gateway:
            # arguments exclusive to the gateway
            for field in ['port', 'ports']:
                if field in kwargs:
                    self._gateway_kwargs[field] = kwargs.pop(field)
            # arguments common to both gateway and the Executor
            for field in ['host', 'log_config']:
                if field in kwargs:
                    self._gateway_kwargs[field] = kwargs[field]
        parser = set_deployment_parser()
        if args is None:
            args = ArgNamespace.kwargs2namespace(kwargs, parser, True)
        self.args = args
        self._gateway_load_balancer = False
        # CSP providers (SageMaker/Azure) require a fixed HTTP endpoint on 8080;
        # override the user's port/protocol choices and warn about it.
        if self.args.provider in (ProviderType.SAGEMAKER, ProviderType.AZURE):
            if self._gateway_kwargs.get('port', 0) == 8080:
                raise ValueError(
                    'Port 8080 is reserved for CSP deployment. '
                    'Please use another port'
                )
            if self.args.port != [8080]:
                warnings.warn(
                    'Port is changed to 8080 for CSP deployment. '
                    f'Port {self.args.port} is ignored'
                )
                self.args.port = [8080]
            if self.args.protocol != [ProtocolType.HTTP]:
                warnings.warn(
                    'Protocol is changed to HTTP for CSP deployment. '
                    f'Protocol {self.args.protocol} is ignored'
                )
                self.args.protocol = [ProtocolType.HTTP]
        if self._include_gateway and ProtocolType.HTTP in self.args.protocol:
            self._gateway_load_balancer = True
        log_config = kwargs.get('log_config')
        if log_config:
            self.args.log_config = log_config
        self.args.polling = (
            args.polling if hasattr(args, 'polling') else PollingType.ANY
        )
        # polling only works for shards, if there are none, polling will be ignored
        if getattr(args, 'shards', 1) == 1:
            self.args.polling = PollingType.ANY
        if (
            getattr(args, 'shards', 1) > 1
            and ProtocolType.HTTP in self.args.protocol
            and self.args.deployment_role != DeploymentRoleType.GATEWAY
        ):
            raise RuntimeError(
                f'It is not supported to have {ProtocolType.HTTP.to_string()} deployment for '
                f'Deployments with more than one shard'
            )
        if (
            ProtocolType.WEBSOCKET in self.args.protocol
            and self.args.deployment_role != DeploymentRoleType.GATEWAY
        ):
            raise RuntimeError(
                f'It is not supported to have {ProtocolType.WEBSOCKET.to_string()} deployment for '
                f'Deployments'
            )
        # the stateful (RAFT consensus) feature has platform/version restrictions
        is_mac_os = platform.system() == 'Darwin'
        is_windows_os = platform.system() == 'Windows'
        is_37 = sys.version_info.major == 3 and sys.version_info.minor == 7
        if self.args.stateful and (is_windows_os or (is_mac_os and is_37)):
            if is_windows_os:
                raise RuntimeError('Stateful feature is not available on Windows')
            if is_mac_os:
                raise RuntimeError(
                    'Stateful feature when running on MacOS requires Python3.8 or newer version'
                )
        if self.args.stateful and (
            ProtocolType.WEBSOCKET in self.args.protocol
            or ProtocolType.HTTP in self.args.protocol
            or len(self.args.protocol) > 1
        ):
            raise RuntimeError(
                f'Stateful feature is only available for Deployments using a single {ProtocolType.GRPC.to_string()} protocol. {self.args.protocol} were requested'
            )
        self.needs = (
            needs or set()
        )  #: used in the :class:`jina.flow.Flow` to build the graph
        # parse addresses for distributed replicas
        (
            self.ext_repl_hosts,
            self.ext_repl_ports,
            self.ext_repl_schemes,
            self.ext_repl_tls,
        ) = ([], [], [], [])
        if self.args.pod_role != PodRoleType.GATEWAY:
            self._parse_external_replica_hosts_and_ports()
            self._parse_addresses_into_host_and_port()
        if len(self.ext_repl_ports) > 1:
            # multiple external replica addresses: replica count must match them
            if self.args.replicas != 1 and self.args.replicas != len(
                self.ext_repl_ports
            ):
                raise ValueError(
                    f'Number of hosts ({len(self.args.host)}) does not match the number of replicas ({self.args.replicas})'
                )
            elif self.args.external:
                self.args.replicas = len(self.ext_repl_ports)
        self.uses_before_pod = None
        self.uses_after_pod = None
        self.head_pod = None
        self.gateway_pod = None
        self.shards = {}
        self._update_port_monitoring_args()
        self.update_pod_args()
        if self._include_gateway:
            # wire a single-executor graph through the gateway
            gateway_parser = set_gateway_parser()
            args = ArgNamespace.kwargs2namespace(
                self._gateway_kwargs, gateway_parser, True
            )
            args.protocol = self.args.protocol
            args.deployments_addresses = json.dumps(
                {'executor': self._get_connection_list_for_single_executor()}
            )
            args.graph_description = (
                '{"start-gateway": ["executor"], "executor": ["end-gateway"]}'
            )
            _update_gateway_args(
                args, gateway_load_balancer=self._gateway_load_balancer
            )
            self.pod_args['gateway'] = args
        else:
            self.pod_args['gateway'] = None
        self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
def _get_connection_list_for_flow(self) -> List[str]:
if self.head_args:
# add head information
return [f'{self.protocol.lower()}://{self.host}:{self.head_port}']
else:
# there is no head, add the worker connection information instead
ports = self.ports
hosts = [
(
__docker_host__
if host_is_local(host) and in_docker() and self._is_docker
else host
)
for host in self.hosts
]
return [
f'{self.protocol.lower()}://{host}:{port}'
for host, port in zip(hosts, ports)
]
def _get_connection_list_for_single_executor(
self,
) -> Union[List[str], Dict[str, List[str]]]:
if self.head_args:
# add head information
return [f'{self.protocol.lower()}://{self.host}:{self.head_port}']
else:
# there is no head, add the worker connection information instead
ports_dict = defaultdict(list)
for replica_pod_arg in self.pod_args['pods'][0]:
for protocol, port in zip(
replica_pod_arg.protocol, replica_pod_arg.port
):
ports_dict[str(protocol)].append(port)
host = (
__docker_host__
if host_is_local(self.args.host[0]) and in_docker() and self._is_docker
else self.args.host[0]
)
connection_dict = {}
for protocol, ports in ports_dict.items():
connection_dict[protocol] = [
f'{protocol.lower()}://{host}:{port}' for port in ports
]
return connection_dict
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Close all Pods, report the telemetry 'stop' event and shut down logging.

        :param exc_type: exception type raised inside the context, if any
        :param exc_val: exception instance raised inside the context, if any
        :param exc_tb: traceback of the raised exception, if any
        """
        super().__exit__(exc_type, exc_val, exc_tb)
        self.join()
        if self._include_gateway:
            # telemetry is only reported for self-contained (gateway-including)
            # Deployments; duration measures the serving lifetime
            self._stop_time = time.time()
            send_telemetry_event(
                event='stop',
                obj_cls_name=self.__class__.__name__,
                entity_id=self._entity_id,
                duration=self._stop_time - self._start_time,
                exc_type=str(exc_type),
            )
        self.logger.close()
def _parse_addresses_into_host_and_port(self):
# splits addresses passed to `host` into separate `host` and `port`
for i, _host in enumerate(self.args.host):
_hostname, port, scheme, tls = parse_host_scheme(_host)
if _hostname != _host: # more than just hostname was passed to `host`
self.args.host[i] = _hostname
self.args.port[i] = port
self.args.scheme = scheme
self.args.tls = tls
for i, repl_host in enumerate(self.ext_repl_hosts):
_hostname, port, scheme, tls = parse_host_scheme(repl_host)
if (
_hostname != self.ext_repl_hosts[i]
): # more than just hostname was passed to `host`
self.ext_repl_hosts[i] = _hostname
self.ext_repl_ports[i] = port
self.ext_repl_schemes[i] = scheme
self.ext_repl_tls[i] = tls
    def _parse_external_replica_hosts_and_ports(self):
        """Expand user-provided host/port lists so every distributed replica gets one
        (host, port) pair; broadcasts a single host (or port) across the longer list."""
        # splits user provided lists of hosts and ports into a host and port for every distributed replica
        ext_repl_ports: List = self.args.port.copy()
        ext_repl_hosts: List = self.args.host.copy()
        if len(ext_repl_hosts) < len(ext_repl_ports):
            if (
                len(ext_repl_hosts) == 1
            ):  # only one host given, assume replicas are on the same host
                ext_repl_hosts = ext_repl_hosts * len(ext_repl_ports)
                self.args.host = self.args.host * len(ext_repl_ports)
        elif len(ext_repl_hosts) > len(ext_repl_ports):
            if (
                len(ext_repl_ports) == 1
            ):  # only one port given, assume replicas are on the same port
                ext_repl_ports = ext_repl_ports * len(ext_repl_hosts)
                self.args.port = self.args.port * len(ext_repl_hosts)
        # if both lists have >1 entries and still differ, the input is ambiguous
        if len(ext_repl_hosts) != len(ext_repl_ports):
            raise ValueError(
                f'Number of hosts ({len(ext_repl_hosts)}) does not match the number of ports ({len(ext_repl_ports)})'
            )
        self.ext_repl_hosts, self.ext_repl_ports = ext_repl_hosts, ext_repl_ports
        # varying tls and schemes other than 'grpc' only implemented if the entire address is passed to `host`
        self.ext_repl_schemes = [
            getattr(self.args, 'scheme', None) for _ in self.ext_repl_ports
        ]
        self.ext_repl_tls = [
            getattr(self.args, 'tls', None) for _ in self.ext_repl_ports
        ]
def _update_port_monitoring_args(self):
_all_port_monitoring = self.args.port_monitoring
self.args.all_port_monitoring = (
[_all_port_monitoring]
if not type(_all_port_monitoring) == list
else _all_port_monitoring
)
self.args.port_monitoring = int(
self.args.all_port_monitoring[0]
) # this is for the head
def update_pod_args(self):
"""Update args of all its pods based on Deployment args. Including head/tail"""
if self.args.runtime_cls == 'GatewayRuntime':
_update_gateway_args(self.args)
if isinstance(self.args, Dict):
# This is used when a Deployment is created in a remote context, where pods & their connections are already given.
self.pod_args = self.args
else:
self.pod_args = self._parse_args(self.args)
    def update_worker_pod_args(self):
        """Update args of all its worker pods based on Deployment args. Does not touch head and tail"""
        # only the worker shard/replica namespaces are recomputed; head args stay as-is
        self.pod_args['pods'] = self._set_pod_args()
    @property
    def role(self) -> 'DeploymentRoleType':
        """Return the role of this :class:`Deployment`.
        .. # noqa: DAR201
        """
        # role (e.g. gateway vs. regular deployment) as parsed from the CLI args
        return self.args.deployment_role
    @property
    def name(self) -> str:
        """The name of this :class:`Deployment`.
        .. # noqa: DAR201
        """
        # user-facing name from the parsed args
        return self.args.name
    @property
    def head_host(self) -> str:
        """Get the host of the HeadPod of this deployment; None when there is no head
        .. # noqa: DAR201
        """
        return self.head_args.host if self.head_args else None
    @property
    def head_port(self):
        """Get the port of the HeadPod of this deployment; None when there is no head
        .. # noqa: DAR201
        """
        # the head exposes a list of ports; the first one is its data port
        return self.head_args.port[0] if self.head_args else None
    @property
    def head_port_monitoring(self):
        """Get the port_monitoring of the HeadPod of this deployment; None when there is no head
        .. # noqa: DAR201
        """
        return self.head_args.port_monitoring if self.head_args else None
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port,
protocol=self.protocol,
grpc_channel_options=self.args.grpc_channel_options,
)
kwargs.update(self._gateway_kwargs)
return Client(**kwargs)
@staticmethod
def _copy_to_head_args(args: Namespace) -> Namespace:
"""
Set the outgoing args of the head router
:param args: basic arguments
:return: enriched head arguments
"""
_head_args = copy.deepcopy(args)
_head_args.polling = args.polling
_head_args.port = args.port
_head_args.host = args.host[0]
_head_args.uses = args.uses
_head_args.pod_role = PodRoleType.HEAD
_head_args.runtime_cls = 'HeadRuntime'
_head_args.replicas = 1
if args.name:
_head_args.name = f'{args.name}/head'
else:
_head_args.name = 'head'
return _head_args
@property
def deployments(self) -> List[Dict]:
"""Get deployments of the deployment. The Deployment just gives one deployment.
:return: list of deployments
"""
return [
{
'name': self.name,
'head_host': self.head_host,
'head_port': self.head_port,
}
]
@property
def _is_docker(self) -> bool:
"""
Check if this deployment is to be run in Docker.
:return: True if this deployment is to be run in Docker
"""
from hubble.executor.helper import is_valid_docker_uri
uses = getattr(self.args, 'uses', '')
return is_valid_docker_uri(uses)
@property
def _is_executor_from_yaml(self) -> bool:
"""
Check if this deployment is to be run from YAML configuration.
:return: True if this deployment is to be run from YAML configuration
"""
uses = getattr(self.args, 'uses', '')
return uses.endswith('yml') or uses.endswith('yaml')
@property
def tls_enabled(self):
"""
Check if secure connection via TLS is enabled for this Deployment.
:return: True if tls is enabled, False otherwise
"""
has_cert = getattr(self.args, 'ssl_certfile', None) is not None
has_key = getattr(self.args, 'ssl_keyfile', None) is not None
tls = getattr(self.args, 'tls', False)
return tls or (has_cert and has_key)
    @property
    def external(self) -> bool:
        """
        Check if this deployment is external.

        External deployments are started and managed outside of the Flow.

        :return: True if this deployment is provided as an external deployment, False otherwise
        """
        return getattr(self.args, 'external', False)
    @property
    def grpc_metadata(self):
        """
        Get the gRPC metadata for this deployment

        :return: The gRPC metadata for this deployment. If the deployment is a Gateway, return None.
        """
        # None when the args carry no grpc_metadata (e.g. gateway namespaces)
        return getattr(self.args, 'grpc_metadata', None)
@property
def protocol(self):
"""
:return: the protocol of this deployment
"""
args = self.pod_args['gateway'] or self.args
protocol = getattr(args, 'protocol', ['grpc'])
if not isinstance(protocol, list):
protocol = [protocol]
protocol = [
str(_p).lower() + ('s' if self.tls_enabled else '') for _p in protocol
]
if len(protocol) == 1:
return protocol[0]
else:
return protocol
    @property
    def first_pod_args(self) -> Namespace:
        """Return the first worker pod's args
        .. # noqa: DAR201
        """
        # note this will be never out of boundary
        # prefers the gateway args when a gateway pod is configured
        return self.pod_args['gateway'] or self.pod_args['pods'][0][0]
@property
def host(self) -> str:
    """Get the host name of this deployment

    .. # noqa: DAR201
    """
    first = self.first_pod_args
    return first.host
@property
def port(self):
    """
    :return: The port of this deployment
    """
    # the first entry of the first pod's port list is the canonical port
    port_list = self.first_pod_args.port
    return port_list[0]
@property
def ports(self) -> List[int]:
    """Returns a list of ports exposed by this Deployment.
    Exposed means these are the ports a Client/Gateway is supposed to communicate with.
    For sharded deployments this will be the head_port.
    For non-sharded deployments it will be all replica ports.

    .. # noqa: DAR201
    """
    if self.head_port:
        return [self.head_port]
    # flatten the (possibly list-valued) ports of every replica of shard 0
    collected = []
    for replica_args in self.pod_args['pods'][0]:
        replica_port = replica_args.port
        if isinstance(replica_port, list):
            collected.extend(replica_port)
        else:
            collected.append(replica_port)
    return collected
@property
def hosts(self) -> List[str]:
    """Returns a list of host addresses exposed by this Deployment.
    Exposed means these are the host a Client/Gateway is supposed to communicate with.
    For sharded deployments this will be the head host.
    For non-sharded deployments it will be all replica hosts.

    .. # noqa: DAR201
    """
    if self.head_host:
        return [self.head_host]
    return [replica_args.host for replica_args in self.pod_args['pods'][0]]
def _parse_args(
    self, args: Namespace
) -> Dict[str, Optional[Union[List[Namespace], Namespace]]]:
    # delegate to the shared base-deployment parsing logic
    parsed = self._parse_base_deployment_args(args)
    return parsed
@property
def head_args(self) -> Namespace:
    """Get the arguments for the `head` of this Deployment.

    .. # noqa: DAR201
    """
    # the head args live in the shared pod_args mapping
    return self.pod_args['head']

@head_args.setter
def head_args(self, args):
    """Set the arguments for the `head` of this Deployment.

    .. # noqa: DAR101
    """
    self.pod_args['head'] = args
@property
def uses_before_args(self) -> Namespace:
    """Get the arguments for the `uses_before` of this Deployment.

    .. # noqa: DAR201
    """
    # stored under the dedicated pod_args slot
    return self.pod_args['uses_before']

@uses_before_args.setter
def uses_before_args(self, args):
    """Set the arguments for the `uses_before` of this Deployment.

    .. # noqa: DAR101
    """
    self.pod_args['uses_before'] = args
@property
def uses_after_args(self) -> Namespace:
    """Get the arguments for the `uses_after` of this Deployment.

    .. # noqa: DAR201
    """
    # stored under the dedicated pod_args slot
    return self.pod_args['uses_after']

@uses_after_args.setter
def uses_after_args(self, args):
    """Set the arguments for the `uses_after` of this Deployment.

    .. # noqa: DAR101
    """
    self.pod_args['uses_after'] = args
@property
def all_args(self) -> List[Namespace]:
    """Get all arguments of all Pods in this Deployment.

    .. # noqa: DAR201
    """
    # order matters: uses_before, uses_after, head, gateway, then all shards
    collected = []
    if self.pod_args['uses_before']:
        collected.append(self.pod_args['uses_before'])
    if self.pod_args['uses_after']:
        collected.append(self.pod_args['uses_after'])
    if self.pod_args['head']:
        collected.append(self.pod_args['head'])
    if self._include_gateway:
        collected.append(self.pod_args['gateway'])
    for shard_id in self.pod_args['pods']:
        collected.extend(self.pod_args['pods'][shard_id])
    return collected
@property
def num_pods(self) -> int:
    """Get the number of running :class:`Pod`

    .. # noqa: DAR201
    """
    # each auxiliary pod counts once ...
    singleton_pods = (
        self.head_pod,
        self.uses_before_pod,
        self.uses_after_pod,
        self.gateway_pod,
    )
    total = sum(1 for pod in singleton_pods if pod is not None)
    # ... and every shard contributes its own pod count
    if self.shards:  # external deployments
        total += sum(self.shards[shard_id].num_pods for shard_id in self.shards)
    return total
def __eq__(self, other: 'Deployment'):
    # deployments compare equal when pod count and name both match
    same_size = self.num_pods == other.num_pods
    return same_size and self.name == other.name
@staticmethod
def get_worker_host(pod_args, pod_is_container, head_is_container):
    """
    Check if the current pod and head are both containerized on the same host
    If so, __docker_host__ needs to be advertised as the worker's address to the head

    :param pod_args: arguments of the worker pod
    :param pod_is_container: boolean specifying if pod is to be run in container
    :param head_is_container: boolean specifying if head pod is to be run in container
    :return: host to pass in connection list of the head
    """
    # both pod and head (or this process itself) run in containers on the
    # same local machine -> advertise the docker host alias instead
    containerized_together = pod_is_container and (
        head_is_container or in_docker()
    )
    if containerized_together and host_is_local(pod_args.host):
        return __docker_host__
    return pod_args.host
def _wait_until_all_ready(self):
    """Synchronously block until every pod of this Deployment is ready.

    Handles both call contexts: plain synchronous code (runs the readiness
    coroutine on the thread's event loop) and code where an asyncio loop is
    already running (delegates the blocking wait to a helper thread).
    """
    import warnings

    with warnings.catch_warnings():
        wait_for_ready_coro = self.async_wait_start_success()

        try:
            # ensure this thread has an event loop at all
            _ = asyncio.get_event_loop()
        except:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        async def _f():
            pass

        # probe: running a trivial coroutine raises iff a loop is already running here
        running_in_event_loop = False
        try:
            asyncio.get_event_loop().run_until_complete(_f())
        except:
            running_in_event_loop = True

        if not running_in_event_loop:
            asyncio.get_event_loop().run_until_complete(wait_for_ready_coro)
        else:
            # cannot re-enter the running loop: block via a daemon thread instead
            wait_ready_thread = threading.Thread(
                target=self.wait_start_success, daemon=True
            )
            wait_ready_thread.start()
            wait_ready_thread.join()
def start(self) -> 'Deployment':
    """
    Start to run all :class:`Pod` in this Deployment.

    :return: started deployment

    .. note::
        If one of the :class:`Pod` fails to start, make sure that all of them
        are properly closed.
    """
    self._start_time = time.time()
    # non-docker executors may need their requirements installed first
    if not self._is_docker and getattr(self.args, 'install_requirements', False):
        install_package_dependencies(_get_package_path_from_uses(self.args.uses))
    # start order: uses_before/uses_after -> shard replica sets -> head -> gateway;
    # every pod is registered on the context stack so failures unwind cleanly
    if self.pod_args['uses_before'] is not None:
        _args = self.pod_args['uses_before']
        # readiness is awaited collectively later, not per pod
        _args.noblock_on_start = True
        self.uses_before_pod = PodFactory.build_pod(_args)
        self.enter_context(self.uses_before_pod)
    if self.pod_args['uses_after'] is not None:
        _args = self.pod_args['uses_after']
        _args.noblock_on_start = True
        self.uses_after_pod = PodFactory.build_pod(_args)
        self.enter_context(self.uses_after_pod)

    num_shards = len(self.pod_args['pods'])
    for shard_id in self.pod_args['pods']:
        # NOTE(review): self.head_pod is captured here before the head is
        # built below — presumably the replica set tolerates a None head;
        # TODO confirm
        self.shards[shard_id] = self._ReplicaSet(
            deployment_args=self.args,
            args=self.pod_args['pods'][shard_id],
            head_pod=self.head_pod,
            name=(
                f'{self.name}-replica-set-{shard_id}'
                if num_shards > 1
                else f'{self.name}-replica-set'
            ),
        )
        self.enter_context(self.shards[shard_id])

    if self.pod_args['head'] is not None:
        _args = self.pod_args['head']
        _args.noblock_on_start = True
        self.head_pod = PodFactory.build_pod(_args)
        self.enter_context(self.head_pod)

    if self._include_gateway:
        _args = self.pod_args['gateway']
        _args.noblock_on_start = True
        self.gateway_pod = PodFactory.build_pod(
            _args, gateway_load_balancer=self._gateway_load_balancer
        )
        self.enter_context(self.gateway_pod)

    if not self.args.noblock_on_start:
        # block here until every pod reports ready
        self._wait_until_all_ready()

        if self._include_gateway:
            # print the endpoint summary once the deployment is serving
            all_panels = []
            self._get_summary_table(all_panels)

            from rich.rule import Rule

            print(Rule(':tada: Deployment is ready to serve!'), *all_panels)

    send_telemetry_event(
        event='start',
        obj_cls_name=self.__class__.__name__,
        entity_id=self._entity_id,
    )

    return self
def wait_start_success(self) -> None:
    """Block until all pods start successfully.

    If not successful, it will raise an error hoping the outer function will catch it
    """
    try:
        # wait for the singleton pods first, in the original fixed order,
        # then for every shard's replica set
        ordered_pods = (
            self.uses_before_pod,
            self.uses_after_pod,
            self.head_pod,
            self.gateway_pod,
        )
        for pod in ordered_pods:
            if pod is not None:
                pod.wait_start_success()
        for shard_id in self.shards:
            self.shards[shard_id].wait_start_success()
    except:
        # tear everything down before propagating the failure
        self.close()
        raise
async def async_wait_start_success(self) -> None:
    """Block until all pods start successfully.

    If unsuccessful, it will raise an error hoping the outer function will catch it
    """
    try:
        # readiness of all pods is awaited concurrently via gather
        coros = []
        if self.uses_before_pod is not None:
            coros.append(self.uses_before_pod.async_wait_start_success())
        if self.uses_after_pod is not None:
            coros.append(self.uses_after_pod.async_wait_start_success())
        if self.gateway_pod is not None:
            coros.append(self.gateway_pod.async_wait_start_success())
        if self.head_pod is not None:
            coros.append(self.head_pod.async_wait_start_success())
        for shard_id in self.shards:
            coros.append(self.shards[shard_id].async_wait_start_success())

        await asyncio.gather(*coros)
        self.logger.debug('Deployment started successfully')
    except:
        # any failure tears the whole Deployment down before re-raising
        self.close()
        raise
def join(self):
    """Wait until all pods exit"""
    try:
        if self.uses_before_pod is not None:
            self.uses_before_pod.join()
        if self.uses_after_pod is not None:
            self.uses_after_pod.join()
        if self.shards:
            for shard_id in self.shards:
                self.shards[shard_id].join()
        if self.head_pod is not None:
            self.head_pod.join()
        if self.gateway_pod is not None:
            self.gateway_pod.join()
    except KeyboardInterrupt:
        # Ctrl-C is treated as a normal shutdown request, not an error
        pass
    finally:
        # drop pod references so they can be collected / restarted
        self.head_pod = None
        if self.shards:
            for shard_id in self.shards:
                self.shards[shard_id].clear_pods()
@property
def is_ready(self) -> bool:
    """Check if Deployment is ready

    .. note::
        A Deployment is ready when all the Pods it contains are ready

    .. # noqa: DAR201
    """
    is_ready = True
    if self.head_pod is not None:
        is_ready = self.head_pod.is_ready.is_set()
    if is_ready:
        # BUG FIX: the previous loop overwrote `is_ready` on every iteration
        # without breaking, so only the LAST shard's readiness was considered.
        # Per the contract above, EVERY shard must be ready.
        is_ready = all(
            self.shards[shard_id].is_ready for shard_id in self.shards
        )
    if is_ready and self.uses_before_pod is not None:
        is_ready = self.uses_before_pod.is_ready.is_set()
    if is_ready and self.uses_after_pod is not None:
        is_ready = self.uses_after_pod.is_ready.is_set()
    if is_ready and self.gateway_pod is not None:
        is_ready = self.gateway_pod.is_ready.is_set()
    return is_ready
@staticmethod
def _parse_devices(value: str, num_devices: int):
    """Parse a list of devices from string, like `start:stop:step` or 'num1,num2,num3` or combination of both.

    :param value: a string-like
    :param num_devices: total number of devices
    :return: slice
    """
    use_uuids = False
    # strip a surrounding wrapper (e.g. brackets) matched by WRAPPED_SLICE_BASE
    if re.match(WRAPPED_SLICE_BASE, value):
        value = value[1:-1]

    if value:
        # comma form takes precedence; fall back to colon (slice) form
        parts = value.split(',')
        if len(parts) == 1:
            parts = value.split(':')

            if len(parts) == 1:
                # a single token: either one numeric index or one UUID
                try:
                    int(parts[0])
                except:
                    use_uuids = True
                if use_uuids:
                    return parts
                # single numeric index -> slice of exactly one device
                parts = [parts[0], str(int(parts[0]) + 1)]
        else:
            # try to detect if parts are not numbers
            try:
                int(parts[0])
            except:
                use_uuids = True
            if not use_uuids:
                return [int(p) for p in parts]
            else:
                return parts
    else:
        # empty value -> select all devices
        parts = []

    all_devices = range(num_devices)
    return all_devices[slice(*[int(p) if p else None for p in parts])]
@staticmethod
def _roundrobin_cuda_device(device_str: Optional[str], replicas: int):
    """Parse CUDA device string with RR prefix

    :param device_str: `RRm:n`, where `RR` is the prefix, m:n is python slice format
    :param replicas: the number of replicas
    :return: a map from replica ID to device ID
    """
    if (
        device_str
        and isinstance(device_str, str)
        and device_str.startswith('RR')
        and replicas >= 1
    ):
        try:
            # count visible GPUs via nvidia-smi: one 'UUID' token per device
            num_devices = str(subprocess.check_output(['nvidia-smi', '-L'])).count(
                'UUID'
            )
        except:
            # nvidia-smi unavailable: fall back to an env-var override,
            # and give up when no device count is known
            num_devices = int(os.environ.get('CUDA_TOTAL_DEVICES', 0))
            if num_devices == 0:
                return

        selected_devices = []
        if device_str[2:]:
            # an explicit slice/list follows the 'RR' prefix
            for device in Deployment._parse_devices(device_str[2:], num_devices):
                selected_devices.append(device)
        else:
            selected_devices = range(num_devices)
        # hand devices to replicas round-robin
        _c = cycle(selected_devices)
        return {j: next(_c) for j in range(replicas)}
def _set_pod_args(self) -> Dict[int, List[Namespace]]:
    """Build the per-shard, per-replica argument Namespaces for all worker pods.

    :return: mapping of shard id -> list of one Namespace per replica
    """
    result = {}
    shards = getattr(self.args, 'shards', 1)
    replicas = getattr(self.args, 'replicas', 1)
    # a gateway is never replicated at this level
    if self.args.deployment_role == DeploymentRoleType.GATEWAY:
        replicas = 1
    sharding_enabled = shards and shards > 1

    # optional round-robin CUDA device assignment ('RR...' spec)
    cuda_device_map = None
    if self.args.env or os.environ.get('CUDA_VISIBLE_DEVICES', '').startswith('RR'):
        args_env = self.args.env or {}
        cuda_visible_devices = args_env.get(
            'CUDA_VISIBLE_DEVICES'
        ) or os.environ.get('CUDA_VISIBLE_DEVICES', None)
        cuda_device_map = Deployment._roundrobin_cuda_device(
            cuda_visible_devices, replicas
        )

    # normalize peer_ports into {shard_id(str): [ports]} regardless of
    # whether it was given as JSON string, flat list, or dict
    all_shard_pod_ports = self.args.peer_ports or {'0': []}
    if isinstance(all_shard_pod_ports, str):
        all_shard_pod_ports = json.loads(all_shard_pod_ports)
    if isinstance(all_shard_pod_ports, list):
        # it is a single shard
        all_shard_pod_ports = {'0': all_shard_pod_ports}

    peer_ports_all_shards = {}
    for k, v in all_shard_pod_ports.items():
        peer_ports_all_shards[str(k)] = v

    # stateful executors need a stable peer port per shard
    if self.args.stateful and len(peer_ports_all_shards.keys()) < shards:
        raise ValueError(
            'The configuration of `peer_ports` does not match the number of shards requested'
        )

    for shard_id in range(shards):
        peer_ports = peer_ports_all_shards.get(str(shard_id), [])
        if len(peer_ports) > 0 and len(peer_ports) != replicas:
            raise ValueError(
                'peer-ports argument does not match number of replicas, it will be ignored'
            )
        elif len(peer_ports) == 0:
            # no preassigned ports: pick a random one per replica
            peer_ports = [random_port() for _ in range(replicas)]
        replica_args = []
        for replica_id, peer_port in zip(range(replicas), peer_ports):
            _args = copy.deepcopy(self.args)
            if self.args.deployment_role == DeploymentRoleType.GATEWAY:
                _args.replicas = replicas
            _args.shard_id = shard_id
            _args.replica_id = replica_id
            # for gateway pods, the pod role shouldn't be changed
            if _args.pod_role != PodRoleType.GATEWAY:
                _args.pod_role = PodRoleType.WORKER
                # one host per replica when the host list matches; else host[0]
                if len(self.args.host) == replicas:
                    _args.host = self.args.host[replica_id]
                else:
                    _args.host = self.args.host[0]
            else:
                _args.host = self.args.host
            if cuda_device_map:
                _args.env = _args.env or {}
                _args.env['CUDA_VISIBLE_DEVICES'] = str(cuda_device_map[replica_id])

            if _args.name:
                _args.name += (
                    f'/shard-{shard_id}/rep-{replica_id}'
                    if sharding_enabled
                    else f'/rep-{replica_id}'
                )
            else:
                _args.name = f'{replica_id}'

            # the gateway needs to respect the assigned port
            if self.args.deployment_role == DeploymentRoleType.GATEWAY:
                _args.port = self.args.port
            elif not self.external:
                if shards == 1 and replicas == 1:
                    if len(_args.protocol) > 1 and self._include_gateway:
                        _args.port = [
                            random_port() for _ in range(len(self.args.protocol))
                        ]
                    else:
                        _args.port = self.args.port
                        _args.port_monitoring = self.args.port_monitoring
                elif shards == 1:
                    _args.port_monitoring = (
                        random_port()
                        if replica_id >= len(self.args.all_port_monitoring)
                        else self.args.all_port_monitoring[replica_id]
                    )
                    # if there are no shards/replicas, we dont need to distribute ports randomly
                    # we should rather use the pre assigned one
                    _args.port = [
                        random_port() for _ in range(len(self.args.protocol))
                    ]
                    _args.port[0] = peer_port
                elif shards > 1:
                    port_monitoring_index = (
                        replica_id + replicas * shard_id + 1
                    )  # the first index is for the head
                    _args.port_monitoring = (
                        random_port()
                        if port_monitoring_index
                        >= len(self.args.all_port_monitoring)
                        else self.args.all_port_monitoring[
                            port_monitoring_index
                        ]  # we skip the head port here
                    )
                    _args.port = [peer_port]
                else:
                    _args.port = [peer_port]
                    _args.port_monitoring = random_port()
            else:
                # external replicas keep their externally-provided endpoints
                _args.port = [self.ext_repl_ports[replica_id]]
                _args.host = self.ext_repl_hosts[replica_id]
                _args.scheme = self.ext_repl_schemes[replica_id]
                _args.tls = self.ext_repl_tls[replica_id]

            # pod workspace if not set then derive from workspace
            if not _args.workspace:
                _args.workspace = self.args.workspace
            replica_args.append(_args)
        result[shard_id] = replica_args
    return result
@staticmethod
def _set_uses_before_after_args(args: Namespace, entity_type: str) -> Namespace:
    """Derive the Namespace for an auxiliary `uses_before`/`uses_after` worker pod.

    :param args: the Deployment-level arguments to clone
    :param entity_type: either 'uses_before' or 'uses_after'
    :return: a deep-copied Namespace configured for the auxiliary pod
    """
    _args = copy.deepcopy(args)
    _args.pod_role = PodRoleType.WORKER
    _args.host = _args.host[0] or __default_host__
    # auxiliary pods always get a fresh random port
    _args.port = [random_port()]

    if _args.name:
        _args.name += f'/{entity_type}-0'
    else:
        _args.name = f'{entity_type}-0'

    if 'uses_before' == entity_type:
        # the auxiliary pod runs the uses_before/uses_after executor, not the
        # main one, and must not inherit request-level overrides
        _args.uses_requests = None
        _args.uses = args.uses_before or __default_executor__
    elif 'uses_after' == entity_type:
        _args.uses_requests = None
        _args.uses = args.uses_after or __default_executor__
    else:
        raise ValueError(
            f'uses_before/uses_after pod does not support type {entity_type}'
        )

    # pod workspace if not set then derive from workspace
    if not _args.workspace:
        _args.workspace = args.workspace
    return _args
def _parse_base_deployment_args(self, args):
    """Split the Deployment args into per-entity pod args.

    :param args: the Deployment-level Namespace
    :return: dict with 'head', 'uses_before', 'uses_after', 'gateway' entries
        (each a Namespace or None) and 'pods' (shard id -> replica args)
    """
    parsed_args = {
        'head': None,
        'uses_before': None,
        'uses_after': None,
        'gateway': None,
        'pods': {},
    }

    if self.args.stateful and self.args.replicas in [1, 2]:
        self.logger.debug(
            'Stateful Executor is not recommended to be used less than 3 replicas'
        )

    if self.args.stateful and self.args.workspace is None:
        raise ValueError(
            'Stateful Executors need to be provided `workspace` when used in a Deployment'
        )

    # a gateway has no heads and uses
    # also there a no heads created, if there are no shards
    if self.role != DeploymentRoleType.GATEWAY and getattr(args, 'shards', 1) > 1:
        if (
            getattr(args, 'uses_before', None)
            and args.uses_before != __default_executor__
        ):
            uses_before_args = self._set_uses_before_after_args(
                args, entity_type='uses_before'
            )
            parsed_args['uses_before'] = uses_before_args
            args.uses_before_address = (
                f'{uses_before_args.host}:{uses_before_args.port[0]}'
            )
        if (
            getattr(args, 'uses_after', None)
            and args.uses_after != __default_executor__
        ):
            uses_after_args = self._set_uses_before_after_args(
                args, entity_type='uses_after'
            )
            parsed_args['uses_after'] = uses_after_args
            args.uses_after_address = (
                f'{uses_after_args.host}:{uses_after_args.port[0]}'
            )

        parsed_args['head'] = Deployment._copy_to_head_args(args)

    parsed_args['pods'] = self._set_pod_args()

    if parsed_args['head'] is not None:
        # the head needs the full worker address book, keyed by shard
        connection_list = defaultdict(list)
        for shard_id in parsed_args['pods']:
            for pod_idx, pod_args in enumerate(parsed_args['pods'][shard_id]):
                worker_host = self.get_worker_host(pod_args, self._is_docker, False)
                connection_list[shard_id].append(
                    f'{worker_host}:{pod_args.port[0]}'
                )

        parsed_args['head'].connection_list = json.dumps(connection_list)

    return parsed_args
@property
def _mermaid_str(self) -> List[str]:
    """String that will be used to represent the Deployment graphically when `Flow.plot()` is invoked.
    It does not include used_before/uses_after

    .. # noqa: DAR201
    """
    mermaid_graph = []
    # hub URI secrets are masked before they end up in the rendered graph
    secret = '<secret>'
    if self.role != DeploymentRoleType.GATEWAY and not self.external:
        mermaid_graph = [f'subgraph {self.name};', '\ndirection LR;\n']

        uses_before_name = (
            self.uses_before_args.name
            if self.uses_before_args is not None
            else None
        )
        uses_before_uses = (
            replace_secret_of_hub_uri(self.uses_before_args.uses, secret)
            if self.uses_before_args is not None
            else None
        )
        uses_after_name = (
            self.uses_after_args.name if self.uses_after_args is not None else None
        )
        uses_after_uses = (
            replace_secret_of_hub_uri(self.uses_after_args.uses, secret)
            if self.uses_after_args is not None
            else None
        )
        shard_names = []
        if len(self.pod_args['pods']) > 1:
            # multiple shards
            for shard_id, pod_args in self.pod_args['pods'].items():
                shard_name = f'{self.name}/shard-{shard_id}'
                shard_names.append(shard_name)
                shard_mermaid_graph = [
                    f'subgraph {shard_name};',
                    '\ndirection TB;\n',
                ]
                names = [
                    args.name for args in pod_args
                ]  # all the names of each of the replicas
                uses = [
                    args.uses for args in pod_args
                ]  # all the uses should be the same but let's keep it this
                # way
                for rep_i, (name, use) in enumerate(zip(names, uses)):
                    escaped_uses = f'"{replace_secret_of_hub_uri(use, secret)}"'
                    shard_mermaid_graph.append(f'{name}[{escaped_uses}]:::pod;')
                shard_mermaid_graph.append('end;')
                # mermaid statements are newline-terminated, not ';'-terminated
                shard_mermaid_graph = [
                    node.replace(';', '\n') for node in shard_mermaid_graph
                ]
                mermaid_graph.extend(shard_mermaid_graph)
                mermaid_graph.append('\n')
            if uses_before_name is not None:
                # head node fans out to every shard
                for shard_name in shard_names:
                    escaped_uses_before_uses = (
                        f'"{replace_secret_of_hub_uri(uses_before_uses, secret)}"'
                    )
                    mermaid_graph.append(
                        f'{self.args.name}-head[{escaped_uses_before_uses}]:::HEADTAIL --> {shard_name};'
                    )
            if uses_after_name is not None:
                # every shard fans in to the tail node
                for shard_name in shard_names:
                    escaped_uses_after_uses = f'"{uses_after_uses}"'
                    mermaid_graph.append(
                        f'{shard_name} --> {self.args.name}-tail[{escaped_uses_after_uses}]:::HEADTAIL;'
                    )
        else:
            # single shard case, no uses_before or uses_after_considered
            pod_args = list(self.pod_args['pods'].values())[0][0]
            uses = f'"{replace_secret_of_hub_uri(pod_args.uses, secret)}"'
            # just put the replicas in parallel
            if pod_args.replicas > 1:
                for rep_i in range(pod_args.replicas):
                    mermaid_graph.append(
                        f'{pod_args.name}/rep-{rep_i}["{uses}"]:::pod;'
                    )
            else:
                mermaid_graph.append(f'{pod_args.name}["{uses}"]:::pod;')
        mermaid_graph.append('end;')
    return mermaid_graph
def block(
    self,
    stop_event: Optional[Union['threading.Event', 'multiprocessing.Event']] = None,
):
    """Block the Deployment until `stop_event` is set or user hits KeyboardInterrupt

    :param stop_event: a threading event or a multiprocessing event that once set will resume the control flow
        to main thread.
    """

    def _reload_deployment(changed_file):
        # tear this Deployment down, rebuild it from the same args,
        # and swap the new state in-place so external references stay valid
        self.logger.info(
            f'change in Executor configuration YAML {changed_file} observed, reloading Executor deployment'
        )
        self.__exit__(None, None, None)
        new_deployment = Deployment(
            self.args, self.needs, include_gateway=self._include_gateway
        )
        self.__dict__ = new_deployment.__dict__
        self.__enter__()

    try:
        watch_changes = self.args.reload

        if watch_changes and self._is_executor_from_yaml:
            # hot-reload mode: watch the YAML config and rebuild on change
            with ImportExtensions(
                required=True,
                help_text='''reload requires watchfiles dependency to be installed. You can run `pip install 
                    watchfiles''',
            ):
                from watchfiles import watch

            new_stop_event = stop_event or threading.Event()
            if self._is_executor_from_yaml:
                for changes in watch(*[self.args.uses], stop_event=new_stop_event):
                    for _, changed_file in changes:
                        _reload_deployment(self.args.uses)
        else:
            # plain blocking mode: wait on the provided (or an internal) event
            wait_event = stop_event
            if not wait_event:
                self._stop_event = threading.Event()
                wait_event = self._stop_event
            if not __windows__:
                wait_event.wait()
            else:
                # Windows does not deliver signals while inside Event.wait();
                # poll so Ctrl-C can interrupt
                while True:
                    if wait_event.is_set():
                        break
                    time.sleep(0.5)
    except KeyboardInterrupt:
        pass
def _get_summary_table(self, all_panels: List[Panel]):
    """Append rich panels summarizing this Deployment's endpoints to `all_panels`.

    :param all_panels: list the generated panels are appended to
    :return: the (mutated) list of panels
    """
    address_table = self._init_table()

    # normalize protocols and ports into parallel string lists
    if not isinstance(self.protocol, list):
        _protocols = [str(self.protocol)]
    else:
        _protocols = [str(_p) for _p in self.protocol]

    if not isinstance(self.first_pod_args.port, list):
        _ports = [self.first_pod_args.port]
    else:
        _ports = [str(_p) for _p in self.first_pod_args.port]

    swagger_ui_link = None
    redoc_link = None
    for _port, _protocol in zip(_ports, _protocols):
        address_table.add_row(':chains:', 'Protocol', _protocol)

        _protocol = _protocol.lower()
        address_table.add_row(
            ':house:',
            'Local',
            f'[link={_protocol}://{self.host}:{_port}]{self.host}:{_port}[/]',
        )
        address_table.add_row(
            ':lock:',
            'Private',
            f'[link={_protocol}://{self.address_private}:{_port}]{self.address_private}:{_port}[/]',
        )

        if self.address_public:
            address_table.add_row(
                ':earth_africa:',
                'Public',
                f'[link={_protocol}://{self.address_public}:{_port}]{self.address_public}:{_port}[/]',
            )

        # remember the docs links of the (last) HTTP endpoint for the extension panel
        if _protocol == ProtocolType.HTTP.to_string().lower():
            swagger_ui_link = f'[link={_protocol}://{self.host}:{_port}/docs]{self.host}:{_port}/docs'
            redoc_link = f'[link={_protocol}://{self.host}:{_port}/redoc]{self.host}:{_port}/redoc'

    all_panels.append(
        Panel(
            address_table,
            title=':link: [b]Endpoint[/]',
            expand=False,
        )
    )

    if ProtocolType.HTTP.to_string().lower() in [p.lower() for p in _protocols]:
        http_ext_table = self._init_table()

        http_ext_table.add_row(':speech_balloon:', 'Swagger UI', swagger_ui_link)
        http_ext_table.add_row(':books:', 'Redoc', redoc_link)

        all_panels.append(
            Panel(
                http_ext_table,
                title=':gem: [b]HTTP extension[/]',
                expand=False,
            )
        )
    if self.args.monitoring:
        monitor_ext_table = self._init_table()
        for replica in self.pod_args['pods'][0]:
            monitor_ext_table.add_row(
                ':flashlight:',  # upstream issue: they dont have :torch: emoji, so we use :flashlight:
                # to represent observability of Prometheus (even they have :torch: it will be a war
                # between AI community and Cloud-native community fighting on this emoji)
                replica.name,
                f'...[b]:{replica.port_monitoring}[/]',
            )
        all_panels.append(
            Panel(
                monitor_ext_table,
                title=':gem: [b]Prometheus extension[/]',
                expand=False,
            )
        )

    return all_panels
@property
def _docker_compose_address(self):
    """Compose-internal address(es) other services use to reach this deployment.

    :return: list of `<service-name>:<port>` strings, or a full URL for an
        external deployment
    """
    from jina.orchestrate.deployments.config.docker_compose import port
    from jina.orchestrate.deployments.config.helper import to_compatible_name

    if self.external:
        # external deployments are reached via their public endpoint,
        # not a compose service name
        docker_compose_address = [f'{self.protocol}://{self.host}:{self.port}']
    elif self.head_args:
        # sharded deployments are addressed through their head service
        docker_compose_address = [
            f'{to_compatible_name(self.head_args.name)}:{port}'
        ]
    else:
        if self.args.replicas == 1 or self.name == 'gateway':
            docker_compose_address = [f'{to_compatible_name(self.name)}:{port}']
        else:
            # one compose service per replica
            docker_compose_address = []
            for rep_id in range(self.args.replicas):
                node_name = f'{self.name}/rep-{rep_id}'
                docker_compose_address.append(
                    f'{to_compatible_name(node_name)}:{port}'
                )
    return docker_compose_address
def _to_docker_compose_config(self, deployments_addresses=None):
    # build the compose service config for this deployment's own args
    from jina.orchestrate.deployments.config.docker_compose import (
        DockerComposeConfig,
    )

    compose_config = DockerComposeConfig(
        args=self.args, deployments_addresses=deployments_addresses
    )
    return compose_config.to_docker_compose_config()
def _inner_gateway_to_docker_compose_config(self):
    """Build the docker-compose service config for this Deployment's implicit gateway.

    :return: docker-compose service config for the gateway
    """
    from jina.orchestrate.deployments.config.docker_compose import (
        DockerComposeConfig,
    )

    # ensure the gateway has a port before generating the config
    self.pod_args['gateway'].port = self.pod_args['gateway'].port or [random_port()]
    cargs = copy.deepcopy(self.pod_args['gateway'])
    cargs.uses = __default_grpc_gateway__
    # single-node graph: start-gateway -> this deployment -> end-gateway
    cargs.graph_description = (
        f'{{"{self.name}": ["end-gateway"], "start-gateway": ["{self.name}"]}}'
    )
    docker_compose_deployment = DockerComposeConfig(
        args=cargs,
        deployments_addresses={self.name: self._docker_compose_address},
    )
    return docker_compose_deployment.to_docker_compose_config()
def to_docker_compose_yaml(
    self,
    output_path: Optional[str] = None,
    network_name: Optional[str] = None,
):
    """
    Converts a Jina Deployment into a Docker compose YAML file

    If you don't want to rebuild image on Executor Hub,
    you can set `JINA_HUB_NO_IMAGE_REBUILD` environment variable.

    :param output_path: The path to dump the YAML file to
    :param network_name: The name of the network that will be used by the deployment
    """
    import yaml

    output_path = output_path or 'docker-compose.yml'
    network_name = network_name or 'jina-network'

    docker_compose_dict = {
        'version': '3.3',
        'networks': {network_name: {'driver': 'bridge'}},
    }

    # collect the services of this deployment (and of the implicit gateway,
    # when one is included), attaching each to the chosen network
    services = {}
    service_configs = self._to_docker_compose_config()
    for service_name, service in service_configs:
        service['networks'] = [network_name]
        services[service_name] = service

    if self._include_gateway:
        service_configs = self._inner_gateway_to_docker_compose_config()
        for service_name, service in service_configs:
            service['networks'] = [network_name]
            services[service_name] = service

    docker_compose_dict['services'] = services
    with open(output_path, 'w+', encoding='utf-8') as fp:
        yaml.dump(docker_compose_dict, fp, sort_keys=False)

    # BUG FIX: `output_path` is defaulted above and can never be None here,
    # so the previous `'docker compose up' if output_path is None else ...`
    # conditional had an unreachable arm; always reference the real file.
    command = f'docker compose -f {output_path} up'

    self.logger.info(
        f'Docker Compose file has been created under [b]{output_path}[/b]. You can use it by running [b]{command}[/b]'
    )
def _to_kubernetes_yaml(
    self,
    output_base_path: str,
    k8s_namespace: Optional[str] = None,
    k8s_deployments_addresses: Optional[Dict] = None,
):
    """Write one Kubernetes YAML file per generated k8s object set.

    :param output_base_path: directory where `<name>.yml` files are written
    :param k8s_namespace: namespace to stamp into the generated configs
    :param k8s_deployments_addresses: addresses of the other deployments
        (only consumed by the gateway)
    """
    import yaml

    from jina.orchestrate.deployments.config.k8s import K8sDeploymentConfig

    if self.external:
        self.logger.warning(
            'The Deployment is external, cannot create YAML deployment files'
        )
        return

    if self.args.name == 'gateway':
        if self.args.default_port:
            from jina.serve.networking import GrpcConnectionPool

            # replace auto-assigned ports with fixed K8s ports,
            # one per configured protocol
            self.args.port = [
                GrpcConnectionPool.K8S_PORT + i
                for i in range(len(self.args.protocol))
            ]
            self.first_pod_args.port = [
                GrpcConnectionPool.K8S_PORT + i
                for i in range(len(self.args.protocol))
            ]

            self.args.port_monitoring = GrpcConnectionPool.K8S_PORT_MONITORING
            self.first_pod_args.port_monitoring = (
                GrpcConnectionPool.K8S_PORT_MONITORING
            )

            self.args.default_port = False

        self.args.deployments_addresses = k8s_deployments_addresses
    else:
        if len(self.args.protocol) > 1 and len(self.args.port) != len(
            self.args.protocol
        ):
            from jina.serve.networking import GrpcConnectionPool

            # pad the port list so every protocol gets its own fixed port
            self.args.port = [
                GrpcConnectionPool.K8S_PORT + i
                for i in range(len(self.args.protocol))
            ]

    k8s_deployment = K8sDeploymentConfig(
        args=self.args, k8s_namespace=k8s_namespace
    )

    configs = k8s_deployment.to_kubernetes_yaml()
    for name, k8s_objects in configs:
        filename = os.path.join(output_base_path, f'{name}.yml')
        os.makedirs(output_base_path, exist_ok=True)
        with open(filename, 'w+', encoding='utf-8') as fp:
            # multiple objects per file are separated by YAML document markers
            for i, k8s_object in enumerate(k8s_objects):
                yaml.dump(k8s_object, fp)
                if i < len(k8s_objects) - 1:
                    fp.write('---\n')
def to_kubernetes_yaml(
    self,
    output_base_path: str,
    k8s_namespace: Optional[str] = None,
):
    """
    Convert a Jina Deployment into a set of YAML deployments to deploy in Kubernetes.

    If you don't want to rebuild image on Executor Hub,
    you can set `JINA_HUB_NO_IMAGE_REBUILD` environment variable.

    :param output_base_path: The base path where to dump all the YAML files
    :param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
    """
    namespace = k8s_namespace or 'default'
    # the Deployment conversion needs to be done in a version without Gateway included. Deployment does quite some changes to its args
    # to let some of the args (like ports) go to the gateway locally. In Kubernetes, we want no gateway
    self._to_kubernetes_yaml(
        output_base_path=output_base_path,
        k8s_namespace=namespace,
    )
    self.logger.info(
        f'K8s YAML files have been created under [b]{output_base_path}[/]. You can use it by running [b]kubectl apply -R -f {output_base_path}[/]'
    )

to_k8s_yaml = to_kubernetes_yaml
| Deployment |
python | getsentry__sentry | src/sentry/shared_integrations/client/base.py | {
"start": 763,
"end": 971
} | class ____(TypedDict):
timeout: int
allow_redirects: bool
# the below are taken from session.merge_environment_settings
proxies: Any
stream: Any
verify: Any
cert: Any
| SessionSettings |
python | django__django | tests/get_object_or_404/models.py | {
"start": 842,
"end": 1077
} | class ____(models.Model):
authors = models.ManyToManyField(Author)
title = models.CharField(max_length=50)
objects = models.Manager()
by_a_sir = ArticleManager()
attribute_error_objects = AttributeErrorManager()
| Article |
python | ethereum__web3.py | web3/middleware/base.py | {
"start": 462,
"end": 4243
} | class ____:
"""
Base class for web3.py middleware. This class is not meant to be used directly,
but instead inherited from.
"""
_w3: Union["AsyncWeb3[Any]", "Web3"]
def __init__(self, w3: Union["AsyncWeb3[Any]", "Web3"]) -> None:
self._w3 = w3
def __hash__(self) -> int:
return hash(f"{self.__class__.__name__}({str(self.__dict__)})")
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Web3Middleware):
return False
return self.__hash__() == other.__hash__()
# -- sync -- #
def wrap_make_request(self, make_request: "MakeRequestFn") -> "MakeRequestFn":
def middleware(method: "RPCEndpoint", params: Any) -> "RPCResponse":
method, params = self.request_processor(method, params)
return self.response_processor(method, make_request(method, params))
return middleware
def wrap_make_batch_request(
self, make_batch_request: "MakeBatchRequestFn"
) -> "MakeBatchRequestFn":
def middleware(
requests_info: list[tuple["RPCEndpoint", Any]],
) -> Union[list["RPCResponse"], "RPCResponse"]:
req_processed = [
self.request_processor(method, params)
for (method, params) in requests_info
]
response = make_batch_request(req_processed)
if not isinstance(response, list):
# RPC errors return only one response with the error object
return response
methods, _params = zip(*req_processed)
formatted_responses = [
self.response_processor(m, r) for m, r in zip(methods, response)
]
return formatted_responses
return middleware
def request_processor(self, method: "RPCEndpoint", params: Any) -> Any:
return method, params
def response_processor(
self, method: "RPCEndpoint", response: "RPCResponse"
) -> "RPCResponse":
return response
# -- async -- #
async def async_wrap_make_request(
self, make_request: "AsyncMakeRequestFn"
) -> "AsyncMakeRequestFn":
async def middleware(method: "RPCEndpoint", params: Any) -> "RPCResponse":
method, params = await self.async_request_processor(method, params)
return await self.async_response_processor(
method,
await make_request(method, params),
)
return middleware
async def async_wrap_make_batch_request(
self, make_batch_request: "AsyncMakeBatchRequestFn"
) -> "AsyncMakeBatchRequestFn":
async def middleware(
requests_info: list[tuple["RPCEndpoint", Any]],
) -> Union[list["RPCResponse"], "RPCResponse"]:
req_processed = [
await self.async_request_processor(method, params)
for (method, params) in requests_info
]
response = await make_batch_request(req_processed)
if not isinstance(response, list):
# RPC errors return only one response with the error object
return response
methods, _params = zip(*req_processed)
formatted_responses = [
await self.async_response_processor(m, r)
for m, r in zip(methods, response)
]
return formatted_responses
return middleware
async def async_request_processor(
self,
method: "RPCEndpoint",
params: Any,
) -> Any:
return method, params
async def async_response_processor(
self,
method: "RPCEndpoint",
response: "RPCResponse",
) -> "RPCResponse":
return response
| Web3Middleware |
python | python-poetry__poetry | src/poetry/utils/env/base_env.py | {
"start": 1415,
"end": 16466
} | class ____(ABC):
"""
An abstract Python environment.
"""
def __init__(self, path: Path, base: Path | None = None) -> None:
self._is_windows = sys.platform == "win32"
self._is_mingw = sysconfig.get_platform().startswith("mingw")
self._is_conda = bool(os.environ.get("CONDA_DEFAULT_ENV"))
if self._is_windows:
path = get_real_windows_path(path)
base = get_real_windows_path(base) if base else None
bin_dir = "bin" if not self._is_windows or self._is_mingw else "Scripts"
self._path = path
self._bin_dir = self._path / bin_dir
self._executable = "python"
self._pip_executable = "pip"
self.find_executables()
self._base = base or path
self._site_packages: SitePackages | None = None
self._supported_tags: list[Tag] | None = None
self._purelib: Path | None = None
self._platlib: Path | None = None
self._script_dirs: list[Path] | None = None
self._embedded_pip_path: Path | None = None
@property
def bin_dir(self) -> Path:
return self._bin_dir
@property
def path(self) -> Path:
return self._path
@property
def base(self) -> Path:
return self._base
@property
def version_info(self) -> PythonVersion:
version_info: PythonVersion = self.marker_env["version_info"]
return version_info
@property
def python_implementation(self) -> str:
implementation: str = self.marker_env["platform_python_implementation"]
return implementation
@property
def python(self) -> Path:
"""
Path to current python executable
"""
return Path(self._bin(self._executable))
@cached_property
def marker_env(self) -> MarkerEnv:
return self.get_marker_env()
@property
def parent_env(self) -> GenericEnv:
from poetry.utils.env.generic_env import GenericEnv
return GenericEnv(self.base, child_env=self)
def _find_python_executable(self) -> None:
bin_dir = self._bin_dir
if self._is_windows and self._is_conda:
bin_dir = self._path
python_executables = sorted(
p.name
for p in bin_dir.glob("python*")
if re.match(r"python(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
)
if python_executables:
executable = python_executables[0]
if executable.endswith(".exe"):
executable = executable[:-4]
self._executable = executable
def _find_pip_executable(self) -> None:
pip_executables = sorted(
p.name
for p in self._bin_dir.glob("pip*")
if re.match(r"pip(?:\d+(?:\.\d+)?)?(?:\.exe)?$", p.name)
)
if pip_executables:
pip_executable = pip_executables[0]
if pip_executable.endswith(".exe"):
pip_executable = pip_executable[:-4]
self._pip_executable = pip_executable
def find_executables(self) -> None:
self._find_python_executable()
self._find_pip_executable()
def get_embedded_wheel(self, distribution: str) -> Path:
wheel: Wheel = get_embed_wheel(
distribution, f"{self.version_info[0]}.{self.version_info[1]}"
)
path: Path = wheel.path
return path
@property
def pip_embedded(self) -> Path:
if self._embedded_pip_path is None:
self._embedded_pip_path = self.get_embedded_wheel("pip") / "pip"
return self._embedded_pip_path
@property
def pip(self) -> Path:
"""
Path to current pip executable
"""
# we do not use as_posix() here due to issues with windows pathlib2
# implementation
path = Path(self._bin(self._pip_executable))
if not path.exists():
return self.pip_embedded
return path
@property
def platform(self) -> str:
return sys.platform
@property
def os(self) -> str:
return os.name
@property
def site_packages(self) -> SitePackages:
if self._site_packages is None:
self._site_packages = SitePackages(
self.purelib,
self.platlib,
self.fallbacks,
)
return self._site_packages
@property
def usersite(self) -> Path | None:
if "usersite" in self.paths:
return Path(self.paths["usersite"])
return None
@property
def userbase(self) -> Path | None:
if "userbase" in self.paths:
return Path(self.paths["userbase"])
return None
@property
def purelib(self) -> Path:
if self._purelib is None:
self._purelib = Path(self.paths["purelib"])
return self._purelib
@property
def platlib(self) -> Path:
if self._platlib is None:
if "platlib" in self.paths:
self._platlib = Path(self.paths["platlib"])
else:
self._platlib = self.purelib
return self._platlib
@cached_property
def fallbacks(self) -> list[Path]:
paths = [Path(path) for path in self.paths.get("fallbacks", [])]
paths += [self.usersite] if self.usersite else []
return paths
def set_paths(
self,
purelib: str | Path | None = None,
platlib: str | Path | None = None,
userbase: str | Path | None = None,
usersite: str | Path | None = None,
) -> None:
"""
A cached property aware way to set environment paths during runtime.
In some cases, like in `PluginManager._install()` method, paths are modified during execution. Direct
modification of `self.paths` is not safe as caches relying on are not invalidated. This helper method
ensures that we clear the relevant caches why paths are modified.
"""
if purelib:
self.paths["purelib"] = str(purelib)
if platlib:
self.paths["platlib"] = str(platlib)
if userbase:
self.paths["userbase"] = str(userbase)
if usersite:
self.paths["usersite"] = str(usersite)
# clear cached properties using the env paths
self.__dict__.pop("fallbacks", None)
self.__dict__.pop("scheme_dict", None)
@cached_property
def scheme_dict(self) -> dict[str, str]:
"""
This property exists to allow cases where system environment paths are not writable and
user site is enabled. This enables us to ensure packages (wheels) are correctly installed
into directories where the current user can write to.
If all candidates in `self.paths` is writable, no modification is made. If at least one path is not writable
and all generated writable candidates are indeed writable, these are used instead. If any candidate is not
writable, the original paths are returned.
Alternative writable candidates are generated by replacing discovered prefix, with "userbase"
if available. The original prefix is computed as the common path prefix of "scripts" and "purelib".
For example, given `{ "purelib": "/usr/local/lib/python3.13/site-packages", "scripts": "/usr/local/bin",
"userbase": "/home/user/.local" }`; the candidate "purelib" path would be
`/home/user/.local/lib/python3.13/site-packages`.
"""
paths = self.paths.copy()
if (
not self.is_venv()
and paths.get("userbase")
and ("scripts" in paths and "purelib" in paths)
):
overrides: dict[str, str] = {}
try:
base_path = os.path.commonpath([paths["scripts"], paths["purelib"]])
except ValueError:
return paths
scheme_names = [key for key in SCHEME_NAMES if key in self.paths]
for key in scheme_names:
if not is_dir_writable(path=Path(paths[key]), create=True):
# there is at least one path that is not writable
break
else:
# all paths are writable, return early
return paths
for key in scheme_names:
candidate = paths[key].replace(base_path, paths["userbase"])
if not is_dir_writable(path=Path(candidate), create=True):
# at least one candidate is not writable, we cannot do much here
return paths
overrides[key] = candidate
paths.update(overrides)
return paths
def _get_lib_dirs(self) -> list[Path]:
return [self.purelib, self.platlib, *self.fallbacks]
def is_path_relative_to_lib(self, path: Path) -> bool:
for lib_path in self._get_lib_dirs():
with contextlib.suppress(ValueError):
path.relative_to(lib_path)
return True
return False
@property
@abstractmethod
def sys_path(self) -> list[str]: ...
@cached_property
def paths(self) -> dict[str, str]:
paths = self.get_paths()
if self.is_venv():
# We copy pip's logic here for the `include` path
paths["include"] = str(
self.path.joinpath(
"include",
"site",
f"python{self.version_info[0]}.{self.version_info[1]}",
)
)
return paths
@property
def supported_tags(self) -> list[Tag]:
if self._supported_tags is None:
self._supported_tags = self.get_supported_tags()
return self._supported_tags
@classmethod
def get_base_prefix(cls) -> Path:
real_prefix = getattr(sys, "real_prefix", None)
if real_prefix is not None:
return Path(real_prefix)
base_prefix = getattr(sys, "base_prefix", None)
if base_prefix is not None:
return Path(base_prefix)
return Path(sys.prefix)
@abstractmethod
def get_marker_env(self) -> MarkerEnv: ...
def get_pip_command(self, embedded: bool = False) -> list[str]:
if embedded or not Path(self._bin(self._pip_executable)).exists():
return [str(self.python), str(self.pip_embedded)]
# run as module so that pip can update itself on Windows
return [str(self.python), "-m", "pip"]
@abstractmethod
def get_supported_tags(self) -> list[Tag]: ...
@abstractmethod
def get_paths(self) -> dict[str, str]: ...
def is_valid_for_marker(self, marker: BaseMarker) -> bool:
valid: bool = marker.validate(self.marker_env)
return valid
def is_sane(self) -> bool:
"""
Checks whether the current environment is sane or not.
"""
return True
def get_command_from_bin(self, bin: str) -> list[str]:
if bin == "pip":
# when pip is required we need to ensure that we fall back to
# embedded pip when pip is not available in the environment
return self.get_pip_command()
return [self._bin(bin)]
def run(self, bin: str, *args: str, **kwargs: Any) -> str:
cmd = self.get_command_from_bin(bin) + list(args)
return self._run(cmd, **kwargs)
def run_pip(self, *args: str, **kwargs: Any) -> str:
pip = self.get_pip_command()
cmd = pip + list(args)
return self._run(cmd, **kwargs)
def run_python_script(self, content: str, **kwargs: Any) -> str:
# Options Used:
# -I : Run Python in isolated mode. (#6627)
# -W ignore : Suppress warnings.
#
# TODO: Consider replacing (-I) with (-EP) once support for managing Python <3.11 environments dropped.
# This is useful to prevent user site being disabled over zealously.
return self.run(
self._executable,
"-I",
"-W",
"ignore",
"-c",
content,
stderr=subprocess.PIPE,
**kwargs,
)
def _run(self, cmd: list[str], **kwargs: Any) -> str:
"""
Run a command inside the Python environment.
"""
call = kwargs.pop("call", False)
env = kwargs.pop("env", dict(os.environ))
stderr = kwargs.pop("stderr", subprocess.STDOUT)
try:
if call:
assert stderr != subprocess.PIPE
subprocess.check_call(cmd, stderr=stderr, env=env, **kwargs)
output = ""
else:
encoding = "locale" if sys.version_info >= (3, 10) else None
output = subprocess.check_output(
cmd, stderr=stderr, env=env, text=True, encoding=encoding, **kwargs
)
except CalledProcessError as e:
raise EnvCommandError(e)
return output
def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
command = self.get_command_from_bin(bin) + list(args)
env = kwargs.pop("env", dict(os.environ))
if not self._is_windows:
return os.execvpe(command[0], command, env=env)
kwargs["shell"] = True
exe = subprocess.Popen(command, env=env, **kwargs)
exe.communicate()
return exe.returncode
@abstractmethod
def is_venv(self) -> bool: ...
@property
def script_dirs(self) -> list[Path]:
if self._script_dirs is None:
scripts = self.paths.get("scripts")
self._script_dirs = [
Path(scripts) if scripts is not None else self._bin_dir
]
if self.userbase:
self._script_dirs.append(self.userbase / self._script_dirs[0].name)
return self._script_dirs
def _bin(self, bin: str) -> str:
"""
Return path to the given executable.
"""
if self._is_windows and not bin.endswith(".exe"):
bin_path = self._bin_dir / (bin + ".exe")
else:
bin_path = self._bin_dir / bin
if not bin_path.exists():
# On Windows, some executables can be in the base path
# This is especially true when installing Python with
# the official installer, where python.exe will be at
# the root of the env path.
if self._is_windows:
if not bin.endswith(".exe"):
bin_path = self._path / (bin + ".exe")
else:
bin_path = self._path / bin
if bin_path.exists():
return str(bin_path)
return bin
return str(bin_path)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Env):
return False
return other.__class__ == self.__class__ and other.path == self.path
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self._path}")'
| Env |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4.py | {
"start": 12421,
"end": 16722
} | class ____(_NavigationToolbar2GTK, Gtk.Box):
def __init__(self, canvas):
Gtk.Box.__init__(self)
self.add_css_class('toolbar')
self._gtk_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append(Gtk.Separator())
continue
image = Gtk.Image.new_from_gicon(
Gio.Icon.new_for_string(
str(cbook._get_data_path('images',
f'{image_file}-symbolic.svg'))))
self._gtk_ids[text] = button = (
Gtk.ToggleButton() if callback in ['zoom', 'pan'] else
Gtk.Button())
button.set_child(image)
button.add_css_class('flat')
button.add_css_class('image-button')
# Save the handler id, so that we can block it as needed.
button._signal_handler = button.connect(
'clicked', getattr(self, callback))
button.set_tooltip_text(tooltip_text)
self.append(button)
# This filler item ensures the toolbar is always at least two text
# lines high. Otherwise the canvas gets redrawn as the mouse hovers
# over images because those use two-line messages which resize the
# toolbar.
label = Gtk.Label()
label.set_markup(
'<small>\N{NO-BREAK SPACE}\n\N{NO-BREAK SPACE}</small>')
label.set_hexpand(True) # Push real message to the right.
self.append(label)
self.message = Gtk.Label()
self.message.set_justify(Gtk.Justification.RIGHT)
self.append(self.message)
_NavigationToolbar2GTK.__init__(self, canvas)
def save_figure(self, *args):
dialog = Gtk.FileChooserNative(
title='Save the figure',
transient_for=self.canvas.get_root(),
action=Gtk.FileChooserAction.SAVE,
modal=True)
self._save_dialog = dialog # Must keep a reference.
ff = Gtk.FileFilter()
ff.set_name('All files')
ff.add_pattern('*')
dialog.add_filter(ff)
dialog.set_filter(ff)
formats = []
default_format = None
for i, (name, fmts) in enumerate(
self.canvas.get_supported_filetypes_grouped().items()):
ff = Gtk.FileFilter()
ff.set_name(name)
for fmt in fmts:
ff.add_pattern(f'*.{fmt}')
dialog.add_filter(ff)
formats.append(name)
if self.canvas.get_default_filetype() in fmts:
default_format = i
# Setting the choice doesn't always work, so make sure the default
# format is first.
formats = [formats[default_format], *formats[:default_format],
*formats[default_format+1:]]
dialog.add_choice('format', 'File format', formats, formats)
dialog.set_choice('format', formats[0])
dialog.set_current_folder(Gio.File.new_for_path(
os.path.expanduser(mpl.rcParams['savefig.directory'])))
dialog.set_current_name(self.canvas.get_default_filename())
@functools.partial(dialog.connect, 'response')
def on_response(dialog, response):
file = dialog.get_file()
fmt = dialog.get_choice('format')
fmt = self.canvas.get_supported_filetypes_grouped()[fmt][0]
dialog.destroy()
self._save_dialog = None
if response != Gtk.ResponseType.ACCEPT:
return
# Save dir for next time, unless empty str (which means use cwd).
if mpl.rcParams['savefig.directory']:
parent = file.get_parent()
mpl.rcParams['savefig.directory'] = parent.get_path()
try:
self.canvas.figure.savefig(file.get_path(), format=fmt)
except Exception as e:
msg = Gtk.MessageDialog(
transient_for=self.canvas.get_root(),
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.OK, modal=True,
text=str(e))
msg.show()
dialog.show()
return self.UNKNOWN_SAVED_STATUS
| NavigationToolbar2GTK4 |
python | OmkarPathak__pygorithm | pygorithm/data_structures/heap.py | {
"start": 179,
"end": 3766
} | class ____(queue.Queue):
"""
min-heap implementation as queue
"""
@staticmethod
def parent_idx(idx):
"""
retrieve the parent
"""
return idx // 2
@staticmethod
def left_child_idx(idx):
"""
retrieve the left child
"""
return (idx * 2) + 1
@staticmethod
def right_child_idx(idx):
"""
retrieve the right child
"""
return (idx * 2) + 2
def insert(self, data):
"""
inserting an element in the heap
"""
# TODO: Fix this if we want this compatible with 2.7
super().enqueue(data)
if self.rear >= 1: # heap may need to be fixed
self.heapify_up()
def heapify_up(self):
"""
Start at the end of the tree (last enqueued item).
Compare the rear item to its parent, swap if
the parent is larger than the child (min-heap property).
Repeat until the min-heap property is met.
Best Case: O(1), item is inserted at correct position, no swaps needed
Worst Case: O(log n), item needs to be swapped throughout all levels of tree
"""
child = self.rear
parent = self.parent_idx(child)
while self.queue[child] < self.queue[self.parent_idx(child)]:
# Swap (sift up) and update child:parent relation
self.queue[child], self.queue[parent] = self.queue[parent], self.queue[child]
child = parent
parent = self.parent_idx(child)
def pop(self):
"""
Removes the lowest value element (highest priority, at root) from the heap
"""
min = super().dequeue()
if self.rear >= 1: # heap may need to be fixed
self.heapify_down()
return min
def favorite(self, parent):
"""
Determines which child has the highest priority by 3 cases
"""
left = self.left_child_idx(parent)
right = self.right_child_idx(parent)
# case 1: both nodes exist
if left <= self.rear and right <= self.rear:
if self.queue[left] <= self.queue[right]:
return left
else:
return right
# case 2: only left exists
elif left <= self.rear:
return left
# case 3: no children (if left doesn't exist, neither can the right)
else:
return None
def heapify_down(self):
"""
Select the root and sift down until min-heap property is met.
While a favorite child exists, and that child is smaller
than the parent, swap them (sift down).
Best Case: O(1), item is inserted at correct position, no swaps needed
Worst Case: O(logn), item needs to be swapped throughout all levels of tree
"""
cur = 0 # start at the root
fav = self.favorite(cur) # determine favorite child
while self.queue[fav] is not None:
if self.queue[cur] > self.queue[fav]:
# Swap (sift down) and update parent:favorite relation
fav = self.favorite(cur)
self.queue[cur], self.queue[fav] = self.queue[fav], self.queue[cur]
cur = fav
else:
return
# TODO: Is this necessary?
@staticmethod
def time_complexities():
return "[Insert & Pop] Best Case: O(1), Worst Case: O(logn)"
def get_code(self):
"""
returns the code for the current class
"""
return inspect.getsource(Heap)
| Heap |
python | mwaskom__seaborn | seaborn/_core/plot.py | {
"start": 1649,
"end": 1932
} | class ____(TypedDict, total=False):
mark: Mark # TODO allow list?
stat: Stat | None # TODO allow list?
move: Move | list[Move] | None
data: PlotData
source: DataSource
vars: dict[str, VariableSpec]
orient: str
legend: bool
label: str | None
| Layer |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/grant_types/base.py | {
"start": 2540,
"end": 10909
} | class ____:
error_uri = None
request_validator = None
default_response_mode = 'fragment'
refresh_token = True
response_types = ['code']
def __init__(self, request_validator=None, **kwargs):
self.request_validator = request_validator or RequestValidator()
# Transforms class variables into instance variables:
self.response_types = self.response_types
self.refresh_token = self.refresh_token
self._setup_custom_validators(kwargs)
self._code_modifiers = []
self._token_modifiers = []
for kw, val in kwargs.items():
setattr(self, kw, val)
def _setup_custom_validators(self, kwargs):
post_auth = kwargs.get('post_auth', [])
post_token = kwargs.get('post_token', [])
pre_auth = kwargs.get('pre_auth', [])
pre_token = kwargs.get('pre_token', [])
if not hasattr(self, 'validate_authorization_request'):
if post_auth or pre_auth:
msg = ("{} does not support authorization validators. Use "
"token validators instead.").format(self.__class__.__name__)
raise ValueError(msg)
# Using tuples here because they can't be appended to:
post_auth, pre_auth = (), ()
self.custom_validators = ValidatorsContainer(post_auth, post_token,
pre_auth, pre_token)
def register_response_type(self, response_type):
self.response_types.append(response_type)
def register_code_modifier(self, modifier):
self._code_modifiers.append(modifier)
def register_token_modifier(self, modifier):
self._token_modifiers.append(modifier)
def create_authorization_response(self, request, token_handler):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
"""
raise NotImplementedError('Subclasses must implement this method.')
def create_token_response(self, request, token_handler):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
"""
raise NotImplementedError('Subclasses must implement this method.')
def add_token(self, token, token_handler, request):
"""
:param token:
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# Only add a hybrid access token on auth step if asked for
if request.response_type not in ["token", "code token", "id_token token", "code id_token token"]:
return token
token.update(token_handler.create_token(request, refresh_token=False))
return token
def validate_grant_type(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
client_id = getattr(request, 'client_id', None)
if not self.request_validator.validate_grant_type(client_id,
request.grant_type, request.client, request):
log.debug('Unauthorized from %r (%r) access to grant type %s.',
request.client_id, request.client, request.grant_type)
raise errors.UnauthorizedClientError(request=request)
def validate_scopes(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
if not request.scopes:
request.scopes = utils.scope_to_list(request.scope) or utils.scope_to_list(
self.request_validator.get_default_scopes(request.client_id, request))
log.debug('Validating access to scopes %r for client %r (%r).',
request.scopes, request.client_id, request.client)
if not self.request_validator.validate_scopes(request.client_id,
request.scopes, request.client, request):
raise errors.InvalidScopeError(request=request)
def prepare_authorization_response(self, request, token, headers, body, status):
"""Place token according to response mode.
Base classes can define a default response mode for their authorization
response by overriding the static `default_response_mode` member.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token:
:param headers:
:param body:
:param status:
"""
request.response_mode = request.response_mode or self.default_response_mode
if request.response_mode not in ('query', 'fragment'):
log.debug('Overriding invalid response mode %s with %s',
request.response_mode, self.default_response_mode)
request.response_mode = self.default_response_mode
token_items = token.items()
if request.response_type == 'none':
state = token.get('state', None)
token_items = [('state', state)] if state else []
if request.response_mode == 'query':
headers['Location'] = add_params_to_uri(
request.redirect_uri, token_items, fragment=False)
return headers, body, status
if request.response_mode == 'fragment':
headers['Location'] = add_params_to_uri(
request.redirect_uri, token_items, fragment=True)
return headers, body, status
raise NotImplementedError(
'Subclasses must set a valid default_response_mode')
def _get_default_headers(self):
"""Create default headers for grant responses."""
return {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
def _handle_redirects(self, request):
if request.redirect_uri is not None:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(request=request)
# The authorization server MUST verify that the redirection URI
# to which it will redirect the access token matches a
# redirection URI registered by the client as described in
# Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
if not self.request_validator.validate_redirect_uri(
request.client_id, request.redirect_uri, request):
raise errors.MismatchingRedirectURIError(request=request)
else:
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
request.using_default_redirect_uri = True
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(request=request)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(request=request)
def _create_cors_headers(self, request):
"""If CORS is allowed, create the appropriate headers."""
if 'origin' not in request.headers:
return {}
origin = request.headers['origin']
if not is_secure_transport(origin):
log.debug('Origin "%s" is not HTTPS, CORS not allowed.', origin)
return {}
elif not self.request_validator.is_origin_allowed(
request.client_id, origin, request):
log.debug('Invalid origin "%s", CORS not allowed.', origin)
return {}
else:
log.debug('Valid origin "%s", injecting CORS headers.', origin)
return {'Access-Control-Allow-Origin': origin}
| GrantTypeBase |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 1582,
"end": 6353
} | class ____(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.embedding_bit = 8
self.embedding_act_bit = 16
self.act_bit = 8
self.ln_input_bit = 22
self.ln_output_bit = 32
self.word_embeddings = QuantEmbedding(
config.vocab_size,
config.hidden_size,
padding_idx=config.pad_token_id,
weight_bit=self.embedding_bit,
quant_mode=self.quant_mode,
)
self.token_type_embeddings = QuantEmbedding(
config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode
)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = QuantEmbedding(
config.max_position_embeddings,
config.hidden_size,
padding_idx=self.padding_idx,
weight_bit=self.embedding_bit,
quant_mode=self.quant_mode,
)
# Integer-only addition between embeddings
self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
self.LayerNorm = IntLayerNorm(
config.hidden_size,
eps=config.layer_norm_eps,
output_bit=self.ln_output_bit,
quant_mode=self.quant_mode,
force_dequant=config.force_dequant,
)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds, inputs_embeds_scaling_factor = self.word_embeddings(input_ids)
else:
inputs_embeds_scaling_factor = None
token_type_embeddings, token_type_embeddings_scaling_factor = self.token_type_embeddings(token_type_ids)
embeddings, embeddings_scaling_factor = self.embeddings_act1(
inputs_embeds,
inputs_embeds_scaling_factor,
identity=token_type_embeddings,
identity_scaling_factor=token_type_embeddings_scaling_factor,
)
position_embeddings, position_embeddings_scaling_factor = self.position_embeddings(position_ids)
embeddings, embeddings_scaling_factor = self.embeddings_act1(
embeddings,
embeddings_scaling_factor,
identity=position_embeddings,
identity_scaling_factor=position_embeddings_scaling_factor,
)
embeddings, embeddings_scaling_factor = self.LayerNorm(embeddings, embeddings_scaling_factor)
embeddings = self.dropout(embeddings)
embeddings, embeddings_scaling_factor = self.output_activation(embeddings, embeddings_scaling_factor)
return embeddings, embeddings_scaling_factor
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
| IBertEmbeddings |
python | getsentry__sentry | tests/sentry/apidocs/test_extensions.py | {
"start": 1130,
"end": 4954
} | class ____(Serializer):
def serialize(self, obj: Any, attrs: Mapping[Any, Any], user: Any, **kwargs: Any):
raise NotImplementedError
def test_sentry_response_serializer_extension() -> None:
seralizer_extension = SentryResponseSerializerExtension(BasicSerializer)
schema = seralizer_extension.map_serializer(AutoSchema(), "response")
assert schema == {
"type": "object",
"properties": {
"a": {"type": "integer"},
"b": {"type": "string"},
"c": {"type": "boolean"},
"d": {"type": "array", "items": {"type": "integer"}},
"e": {"type": "object", "properties": {"zz": {"type": "string"}}, "required": ["zz"]},
"f": {"enum": [3], "type": "integer"},
# Test that a Union generates an anyOf
"g": {"anyOf": [{"type": "string"}, {"type": "boolean"}]},
# Test that including None with a 2 type Union adds nullable: True
# but does not create an anyOf
"h": {"type": "string", "nullable": True},
# Test that including None with a >2 type Union does not add nullable: True
# but includes {type: "object", nullable: True} in the anyOf
"i": {
"anyOf": [
{"type": "integer"},
{"format": "double", "type": "number"},
{"type": "object", "nullable": True},
]
},
},
"required": ["b", "c", "d", "e", "f", "g", "h", "i"],
}
def test_sentry_inline_response_serializer_extension() -> None:
inline_serializer = inline_sentry_response_serializer(
"BasicStuff", list[BasicSerializerResponse]
)
seralizer_extension = SentryInlineResponseSerializerExtension(inline_serializer)
schema = seralizer_extension.map_serializer(AutoSchema(), "response")
assert schema == {
"type": "array",
"items": {
"type": "object",
"properties": {
"a": {"type": "integer"},
"b": {"type": "string"},
"c": {"type": "boolean"},
"d": {"type": "array", "items": {"type": "integer"}},
"e": {
"type": "object",
"properties": {"zz": {"type": "string"}},
"required": ["zz"],
},
"f": {"enum": [3], "type": "integer"},
# Test that a Union generates an anyOf
"g": {"anyOf": [{"type": "string"}, {"type": "boolean"}]},
# Test that including None with a 2 type Union adds nullable: True
# but does not create an anyOf
"h": {"type": "string", "nullable": True},
# Test that including None with a >2 type Union does not add nullable: True
# but includes {type: "object", nullable: True} in the anyOf
"i": {
"anyOf": [
{"type": "integer"},
{"format": "double", "type": "number"},
{"type": "object", "nullable": True},
]
},
},
"required": ["b", "c", "d", "e", "f", "g", "h", "i"],
},
}
def test_sentry_fails_when_serializer_not_typed() -> None:
seralizer_extension = SentryResponseSerializerExtension(FailSerializer)
with pytest.raises(TypeError):
seralizer_extension.map_serializer(AutoSchema(), "response")
def test_sentry_restricted_json_field_extension() -> None:
seralizer_extension = RestrictedJsonFieldExtension(serializers.JSONField)
schema = seralizer_extension.map_serializer_field(AutoSchema(), "response")
assert schema == {"type": "object", "additionalProperties": {}}
| FailSerializer |
python | cython__cython | Cython/Compiler/Tests/TestTypes.py | {
"start": 631,
"end": 3294
} | class ____(unittest.TestCase):
TEST_DATA = [
("char*", "char__ptr"),
("char *", "char__ptr"),
("char **", "char__ptr__ptr"),
("_typedef", "_typedef"),
("__typedef", "__dundertypedef"),
("___typedef", "__dunder_typedef"),
("____typedef", "__dunder__dundertypedef"),
("_____typedef", "__dunder__dunder_typedef"),
("const __typedef", "__const___dundertypedef"),
("int[42]", "int__lArr42__rArr"),
("int[:]", "int__lArr__D__rArr"),
("int[:,:]", "int__lArr__D__comma___D__rArr"),
("int[:,:,:]", "int__lArr__D__comma___D__comma___D__rArr"),
("int[:,:,...]", "int__lArr__D__comma___D__comma___EL__rArr"),
("std::vector", "std__in_vector"),
("std::vector&&", "std__in_vector__fwref"),
("const std::vector", "__const_std__in_vector"),
("const std::vector&", "__const_std__in_vector__ref"),
("const_std", "const_std"),
]
def test_escape_special_type_characters(self):
test_func = PT._escape_special_type_characters # keep test usage visible for IDEs
function_name = "_escape_special_type_characters"
self._test_escape(function_name)
def test_type_identifier_for_declaration(self):
test_func = PT.type_identifier_from_declaration # keep test usage visible for IDEs
function_name = test_func.__name__
self._test_escape(function_name)
# differences due to whitespace removal
test_data = [
("const &std::vector", "const__refstd__in_vector"),
("const &std::vector<int>", "const__refstd__in_vector__lAngint__rAng"),
("const &&std::vector", "const__fwrefstd__in_vector"),
("const &&&std::vector", "const__fwref__refstd__in_vector"),
("const &&std::vector", "const__fwrefstd__in_vector"),
("void (*func)(int x, float y)",
"975d51__void__lParen__ptrfunc__rParen__lParenint__spac__etc"),
("float ** (*func)(int x, int[:] y)",
"31883a__float__ptr__ptr__lParen__ptrfunc__rParen__lPar__etc"),
]
self._test_escape(function_name, test_data)
def _test_escape(self, func_name, test_data=TEST_DATA):
escape = getattr(PT, func_name)
for declaration, expected in test_data:
escaped_value = escape(declaration)
self.assertEqual(escaped_value, expected, "%s('%s') == '%s' != '%s'" % (
func_name, declaration, escaped_value, expected))
# test that the length has been successfully capped
self.assertLessEqual(len(escaped_value), 64)
| TestTypeIdentifiers |
python | tensorflow__tensorflow | tensorflow/python/framework/test_util.py | {
"start": 147769,
"end": 149702
} | class ____:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self) -> backprop.GradientTape:
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly: bool) -> Iterator[None]:
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
*WILL NOT* make the tf.function to run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
| AbstractGradientTape |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/emr.py | {
"start": 1236,
"end": 10461
} | class ____(AwsBaseHook):
"""
Interact with Amazon Elastic MapReduce Service (EMR).
Provide thick wrapper around :external+boto3:py:class:`boto3.client("emr") <EMR.Client>`.
:param emr_conn_id: :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`.
This attribute is only necessary when using
the :meth:`airflow.providers.amazon.aws.hooks.emr.EmrHook.create_job_flow`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
conn_name_attr = "emr_conn_id"
default_conn_name = "emr_default"
conn_type = "emr"
hook_name = "Amazon Elastic MapReduce"
def __init__(self, emr_conn_id: str | None = default_conn_name, *args, **kwargs) -> None:
self.emr_conn_id = emr_conn_id
kwargs["client_type"] = "emr"
super().__init__(*args, **kwargs)
def get_cluster_id_by_name(self, emr_cluster_name: str, cluster_states: list[str]) -> str | None:
"""
Fetch id of EMR cluster with given name and (optional) states; returns only if single id is found.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.list_clusters`
:param emr_cluster_name: Name of a cluster to find
:param cluster_states: State(s) of cluster to find
:return: id of the EMR cluster
"""
response_iterator = (
self.get_conn().get_paginator("list_clusters").paginate(ClusterStates=cluster_states)
)
matching_clusters = [
cluster
for page in response_iterator
for cluster in page["Clusters"]
if cluster["Name"] == emr_cluster_name
]
if len(matching_clusters) == 1:
cluster_id = matching_clusters[0]["Id"]
self.log.info("Found cluster name = %s id = %s", emr_cluster_name, cluster_id)
return cluster_id
if len(matching_clusters) > 1:
raise AirflowException(f"More than one cluster found for name {emr_cluster_name}")
self.log.info("No cluster found for name %s", emr_cluster_name)
return None
def create_job_flow(self, job_flow_overrides: dict[str, Any]) -> dict[str, Any]:
"""
Create and start running a new cluster (job flow).
.. seealso::
- :external+boto3:py:meth:`EMR.Client.run_job_flow`
This method uses ``EmrHook.emr_conn_id`` to receive the initial Amazon EMR cluster configuration.
If ``EmrHook.emr_conn_id`` is empty or the connection does not exist, then an empty initial
configuration is used.
:param job_flow_overrides: Is used to overwrite the parameters in the initial Amazon EMR configuration
cluster. The resulting configuration will be used in the
:external+boto3:py:meth:`EMR.Client.run_job_flow`.
.. seealso::
- :ref:`Amazon Elastic MapReduce Connection <howto/connection:emr>`
- :external+boto3:py:meth:`EMR.Client.run_job_flow`
- `API RunJobFlow <https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html>`_
"""
config = {}
if self.emr_conn_id:
try:
emr_conn = self.get_connection(self.emr_conn_id)
except AirflowNotFoundException:
warnings.warn(
f"Unable to find {self.hook_name} Connection ID {self.emr_conn_id!r}, "
"using an empty initial configuration. If you want to get rid of this warning "
"message please provide a valid `emr_conn_id` or set it to None.",
UserWarning,
stacklevel=2,
)
else:
if emr_conn.conn_type and emr_conn.conn_type != self.conn_type:
warnings.warn(
f"{self.hook_name} Connection expected connection type {self.conn_type!r}, "
f"Connection {self.emr_conn_id!r} has conn_type={emr_conn.conn_type!r}. "
f"This connection might not work correctly.",
UserWarning,
stacklevel=2,
)
config = emr_conn.extra_dejson.copy()
config.update(job_flow_overrides)
response = self.get_conn().run_job_flow(**config)
return response
def add_job_flow_steps(
self,
job_flow_id: str,
steps: list[dict] | str | None = None,
wait_for_completion: bool = False,
waiter_delay: int | None = None,
waiter_max_attempts: int | None = None,
execution_role_arn: str | None = None,
) -> list[str]:
"""
Add new steps to a running cluster.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.add_job_flow_steps`
:param job_flow_id: The id of the job flow to which the steps are being added
:param steps: A list of the steps to be executed by the job flow
:param wait_for_completion: If True, wait for the steps to be completed. Default is False
:param waiter_delay: The amount of time in seconds to wait between attempts. Default is 5
:param waiter_max_attempts: The maximum number of attempts to be made. Default is 100
:param execution_role_arn: The ARN of the runtime role for a step on the cluster.
"""
config = {}
waiter_delay = waiter_delay or 30
waiter_max_attempts = waiter_max_attempts or 60
if execution_role_arn:
config["ExecutionRoleArn"] = execution_role_arn
response = self.get_conn().add_job_flow_steps(JobFlowId=job_flow_id, Steps=steps, **config)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Adding steps failed: {response}")
self.log.info("Steps %s added to JobFlow", response["StepIds"])
if wait_for_completion:
waiter = self.get_conn().get_waiter("step_complete")
for step_id in response["StepIds"]:
try:
wait(
waiter=waiter,
waiter_max_attempts=waiter_max_attempts,
waiter_delay=waiter_delay,
args={"ClusterId": job_flow_id, "StepId": step_id},
failure_message=f"EMR Steps failed: {step_id}",
status_message="EMR Step status is",
status_args=["Step.Status.State", "Step.Status.StateChangeReason"],
)
except AirflowException as ex:
if "EMR Steps failed" in str(ex):
resp = self.get_conn().describe_step(ClusterId=job_flow_id, StepId=step_id)
failure_details = resp["Step"]["Status"].get("FailureDetails", None)
if failure_details:
self.log.error("EMR Steps failed: %s", failure_details)
raise
return response["StepIds"]
def test_connection(self):
"""
Return failed state for test Amazon Elastic MapReduce Connection (untestable).
We need to overwrite this method because this hook is based on
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsGenericHook`,
otherwise it will try to test connection to AWS STS by using the default boto3 credential strategy.
"""
msg = (
f"{self.hook_name!r} Airflow Connection cannot be tested, by design it stores "
f"only key/value pairs and does not make a connection to an external resource."
)
return False, msg
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Amazon Elastic MapReduce Connection."""
return {
"hidden_fields": ["host", "schema", "port", "login", "password"],
"relabeling": {
"extra": "Run Job Flow Configuration",
},
"placeholders": {
"extra": json.dumps(
{
"Name": "MyClusterName",
"ReleaseLabel": "emr-5.36.0",
"Applications": [{"Name": "Spark"}],
"Instances": {
"InstanceGroups": [
{
"Name": "Primary node",
"Market": "SPOT",
"InstanceRole": "MASTER",
"InstanceType": "m5.large",
"InstanceCount": 1,
},
],
"KeepJobFlowAliveWhenNoSteps": False,
"TerminationProtected": False,
},
"StepConcurrencyLevel": 2,
},
indent=2,
),
},
}
| EmrHook |
python | pytorch__pytorch | test/dynamo/test_guard_serialization.py | {
"start": 2278,
"end": 2416
} | class ____:
def __getstate__(self):
raise NotImplementedError
def add(self, x):
return x + 1
| MyClassNotSerializable |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 9301,
"end": 9490
} | class ____(AnsibleTemplateError):
"""The internal template transform limit was exceeded."""
_default_message = "Template transform limit exceeded."
| AnsibleTemplateTransformLimitError |
python | getsentry__sentry | src/sentry/db/models/fields/jsonfield.py | {
"start": 1977,
"end": 5137
} | class ____(models.TextField):
"""
A field that will ensure the data entered into it is valid JSON.
Originally from https://github.com/adamchainz/django-jsonfield/blob/0.9.13/jsonfield/fields.py
Adapted to fit our requirements of:
- always using a text field
- being able to serialize dates/decimals
- not emitting deprecation warnings
By default, this field will also invoke the Creator descriptor when setting the attribute.
This can make it difficult to use json fields that receive raw strings, so optionally setting no_creator_hook=True
surpresses this behavior.
"""
default_error_messages = {"invalid": _("'%s' is not a valid JSON string.")}
description = "JSON object"
no_creator_hook = False
def __init__(self, *args, json_dumps=json.dumps, **kwargs):
self.json_dumps = json_dumps
if not kwargs.get("null", False):
kwargs["default"] = kwargs.get("default", dict)
super().__init__(*args, **kwargs)
self.validate(self.get_default(), None)
def contribute_to_class(
self, cls: type[models.Model], name: str, private_only: bool = False
) -> None:
"""
Add a descriptor for backwards compatibility
with previous Django behavior.
"""
super().contribute_to_class(cls, name, private_only=private_only)
if not self.no_creator_hook:
setattr(cls, name, Creator(self))
def validate(self, value, model_instance):
if not self.null and value is None:
raise ValidationError(self.error_messages["null"])
try:
self.get_prep_value(value)
except Exception:
raise ValidationError(self.error_messages["invalid"] % value)
def get_default(self):
if self.has_default():
default = self.default
if callable(default):
default = default()
if isinstance(default, str):
return json.loads(default)
return json.loads(self.json_dumps(default))
return super().get_default()
def get_internal_type(self) -> str:
return "TextField"
def db_type(self, connection) -> str:
return "text"
def to_python(self, value):
if isinstance(value, str) or self.no_creator_hook:
if value == "":
if self.null:
return None
if self.blank:
return ""
try:
value = json.loads(value)
except ValueError:
msg = self.error_messages["invalid"] % value
raise ValidationError(msg)
# TODO: Look for date/time/datetime objects within the structure?
return value
def get_db_prep_value(self, value, connection=None, prepared=None):
return self.get_prep_value(value)
def get_prep_value(self, value):
if value is None:
if not self.null and self.blank:
return ""
return None
return self.json_dumps(value)
def value_to_string(self, obj):
return self.value_from_object(obj)
| JSONField |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/auth_user_merge_verification_code.py | {
"start": 454,
"end": 1378
} | class ____(Endpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ENTERPRISE
permission_classes = (SentryIsAuthenticated,)
"""
Generate and update verification codes for the user account merge flow.
"""
def post(self, request: Request) -> Response:
user = request.user
if user.id is None:
return Response(
status=403, data={"error": "Must be authenticated to use this endpoint."}
)
try:
# regenerate the code if it exists
code = UserMergeVerificationCode.objects.get(user_id=user.id)
code.regenerate_token()
except UserMergeVerificationCode.DoesNotExist:
code = UserMergeVerificationCode.objects.create(user_id=user.id)
code.send_email()
return Response(status=status.HTTP_201_CREATED)
| AuthUserMergeVerificationCodeEndpoint |
python | kamyu104__LeetCode-Solutions | Python/count-positions-on-street-with-required-brightness.py | {
"start": 75,
"end": 617
} | class ____(object):
def meetRequirement(self, n, lights, requirement):
"""
:type n: int
:type lights: List[List[int]]
:type requirement: List[int]
:rtype: int
"""
cnt = collections.defaultdict(int)
for p, r in lights:
cnt[max(p-r, 0)] += 1
cnt[min(p+r, n-1)+1] -= 1
result = curr = 0
for i, r in enumerate(requirement):
curr += cnt.get(i, 0)
if curr >= r:
result += 1
return result
| Solution |
python | openai__openai-python | src/openai/resources/evals/evals.py | {
"start": 12098,
"end": 22998
} | class ____(AsyncAPIResource):
@cached_property
def runs(self) -> AsyncRuns:
return AsyncRuns(self._client)
@cached_property
def with_raw_response(self) -> AsyncEvalsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncEvalsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncEvalsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncEvalsWithStreamingResponse(self)
async def create(
self,
*,
data_source_config: eval_create_params.DataSourceConfig,
testing_criteria: Iterable[eval_create_params.TestingCriterion],
metadata: Optional[Metadata] | Omit = omit,
name: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalCreateResponse:
"""
Create the structure of an evaluation that can be used to test a model's
performance. An evaluation is a set of testing criteria and the config for a
data source, which dictates the schema of the data used in the evaluation. After
creating an evaluation, you can run it on different models and model parameters.
We support several types of graders and datasources. For more information, see
the [Evals guide](https://platform.openai.com/docs/guides/evals).
Args:
data_source_config: The configuration for the data source used for the evaluation runs. Dictates the
schema of the data used in the evaluation.
testing_criteria: A list of graders for all eval runs in this group. Graders can reference
variables in the data source using double curly braces notation, like
`{{item.variable_name}}`. To reference the model's output, use the `sample`
namespace (ie, `{{sample.output_text}}`).
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/evals",
body=await async_maybe_transform(
{
"data_source_config": data_source_config,
"testing_criteria": testing_criteria,
"metadata": metadata,
"name": name,
},
eval_create_params.EvalCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalCreateResponse,
)
async def retrieve(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalRetrieveResponse:
"""
Get an evaluation by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._get(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalRetrieveResponse,
)
async def update(
self,
eval_id: str,
*,
metadata: Optional[Metadata] | Omit = omit,
name: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalUpdateResponse:
"""
Update certain properties of an evaluation.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: Rename the evaluation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
f"/evals/{eval_id}",
body=await async_maybe_transform(
{
"metadata": metadata,
"name": name,
},
eval_update_params.EvalUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalUpdateResponse,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
order_by: Literal["created_at", "updated_at"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[EvalListResponse, AsyncCursorPage[EvalListResponse]]:
"""
List evaluations for a project.
Args:
after: Identifier for the last eval from the previous pagination request.
limit: Number of evals to retrieve.
order: Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
descending order.
order_by: Evals can be ordered by creation time or last updated time. Use `created_at` for
creation time or `updated_at` for last updated time.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/evals",
page=AsyncCursorPage[EvalListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"order_by": order_by,
},
eval_list_params.EvalListParams,
),
),
model=EvalListResponse,
)
async def delete(
self,
eval_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalDeleteResponse:
"""
Delete an evaluation.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._delete(
f"/evals/{eval_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=EvalDeleteResponse,
)
| AsyncEvals |
python | numba__numba | numba/core/types/iterators.py | {
"start": 94,
"end": 572
} | class ____(SimpleIterableType):
def __init__(self, dtype):
self.dtype = dtype
name = "range_state_%s" % (dtype,)
super(SimpleIterableType, self).__init__(name)
self._iterator_type = RangeIteratorType(self.dtype)
def unify(self, typingctx, other):
if isinstance(other, RangeType):
dtype = typingctx.unify_pairs(self.dtype, other.dtype)
if dtype is not None:
return RangeType(dtype)
| RangeType |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-asi/llama_index/llms/asi/base.py | {
"start": 312,
"end": 6953
} | class ____(OpenAILike):
"""
ASI LLM - Integration for ASI models.
Currently supported models:
- asi1-mini
Examples:
`pip install llama-index-llms-asi`
```python
from llama_index.llms.asi import ASI
# Set up the ASI class with the required model and API key
llm = ASI(model="asi1-mini", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of AI")
print(response)
```
"""
def __init__(
self,
model: str = DEFAULT_MODEL,
api_key: Optional[str] = None,
api_base: str = "https://api.asi1.ai/v1",
is_chat_model: bool = True,
is_function_calling_model: bool = False,
**kwargs: Any,
) -> None:
"""
Initialize the ASI LLM.
Args:
model (str): The ASI model to use.
api_key (Optional[str]): The API key to use.
api_base (str): The base URL for the ASI API.
is_chat_model (bool): Whether the model supports chat.
is_function_calling_model (bool): Whether the model supports
function calling.
**kwargs (Any): Additional arguments to pass to the OpenAILike
constructor.
"""
api_key = api_key or os.environ.get("ASI_API_KEY", None)
if api_key is None:
raise ValueError(
"Must specify `api_key` or set environment variable `ASI_API_KEY`."
)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
is_function_calling_model=is_function_calling_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "ASI"
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
"""
Override stream_chat to handle ASI's unique streaming format.
ASI's streaming format includes many empty content chunks during
the "thinking" phase before delivering the final response.
This implementation filters out empty chunks and only yields
chunks with actual content.
"""
def gen() -> ChatResponseGen:
raw_stream = super(OpenAILike, self).stream_chat(messages, **kwargs)
accumulated_content = ""
for chunk in raw_stream:
delta_content = ""
# Extract content from the chunk
if hasattr(chunk, "raw") and chunk.raw:
# Check for content in choices array
if "choices" in chunk.raw and chunk.raw["choices"]:
choice = chunk.raw["choices"][0]
if isinstance(choice, dict):
if "delta" in choice and isinstance(choice["delta"], dict):
if (
"content" in choice["delta"]
and choice["delta"]["content"]
):
delta_content = choice["delta"]["content"]
# Check for content in delta directly
if not delta_content and hasattr(chunk, "delta"):
if hasattr(chunk.delta, "content") and chunk.delta.content:
delta_content = chunk.delta.content
elif isinstance(chunk.delta, str) and chunk.delta:
delta_content = chunk.delta
if delta_content:
response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=accumulated_content + delta_content,
),
delta=delta_content,
raw=chunk.raw if hasattr(chunk, "raw") else {},
)
accumulated_content += delta_content
yield response
return gen()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
"""
Override astream_chat to handle ASI's unique streaming format.
ASI's streaming format includes many empty content chunks during
the "thinking" phase before delivering the final response.
This implementation filters out empty chunks and only yields
chunks with actual content.
"""
async def gen() -> ChatResponseAsyncGen:
raw_stream = await super(OpenAILike, self).astream_chat(messages, **kwargs)
accumulated_content = ""
async for chunk in raw_stream:
delta_content = ""
# Extract content from the chunk
if hasattr(chunk, "raw") and chunk.raw:
# Check for content in choices array
if "choices" in chunk.raw and chunk.raw["choices"]:
choice = chunk.raw["choices"][0]
if isinstance(choice, dict):
if "delta" in choice and isinstance(choice["delta"], dict):
if (
"content" in choice["delta"]
and choice["delta"]["content"]
):
delta_content = choice["delta"]["content"]
# Check for content in delta directly
if not delta_content and hasattr(chunk, "delta"):
if hasattr(chunk.delta, "content") and chunk.delta.content:
delta_content = chunk.delta.content
elif isinstance(chunk.delta, str) and chunk.delta:
delta_content = chunk.delta
if delta_content:
response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=accumulated_content + delta_content,
),
delta=delta_content,
raw=chunk.raw if hasattr(chunk, "raw") else {},
)
accumulated_content += delta_content
yield response
# Return the async generator function as a coroutine to match OpenAI's pattern
return gen()
| ASI |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_run.py | {
"start": 54333,
"end": 57998
} | class ____:
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_should_respond_200(self, test_client, dag_maker, session):
asset1 = Asset(name="ds1", uri="file:///da1")
with dag_maker(dag_id="source_dag", start_date=START_DATE1, session=session):
EmptyOperator(task_id="task", outlets=[asset1])
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
asset1_id = session.query(AssetModel.id).filter_by(uri=asset1.uri).scalar()
event = AssetEvent(
asset_id=asset1_id,
source_task_id=ti.task_id,
source_dag_id=ti.dag_id,
source_run_id=ti.run_id,
source_map_index=ti.map_index,
)
session.add(event)
with dag_maker(dag_id="TEST_DAG_ID", start_date=START_DATE1, session=session):
pass
dr = dag_maker.create_dagrun(run_id="TEST_DAG_RUN_ID", run_type=DagRunType.ASSET_TRIGGERED)
dr.consumed_asset_events.append(event)
session.commit()
assert event.timestamp
with assert_queries_count(3):
response = test_client.get(
"/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/upstreamAssetEvents",
)
assert response.status_code == 200
expected_response = {
"asset_events": [
{
"timestamp": from_datetime_to_zulu(event.timestamp),
"asset_id": asset1_id,
"uri": "file:///da1",
"extra": {},
"id": event.id,
"group": "asset",
"name": "ds1",
"source_dag_id": ti.dag_id,
"source_map_index": ti.map_index,
"source_run_id": ti.run_id,
"source_task_id": ti.task_id,
"created_dagruns": [
{
"dag_id": "TEST_DAG_ID",
"run_id": "TEST_DAG_RUN_ID",
"data_interval_end": from_datetime_to_zulu_without_ms(dr.data_interval_end),
"data_interval_start": from_datetime_to_zulu_without_ms(dr.data_interval_start),
"end_date": None,
"logical_date": from_datetime_to_zulu_without_ms(dr.logical_date),
"start_date": from_datetime_to_zulu_without_ms(dr.start_date),
"state": "running",
}
],
"partition_key": None,
}
],
"total_entries": 1,
}
assert response.json() == expected_response
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/upstreamAssetEvents",
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/upstreamAssetEvents"
)
assert response.status_code == 403
def test_should_respond_404(self, test_client):
response = test_client.get(
"/dags/invalid-id/dagRuns/invalid-run-id/upstreamAssetEvents",
)
assert response.status_code == 404
assert (
response.json()["detail"]
== "The DagRun with dag_id: `invalid-id` and run_id: `invalid-run-id` was not found"
)
| TestGetDagRunAssetTriggerEvents |
python | eventlet__eventlet | eventlet/wsgi.py | {
"start": 8442,
"end": 8701
} | class ____(Exception):
pass
def get_logger(log, debug):
if callable(getattr(log, 'info', None)) \
and callable(getattr(log, 'debug', None)):
return log
else:
return LoggerFileWrapper(log or sys.stderr, debug)
| HeadersTooLarge |
python | pytorch__pytorch | test/test_utils.py | {
"start": 1898,
"end": 20568
} | class ____(TestCase):
# This runs checkpoint_sequential on each of the nets in
# module_lists_to_compare, and compares them against the uncheckpointed model.
# To compare, it checks outputs as well as input gradients and parameter gradients
def _check_checkpoint_sequential(
self,
model,
module_lists_to_compare,
num_chunks,
input,
use_reentrant,
):
# not checkpointed
out = model(input)
out_not_checkpointed = out.detach().clone()
model.zero_grad()
out.sum().backward()
grad_not_checkpointed = {
name: param.grad.detach().clone()
for name, param in model.named_parameters()
}
input_grad_not_checkpointed = input.grad.detach().clone()
for model_to_compare in module_lists_to_compare:
# checkpointed model by passing list of modules
detached = input.detach()
detached.requires_grad = True
# pass list of modules to checkpoint
out = checkpoint_sequential(
model_to_compare, num_chunks, detached, use_reentrant=use_reentrant
)
out_checkpointed = out.detach().clone()
model.zero_grad()
out.sum().backward()
grad_checkpointed = {
name: param.grad.detach().clone()
for name, param in model.named_parameters()
}
input_grad_checkpointed = detached.grad.detach().clone()
# compare outputs as well as the gradients of input and parameters
self.assertEqual(out_checkpointed, out_not_checkpointed)
self.assertEqual(input_grad_not_checkpointed, input_grad_checkpointed)
for name in grad_checkpointed:
self.assertEqual(grad_checkpointed[name], grad_not_checkpointed[name])
# Test whether checkpoint is being triggered or not. For this, we check
# the number of times forward pass happens
def test_checkpoint_trigger(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.counter = 0
def forward(self, input_var):
self.counter += 1
# For reentrant, need to have autograd actually
# pack a tensor to trigger recomp
ret = input_var * torch.tensor(2.0)
return ret
# checkpointed
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
modules = [Net() for _ in range(10)]
for m in modules:
self.assertEqual(m.counter, 0)
input_var = torch.randn(3, 4, requires_grad=True)
out = checkpoint_sequential(
modules, 2, input_var, use_reentrant=use_reentrant
)
for m in modules:
self.assertEqual(m.counter, 1)
out.sum().backward()
for m in modules[: (len(modules) // 2)]:
self.assertEqual(m.counter, 2)
for m in modules[(len(modules) // 2) :]:
self.assertEqual(m.counter, 1)
def test_checkpoint_valid(self):
model = nn.Sequential(
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
)
input_var = torch.randn(1, 100, requires_grad=True)
# checkpointed
chunks = 2
modules = list(model.children())
out = checkpoint_sequential(modules, chunks, input_var, use_reentrant=True)
with self.assertRaisesRegex(
RuntimeError, "torch.utils.checkpoint is incompatible"
):
torch.autograd.grad(
outputs=[out],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
# works with use_reentrant=False, and grads are the same
out = model(input_var)
grads_no_checkpoint = torch.autograd.grad(
outputs=[out],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
out_checkpoint = checkpoint_sequential(
modules, chunks, input_var, use_reentrant=False
)
# check outputs are the same
self.assertEqual(out_checkpoint, out)
grads_checkpoint = torch.autograd.grad(
outputs=[out_checkpoint],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
self.assertEqual(grads_no_checkpoint, grads_checkpoint)
def test_checkpoint(self):
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
model = nn.Sequential(
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
)
# Compare uncheckpointed model with its checkpointed counterparts
# In addition to running checkpoint_sequential on the nn.Sequential
# instance, we also run the function on the list of functions within
# the module.
self._check_checkpoint_sequential(
model,
[list(model.children()), model],
2,
torch.randn(1, 100, requires_grad=True),
use_reentrant=use_reentrant,
)
def test_checkpoint_module_list(self):
class ModuleListNet(nn.Module):
def __init__(self) -> None:
super().__init__()
module_list = [
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
]
self.module_list = nn.ModuleList(module_list)
def forward(self, input):
for layer in self.module_list:
input = layer(input)
return input
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
model = ModuleListNet()
# Compare uncheckpointed model with its checkpointed counterparts.
self._check_checkpoint_sequential(
model,
[list(model.module_list.children()), model.module_list],
2,
torch.randn(1, 100, requires_grad=True),
use_reentrant=use_reentrant,
)
def test_checkpoint_sequential_deprecated_multiple_args(self):
class Two(nn.Module):
def forward(self, a, b):
return a, b
model = nn.Sequential(Two())
a = torch.randn(1, 100, requires_grad=True)
b = torch.randn(1, 100, requires_grad=True)
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
with self.assertRaises(TypeError):
checkpoint_sequential(model, 1, a, b) # type: ignore[call-arg]
def test_checkpoint_sequential_deprecated_no_args(self):
class Noop(nn.Module):
def forward(self):
pass
model = nn.Sequential(Noop())
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
with self.assertRaises(TypeError):
checkpoint_sequential(model, 1) # type: ignore[call-arg]
def test_checkpoint_rng_cpu(self):
for _ in range(5):
inp = torch.randn(20000, device="cpu").requires_grad_()
phase1 = torch.nn.Dropout()
phase2 = torch.nn.Dropout()
def run_fn(input):
return phase2(input)
state = torch.get_rng_state()
out = phase1(inp)
out = checkpoint(run_fn, out, use_reentrant=True)
out.sum().backward()
grad_with_checkpointing = inp.grad
torch.set_rng_state(state)
inp.grad = None
out = phase1(inp)
out = run_fn(out)
out.sum().backward()
grad_no_checkpointing = inp.grad
self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
@unittest.skipIf(not TEST_GPU, "No accelerator")
def test_checkpoint_rng_gpu(self):
for _ in range(5):
inp = torch.randn(20000, device=device_type).requires_grad_()
phase1 = torch.nn.Dropout()
phase2 = torch.nn.Dropout()
def run_fn(input):
return phase2(input)
state = torch.get_device_module(device_type).get_rng_state()
out = phase1(inp)
out = checkpoint(run_fn, out, use_reentrant=True)
out.sum().backward()
grad_with_checkpointing = inp.grad
torch.get_device_module(device_type).set_rng_state(state)
inp.grad = None
out = phase1(inp)
out = run_fn(out)
out.sum().backward()
grad_no_checkpointing = inp.grad
self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
@unittest.skipIf(not TEST_GPU, "No accelerator")
def test_checkpoint_not_preserve_rng_state_and_without_reentrant(self):
inp = torch.randn(2, device=device_type).requires_grad_()
layer = torch.nn.Dropout()
def run_fn(input):
return layer(input)
out = checkpoint(run_fn, inp, use_reentrant=False, preserve_rng_state=False)
out.sum().backward()
# This should run without error
def test_checkpoint_non_tensor(self):
def run_fn(tensor1, tensor2):
if tensor2 is None:
return tensor1
return tensor1 + tensor2
input_var = torch.randn(1, 100, requires_grad=True)
out = checkpoint(run_fn, input_var, None, use_reentrant=True)
out.sum().backward()
def test_checkpoint_non_tensor_inputs_outputs(self):
def foo(t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
t1 = torch.rand(10, requires_grad=True)
t2 = torch.rand(10, requires_grad=True)
t3 = torch.rand(10)
scale = random.randint(0, 10)
res = checkpoint(foo, t1, t2, scale, t3, use_reentrant=True)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
res[1].sum().backward(retain_graph=True)
res[4].sum().backward(retain_graph=True)
res[6].sum().backward()
with self.assertRaisesRegex(
RuntimeError, "Trying to backward through the graph a second time"
):
res[6].sum().backward()
t1_grad = t1.grad
t2_grad = t2.grad
# Reset grads, run without checkpoint and validate we receive same grads.
t1.grad = None
t2.grad = None
res = foo(t1, t2, scale, t3)
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertEqual(t1.grad, t1_grad)
self.assertEqual(t2.grad, t2_grad)
def test_checkpoint_no_tensors(self):
def foo(t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = checkpoint(foo, t1, t2, scale, t3, use_reentrant=True)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_checkpoint_partial_grad(self):
def run_fn(tensor1, tensor2):
# tensor 2 is used for other application logic
return tensor1, tensor2
input_var = torch.randn(1, 4, requires_grad=True)
input_var2 = torch.randn(1, 4, requires_grad=False)
out = checkpoint(run_fn, input_var, input_var2, use_reentrant=True)
out[0].sum().backward()
def run_fn2(tensor1, tensor2):
return tensor1
input_var = torch.randn(1, 4, requires_grad=False)
input_var2 = torch.randn(1, 4, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
r"none of output has requires_grad=True, this checkpoint\(\) is not necessary",
):
out = checkpoint(run_fn2, input_var, input_var2, use_reentrant=True)
out.sum().backward()
@unittest.skipIf(not TEST_GPU, "No accelerator")
def test_checkpointing_without_reentrant_early_free(self):
# I don't know how to check if the temporary saved variable buffer
# get de-allocated directly. So using GPU memory usage as a proxy
def _do_test(fn, should_free):
stats: list[int] = []
def track(x, idx):
# Track that at each step of the backward, some Tensor were
# de-allocated (which correspond to the checkpoint storage being
# emptied at each step)
def hook(_unused):
self.assertEqual(len(stats), idx)
torch.accelerator.synchronize()
stats.append(torch.accelerator.memory_allocated())
if idx > 0:
if should_free:
self.assertLess(stats[idx], stats[idx - 1])
else:
self.assertEqual(stats[idx], stats[idx - 1])
x.register_hook(hook)
def test_fn(x):
# The main property of this function is that it contains multiple
# operations that save gradients in a chain.
x = x**2
track(x, 2)
x = x**2
track(x, 1)
x = x**2
track(x, 0)
x = x**2
return x.sum()
fn(test_fn)
return stats
x = torch.zeros(10, device=device_type, requires_grad=True)
x.grad = torch.zeros_like(x)
# In a regular backward, buffers get eagerly freed
non_retain_stats = _do_test(lambda fn: fn(x).backward(), True)
# In a retain_grad backward, buffers get preserved
_unused_retain_stats = _do_test(
lambda fn: fn(x).backward(retain_graph=True), False
)
# In a regular backward with checkpoint, buffers get eagerly freed
checkpoint_non_retain_stats = _do_test(
lambda fn: checkpoint(fn, x, use_reentrant=False).backward(), True
)
# In a retain_grad backward with checkpoint, buffers get eagerly freed
checkpoint_retain_stats = _do_test(
lambda fn: checkpoint(fn, x, use_reentrant=False).backward(
retain_graph=True
),
True,
)
self.assertEqual(non_retain_stats, checkpoint_non_retain_stats)
self.assertEqual(non_retain_stats, checkpoint_retain_stats)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_get_device_states_recursive(self):
inp = {
"foo": torch.rand(10, device=f"{device_type}:0"),
"bar": [torch.rand(10, device=f"{device_type}:1")],
}
device_ids, device_states = get_device_states(inp)
self.assertEqual(2, len(device_ids))
self.assertEqual(2, len(device_states))
self.assertEqual(0, device_ids[0])
self.assertEqual(1, device_ids[1])
self.assertTrue(isinstance(device_states[0], torch.Tensor))
self.assertTrue(isinstance(device_states[1], torch.Tensor))
def test_infer_device_state_recursive_meta(self):
inp = {"foo": torch.rand(10, device="meta")}
device_type = _infer_device_type(inp)
self.assertEqual("meta", device_type)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_infer_device_state_recursive_multi_gpu(self):
# Check that no warning is issued for either gpu:0, gpu:1 or
# gpu:0, gpu:0 cases since they are both the same device type
inp = {
"foo": torch.rand(10, device=f"{device_type}:0"),
"bar": [torch.rand(10, device=f"{device_type}:1")],
}
with warnings.catch_warnings():
warnings.simplefilter("error")
_device_type = _infer_device_type(inp)
self.assertEqual(device_type, _device_type)
inp = {
"foo": torch.rand(10, device=f"{device_type}:0"),
"bar": [torch.rand(10, device=f"{device_type}:0")],
}
with warnings.catch_warnings():
warnings.simplefilter("error")
_device_type = _infer_device_type(inp)
self.assertEqual(device_type, _device_type)
# Check that a warning is issued for gpu:0, meta and that it includes
# device type information
inp = {
"foo": torch.rand(10, device=f"{device_type}:0"),
"bar": [torch.rand(10, device="meta")],
}
with warnings.catch_warnings(record=True) as w:
_device_type = _infer_device_type(inp)
self.assertEqual(device_type, _device_type)
self.assertEqual(len(w), 1)
warning_msg = str(w[-1].message)
self.assertTrue(
"Tensor arguments, excluding CPU tensors, are detected on at least two types of devices"
in warning_msg
)
self.assertTrue(f"Device types: ['{device_type}', 'meta']" in warning_msg)
self.assertTrue(f"first device type: {device_type}" in warning_msg)
| TestCheckpoint |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py | {
"start": 1295,
"end": 11745
} | class ____(Workflow):
def __init__(
self,
llm: Optional[FunctionCallingLLM] = None,
backend_tools: Optional[List[Union[BaseTool, Callable]]] = None,
frontend_tools: Optional[List[Union[BaseTool, Callable]]] = None,
system_prompt: Optional[str] = None,
initial_state: Optional[Dict[str, Any]] = None,
**workflow_kwargs: Any,
):
super().__init__(**workflow_kwargs)
self.llm = llm or Settings.llm
assert (
isinstance(self.llm, FunctionCallingLLM)
and self.llm.metadata.is_function_calling_model
), "llm must be a function calling model"
validated_frontend_tools: List[BaseTool] = [
validate_tool(tool) for tool in frontend_tools or []
]
validated_backend_tools: List[BaseTool] = [
validate_tool(tool) for tool in backend_tools or []
]
self.frontend_tools = {
tool.metadata.name: tool for tool in validated_frontend_tools
}
self.backend_tools = {
tool.metadata.name: tool for tool in validated_backend_tools
}
self.initial_state = initial_state or {}
self.system_prompt = system_prompt
def _snapshot_messages(self, ctx: Context, chat_history: List[ChatMessage]) -> None:
# inject tool calls into the assistant message
for msg in chat_history:
if msg.role == "assistant":
tool_calls = self.llm.get_tool_calls_from_response(
ChatResponse(message=msg), error_on_no_tool_call=False
)
if tool_calls:
msg.additional_kwargs["ag_ui_tool_calls"] = [
{
"id": tool_call.tool_id,
"name": tool_call.tool_name,
"arguments": json.dumps(tool_call.tool_kwargs),
}
for tool_call in tool_calls
]
ag_ui_messages = [llama_index_message_to_ag_ui_message(m) for m in chat_history]
ctx.write_event_to_stream(
MessagesSnapshotWorkflowEvent(
timestamp=timestamp(),
messages=ag_ui_messages,
)
)
@step
async def chat(
self, ctx: Context, ev: InputEvent | LoopEvent
) -> Optional[Union[StopEvent, ToolCallEvent]]:
if isinstance(ev, InputEvent):
ag_ui_messages = ev.input_data.messages
chat_history = [
ag_ui_message_to_llama_index_message(m) for m in ag_ui_messages
]
# State sometimes has unused messages, so we need to remove them
state = ev.input_data.state
if isinstance(state, dict):
state.pop("messages", None)
elif isinstance(state, str):
state = json.loads(state)
state.pop("messages", None)
else:
# initial state is not provided, use the default state
state = self.initial_state.copy()
# Save state to context for tools to use
await ctx.store.set("state", state)
ctx.write_event_to_stream(StateSnapshotWorkflowEvent(snapshot=state))
if state:
for msg in chat_history[::-1]:
if msg.role.value == "user":
msg.content = DEFAULT_STATE_PROMPT.format(
state=str(state), user_input=msg.content
)
break
if self.system_prompt:
if chat_history[0].role.value == "system":
chat_history[0].blocks.append(TextBlock(text=self.system_prompt))
else:
chat_history.insert(
0, ChatMessage(role="system", content=self.system_prompt)
)
await ctx.store.set("chat_history", chat_history)
else:
chat_history = await ctx.store.get("chat_history")
tools = list(self.frontend_tools.values())
tools.extend(list(self.backend_tools.values()))
resp_gen = await self.llm.astream_chat_with_tools(
tools=tools,
chat_history=chat_history,
allow_parallel_tool_calls=True,
)
resp_id = str(uuid.uuid4())
resp = ChatResponse(message=ChatMessage(role="assistant", content=""))
async for resp in resp_gen:
if resp.delta:
ctx.write_event_to_stream(
TextMessageChunkWorkflowEvent(
role="assistant",
delta=resp.delta,
timestamp=timestamp(),
message_id=resp_id,
)
)
chat_history.append(resp.message)
await ctx.store.set("chat_history", chat_history)
tool_calls = self.llm.get_tool_calls_from_response(
resp, error_on_no_tool_call=False
)
if tool_calls:
await ctx.store.set("num_tool_calls", len(tool_calls))
frontend_tool_calls = [
tool_call
for tool_call in tool_calls
if tool_call.tool_name in self.frontend_tools
]
backend_tool_calls = [
tool_call
for tool_call in tool_calls
if tool_call.tool_name in self.backend_tools
]
# Call backend tools first so that the frontend can return results for frontend tools
for tool_call in backend_tool_calls:
ctx.send_event(
ToolCallEvent(
tool_call_id=tool_call.tool_id,
tool_name=tool_call.tool_name,
tool_kwargs=tool_call.tool_kwargs,
)
)
ctx.write_event_to_stream(
ToolCallChunkWorkflowEvent(
tool_call_id=tool_call.tool_id,
tool_call_name=tool_call.tool_name,
delta=json.dumps(tool_call.tool_kwargs),
)
)
for tool_call in frontend_tool_calls:
ctx.send_event(
ToolCallEvent(
tool_call_id=tool_call.tool_id,
tool_name=tool_call.tool_name,
tool_kwargs=tool_call.tool_kwargs,
)
)
ctx.write_event_to_stream(
ToolCallChunkWorkflowEvent(
tool_call_id=tool_call.tool_id,
tool_call_name=tool_call.tool_name,
delta=json.dumps(tool_call.tool_kwargs),
)
)
# Send MessagesSnapshot AFTER ToolCallChunk events, as a "wrap it up" step
self._snapshot_messages(ctx, [*chat_history])
return None
# No tool calls, send snapshot immediately
self._snapshot_messages(ctx, [*chat_history])
return StopEvent()
@step
async def handle_tool_call(
self, ctx: Context, ev: ToolCallEvent
) -> ToolCallResultEvent:
try:
all_tools = {**self.frontend_tools, **self.backend_tools}
tool = all_tools[ev.tool_name]
kwargs = {**ev.tool_kwargs}
if isinstance(tool, FunctionTool) and tool.ctx_param_name:
kwargs[tool.ctx_param_name] = ctx
tool_output = await tool.acall(**kwargs)
# Update the state snapshot
current_state = await ctx.store.get("state", default={})
ctx.write_event_to_stream(
StateSnapshotWorkflowEvent(snapshot=current_state)
)
return ToolCallResultEvent(
tool_call_id=ev.tool_call_id,
tool_name=ev.tool_name,
tool_kwargs=ev.tool_kwargs,
tool_output=tool_output,
)
except Exception as e:
return ToolCallResultEvent(
tool_call_id=ev.tool_call_id,
tool_name=ev.tool_name,
tool_kwargs=ev.tool_kwargs,
tool_output=ToolOutput(
tool_name=ev.tool_name,
content=str(e),
raw_input=ev.tool_kwargs,
raw_output=str(e),
is_error=True,
),
)
@step
async def aggregate_tool_calls(
self, ctx: Context, ev: ToolCallResultEvent
) -> Optional[Union[StopEvent, LoopEvent]]:
num_tool_calls = await ctx.store.get("num_tool_calls")
tool_call_results: List[ToolCallResultEvent] = ctx.collect_events(
ev, [ToolCallResultEvent] * num_tool_calls
)
if tool_call_results is None:
return None
# organize tool results so that frontend tools are last
# for backend tools, update the messages snapshot with the tool output
frontend_tool_calls = [
tool_result
for tool_result in tool_call_results
if tool_result.tool_name in self.frontend_tools
]
backend_tool_calls = [
tool_result
for tool_result in tool_call_results
if tool_result.tool_name in self.backend_tools
]
new_tool_messages = []
for tool_result in backend_tool_calls:
new_tool_messages.append(
ChatMessage(
role="tool",
content=tool_result.tool_output.content,
additional_kwargs={
"tool_call_id": tool_result.tool_call_id,
},
)
)
# emit a messages snapshot event if there are new messages
chat_history = await ctx.store.get("chat_history")
if new_tool_messages:
chat_history.extend(new_tool_messages)
self._snapshot_messages(ctx, [*chat_history])
await ctx.store.set("chat_history", chat_history)
if len(frontend_tool_calls) > 0:
# Expect frontend tool calls to call back to the agent
return StopEvent()
return LoopEvent(messages=chat_history)
| AGUIChatWorkflow |
python | pypa__warehouse | tests/unit/packaging/test_services.py | {
"start": 35697,
"end": 38849
} | class ____:
def test_verify_service(self):
assert verifyClass(IProjectService, ProjectService)
@pytest.mark.parametrize("name", ["", ".,;", "_z"])
def test_check_project_name_invalid(self, name):
service = ProjectService(session=pretend.stub())
with pytest.raises(ProjectNameUnavailableInvalidError):
service.check_project_name(name)
@pytest.mark.parametrize("name", ["uu", "cgi", "nis", "mailcap"])
def test_check_project_name_stdlib(self, name):
service = ProjectService(session=pretend.stub())
with pytest.raises(ProjectNameUnavailableStdlibError):
service.check_project_name(name)
def test_check_project_name_already_exists(self, db_session):
service = ProjectService(session=db_session)
project = ProjectFactory.create(name="foo")
with pytest.raises(ProjectNameUnavailableExistingError) as exc:
service.check_project_name("foo")
assert exc.value.existing_project == project
with pytest.raises(ProjectNameUnavailableExistingError):
service.check_project_name("Foo")
def test_check_project_name_prohibited(self, db_session):
service = ProjectService(session=db_session)
ProhibitedProjectFactory.create(name="foo")
with pytest.raises(ProjectNameUnavailableProhibitedError):
service.check_project_name("foo")
with pytest.raises(ProjectNameUnavailableProhibitedError):
service.check_project_name("Foo")
def test_check_project_name_too_similar(self, db_session):
service = ProjectService(session=db_session)
ProjectFactory.create(name="f00")
with pytest.raises(ProjectNameUnavailableSimilarError):
service.check_project_name("foo")
def test_check_project_name_too_similar_multiple_existing(self, db_session):
service = ProjectService(session=db_session)
project1 = ProjectFactory.create(name="f00")
project2 = ProjectFactory.create(name="f0o")
with pytest.raises(ProjectNameUnavailableSimilarError) as exc:
service.check_project_name("foo")
assert (
exc.value.similar_project_name == project1.name
or exc.value.similar_project_name == project2.name
)
def test_check_project_name_typosquatting_prohibited(self, db_session):
# TODO: Update this test once we have a dynamic TopN approach
service = ProjectService(session=db_session)
ProhibitedProjectFactory.create(name="numpy")
with pytest.raises(ProjectNameUnavailableTypoSquattingError):
service.check_project_name("numpi")
def test_check_project_name_ok(self, db_session):
service = ProjectService(session=db_session)
# Should not raise any exception
service.check_project_name("foo")
def test_project_service_factory():
db = pretend.stub()
request = pretend.stub(
db=db,
find_service=lambda iface, name=None, context=None: None,
)
service = project_service_factory(pretend.stub(), request)
assert service.db == db
| TestProjectService |
python | PyCQA__pylint | tests/functional/r/regression/regression_4688_duplicated_bases_member_hints.py | {
"start": 145,
"end": 244
} | class ____(object, object):
def get(self):
self._non_existent_attribute # [no-member]
| Klass |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 2848,
"end": 3548
} | class ____(CookiecutterException):
"""
Exception for out-of-scope variables.
Raised when a template uses a variable which is not defined in the
context.
"""
def __init__(
self, message: str, error: TemplateError, context: dict[str, Any]
) -> None:
"""Exception for out-of-scope variables."""
self.message = message
self.error = error
self.context = context
def __str__(self) -> str:
"""Text representation of UndefinedVariableInTemplate."""
return (
f"{self.message}. "
f"Error message: {self.error.message}. "
f"Context: {self.context}"
)
| UndefinedVariableInTemplate |
python | numpy__numpy | numpy/distutils/command/install_headers.py | {
"start": 96,
"end": 920
} | class ____ (old_install_headers):
def run (self):
headers = self.distribution.headers
if not headers:
return
prefix = os.path.dirname(self.install_dir)
for header in headers:
if isinstance(header, tuple):
# Kind of a hack, but I don't know where else to change this...
if header[0] == 'numpy._core':
header = ('numpy', header[1])
if os.path.splitext(header[1])[1] == '.inc':
continue
d = os.path.join(*([prefix]+header[0].split('.')))
header = header[1]
else:
d = self.install_dir
self.mkpath(d)
(out, _) = self.copy_file(header, d)
self.outfiles.append(out)
| install_headers |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/descriptor.py | {
"start": 545,
"end": 684
} | class ____:
descr = CustomDataDescriptor('Descriptor instance docstring.')
@property
def prop(self):
"""Property."""
| Class |
python | sqlalchemy__sqlalchemy | test/sql/test_tablesample.py | {
"start": 545,
"end": 2378
} | class ____(fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
run_setup_bind = None
run_create_tables = None
@classmethod
def define_tables(cls, metadata):
Table(
"people",
metadata,
Column("people_id", Integer, primary_key=True),
Column("age", Integer),
Column("name", String(30)),
)
def test_standalone(self):
table1 = self.tables.people
# no special alias handling even though clause is not in the
# context of a FROM clause
self.assert_compile(
tablesample(table1, 1, name="alias"),
"people AS alias TABLESAMPLE system(:system_1)",
)
self.assert_compile(
table1.tablesample(1, name="alias"),
"people AS alias TABLESAMPLE system(:system_1)",
)
self.assert_compile(
tablesample(
table1, func.bernoulli(1), name="alias", seed=func.random()
),
"people AS alias TABLESAMPLE bernoulli(:bernoulli_1) "
"REPEATABLE (random())",
)
def test_select_from(self):
table1 = self.tables.people
self.assert_compile(
select(table1.tablesample(text("1"), name="alias").c.people_id),
"SELECT alias.people_id FROM "
"people AS alias TABLESAMPLE system(1)",
)
def test_no_alias_construct(self):
a = table("a", column("x"))
assert_raises_message(
NotImplementedError,
"The TableSample class is not intended to be constructed "
"directly. "
r"Please use the tablesample\(\) standalone",
TableSample,
a,
"foo",
)
| TableSampleTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/list/retrievers.py | {
"start": 1839,
"end": 4598
} | class ____(BaseRetriever):
"""
Embedding based retriever for SummaryIndex.
Generates embeddings in a lazy fashion for all
nodes that are traversed.
Args:
index (SummaryIndex): The index to retrieve from.
similarity_top_k (Optional[int]): The number of top nodes to return.
"""
def __init__(
self,
index: SummaryIndex,
embed_model: Optional[BaseEmbedding] = None,
similarity_top_k: Optional[int] = 1,
callback_manager: Optional[CallbackManager] = None,
object_map: Optional[dict] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
self._index = index
self._similarity_top_k = similarity_top_k
self._embed_model = embed_model or Settings.embed_model
super().__init__(
callback_manager=callback_manager, object_map=object_map, verbose=verbose
)
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Retrieve nodes."""
node_ids = self._index.index_struct.nodes
# top k nodes
nodes = self._index.docstore.get_nodes(node_ids)
query_embedding, node_embeddings = self._get_embeddings(query_bundle, nodes)
top_similarities, top_idxs = get_top_k_embeddings(
query_embedding,
node_embeddings,
similarity_top_k=self._similarity_top_k,
embedding_ids=list(range(len(nodes))),
)
top_k_nodes = [nodes[i] for i in top_idxs]
node_with_scores = []
for node, similarity in zip(top_k_nodes, top_similarities):
node_with_scores.append(NodeWithScore(node=node, score=similarity))
logger.debug(f"> Top {len(top_idxs)} nodes:\n")
nl = "\n"
logger.debug(f"{nl.join([n.get_content() for n in top_k_nodes])}")
return node_with_scores
def _get_embeddings(
self, query_bundle: QueryBundle, nodes: List[BaseNode]
) -> Tuple[List[float], List[List[float]]]:
"""Get top nodes by similarity to the query."""
if query_bundle.embedding is None:
query_bundle.embedding = self._embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
node_embeddings: List[List[float]] = []
nodes_embedded = 0
for node in nodes:
if node.embedding is None:
nodes_embedded += 1
node.embedding = self._embed_model.get_text_embedding(
node.get_content(metadata_mode=MetadataMode.EMBED)
)
node_embeddings.append(node.embedding)
return query_bundle.embedding, node_embeddings
| SummaryIndexEmbeddingRetriever |
python | sympy__sympy | sympy/functions/combinatorial/factorials.py | {
"start": 1296,
"end": 9434
} | class ____(CombinatorialFunction):
r"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
There is strict relation of factorial with gamma function. In
fact `n! = gamma(n+1)` for nonnegative integers. Rewrite of this
kind is very useful in case of combinatorial simplification.
Computation of the factorial is done using two algorithms. For
small arguments a precomputed look up table is used. However for bigger
input algorithm Prime-Swing is used. It is the fastest algorithm
known and computes `n!` via prime factorization of special class
of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy.functions.special.gamma_functions import (gamma, polygamma)
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
_small_factorials: list[int] = []
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = prod(sieve.primerange(n//2 + 1, n + 1))
R_product = prod(primes)
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n.is_zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n = n.p
if n < 20:
if not cls._small_factorials:
result = 1
for i in range(1, 20):
result *= i
cls._small_factorials.append(result)
result = cls._small_factorials[n-1]
# GMPY factorial is faster, use it when available
#
# XXX: There is a sympy.external.gmpy.factorial function
# which provides gmpy.fac if available or the flint version
# if flint is used. It could be used here to avoid the
# conditional logic but it needs to be checked whether the
# pure Python fallback used there is as fast as the
# fallback used here (perhaps the fallback here should be
# moved to sympy.external.ntheory).
elif _gmpy is not None:
result = _gmpy.fac(n)
else:
bits = bin(n).count('1')
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _facmod(self, n, q):
res, N = 1, int(_sqrt(n))
# Exponent of prime p in n! is e_p(n) = [n/p] + [n/p**2] + ...
# for p > sqrt(n), e_p(n) < sqrt(n), the primes with [n/p] = m,
# occur consecutively and are grouped together in pw[m] for
# simultaneous exponentiation at a later stage
pw = [1]*N
m = 2 # to initialize the if condition below
for prime in sieve.primerange(2, n + 1):
if m > 1:
m, y = 0, n // prime
while y:
m += y
y //= prime
if m < N:
pw[m] = pw[m]*prime % q
else:
res = res*pow(prime, m, q) % q
for ex, bs in enumerate(pw):
if ex == 0 or bs == 1:
continue
if bs == 0:
return 0
res = res*pow(bs, ex, q) % q
return res
def _eval_Mod(self, q):
n = self.args[0]
if n.is_integer and n.is_nonnegative and q.is_integer:
aq = abs(q)
d = aq - n
if d.is_nonpositive:
return S.Zero
else:
isprime = aq.is_prime
if d == 1:
# Apply Wilson's theorem (if a natural number n > 1
# is a prime number, then (n-1)! = -1 mod n) and
# its inverse (if n > 4 is a composite number, then
# (n-1)! = 0 mod n)
if isprime:
return -1 % q
elif isprime is False and (aq - 6).is_nonnegative:
return S.Zero
elif n.is_Integer and q.is_Integer:
n, d, aq = map(int, (n, d, aq))
if isprime and (d - 1 < n):
fc = self._facmod(d - 1, aq)
fc = pow(fc, aq - 2, aq)
if d%2:
fc = -fc
else:
fc = self._facmod(n, aq)
return fc % q
def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs):
from sympy.functions.special.gamma_functions import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n, **kwargs):
from sympy.concrete.products import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_even(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 2).is_nonnegative
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer and x.is_nonnegative:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0].as_leading_term(x)
arg0 = arg.subs(x, 0)
if arg0.is_zero:
return S.One
elif not arg0.is_infinite:
return self.func(arg)
raise PoleError("Cannot expand %s around 0" % (self))
| factorial |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 23258,
"end": 23633
} | class ____(_DateFixture, fixtures.TablesTest):
__requires__ = ("time_microseconds",)
__backend__ = True
datatype = Time
data = datetime.time(12, 57, 18, 396)
@testing.requires.time_implicit_bound
def test_select_direct(self, connection):
result = connection.scalar(select(literal(self.data)))
eq_(result, self.data)
| TimeMicrosecondsTest |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 28605,
"end": 28745
} | class ____(Warning):
"""
Warning raised when trying to use where criteria on an incompatible HDF5 file.
"""
| IncompatibilityWarning |
python | sanic-org__sanic | tests/typing/samples/app_fully_custom.py | {
"start": 97,
"end": 198
} | class ____:
pass
app = Sanic("test", config=CustomConfig(), ctx=Foo())
reveal_type(app) # noqa
| Foo |
python | doocs__leetcode | solution/3600-3699/3643.Flip Square Submatrix Vertically/Solution.py | {
"start": 0,
"end": 328
} | class ____:
def reverseSubmatrix(
self, grid: List[List[int]], x: int, y: int, k: int
) -> List[List[int]]:
for i in range(x, x + k // 2):
i2 = x + k - 1 - (i - x)
for j in range(y, y + k):
grid[i][j], grid[i2][j] = grid[i2][j], grid[i][j]
return grid
| Solution |
python | ansible__ansible | test/units/playbook/role/test_role.py | {
"start": 1189,
"end": 4411
} | class ____(unittest.TestCase):
def test(self):
params = {'foo': 'bar'}
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
@staticmethod
def _assert_hashable(res):
hash(res)
def _assert_set(self, res):
self.assertIsInstance(res, frozenset)
def test_dict_tuple(self):
params = {'foo': (1, 'bar',)}
res = hash_params(params)
self._assert_set(res)
def test_tuple(self):
params = (1, None, 'foo')
res = hash_params(params)
self._assert_hashable(res)
def test_tuple_dict(self):
params = ({'foo': 'bar'}, 37)
res = hash_params(params)
self._assert_hashable(res)
def test_list(self):
params = ['foo', 'bar', 1, 37, None]
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
def test_dict_with_list_value(self):
params = {'foo': [1, 4, 'bar']}
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
def test_empty_set(self):
params = set([])
res = hash_params(params)
self._assert_hashable(res)
self._assert_set(res)
def test_generator(self):
def my_generator():
yield
params = my_generator()
res = hash_params(params)
self._assert_hashable(res)
assert list(params)
def test_container_but_not_iterable(self):
# This is a Container that is not iterable, which is unlikely but...
class MyContainer(Container):
def __init__(self, _some_thing):
pass
def __contains__(self, item):
"""Implementation omitted, since it will never be called."""
params = MyContainer('foo bar')
with pytest.raises(TypeError) as ex:
hash_params(params)
assert ex.value.args == ("'MyContainer' object is not iterable",)
def test_param_dict_dupe_values(self):
params1 = {'foo': False}
params2 = {'bar': False}
res1 = hash_params(params1)
res2 = hash_params(params2)
hash1 = hash(res1)
hash2 = hash(res2)
self.assertNotEqual(res1, res2)
self.assertNotEqual(hash1, hash2)
def test_param_dupe(self):
params1 = {
# 'from_files': {},
'tags': [],
u'testvalue': False,
u'testvalue2': True,
# 'when': []
}
params2 = {
# 'from_files': {},
'tags': [],
u'testvalue': True,
u'testvalue2': False,
# 'when': []
}
res1 = hash_params(params1)
res2 = hash_params(params2)
self.assertNotEqual(hash(res1), hash(res2))
self.assertNotEqual(res1, res2)
params_dict = {}
params_dict[res1] = 'params1'
params_dict[res2] = 'params2'
self.assertEqual(len(params_dict), 2)
del params_dict[res2]
self.assertEqual(len(params_dict), 1)
for key in params_dict:
self.assertTrue(key in params_dict)
self.assertIn(key, params_dict)
| TestHashParams |
python | PrefectHQ__prefect | src/prefect/server/events/ordering/db.py | {
"start": 825,
"end": 7032
} | class ____(_CausalOrdering):
_seen_events: Mapping[str, MutableMapping[UUID, bool]] = defaultdict(
lambda: TTLCache(maxsize=10000, ttl=SEEN_EXPIRATION.total_seconds())
)
scope: str
def __init__(self, scope: str):
self.scope = scope
async def event_has_been_seen(self, event: Union[UUID, Event]) -> bool:
id = event.id if isinstance(event, Event) else event
return self._seen_events[self.scope].get(id, False)
async def record_event_as_seen(self, event: ReceivedEvent) -> None:
self._seen_events[self.scope][event.id] = True
@db_injector
async def record_follower(
self, db: PrefectDBInterface, event: ReceivedEvent
) -> None:
"""Remember that this event is waiting on another event to arrive"""
assert event.follows
async with db.session_context(begin_transaction=True) as session:
await session.execute(
sa.insert(db.AutomationEventFollower).values(
scope=self.scope,
leader_event_id=event.follows,
follower_event_id=event.id,
received=event.received,
follower=event,
)
)
@db_injector
async def forget_follower(
self, db: PrefectDBInterface, follower: ReceivedEvent
) -> None:
"""Forget that this event is waiting on another event to arrive"""
assert follower.follows
async with db.session_context(begin_transaction=True) as session:
await session.execute(
sa.delete(db.AutomationEventFollower).where(
db.AutomationEventFollower.scope == self.scope,
db.AutomationEventFollower.follower_event_id == follower.id,
)
)
@db_injector
async def get_followers(
self, db: PrefectDBInterface, leader: ReceivedEvent
) -> List[ReceivedEvent]:
"""Returns events that were waiting on this leader event to arrive"""
async with db.session_context() as session:
query = sa.select(db.AutomationEventFollower.follower).where(
db.AutomationEventFollower.scope == self.scope,
db.AutomationEventFollower.leader_event_id == leader.id,
)
result = await session.execute(query)
followers = result.scalars().all()
return sorted(followers, key=lambda e: e.occurred)
@db_injector
async def get_lost_followers(self, db: PrefectDBInterface) -> List[ReceivedEvent]:
"""Returns events that were waiting on a leader event that never arrived"""
earlier = prefect.types._datetime.now("UTC") - PRECEDING_EVENT_LOOKBACK
async with db.session_context(begin_transaction=True) as session:
query = sa.select(db.AutomationEventFollower.follower).where(
db.AutomationEventFollower.scope == self.scope,
db.AutomationEventFollower.received < earlier,
)
result = await session.execute(query)
followers = result.scalars().all()
# forget these followers, since they are never going to see their leader event
await session.execute(
sa.delete(db.AutomationEventFollower).where(
db.AutomationEventFollower.scope == self.scope,
db.AutomationEventFollower.received < earlier,
)
)
return sorted(followers, key=lambda e: e.occurred)
@asynccontextmanager
async def preceding_event_confirmed(
self, handler: event_handler, event: ReceivedEvent, depth: int = 0
):
"""Events may optionally declare that they logically follow another event, so that
we can preserve important event orderings in the face of unreliable delivery and
ordering of messages from the queues.
This function keeps track of the ID of each event that this shard has successfully
processed going back to the PRECEDING_EVENT_LOOKBACK period. If an event arrives
that must follow another one, confirm that we have recently seen and processed that
event before proceeding.
Args:
event (ReceivedEvent): The event to be processed. This object should include metadata indicating
if and what event it follows.
depth (int, optional): The current recursion depth, used to prevent infinite recursion due to
cyclic dependencies between events. Defaults to 0.
Raises EventArrivedEarly if the current event shouldn't be processed yet."""
if depth > MAX_DEPTH_OF_PRECEDING_EVENT:
logger.exception(
"Event %r (%s) for %r has exceeded the maximum recursion depth of %s",
event.event,
event.id,
event.resource.id,
MAX_DEPTH_OF_PRECEDING_EVENT,
)
raise MaxDepthExceeded(event)
if event.follows:
if not await self.event_has_been_seen(event.follows):
age = prefect.types._datetime.now("UTC") - event.received
if age < PRECEDING_EVENT_LOOKBACK:
logger.debug(
"Event %r (%s) for %r arrived before the event it follows %s",
event.event,
event.id,
event.resource.id,
event.follows,
)
# record this follower for safe-keeping
await self.record_follower(event)
raise EventArrivedEarly(event)
yield
await self.record_event_as_seen(event)
# we have just processed an event that other events were waiting on, so let's
# react to them now in the order they occurred
for waiter in await self.get_followers(event):
await handler(waiter, depth + 1)
# if this event was itself waiting on something, let's consider it as resolved now
# that it has been processed
if event.follows:
await self.forget_follower(event)
| CausalOrdering |
python | sphinx-doc__sphinx | doc/development/tutorials/examples/todo.py | {
"start": 486,
"end": 573
} | class ____(Directive):
def run(self):
return [todolist('')]
| TodolistDirective |
python | ray-project__ray | python/ray/serve/tests/unit/test_metrics_utils.py | {
"start": 7453,
"end": 9884
} | class ____:
def test_aggregate_timeseries_empty(self):
assert aggregate_timeseries([], AggregationFunction.MEAN) is None
assert aggregate_timeseries([], AggregationFunction.MAX) is None
assert aggregate_timeseries([], AggregationFunction.MIN) is None
def test_aggregate_timeseries_mean(self):
assert (
aggregate_timeseries([TimeStampedValue(1.0, 5.0)], AggregationFunction.MEAN)
== 5.0
)
assert (
aggregate_timeseries(
[TimeStampedValue(1.0, 5.0), TimeStampedValue(2.0, 10.0)],
AggregationFunction.MEAN,
)
== 7.5
)
assert (
aggregate_timeseries(
[
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
],
AggregationFunction.MEAN,
)
== 10.0
)
def test_aggregate_timeseries_max(self):
assert (
aggregate_timeseries([TimeStampedValue(1.0, 5.0)], AggregationFunction.MAX)
== 5.0
)
assert (
aggregate_timeseries(
[TimeStampedValue(1.0, 5.0), TimeStampedValue(2.0, 10.0)],
AggregationFunction.MAX,
)
== 10.0
)
assert (
aggregate_timeseries(
[
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
],
AggregationFunction.MAX,
)
== 15.0
)
def test_aggregate_timeseries_min(self):
assert (
aggregate_timeseries([TimeStampedValue(1.0, 5.0)], AggregationFunction.MIN)
== 5.0
)
assert (
aggregate_timeseries(
[TimeStampedValue(1.0, 5.0), TimeStampedValue(2.0, 10.0)],
AggregationFunction.MIN,
)
== 5.0
)
assert (
aggregate_timeseries(
[
TimeStampedValue(1.0, 5.0),
TimeStampedValue(2.0, 10.0),
TimeStampedValue(3.0, 15.0),
],
AggregationFunction.MIN,
)
== 5.0
)
| TestAggregateTimeseries |
python | pennersr__django-allauth | allauth/account/forms.py | {
"start": 31465,
"end": 31934
} | class ____(forms.Form):
email = EmailField(required=True)
def __init__(self, *args, **kwargs):
self.email = kwargs.pop("email", None)
super().__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data["email"]
if email == self.email:
raise get_adapter().validation_error("same_as_current")
email, self.account_already_exists = email_already_exists(email)
return email
| ChangeEmailForm |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_storage_transfer_service.py | {
"start": 28739,
"end": 31007
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_resume(self, mock_hook):
op = CloudDataTransferServiceResumeOperationOperator(
operation_name=OPERATION_NAME,
task_id=TASK_ID,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.resume_transfer_operation.assert_called_once_with(
operation_name=OPERATION_NAME
)
assert result is None
# Setting all the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all
# fields
@pytest.mark.db_test
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_resume_with_templates(self, _, create_task_instance_of_operator, session):
dag_id = "test_operation_resume_with_templates"
ti = create_task_instance_of_operator(
CloudDataTransferServiceResumeOperationOperator,
dag_id=dag_id,
operation_name="{{ dag.dag_id }}",
gcp_conn_id="{{ dag.dag_id }}",
api_version="{{ dag.dag_id }}",
task_id=TASK_ID,
)
session.add(ti)
session.commit()
ti.render_templates()
assert dag_id == ti.task.operation_name
assert dag_id == ti.task.gcp_conn_id
assert dag_id == ti.task.api_version
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_resume_should_throw_ex_when_name_none(self, mock_hook):
with pytest.raises(
AirflowException, match="The required parameter 'operation_name' is empty or None"
):
CloudDataTransferServiceResumeOperationOperator(operation_name="", task_id=TASK_ID)
| TestGcpStorageTransferOperationsResumeOperator |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 47028,
"end": 52379
} | class ____(AsyncHTTPTestCase):
def get_app(self):
class BufferedHandler(RequestHandler):
def put(self):
self.write(str(len(self.request.body)))
@stream_request_body
class StreamingHandler(RequestHandler):
def initialize(self):
self.bytes_read = 0
def prepare(self):
conn = typing.cast(HTTP1Connection, self.request.connection)
if "expected_size" in self.request.arguments:
conn.set_max_body_size(int(self.get_argument("expected_size")))
if "body_timeout" in self.request.arguments:
conn.set_body_timeout(float(self.get_argument("body_timeout")))
def data_received(self, data):
self.bytes_read += len(data)
def put(self):
self.write(str(self.bytes_read))
return Application(
[("/buffered", BufferedHandler), ("/streaming", StreamingHandler)]
)
def get_httpserver_options(self):
return dict(body_timeout=3600, max_body_size=4096)
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient()
def test_small_body(self):
response = self.fetch("/buffered", method="PUT", body=b"a" * 4096)
self.assertEqual(response.body, b"4096")
response = self.fetch("/streaming", method="PUT", body=b"a" * 4096)
self.assertEqual(response.body, b"4096")
def test_large_body_buffered(self):
with ExpectLog(gen_log, ".*Content-Length too long", level=logging.INFO):
response = self.fetch("/buffered", method="PUT", body=b"a" * 10240)
self.assertEqual(response.code, 400)
@unittest.skipIf(os.name == "nt", "flaky on windows")
def test_large_body_buffered_chunked(self):
# This test is flaky on windows for unknown reasons.
with ExpectLog(gen_log, ".*chunked body too large", level=logging.INFO):
response = self.fetch(
"/buffered",
method="PUT",
body_producer=lambda write: write(b"a" * 10240),
)
self.assertEqual(response.code, 400)
def test_large_body_streaming(self):
with ExpectLog(gen_log, ".*Content-Length too long", level=logging.INFO):
response = self.fetch("/streaming", method="PUT", body=b"a" * 10240)
self.assertEqual(response.code, 400)
@unittest.skipIf(os.name == "nt", "flaky on windows")
def test_large_body_streaming_chunked(self):
with ExpectLog(gen_log, ".*chunked body too large", level=logging.INFO):
response = self.fetch(
"/streaming",
method="PUT",
body_producer=lambda write: write(b"a" * 10240),
)
self.assertEqual(response.code, 400)
def test_large_body_streaming_override(self):
response = self.fetch(
"/streaming?expected_size=10240", method="PUT", body=b"a" * 10240
)
self.assertEqual(response.body, b"10240")
def test_large_body_streaming_chunked_override(self):
response = self.fetch(
"/streaming?expected_size=10240",
method="PUT",
body_producer=lambda write: write(b"a" * 10240),
)
self.assertEqual(response.body, b"10240")
@gen_test
def test_timeout(self):
stream = IOStream(socket.socket())
try:
yield stream.connect(("127.0.0.1", self.get_http_port()))
# Use a raw stream because AsyncHTTPClient won't let us read a
# response without finishing a body.
stream.write(
b"PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n"
b"Content-Length: 42\r\n\r\n"
)
with ExpectLog(gen_log, "Timeout reading body", level=logging.INFO):
response = yield stream.read_until_close()
self.assertEqual(response, b"")
finally:
stream.close()
@gen_test
def test_body_size_override_reset(self):
# The max_body_size override is reset between requests.
stream = IOStream(socket.socket())
try:
yield stream.connect(("127.0.0.1", self.get_http_port()))
# Use a raw stream so we can make sure it's all on one connection.
stream.write(
b"PUT /streaming?expected_size=10240 HTTP/1.1\r\n"
b"Host: 127.0.0.1\r\n"
b"Content-Length: 10240\r\n\r\n"
)
stream.write(b"a" * 10240)
start_line, headers, response = yield read_stream_body(stream)
self.assertEqual(response, b"10240")
# Without the ?expected_size parameter, we get the old default value
stream.write(
b"PUT /streaming HTTP/1.1\r\n"
b"Host: 127.0.0.1\r\n"
b"Content-Length: 10240\r\n\r\n"
)
with ExpectLog(gen_log, ".*Content-Length too long", level=logging.INFO):
data = yield stream.read_until_close()
self.assertEqual(data, b"HTTP/1.1 400 Bad Request\r\n\r\n")
finally:
stream.close()
| BodyLimitsTest |
python | tiangolo__fastapi | fastapi/params.py | {
"start": 14078,
"end": 17130
} | class ____(Param): # type: ignore[misc]
in_ = ParamTypes.cookie
def __init__(
self,
default: Any = Undefined,
*,
default_factory: Union[Callable[[], Any], None] = _Unset,
annotation: Optional[Any] = None,
alias: Optional[str] = None,
alias_priority: Union[int, None] = _Unset,
# TODO: update when deprecating Pydantic v1, import these types
# validation_alias: str | AliasPath | AliasChoices | None
validation_alias: Union[str, None] = None,
serialization_alias: Union[str, None] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
pattern: Optional[str] = None,
regex: Annotated[
Optional[str],
deprecated(
"Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
),
] = None,
discriminator: Union[str, None] = None,
strict: Union[bool, None] = _Unset,
multiple_of: Union[float, None] = _Unset,
allow_inf_nan: Union[bool, None] = _Unset,
max_digits: Union[int, None] = _Unset,
decimal_places: Union[int, None] = _Unset,
examples: Optional[List[Any]] = None,
example: Annotated[
Optional[Any],
deprecated(
"Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
"although still supported. Use examples instead."
),
] = _Unset,
openapi_examples: Optional[Dict[str, Example]] = None,
deprecated: Union[deprecated, str, bool, None] = None,
include_in_schema: bool = True,
json_schema_extra: Union[Dict[str, Any], None] = None,
**extra: Any,
):
super().__init__(
default=default,
default_factory=default_factory,
annotation=annotation,
alias=alias,
alias_priority=alias_priority,
validation_alias=validation_alias,
serialization_alias=serialization_alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
pattern=pattern,
regex=regex,
discriminator=discriminator,
strict=strict,
multiple_of=multiple_of,
allow_inf_nan=allow_inf_nan,
max_digits=max_digits,
decimal_places=decimal_places,
deprecated=deprecated,
example=example,
examples=examples,
openapi_examples=openapi_examples,
include_in_schema=include_in_schema,
json_schema_extra=json_schema_extra,
**extra,
)
| Cookie |
python | sphinx-doc__sphinx | sphinx/util/docutils.py | {
"start": 19536,
"end": 22633
} | class ____:
"""A base class for Sphinx roles.
This class provides helper methods for Sphinx roles.
.. versionadded:: 2.0
.. note:: The subclasses of this class might not work with docutils.
This class is strongly coupled with Sphinx.
"""
# fmt: off
name: str #: The role name actually used in the document.
rawtext: str #: A string containing the entire interpreted text input.
text: str #: The interpreted text content.
lineno: int #: The line number where the interpreted text begins.
inliner: Inliner #: The ``docutils.parsers.rst.states.Inliner`` object.
#: A dictionary of directive options for customisation
#: (from the "role" directive).
options: dict[str, Any]
#: A list of strings, the directive content for customisation
#: (from the "role" directive).
content: Sequence[str]
# fmt: on
def __call__(
self,
name: str,
rawtext: str,
text: str,
lineno: int,
inliner: Inliner,
options: dict[str, Any] | None = None,
content: Sequence[str] = (),
) -> tuple[list[Node], list[system_message]]:
self.rawtext = rawtext
self.text = unescape(text)
self.lineno = lineno
self.inliner = inliner
self.options = options if options is not None else {}
self.content = content
# guess role type
if name:
self.name = name.lower()
else:
self.name = self.env.current_document.default_role
if not self.name:
self.name = self.env.config.default_role
if not self.name:
msg = 'cannot determine default role!'
raise SphinxError(msg)
return self.run()
def run(self) -> tuple[list[Node], list[system_message]]:
raise NotImplementedError
@property
def env(self) -> BuildEnvironment:
"""Reference to the :class:`.BuildEnvironment` object.
.. versionadded:: 2.0
"""
return self.inliner.document.settings.env
@property
def config(self) -> Config:
"""Reference to the :class:`.Config` object.
.. versionadded:: 2.0
"""
return self.env.config
def get_source_info(self, lineno: int | None = None) -> tuple[str, int]:
# .. versionadded:: 3.0
if lineno is None:
lineno = self.lineno
return self.inliner.reporter.get_source_and_line(lineno) # type: ignore[attr-defined]
def set_source_info(self, node: Node, lineno: int | None = None) -> None:
# .. versionadded:: 2.0
node.source, node.line = self.get_source_info(lineno)
def get_location(self) -> str:
"""Get current location info for logging.
.. versionadded:: 4.2
"""
source, line = self.get_source_info()
if source and line:
return f'{source}:{line}'
if source:
return f'{source}:'
if line:
return f'<unknown>:{line}'
return ''
| SphinxRole |
python | getsentry__sentry | src/sentry/runner/commands/run.py | {
"start": 813,
"end": 20615
} | class ____(click.ParamType):
name = "text"
def convert(self, value: str | None, param: object, ctx: object) -> frozenset[str] | None:
if value is None:
return None
# Providing a compatibility with splitting
# the `events` queue until multiple queues
# without the need to explicitly add them.
queues = set()
for queue in value.split(","):
if queue == "events":
queues.add("events.preprocess_event")
queues.add("events.process_event")
queues.add("events.save_event")
from sentry.runner.initializer import show_big_error
show_big_error(
[
"DEPRECATED",
"`events` queue no longer exists.",
"Switch to using:",
"- events.preprocess_event",
"- events.process_event",
"- events.save_event",
]
)
else:
queues.add(queue)
return frozenset(queues)
QueueSet = QueueSetType()
@click.group()
def run() -> None:
"Run a service."
@run.command()
@click.option(
"--bind",
"-b",
default=None,
help="Bind address.",
metavar="ADDRESS",
callback=_address_validate,
)
@click.option(
"--workers", "-w", default=0, help="The number of worker processes for handling requests."
)
@click.option("--upgrade", default=False, is_flag=True, help="Upgrade before starting.")
@click.option(
"--with-lock", default=False, is_flag=True, help="Use a lock if performing an upgrade."
)
@click.option(
"--noinput", default=False, is_flag=True, help="Do not prompt the user for input of any kind."
)
@log_options()
@configuration
def web(
bind: tuple[None, None] | tuple[str, int | None],
workers: int,
upgrade: bool,
with_lock: bool,
noinput: bool,
) -> None:
"Run web service."
if upgrade:
click.echo("Performing upgrade before service startup...")
from sentry.runner import call_command
try:
call_command(
"sentry.runner.commands.upgrade.upgrade",
verbosity=0,
noinput=noinput,
lock=with_lock,
)
except click.ClickException:
if with_lock:
click.echo("!! Upgrade currently running from another process, skipping.", err=True)
else:
raise
with managed_bgtasks(role="web"):
from sentry.services.http import SentryHTTPServer
SentryHTTPServer(host=bind[0], port=bind[1], workers=workers).run()
@run.command()
@click.option(
"--redis-cluster",
help="The rediscluster name to store run state in.",
default="default",
)
@log_options()
@configuration
def taskworker_scheduler(redis_cluster: str, **options: Any) -> None:
"""
Run a scheduler for taskworkers
All tasks defined in settings.TASKWORKER_SCHEDULES will be scheduled as required.
"""
from django.conf import settings
from sentry.taskworker.runtime import app
from sentry.taskworker.scheduler.runner import RunStorage, ScheduleRunner
from sentry.utils.redis import redis_clusters
app.load_modules()
run_storage = RunStorage(redis_clusters.get(redis_cluster))
with managed_bgtasks(role="taskworker-scheduler"):
runner = ScheduleRunner(app, run_storage)
for key, schedule_data in settings.TASKWORKER_SCHEDULES.items():
runner.add(key, schedule_data)
logger.info(
"taskworker.scheduler.schedule_data",
extra={
"schedule_keys": list(settings.TASKWORKER_SCHEDULES.keys()),
},
)
runner.log_startup()
while True:
sleep_time = runner.tick()
time.sleep(sleep_time)
@run.command()
@click.option(
"--pidfile",
help=(
"Optional file used to store the process pid. The "
"program will not start if this file already exists and "
"the pid is still alive."
),
)
@click.option(
"--logfile", "-f", help=("Path to log file. If no logfile is specified, stderr is used.")
)
@click.option("--quiet", "-q", is_flag=True, default=False)
@click.option("--no-color", is_flag=True, default=False)
@click.option("--autoreload", is_flag=True, default=False, help="Enable autoreloading.")
@click.option("--without-gossip", is_flag=True, default=False)
@click.option("--without-mingle", is_flag=True, default=False)
@click.option("--without-heartbeat", is_flag=True, default=False)
@log_options()
@configuration
def cron(**options: Any) -> None:
# TODO(taskworker) Remove this stub command
while True:
click.secho(
"The cron command has been removed. Use `sentry run taskworker-scheduler` instead.",
fg="yellow",
)
time.sleep(5)
# Deprecated stub: the celery-based worker was replaced by taskworker. The
# legacy options below are still declared so that existing
# `sentry run worker ...` invocations keep parsing instead of failing on
# unknown flags; every value is ignored by the stub body.
@run.command()
@click.option(
    "--hostname",
    "-n",
    help=(
        "Set custom hostname, e.g. 'w1.%h'. Expands: %h" "(hostname), %n (name) and %d, (domain)."
    ),
)
@click.option(
    "--queues",
    "-Q",
    type=QueueSet,
    help=(
        "List of queues to enable for this worker, separated by "
        "comma. By default all configured queues are enabled. "
        "Example: -Q video,image"
    ),
)
@click.option("--exclude-queues", "-X", type=QueueSet)
@click.option(
    "--concurrency",
    "-c",
    default=1,
    help=(
        "Number of child processes processing the queue. The "
        "default is the number of CPUs available on your "
        "system."
    ),
)
@click.option(
    "--logfile", "-f", help=("Path to log file. If no logfile is specified, stderr is used.")
)
@click.option("--quiet", "-q", is_flag=True, default=False)
@click.option("--no-color", is_flag=True, default=False)
@click.option("--autoreload", is_flag=True, default=False, help="Enable autoreloading.")
@click.option("--without-gossip", is_flag=True, default=False)
@click.option("--without-mingle", is_flag=True, default=False)
@click.option("--without-heartbeat", is_flag=True, default=False)
@click.option("--max-tasks-per-child", default=10000)
@click.option("--ignore-unknown-queues", is_flag=True, default=False)
@log_options()
@configuration
def worker(ignore_unknown_queues: bool, **options: Any) -> None:
    # TODO(taskworker) Remove this stub command
    # `ignore_unknown_queues` is bound explicitly but unused; all options are
    # accepted purely for CLI compatibility. No docstring on purpose: click
    # would surface one as `--help` text and change CLI output.
    # Loop forever printing the migration notice (presumably so process
    # supervisors don't fast-restart the container -- TODO confirm).
    while True:
        click.secho(
            "The worker command has been removed. Use `sentry run taskworker` instead.", fg="yellow"
        )
        time.sleep(5)
# CLI entry point for the taskworker process. All options are forwarded
# verbatim (as **options) to run_taskworker below.
@run.command()
@click.option(
    "--rpc-host",
    help="The hostname and port for the taskworker-rpc. When using num-brokers the hostname will be appended with `-{i}` to connect to individual brokers.",
    default="127.0.0.1:50051",
)
@click.option(
    "--num-brokers", help="Number of brokers available to connect to", default=None, type=int
)
@click.option(
    "--rpc-host-list",
    help="Provide a comma separated list of broker RPC host:ports. Use when your broker host names are not compatible with `rpc-host`",
    default=None,
)
@click.option(
    "--max-child-task-count",
    help="Number of tasks child processes execute before being restart",
    default=taskworker_constants.DEFAULT_CHILD_TASK_COUNT,
)
@click.option("--concurrency", help="Number of child processes to create.", default=1)
@click.option(
    "--namespace", help="The dedicated task namespace that this worker processes", default=None
)
@click.option(
    "--result-queue-maxsize",
    help="Size of multiprocessing queue for child process results",
    default=taskworker_constants.DEFAULT_WORKER_QUEUE_SIZE,
)
@click.option(
    "--child-tasks-queue-maxsize",
    help="Size of multiprocessing queue for pending tasks for child processes",
    default=taskworker_constants.DEFAULT_WORKER_QUEUE_SIZE,
)
@click.option(
    "--rebalance-after",
    help="The number of tasks to process before choosing a new broker instance. Requires num-brokers > 1",
    default=taskworker_constants.DEFAULT_REBALANCE_AFTER,
)
@click.option(
    "--processing-pool-name",
    help="The name of the processing pool being used",
    default="unknown",
)
@click.option(
    "--health-check-file-path",
    help="Full path of the health check file if health check is to be enabled",
)
@click.option(
    "--health-check-sec-per-touch",
    help="The number of seconds before touching the health check file",
    default=taskworker_constants.DEFAULT_WORKER_HEALTH_CHECK_SEC_PER_TOUCH,
)
@log_options()
@configuration
def taskworker(**options: Any) -> None:
    """
    Run a taskworker worker
    """
    # Disable gRPC fork handlers before any channel is created; the worker
    # spawns child processes. NOTE(review): presumed rationale (fork-safety
    # deadlocks) -- confirm against gRPC docs.
    os.environ["GRPC_ENABLE_FORK_SUPPORT"] = "0"
    # TODO(mark) restore autoreload
    # Delegates to a separate factory function so it can be re-invoked by a
    # future autoreload implementation (see run_taskworker's docstring).
    run_taskworker(**options)
def run_taskworker(
    rpc_host: str,
    num_brokers: int | None,
    rpc_host_list: str | None,
    max_child_task_count: int,
    namespace: str | None,
    concurrency: int,
    child_tasks_queue_maxsize: int,
    result_queue_maxsize: int,
    rebalance_after: int,
    processing_pool_name: str,
    health_check_file_path: str | None,
    health_check_sec_per_touch: float,
    **options: Any,
) -> None:
    """
    Build and run a TaskWorker.

    Kept separate from the ``taskworker`` click command so that the whole
    worker can be torn down and re-created (e.g. by a future autoreload).
    Never returns: exits the process with the worker's exit code.
    """
    # Imports are deferred so that importing this module stays cheap.
    from sentry.taskworker.client.client import make_broker_hosts
    from sentry.taskworker.worker import TaskWorker

    with managed_bgtasks(role="taskworker"):
        worker_kwargs: dict[str, Any] = dict(
            app_module="sentry.taskworker.runtime:app",
            broker_hosts=make_broker_hosts(
                host_prefix=rpc_host, num_brokers=num_brokers, host_list=rpc_host_list
            ),
            max_child_task_count=max_child_task_count,
            namespace=namespace,
            concurrency=concurrency,
            child_tasks_queue_maxsize=child_tasks_queue_maxsize,
            result_queue_maxsize=result_queue_maxsize,
            rebalance_after=rebalance_after,
            processing_pool_name=processing_pool_name,
            health_check_file_path=health_check_file_path,
            health_check_sec_per_touch=health_check_sec_per_touch,
            **options,
        )
        task_worker = TaskWorker(**worker_kwargs)
        # Raised inside the `with` block so managed_bgtasks still unwinds.
        raise SystemExit(task_worker.start())
# Developer/benchmark tool: fire `repeat` copies of a registered task at a
# kafka broker. No docstring on purpose: click would surface it as `--help`
# text and change CLI output.
@run.command()
@log_options()
@configuration
@click.option(
    "--repeat",
    type=int,
    help="Number of messages to send to the kafka topic",
    default=1,
    show_default=True,
)
@click.option(
    "--kwargs",
    type=str,
    help="Task function keyword arguments",
)
@click.option(
    "--args",
    type=str,
    help="Task function arguments",
)
@click.option(
    "--task-function-path",
    type=str,
    help="The path to the function name of the task to execute",
    required=True,
)
@click.option(
    "--bootstrap-servers",
    type=str,
    help="The bootstrap servers to use for the kafka topic",
    default="127.0.0.1:9092",
)
@click.option(
    "--kafka-topic",
    type=str,
    help="The kafka topic to use for the task",
    default=None,
)
@click.option(
    "--namespace",
    type=str,
    help="The namespace that the task is registered in",
    default=None,
)
@click.option(
    "--extra-arg-bytes",
    type=int,
    help="Generate random args of specified size in bytes",
    default=None,
)
def taskbroker_send_tasks(
    task_function_path: str,
    # --args/--kwargs/--kafka-topic/--namespace default to None, so these
    # parameters are Optional at runtime (annotations fixed accordingly).
    args: str | None,
    kwargs: str | None,
    repeat: int,
    bootstrap_servers: str,
    kafka_topic: str | None,
    namespace: str | None,
    extra_arg_bytes: int | None,
) -> None:
    import ast

    from sentry import options
    from sentry.conf.server import KAFKA_CLUSTERS
    from sentry.utils.imports import import_string

    KAFKA_CLUSTERS["default"]["common"]["bootstrap.servers"] = bootstrap_servers
    # Route the namespace to a specific topic only when *both* are provided;
    # passing just one of --kafka-topic/--namespace is silently ignored.
    if kafka_topic and namespace:
        options.set("taskworker.route.overrides", {namespace: kafka_topic})
    try:
        func = import_string(task_function_path)
    except Exception as e:
        click.echo(f"Error: {e}")
        raise click.Abort()
    # SECURITY: this previously used eval() on the raw CLI strings.
    # --args/--kwargs are expected to be Python literals (e.g. "[1, 2]",
    # "{'k': 'v'}"), so ast.literal_eval parses them without executing
    # arbitrary expressions.
    task_args = [] if not args else ast.literal_eval(args)
    task_kwargs = {} if not kwargs else ast.literal_eval(kwargs)
    if extra_arg_bytes is not None:
        # Random lowercase ASCII padding of the requested length.
        extra_padding_arg = "".join(
            [chr(ord("a") + random.randint(0, ord("z") - ord("a"))) for _ in range(extra_arg_bytes)]
        )
        task_args.append(extra_padding_arg)
    # Progress markers at roughly every 10% of the requested volume.
    checkmarks = {int(repeat * (i / 10)) for i in range(1, 10)}
    for i in range(repeat):
        func.delay(*task_args, **task_kwargs)
        if i in checkmarks:
            click.echo(message=f"{int((i / repeat) * 100)}% complete")
    click.echo(message=f"Successfully sent {repeat} messages.")
@run.command("consumer")
@log_options()
@click.argument(
    "consumer_name",
)
@click.argument("consumer_args", nargs=-1)
@click.option(
    "--topic",
    type=str,
    help="Which physical topic to use for this consumer. This can be a topic name that is not specified in settings. The logical topic is still hardcoded in sentry.consumers.",
)
@click.option(
    "--kafka-slice-id",
    type=int,
    help="Which sliced kafka topic to use. This only applies if the target topic is configured in SLICED_KAFKA_TOPICS.",
)
@click.option(
    "--cluster", type=str, help="Which cluster definition from settings to use for this consumer."
)
@click.option(
    "--consumer-group",
    "group_id",
    required=True,
    help="Kafka consumer group for the consumer.",
)
@click.option(
    "--auto-offset-reset",
    "auto_offset_reset",
    default="earliest",
    type=click.Choice(["earliest", "latest", "error"]),
    help="Position in the commit log topic to begin reading from when no prior offset has been recorded.",
)
@click.option("--join-timeout", type=float, help="Join timeout in seconds.", default=None)
@click.option(
    "--max-poll-interval-ms",
    type=int,
    default=30000,
)
@click.option(
    "--group-instance-id",
    type=str,
    default=None,
)
@click.option(
    "--synchronize-commit-log-topic",
    help="Topic that the Snuba writer is publishing its committed offsets to.",
)
@click.option(
    "--synchronize-commit-group",
    help="Consumer group that the Snuba writer is committing its offset as.",
)
@click.option(
    "--healthcheck-file-path",
    help="A file to touch roughly every second to indicate that the consumer is still alive. See https://getsentry.github.io/arroyo/strategies/healthcheck.html for more information.",
)
@click.option(
    "--enable-dlq/--disable-dlq",
    help="Enable dlq to route invalid messages to the dlq topic. See https://getsentry.github.io/arroyo/dlqs.html#arroyo.dlq.DlqPolicy for more information.",
    is_flag=True,
    default=True,
)
@click.option(
    "--stale-threshold-sec",
    type=click.IntRange(min=120),
    help="Enable backlog queue to route stale messages to the blq topic.",
)
@click.option(
    "--log-level",
    type=click.Choice(["debug", "info", "warning", "error", "critical"], case_sensitive=False),
    help="log level to pass to the arroyo consumer",
)
@click.option(
    "--strict-offset-reset/--no-strict-offset-reset",
    default=True,
    help=(
        "--strict-offset-reset, the default, means that the kafka consumer "
        "still errors in case the offset is out of range.\n\n"
        "--no-strict-offset-reset will use the auto offset reset even in that case. "
        "This is useful in development, but not desirable in production since expired "
        "offsets mean data-loss.\n\n"
    ),
)
@click.option(
    "--max-dlq-buffer-length",
    type=int,
    help="The maximum number of messages to buffer in the dlq before dropping messages. Defaults to unbounded.",
)
@click.option(
    "--quantized-rebalance-delay-secs",
    type=int,
    default=None,
    help="Quantized rebalancing means that during deploys, rebalancing is triggered across all pods within a consumer group at the same time. The value is used by the pods to align their group join/leave activity to some multiple of the delay",
)
@click.option(
    "--profile-consumer-join",
    is_flag=True,
    default=False,
    help="Adds a ProcessingStrategy to the start of a consumer that records a transaction of the consumer's join() method.",
)
@configuration
def basic_consumer(
    consumer_name: str,
    consumer_args: tuple[str, ...],
    topic: str | None,
    kafka_slice_id: int | None,
    quantized_rebalance_delay_secs: int | None,
    **options: Any,
) -> None:
    """
    Launch a "new-style" consumer based on its "consumer name".

    Example:

        sentry run consumer ingest-profiles --consumer-group ingest-profiles

    runs the ingest-profiles consumer with the consumer group ingest-profiles.

    Consumers are defined in 'sentry.consumers'. Each consumer can take
    additional CLI options. Those can be passed after '--':

        sentry run consumer ingest-occurrences --consumer-group occurrence-consumer -- --processes 1

    Consumer-specific arguments can be viewed with:

        sentry run consumer ingest-occurrences --consumer-group occurrence-consumer -- --help
    """
    from sentry.consumers import get_stream_processor
    from sentry.metrics.middleware import add_global_tags
    from sentry.options import get

    # `options` holds the leftover click kwargs (group_id, cluster, ...) that
    # are forwarded to get_stream_processor. log_level is consumed here only.
    log_level = options.pop("log_level", None)
    if log_level is not None:
        logging.getLogger("arroyo").setLevel(log_level.upper())

    add_global_tags(
        set_sentry_tags=True,
        tags={
            "kafka_topic": topic,
            "consumer_group": options["group_id"],
            "kafka_slice_id": kafka_slice_id,
        },
    )

    processor = get_stream_processor(
        consumer_name,
        consumer_args,
        topic=topic,
        kafka_slice_id=kafka_slice_id,
        add_global_tags=True,
        **options,
    )

    # for backwards compat: should eventually be removed
    if not quantized_rebalance_delay_secs and consumer_name == "ingest-generic-metrics":
        # BUG FIX: read the delay from the sentry option store via `get`
        # (imported above, same as the dump_stacktrace lookup below). The
        # previous code called `options.get(...)` -- `options` is the dict of
        # leftover click kwargs, which can never contain a dotted option name,
        # so the fallback always yielded None.
        quantized_rebalance_delay_secs = get("sentry-metrics.synchronized-rebalance-delay")

    dump_stacktrace_on_shutdown = consumer_name in get("consumer.dump_stacktrace_on_shutdown", [])
    run_processor_with_signals(
        processor, quantized_rebalance_delay_secs, dump_stacktrace_on_shutdown
    )
@run.command("dev-consumer")
@click.argument("consumer_names", nargs=-1)
@log_options()
@configuration
def dev_consumer(consumer_names: tuple[str, ...]) -> None:
    """
    Launch multiple "new-style" consumers in the same thread.

    This does the same thing as 'sentry run consumer', but is not configurable,
    hardcodes consumer groups and is highly imperformant.
    """
    from sentry.consumers import get_stream_processor

    # Every consumer shares the same hardcoded, development-only settings.
    shared_settings: dict[str, Any] = {
        "topic": None,
        "cluster": None,
        "group_id": "sentry-consumer",
        "auto_offset_reset": "latest",
        "strict_offset_reset": False,
        "join_timeout": None,
        "max_poll_interval_ms": None,
        "synchronize_commit_group": None,
        "synchronize_commit_log_topic": None,
        "enable_dlq": False,
        "stale_threshold_sec": None,
        "healthcheck_file_path": None,
        "enforce_schema": True,
        "profile_consumer_join": False,
    }
    processors = []
    for name in consumer_names:
        processors.append(get_stream_processor(name, [], **shared_settings))

    def request_shutdown(signum: object, frame: object) -> None:
        # Ask every processor to wind down; the loop below then unwinds.
        for processor in processors:
            processor.signal_shutdown()

    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, request_shutdown)

    # Round-robin all processors on this single thread, one poll step each.
    while True:
        for processor in processors:
            processor._run_once()
# Runs the backpressure service-monitoring loop. No docstring on purpose:
# click would surface one as `--help` text and change CLI output. The import
# is function-local, consistent with the other commands in this file
# (presumably to keep CLI startup cheap -- TODO confirm).
@run.command("backpressure-monitor")
@log_options()
@configuration
def backpressure_monitor() -> None:
    from sentry.processing.backpressure.monitor import start_service_monitoring

    start_service_monitoring()
| QueueSetType |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 18456,
"end": 21533
} | class ____(ArceePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = ArceeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, ArceeForCausalLM
>>> model = ArceeForCausalLM.from_pretrained("meta-arcee/Arcee-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-arcee/Arcee-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
| ArceeForCausalLM |
python | facebook__pyre-check | tools/upgrade/commands/global_version_update.py | {
"start": 622,
"end": 4917
} | class ____(Command):
def __init__(
self,
*,
repository: Repository,
error_source: str,
hash: str,
paths: List[Path],
no_commit: bool,
) -> None:
super().__init__(repository)
self._error_source: str = error_source
self._hash: str = hash
self._paths: List[Path] = paths
self._no_commit: bool = no_commit
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "GlobalVersionUpdate":
return GlobalVersionUpdate(
repository=repository,
error_source=arguments.error_source,
hash=arguments.hash,
paths=arguments.paths,
no_commit=arguments.no_commit,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(GlobalVersionUpdate, GlobalVersionUpdate).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument("hash", help="Hash of new Pyre version")
parser.add_argument(
"--paths",
nargs="*",
help="A list of paths to local Pyre projects.",
default=[],
type=path_exists,
)
parser.add_argument(
"--error-source",
type=ErrorSource,
choices=list(ErrorSource),
default=ErrorSource.GENERATE,
)
parser.add_argument(
"--no-commit", action="store_true", help="Keep changes in working state."
)
def _set_local_overrides(
self, configuration_paths: List[Path], old_version: str
) -> None:
for configuration_path in configuration_paths:
if "mock_repository" in str(configuration_path):
# Skip local configurations we have for testing.
continue
local_configuration = Configuration(configuration_path)
if local_configuration.version:
LOG.info(
"Skipping %s as it already has a custom version field.",
configuration_path,
)
continue
local_configuration.set_version(old_version)
local_configuration.write()
def _suppress_global_errors(self, global_configuration: Configuration) -> None:
if global_configuration.targets or global_configuration.source_directories:
LOG.info("Suppressing errors after upgrading global version.")
command_arguments = CommandArguments(
comment=None,
max_line_length=None,
truncate=False,
unsafe=False,
force_format_unsuppressed=False,
lint=True,
no_commit=True,
should_clean=True,
)
fixme_command = Fixme(
command_arguments,
repository=self._repository,
error_source=self._error_source,
)
fixme_command.run()
self._repository.commit_changes(
commit=(not self._no_commit),
title="Update pyre global configuration version",
summary=f"Automatic upgrade to hash `{self._hash}`",
ignore_failures=True,
)
@override
def run(self) -> None:
global_configuration = Configuration.find_project_configuration()
# Update to new global version.
configuration = Configuration(global_configuration)
old_version = configuration.version
if not old_version:
LOG.error(
"Global configuration at %s has no version field.", global_configuration
)
return
configuration.set_version(self._hash)
configuration.write()
paths = self._paths
configuration_paths = (
[path / ".pyre_configuration.local" for path in paths]
if paths
else [
configuration.get_path()
for configuration in Configuration.gather_local_configurations()
]
)
self._set_local_overrides(configuration_paths, old_version)
self._suppress_global_errors(configuration)
| GlobalVersionUpdate |
python | pytorch__pytorch | test/functorch/test_aotdispatch.py | {
"start": 11612,
"end": 176435
} | class ____(AOTTestCase):
def run_autograd(
self,
f: Callable,
fw_graph_cell: list[Optional[Callable]],
decompositions: Optional[dict],
keep_input_mutations: bool,
dynamic: bool,
):
"""
Runs aot_autograd with the specified settings on f.
"""
if isinstance(f, nn.Module):
compiled_f = aot_module(
f,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=nop,
decompositions=decompositions,
keep_inference_input_mutations=keep_input_mutations,
dynamic=dynamic,
)
else:
compiled_f = aot_function(
f,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=nop,
decompositions=decompositions,
keep_inference_input_mutations=keep_input_mutations,
dynamic=dynamic,
)
return compiled_f
# test_mutation will:
# - Ensure that inputs are non-leaves, so our graphs can mutate them
# - try to mutate outputs of the graph (to ensure that autograd meta is set properly on outputs)
@patch("functorch.compile.config.debug_assert", True)
def verify_aot_autograd(
self,
f,
inp_: Union[Callable, list[Any]],
*,
test_mutation: bool = False,
keep_inp_mutations: bool = False,
decompositions: Optional[dict] = None,
dynamic: bool = False,
# Only active when inp_ is Callable.
# TODO: probably consolidate all tests to make inp a Callable.
make_inputs_subclasses: bool = False,
):
def make_inputs(inp_):
# Some tests pass in a callable for inp, to generate the inputs
# (useful if we want to generate complicated aliasing inputs)
if isinstance(inp_, Callable):
inp_callable = inp_
# The callable should return a tuple of f_inputs, f_graph_inputs
# (The idea is that we might want to compile a function with the graph inputs,
# but test autograd backprop all the way through the actual inputs)
with TwoTensorMode() if make_inputs_subclasses else nullcontext():
inp, graph_inps = inp_callable()
else:
inp = []
# Our input clones need to mimic when inputs are duplicates of one another
dupes_map = {}
for i, x in enumerate(inp_):
if x in dupes_map:
x_dupe_idx = dupes_map[x]
inp.append(inp[x_dupe_idx])
else:
dupes_map[x] = i
if not isinstance(x, torch.Tensor):
x_copy = x
else:
x_copy = x.detach().clone().requires_grad_(x.requires_grad)
if x.requires_grad and not x.is_leaf:
x_copy = x_copy.clone()
inp.append(x_copy)
if test_mutation:
# For graphs where we mutate inputs, need our test to make sure inputs aren't leaves
graph_inps = [x.add(1) for x in inp]
else:
graph_inps = inp
return inp, graph_inps
def check_results(
ref_results,
test_results,
ref_graph_inps,
test_graph_inps,
ref_inp,
test_inp,
):
ref_out, ref_grad = ref_results
test_out, test_grad = test_results
self.assertEqual(ref_grad, test_grad)
if isinstance(ref_out, torch.Tensor):
self.assertTrue(isinstance(test_out, torch.Tensor))
ref_out, test_out = [ref_out], [test_out]
for ref_o, test_o in zip(ref_out, test_out):
if isinstance(ref_o, torch.Tensor):
self.assertEqual(ref_o.requires_grad, test_o.requires_grad)
self.assertEqual(ref_o.is_leaf, test_o.is_leaf)
ref_is_view_of_non_interm = is_in_base(
ref_o, ref_graph_inps
) or is_in_base(ref_o, ref_out)
test_is_view_of_non_interm = is_in_base(
test_o, test_graph_inps
) or is_in_base(test_o, test_out)
self.assertEqual(
ref_is_view_of_non_interm, test_is_view_of_non_interm
)
self.assertEqual(ref_o, test_o)
if test_mutation:
# This tests that autograd meta is set properly on the output we can
# mutate it.
ref_o.add_(2)
test_o.add_(2)
self.assertEqual(ref_o, test_o)
# Reverse the modification
ref_o.sub_(2)
test_o.sub_(2)
self.assertEqual(ref_o, test_o)
for ref_i, test_i in zip(ref_inp, test_inp):
if isinstance(ref_i, torch.Tensor):
self.assertEqual(ref_i.requires_grad, test_i.requires_grad)
self.assertEqual(ref_i, test_i)
for keep_input_mutations in [True] if keep_inp_mutations else [True, False]:
inp, graph_inps = make_inputs(inp_)
test_inp, test_graph_inps = make_inputs(inp_)
fw_graph_cell = [None]
compiled_f = self.run_autograd(
f, fw_graph_cell, decompositions, keep_input_mutations, dynamic
)
ref_results = outs_and_grads(f, graph_inps, inp)
test_results = outs_and_grads(compiled_f, test_graph_inps, test_inp)
check_results(
ref_results, test_results, graph_inps, test_graph_inps, inp, test_inp
)
if isinstance(self, TestAOTAutogradWithCache):
# When testing with cache, run compiled_f a second time
cached_inp, cached_graph_inps = make_inputs(inp_)
cached_results = outs_and_grads(
compiled_f, cached_graph_inps, cached_inp
)
check_results(
ref_results,
cached_results,
graph_inps,
cached_graph_inps,
inp,
cached_inp,
)
return fw_graph_cell[0]
def test_non_tensor_and_none_inputs(self):
# int, None, Tensor
def f(a, b, c):
return a * c
inp = [2, None, torch.ones(3, 3, dtype=torch.float32, requires_grad=True)]
self.verify_aot_autograd(f, inp)
inp = [2, None, torch.ones(3, 3, dtype=torch.float32, requires_grad=False)]
self.verify_aot_autograd(f, inp)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
inp = [torch.randn(3, 3, requires_grad=False), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
# Test for bug occurring at the intersection of fake tensors & functionalization.
def test_squeeze_mutation(self):
def f(a):
b = a.clone().squeeze(-1)
b.add_(1.0)
return a + b
inp = [torch.randn(3, 1, requires_grad=True)]
self.verify_aot_autograd(f, inp, dynamic=True)
inp = [torch.randn(3, 1, requires_grad=False)]
self.verify_aot_autograd(f, inp, dynamic=True)
def test_complex_linear(self):
# https://github.com/pytorch/pytorch/issues/93424
inp = [torch.randn(1, 10, 10, dtype=torch.complex64)]
class F(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(10, 10, dtype=torch.complex64)
def forward(self, x):
return self.linear(x).sum().abs()
self.verify_aot_autograd(F(), inp)
def test_embedding_bag_view_dynamic(self):
# Backwards pass tries to wrap a sparse tensor in a FunctionalTensorWrapper;
# test that this works even though the sparse tensor has no storage.
class F(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.emb = torch.nn.EmbeddingBag(100, 8, sparse=True)
def forward(self, x, y):
return self.emb(x, y).view(-1)
x = torch.arange(3)
y = torch.arange(3)
self.verify_aot_autograd(F(), [x, y], dynamic=False)
self.verify_aot_autograd(F(), [x, y], dynamic=True)
def test_input_mutation_simple(self):
def f(a):
a.mul_(2)
return a * 3
inp = [torch.ones(3, 3, requires_grad=True)]
fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True)
# Things to note:
# - the extra clone is because we need to pass the pre-mutated input to grad(),
# but autograd operates above functionalization so we need to manually clone.
# Hopefully backends can optimize this easily.
# - The extra return arg is because the compiled forward returns (mutated inputs + outputs)
self.assertExpectedInline(
fw_graph.code.strip(),
"""\
def forward(self, primals_1):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
mul = torch.ops.aten.mul.Tensor(clone, 2); clone = None
mul_1 = torch.ops.aten.mul.Tensor(mul, 3)
return (mul, mul_1)""",
)
def test_input_mutation_set__input_mutation(self):
def f(a):
b = torch.arange(9, dtype=a.dtype).reshape(3, 3)
with torch.no_grad():
a.set_(b)
return a * b
inp = [torch.ones(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
inp = [torch.ones(3, 3, requires_grad=False)]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
def test_set__steals_view_chain(self):
def f(a, b):
a_ = a.mul(2)
b_ = b.mul(2)
b_slice = b_[1].view(3, 3)
# a_clone should inherit the view chain from b_slice
a_.set_(b_slice)
# Also mutates b_,
a_.view(-1).mul_(2)
return a_ * b_slice
inp = [
torch.ones(3, 3, requires_grad=False),
torch.zeros(3, 9, requires_grad=False),
]
self.verify_aot_autograd(f, inp, keep_inp_mutations=True)
def _compile_autocast(self, device, *, forward_autocast):
with torch.library._scoped_library("mylib", "FRAGMENT") as m:
m.define("foo(Tensor x) -> Tensor")
m.impl("foo", torch.clone, "CompositeExplicitAutograd")
def autocast(x):
return x + 1
m.impl("foo", autocast, "AutocastCPU")
m.impl("foo", autocast, "AutocastCUDA")
foo = torch.ops.mylib.foo.default
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return foo(x)
@staticmethod
def backward(ctx, grad):
(x,) = ctx.saved_tensors
return grad * foo(x)
def fn(x):
with torch.amp.autocast(device, enabled=False):
return Foo.apply(x)
x = torch.tensor(0.0, device=device, requires_grad=True)
if forward_autocast:
with (
torch.amp.autocast(device),
torch._dynamo.config.patch(recompile_limit=999),
):
out = torch.compile(fn, fullgraph=True, backend="aot_eager")(x)
else:
with torch._dynamo.config.patch(recompile_limit=999):
out = torch.compile(fn, fullgraph=True, backend="aot_eager")(x)
(grad,) = torch.autograd.grad(out, x)
return out, grad
@torch._functorch.config.patch(backward_pass_autocast="same_as_forward")
def test_backward_pass_autocast_on(self):
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
for device in devices:
out, grad = self._compile_autocast(device, forward_autocast=True)
self.assertEqual(out, torch.zeros_like(out))
self.assertEqual(grad, torch.ones_like(grad))
@torch._functorch.config.patch(backward_pass_autocast="off")
def test_backward_pass_autocast_off(self):
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
for device in devices:
out, grad = self._compile_autocast(device, forward_autocast=True)
self.assertEqual(out, torch.zeros_like(out))
self.assertEqual(grad, torch.zeros_like(grad))
@torch._functorch.config.patch(backward_pass_autocast="off")
def test_backward_pass_autocast_custom(self):
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
for device in devices:
with torch._functorch.config.patch(
backward_pass_autocast=[{"device_type": device}]
):
out, grad = self._compile_autocast(device, forward_autocast=False)
self.assertEqual(out, torch.zeros_like(out))
self.assertEqual(grad, torch.ones_like(grad))
@skipIfDynamoInput(
"Test doesn't make sense with dynamo, which changes order of mutations"
)
def test_set__and_data_mutation_good(self):
def f(a, b):
# The data mutation happens *after* the set_(). This is ok (see the graph below)
with torch.no_grad():
a.set_(b)
b.mul_(2)
return a + b
inp = [
torch.ones(3, 3, requires_grad=True),
torch.ones(3, 3, requires_grad=True),
]
fw_graph = self.verify_aot_autograd(
f, inp, test_mutation=True, keep_inp_mutations=True
)
inp = [
torch.ones(3, 3, requires_grad=False),
torch.zeros(3, 3, requires_grad=False),
]
self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
# Important things to note:
# - "return a.set_(b)" desugars into "return b"
# - Both a and b are recorded as experiencing mutations,
# which is why we see "b_updated" (output of the mul) twice in the graph outputs.
# a is recorded as both a data mutation and a metadata mutation (due to set_ swapping its storage).
# - the runtime epilogue for a is "a.set_(mul)"
# - the runtime epilogue for b is "b.copy_(mul)"
self.assertExpectedInline(
fw_graph.code.strip(),
"""\
def forward(self, primals_1, primals_2):
mul = torch.ops.aten.mul.Tensor(primals_2, 2)
add = torch.ops.aten.add.Tensor(mul, mul)
set_ = torch.ops.aten.set_.source_Tensor(primals_1, mul); primals_1 = set_ = None
copy_ = torch.ops.aten.copy_.default(primals_2, mul); primals_2 = mul = copy_ = None
return (add,)""",
)
# This is a (hopefully) extremely rare case that is difficult to handle,
# so we ban it.
# https://github.com/pytorch/pytorch/issues/126236
# https://github.com/pytorch/pytorch/pull/126113
@xfailIfTorchDynamo
def test_set__and_data_mutation_bad(self):
def f(a):
a_view = a.view(-1)
tmp = torch.ones(3, 3, requires_grad=True)
# Now, any mutations on either tmp
# will be tracked as graph input mutations.
with torch.no_grad():
a.set_(tmp)
# BAD: a_view is now detached from every graph input,
# so we won't recognize that this caused an input mutation!
a_view.mul_(2)
return a + tmp
inp = [torch.ones(3, 3, requires_grad=True)]
with self.assertRaisesRegex(
RuntimeError, "cannot mutate tensors with frozen storage"
):
self.verify_aot_autograd(
f, inp, test_mutation=True, keep_inp_mutations=True
)
@skipIfDynamoInput(
"Test doesn't make sense with dynamo, which changes order of mutations"
)
def test_set__not_allowed(self):
def f(a, b):
with torch.no_grad():
a.set_(b)
# Mutating a will change a's grad_fn, which requires us to replay the mutation outside of the graph.
# We currently ban this today, when the input also received a set_() input mutation.
a.mul_(2)
return a + b
inp = [
torch.ones(3, 3, requires_grad=True),
torch.ones(3, 3, requires_grad=True),
]
with self.assertRaisesRegex(
AssertionError, "but the input has other mutations that we cannot"
):
self.verify_aot_autograd(
f, inp, test_mutation=True, keep_inp_mutations=True
)
def test_input_mutation_set__nop(self):
    """Two set_() calls that undo each other should functionalize away
    entirely: no set_() in the graph, and no extra mutated-input output."""
    def f(a):
        b = torch.arange(9, dtype=a.dtype)
        a_old = torch.ops.aten.alias.default(a)
        with torch.no_grad():
            a.set_(b)
            a.set_(a_old)
        return a + b.reshape(3, 3)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(
        f, inp, test_mutation=True, keep_inp_mutations=True
    )
    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True, keep_inp_mutations=True)
    # Things to note:
    # - There are no set_() calls in the graph (we functionalize a.set_(b) into "b")
    # - There is only **1** graph output. We properly realized that the two set_() calls
    #   undo each other, and so effectively no inputs are mutated.
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    arange = torch.ops.aten.arange.default(9, dtype = torch.float32, device = device(type='cpu'), pin_memory = False)
    alias = torch.ops.aten.alias.default(primals_1);  primals_1 = None
    view = torch.ops.aten.view.default(arange, [3, 3]);  arange = None
    add = torch.ops.aten.add.Tensor(alias, view);  alias = view = None
    return (add,)""",
    )
def test_input_mutation_simple_with_none_and_nontensor(self):
    """aot_function handles argument lists that mix a Tensor, a None, and
    a plain int, with and without requires_grad."""
    def f(a, b, c):
        return a * c
    compiled = aot_function(f, nop)
    for needs_grad in (True, False):
        args = [torch.ones(3, 3, requires_grad=needs_grad), None, 3]
        expected = f(*args)
        actual = compiled(*args)
        self.assertEqual(expected, actual)
# https://github.com/pytorch/pytorch/issues/93363
def test_mutates_input_noncontiguous(self):
    """An in-place add through a strided (non-contiguous) view of an input
    must be reflected back onto the base tensor after compilation."""
    def f(a):
        a.add_(1)
        return ()
    compiled = aot_function(f, nop)
    def make_pair():
        # `+ 0` makes the base a non-leaf so the in-place op is legal.
        base = torch.ones(4, requires_grad=True) + 0
        return base, base[0::2]
    ref_base, ref_view = make_pair()
    test_base, test_view = make_pair()
    f(ref_view)
    compiled(test_view)
    self.assertEqual(ref_base, test_base)
def test_input_mutation_modifies_autograd_meta_of_aliases(self):
    """Mutating an input must also update the autograd metadata (version
    counter, grad_fn) of pre-existing aliases of that input, and gradients
    flowing through those aliases must match eager."""
    def f(a):
        a.mul_(2)
        out = a + 1
        return out.detach()
    x_ref = torch.ones(3, 3, requires_grad=True).clone()
    x_ref_view = x_ref.view(3, 3)
    x_test = torch.ones(3, 3, requires_grad=True).clone()
    x_test_view = x_test.view(3, 3)
    f_compiled = aot_function(f, nop, keep_inference_input_mutations=True)
    f(x_ref)
    f_compiled(x_test)
    # f will mutate aliases of the input, including its autograd metadata!
    # y.grad_fn is AsStridedBackward
    self.assertEqual(x_ref_view, x_test_view)
    self.assertEqual(x_ref_view._version, x_test_view._version)
    self.assertEqual(x_ref_view.grad_fn.__class__, x_test_view.grad_fn.__class__)
    # Test the actual gradients are correct
    (x_ref * x_ref_view).sum().backward()
    (x_test * x_test_view).sum().backward()
    self.assertEqual(x_ref.grad, x_test.grad)
    self.assertEqual(x_ref_view.grad, x_test_view.grad)
def test_nested_subclasses(self):
    """Compiling with a doubly-nested TwoTensor input preserves the nested
    subclass structure on both the output and the gradient."""
    @torch.compile(backend="aot_eager")
    def f(x):
        return x.sin().cos()
    base = torch.ones(4, requires_grad=True)
    clones = [base.detach().clone().requires_grad_() for _ in range(3)]
    nested = TwoTensor(
        TwoTensor(base, clones[0]), TwoTensor(clones[1], clones[2])
    )
    out = f(nested)
    self.assertTrue(isinstance(out, TwoTensor))
    for half in (out.a, out.b):
        self.assertTrue(isinstance(half, TwoTensor))
        self.assertTrue(isinstance(half.a, torch.Tensor))
        self.assertTrue(isinstance(half.b, torch.Tensor))
    out.sum().backward()
    grad = nested.grad
    self.assertTrue(isinstance(grad, TwoTensor))
    self.assertTrue(isinstance(grad.a, TwoTensor))
    self.assertTrue(isinstance(grad.b, TwoTensor))
def test_nested_subclasses_non_nested_grad(self):
    """Backward through a sum of a compiled non-nested output and a nested
    TwoTensor should fail with the metadata-guess error (the guessed
    tangent metadata does not match)."""
    @torch.compile(backend="aot_eager")
    def f(x):
        return x.sin().cos()
    a = torch.ones(4, requires_grad=True)
    a2 = a.detach().clone().requires_grad_()
    a3 = a.detach().clone().requires_grad_()
    a4 = a.detach().clone().requires_grad_()
    new_aa = TwoTensor(a3, a4)
    aa = TwoTensor(a, a2)
    aa2 = aa.detach().clone().requires_grad_()
    aaaa = TwoTensor(aa, aa2)
    out = f(new_aa)
    new_out = out + aaaa
    with self.assertRaisesRegex(
        RuntimeError,
        """
During the backward, we encountered a tensor subclass where we guessed its
metadata incorrectly.
""",  # noqa: F541
    ):
        new_out.sum().backward()
def test_nested_subclasses_non_homogenous(self):
    """ConstantExtraMetadataTensor wrapping a TwoTensor: the compiled
    output/grad comes back with the subclass nesting inverted (TwoTensor of
    ConstantExtraMetadataTensor), and values must match eager."""
    def f(x):
        x_elem = x.elem
        x_metadata = x.constant_attribute
        return x_metadata * x_elem * x.sin().cos()
    a = torch.ones(4, requires_grad=True)
    a2 = a.detach().clone().requires_grad_()
    a3 = a.detach().clone().requires_grad_()
    a4 = a.detach().clone().requires_grad_()
    aa = TwoTensor(a, a2)
    aa2 = TwoTensor(a3, a4)
    custom_aa = ConstantExtraMetadataTensor(aa)
    custom_aa.constant_attribute = 6
    custom_aa2 = ConstantExtraMetadataTensor(aa2)
    custom_aa2.constant_attribute = 6
    out_eager = f(custom_aa)
    compiled_f = torch.compile(f, backend="aot_eager")
    out = compiled_f(custom_aa2)
    self.assertTrue(isinstance(out, TwoTensor))
    self.assertTrue(isinstance(out.a, ConstantExtraMetadataTensor))
    self.assertTrue(isinstance(out.b, ConstantExtraMetadataTensor))
    self.assertTrue(torch.allclose(out_eager, out))
    out_eager.sum().backward()
    out.sum().backward()
    self.assertTrue(torch.allclose(custom_aa.grad, custom_aa2.grad))
    self.assertTrue(isinstance(custom_aa2.grad, TwoTensor))
    self.assertTrue(isinstance(custom_aa2.grad.a, ConstantExtraMetadataTensor))
    self.assertTrue(isinstance(custom_aa2.grad.b, ConstantExtraMetadataTensor))
def test_subclasses_mixed(self):
    """Mixing two different subclass inputs (ConstantExtraMetadataTensor
    and TwoTensor, no grad) and returning a freshly-constructed TwoTensor:
    compiled output matches eager in value and subclass structure."""
    def f(x, y):
        x_metadata = x.constant_attribute
        out_a = x_metadata * x * y.a
        out_b = x * y.a * y.b
        return TwoTensor(out_a, out_b)
    a = torch.ones(4, requires_grad=False)
    a2 = a.clone()
    custom_a = ConstantExtraMetadataTensor(a)
    custom_a.constant_attribute = 5
    custom_a2 = ConstantExtraMetadataTensor(a2)
    custom_a2.constant_attribute = 5
    b = torch.ones(4, requires_grad=False)
    b2 = b.clone()
    b3 = b.clone()
    b4 = b.clone()
    bb = TwoTensor(b, b2)
    bb2 = TwoTensor(b3, b4)
    out_eager = f(custom_a, bb)
    compiled_f = torch.compile(f, backend="aot_eager")
    out = compiled_f(custom_a2, bb2)
    self.assertTrue(torch.allclose(out_eager, out))
    self.assertTrue(isinstance(out, TwoTensor))
    self.assertTrue(isinstance(out.a, ConstantExtraMetadataTensor))
    self.assertTrue(isinstance(out.b, ConstantExtraMetadataTensor))
def test_subclasses_mixed_mode(self):
    """A TorchDispatchMode that wraps every op result in
    ConstantExtraMetadataTensor, applied around a compiled call on a
    TwoTensor input: output and gradient keep the mode's wrapping."""
    def f(x):
        return x.sin().cos()
    class AddConstantMetadataMode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            out = func(*args, **(kwargs or {}))
            # Only wrap when the subclass did not already handle the op,
            # to avoid double-wrapping.
            if ConstantExtraMetadataTensor not in types:
                out = ConstantExtraMetadataTensor(out)
                out.constant_attribute = 5
            return out
    a = torch.ones(4, requires_grad=True)
    a2 = a.detach().clone().requires_grad_()
    a3 = a.detach().clone().requires_grad_()
    a4 = a.detach().clone().requires_grad_()
    aa = TwoTensor(a, a2)
    aa2 = TwoTensor(a3, a4)
    with AddConstantMetadataMode():
        out_eager = f(aa)
        compiled_f = torch.compile(f, backend="aot_eager")
        with AddConstantMetadataMode():
            out = compiled_f(aa2)
    self.assertTrue(isinstance(out, ConstantExtraMetadataTensor))
    self.assertTrue(isinstance(out.elem, TwoTensor))
    self.assertTrue(torch.allclose(out_eager, out))
    out_eager.sum().backward()
    out.sum().backward()
    self.assertTrue(torch.allclose(aa.grad, aa2.grad))
    self.assertTrue(isinstance(aa2.grad, ConstantExtraMetadataTensor))
    self.assertTrue(isinstance(aa2.grad.elem, TwoTensor))
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
def test_custom_tensor_metadata(self):
    """Doubly-nested ConstantExtraMetadataTensor input whose inner custom
    metadata participates in the computation: compiled result matches
    eager and the gradient keeps the nested custom subclass structure."""
    def f(x):
        x_elem = x.elem
        x_elem_elem = x_elem.elem
        x_elem_metadata = x_elem.constant_attribute
        return x * x_elem * x_elem_elem * x_elem_metadata
    a = torch.ones(4, requires_grad=True)
    custom_a = ConstantExtraMetadataTensor(a)
    custom_a.constant_attribute = 6
    custom_aa = ConstantExtraMetadataTensor(custom_a)
    custom_aa.constant_attribute = 4
    custom_aa_compile = custom_aa.detach().clone().requires_grad_()
    # clone() does not carry the custom attribute; restore it by hand.
    custom_aa_compile.elem.constant_attribute = 6
    out_eager = f(custom_aa)
    compiled_f = torch.compile(f, backend="aot_eager")
    out = compiled_f(custom_aa_compile)
    self.assertTrue(torch.allclose(out_eager, out))
    out.sum().backward()
    self.assertTrue(isinstance(custom_aa_compile.grad, ConstantExtraMetadataTensor))
    self.assertTrue(
        isinstance(custom_aa_compile.grad.elem, ConstantExtraMetadataTensor)
    )
def test_nested_subclasses_complicated_inps(self):
    """Multiple nested TwoTensor inputs plus a plain tensor, with the
    function mixing whole-subclass ops and inner-component (x.a / y.b)
    ops: outputs and per-component gradients must match eager."""
    def f(x, y, z):
        temp = x + y
        temp_plain = x.a + y.b
        res = temp.sum() + temp_plain.sum()
        return x.sin().cos() + res
    x = torch.ones(4, requires_grad=True)
    x2 = x.detach().clone().requires_grad_()
    xx = TwoTensor(x, x2)
    xx2 = xx.detach().clone().requires_grad_()
    x_nested = TwoTensor(xx, xx2)
    x_nested_compile = x_nested.detach().clone().requires_grad_()
    y_nested = x_nested.detach().clone().requires_grad_()
    y_nested_compile = y_nested.detach().clone().requires_grad_()
    z = x.detach().clone().requires_grad_()
    z_compile = z.detach().clone().requires_grad_()
    out_eager = f(x_nested, y_nested, z)
    compiled_f = torch.compile(f, backend="aot_eager")
    out = compiled_f(x_nested_compile, y_nested_compile, z_compile)
    self.assertTrue(torch.allclose(out_eager, out))
    self.assertTrue(isinstance(out, TwoTensor))
    self.assertTrue(isinstance(out.a, TwoTensor))
    self.assertTrue(isinstance(out.b, TwoTensor))
    self.assertTrue(isinstance(out.a.a, torch.Tensor))
    self.assertTrue(isinstance(out.a.b, torch.Tensor))
    self.assertTrue(isinstance(out.b.a, torch.Tensor))
    self.assertTrue(isinstance(out.b.b, torch.Tensor))
    out.sum().backward()
    out_eager.sum().backward()
    self.assertTrue(isinstance(x_nested_compile.grad, TwoTensor))
    self.assertTrue(isinstance(x_nested_compile.grad.a, TwoTensor))
    self.assertTrue(isinstance(x_nested_compile.grad.b, TwoTensor))
    self.assertTrue(isinstance(y_nested_compile.grad, TwoTensor))
    self.assertTrue(isinstance(y_nested_compile.grad.a, TwoTensor))
    self.assertTrue(isinstance(y_nested_compile.grad.b, TwoTensor))
    self.assertTrue(torch.allclose(x_nested_compile.grad.a.a, x_nested.grad.a.a))
    self.assertTrue(torch.allclose(x_nested_compile.grad.a.b, x_nested.grad.a.b))
    self.assertTrue(torch.allclose(y_nested_compile.grad.a.a, y_nested.grad.a.a))
    self.assertTrue(torch.allclose(y_nested_compile.grad.a.b, y_nested.grad.a.b))
@unittest.skipIf(IS_WINDOWS, "Windows isn't supported for this case")
def test_nested_subclasses_complicated_inps_mixed(self):
    """Mixes a nested TwoTensor input with a doubly-nested
    ConstantExtraMetadataTensor input in one compiled function; values and
    gradients must match eager for both inputs."""
    def f(x, y):
        y_elem = y.elem
        y_elem_elem = y_elem.elem
        y_elem_metadata = y_elem.constant_attribute
        return y * y_elem * y_elem_elem * y_elem_metadata + x
    x = torch.ones(4, requires_grad=True)
    x2 = x.detach().clone().requires_grad_()
    xx = TwoTensor(x, x2)
    xx2 = xx.detach().clone().requires_grad_()
    x_nested = TwoTensor(xx, xx2)
    x_nested_compile = x_nested.detach().clone().requires_grad_()
    a = torch.ones(4, requires_grad=True)
    custom_a = ConstantExtraMetadataTensor(a)
    custom_a.constant_attribute = 6
    custom_aa = ConstantExtraMetadataTensor(custom_a)
    custom_aa.constant_attribute = 4
    custom_aa_compile = custom_aa.detach().clone().requires_grad_()
    # clone() does not carry the custom attributes; restore them by hand.
    custom_aa_compile.constant_attribute = 4
    custom_aa_compile.elem.constant_attribute = 6
    compiled_f = torch.compile(f, backend="aot_eager")
    out_eager = f(x_nested, custom_aa)
    out = compiled_f(x_nested_compile, custom_aa_compile)
    self.assertTrue(torch.allclose(out_eager, out))
    out.sum().backward()
    out_eager.sum().backward()
    self.assertTrue(torch.allclose(x_nested_compile.grad, x_nested.grad))
    self.assertTrue(torch.allclose(custom_aa_compile.grad, custom_aa.grad))
def test_composite_impl_compile(self):
    """nn.Linear (a composite op) decomposes to t + addmm in the traced
    graph; under inference_mode the graph drops the saved-for-backward
    outputs."""
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)
        def forward(self, a):
            return self.linear(a)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(Foo(), inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=False)]
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2, primals_3):
    t = torch.ops.aten.t.default(primals_1);  primals_1 = None
    addmm = torch.ops.aten.addmm.default(primals_2, primals_3, t);  primals_2 = None
    return (addmm, primals_3, t)""",
    )
    with torch.inference_mode():
        fw_graph = self.verify_aot_autograd(Foo(), inp, test_mutation=True)
        inp = [torch.ones(3, 3, requires_grad=False)]
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, arg0_1, arg1_1, arg2_1):
    t = torch.ops.aten.t.default(arg0_1);  arg0_1 = None
    addmm = torch.ops.aten.addmm.default(arg1_1, arg2_1, t);  arg1_1 = arg2_1 = t = None
    return (addmm,)""",
        )
def test_outputs_are_aliased(self):
    """When the graph returns a tensor together with a view of it, the
    aliasing relationship must survive compilation: mutating one output
    is visible through the other."""
    def f(a):
        b = a.mul(2)
        c = b.view(-1)
        return b, c
    compiled = aot_function(f, nop)
    for needs_grad in (True, False):
        inp = torch.ones(3, requires_grad=needs_grad)
        ref_b, ref_c = f(inp)
        test_b, test_c = compiled(inp)
        self.assertEqual(ref_b, test_b)
        self.assertEqual(ref_c, test_c)
        # Mutate the base output in-place; the view output must observe it.
        ref_b.mul_(3)
        test_b.mul_(3)
        self.assertEqual(ref_b, test_b)
        self.assertEqual(ref_c, test_c)
def test_input_mutation_is_output(self):
    """When a mutated input is also returned, the functional graph clones
    the input, applies the mutation, and returns the updated value twice
    (once as the mutated-input slot, once as the user output)."""
    def f(a):
        a.mul_(2)
        return a
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1);  primals_1 = None
    mul = torch.ops.aten.mul.Tensor(clone, 2);  clone = None
    return (mul, mul)""",
    )
def test_input_mutation_multiple(self):
    """Two of three inputs are mutated: the graph clones and returns
    updated values only for the mutated inputs (a and c), plus the user
    output."""
    def f(a, b, c):
        a.mul_(2)
        c.mul_(2)
        return a + b + c
    def create_inp(req_grad):
        # Fresh inputs per call, since f mutates them in-place.
        return [
            torch.ones(3, 3, requires_grad=req_grad),
            torch.ones(3, 3, requires_grad=req_grad),
            torch.ones(3, 3, requires_grad=req_grad),
        ]
    self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
    fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2, primals_3):
    clone = torch.ops.aten.clone.default(primals_1);  primals_1 = None
    clone_1 = torch.ops.aten.clone.default(primals_3);  primals_3 = None
    mul = torch.ops.aten.mul.Tensor(clone, 2);  clone = None
    mul_1 = torch.ops.aten.mul.Tensor(clone_1, 2);  clone_1 = None
    add = torch.ops.aten.add.Tensor(mul, primals_2);  primals_2 = None
    add_1 = torch.ops.aten.add.Tensor(add, mul_1);  add = None
    return (mul, mul_1, add_1)""",
    )
def test_input_mutation_return(self):
    """An out= variant (sin(a, out=b)) keeps the input mutation in the
    inference graph as an explicit copy_ onto the out argument."""
    def f(a, b):
        return torch.sin(a, out=b)
    inp = [torch.randn(3, 3), torch.ones(3, 3)]
    fw_graph = self.verify_aot_autograd(
        f, inp, test_mutation=True, keep_inp_mutations=True
    )
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, arg0_1, arg1_1):
    sin = torch.ops.aten.sin.default(arg0_1);  arg0_1 = None
    copy_ = torch.ops.aten.copy_.default(arg1_1, sin);  arg1_1 = sin = None
    return (copy_,)""",
    )
def test_input_mutation_metadata(self):
    """A metadata-only mutation on an input (transpose_) is handled for
    both grad-requiring and plain inputs."""
    def f(a, b):
        a.transpose_(1, 0)
        return a + b
    for needs_grad in (True, False):
        # Fresh inputs each round, since f mutates them in-place.
        inps = [
            torch.ones(3, 3, requires_grad=needs_grad),
            torch.ones(3, 3, requires_grad=needs_grad),
        ]
        self.verify_aot_autograd(f, inps, test_mutation=True)
@parametrize("backend", ["aot_eager", "inductor"])
@parametrize("view_replay_for_aliased_outputs", [False, True])
@parametrize("dynamic_shapes", [False, True])
def test_alias_of_intermediate_detach(
    self, backend, view_replay_for_aliased_outputs, dynamic_shapes
):
    """Returning both a.detach() and a, where a is a view of an
    intermediate: the detached output must not require grad, both outputs
    must share storage, and gradients must match eager."""
    with patch(
        "torch._functorch.config.view_replay_for_aliased_outputs",
        view_replay_for_aliased_outputs,
    ):
        def fn(x):
            x = x + 1
            a = x.transpose(0, 1)
            return a.detach(), a
        def inp_fn():
            t = torch.ones(3, 3, requires_grad=True)
            if dynamic_shapes:
                torch._dynamo.mark_dynamic(t, 0)
                torch._dynamo.mark_dynamic(t, 1)
            return t
        x_ref = inp_fn()
        y_ref = fn(x_ref)
        x = inp_fn()
        y = torch.compile(fn, backend=backend, fullgraph=True)(x)
        self.assertEqual(y_ref, y)
        y0, y1 = y
        self.assertFalse(y0.requires_grad)
        self.assertTrue(y1.requires_grad)
        # Check that detach and diff view points to the same intermediate tensor storage
        self.assertEqual(y0.data_ptr(), y1.data_ptr())
        self.assertTrue(y1._is_view())
        sum(y_ref).sum().backward()
        sum(y).sum().backward()
        self.assertEqual(x_ref.grad, x.grad)
def test_input_mutation_storage_resize_up(self):
    """An input that starts with zero-size storage, gets resized up, and is
    filled under no_grad: the graph keeps both the resize_storage_bytes_
    and the final copy_ onto the input."""
    def f(a):
        torch.ops.inductor.resize_storage_bytes_(a, 32)
        # float32, 4 bytes per element, 32 bytes == 8 elements
        with torch.no_grad():
            a.copy_(torch.ones(8))
        return a + 1
    inp = torch.zeros(8, requires_grad=True)
    # Input starts with zero-size-storage
    inp.untyped_storage().resize_(0)
    fw_graph_cell = [None]
    compiled_f = aot_function(
        f,
        fw_compiler=make_boxed_compiler(
            partial(extract_graph, graph_cell=fw_graph_cell)
        ),
        bw_compiler=nop,
        decompositions={},
        keep_inference_input_mutations=True,
        dynamic=False,
    )
    compiled_f(inp)
    # Final functionalized graph has two mutation ops:
    # (1) a resize_() to resize input tensor up
    # (2) a copy_() to fill in the resized input with valid data
    self.assertExpectedInline(
        fw_graph_cell[0].code.strip(),
        """\
def forward(self, primals_1):
    resize_storage_bytes_ = torch.ops.inductor.resize_storage_bytes_.default(primals_1, 32);  resize_storage_bytes_ = None
    ones = torch.ops.aten.ones.default([8], device = device(type='cpu'), pin_memory = False)
    copy = torch.ops.aten.copy.default(primals_1, ones);  ones = None
    add = torch.ops.aten.add.Tensor(copy, 1)
    copy_ = torch.ops.aten.copy_.default(primals_1, copy);  primals_1 = copy = copy_ = None
    return (add,)""",
    )
def test_input_mutation_storage_resize_down(self):
    """Resizing an input's storage down to zero at the end of the graph:
    only the resize_ appears; no copy_ is emitted since the final input
    has no storage to fill."""
    def f(a):
        out = a.sin()
        torch.ops.inductor.resize_storage_bytes_(a, 0)
        return out
    inp = torch.zeros(8, requires_grad=True)
    fw_graph_cell = [None]
    compiled_f = aot_function(
        f,
        fw_compiler=make_boxed_compiler(
            partial(extract_graph, graph_cell=fw_graph_cell)
        ),
        bw_compiler=nop,
        decompositions={},
        keep_inference_input_mutations=True,
        dynamic=False,
    )
    compiled_f(inp)
    # Final functionalized graph has one mutation ops:
    # (1) a resize_() to resize input tensor down
    # Even though there was technically a "data mutation" on the input (from a.copy_()),
    # We don't include it in the graph since the final input size has zero storage
    self.assertExpectedInline(
        fw_graph_cell[0].code.strip(),
        """\
def forward(self, primals_1):
    sin = torch.ops.aten.sin.default(primals_1)
    resize_storage_bytes_ = torch.ops.inductor.resize_storage_bytes_.default(primals_1, 0);  resize_storage_bytes_ = None
    return (sin, primals_1)""",
    )
# def test_input_mutation_storage_resize_up_down(self):
# def f(a):
# torch.ops.inductor.resize_storage_bytes_(a, 32)
# # float32, 4 bytes per element, 32 bytes == 8 elements
# with torch.no_grad():
# a.copy_(torch.ones(8))
# out = a.sin()
# torch.ops.inductor.resize_storage_bytes_(a, 0)
# return out
# inp = torch.zeros(8, requires_grad=True)
# # Input starts with zero-size-storage
# inp.untyped_storage().resize_(0)
# fw_graph_cell = [None]
# compiled_f = aot_function(
# f,
# fw_compiler=make_boxed_compiler(
# partial(extract_graph, graph_cell=fw_graph_cell)
# ),
# bw_compiler=nop,
# decompositions={},
# keep_inference_input_mutations=True,
# dynamic=False,
# )
# out = compiled_f(inp)
# # Final graph has two interesting properties:
# # (1) no resizes in the functional graph, since the two resizes cancel out
# # and the final size is zero
# # (2) no copy_ in the functional graph, even though we copied data into the input,
# # because the input has no storage at the end of graph execution (so no data to copy)
# self.assertExpectedInline(
# fw_graph_cell[0].code.strip(),
# """\
# def forward(self, primals_1):
# ones = torch.ops.aten.ones.default([8], device = device(type='cpu'), pin_memory = False)
# copy = torch.ops.aten.copy.default(primals_1, ones); primals_1 = ones = None
# sin = torch.ops.aten.sin.default(copy)
# return [sin, copy]""",
# )
# skipped after confirming with @yf225 and @bdhirsh
@unittest.skipIf(
    True,
    "using set_ unsafely and PT2 FSDP2 no longer uses set_ as used in this test",
)
def test_input_mutation_storage_resize_down_and_set_(self):
    """ppFSDP-style pattern (kept for reference, currently skipped):
    set_() a dummy param to allgathered data, compute, then resize the
    param's storage down to zero."""
    # Meant to mimic ppFSDP
    class TracableCreateParameter(torch.autograd.Function):
        @staticmethod
        def forward(ctx, tensor, placeholder):
            assert not tensor.requires_grad
            return placeholder.set_(tensor)
        @staticmethod
        def backward(ctx, grad):
            return None, grad  # grad flows to placeholder
    def f(dummy_param, param_shard):
        # simulate allgather
        with torch.no_grad():
            allgather_param = torch.cat([param_shard, param_shard])
        # simulate propagating grad state through dummy param, using data of allgather param
        dummy_param_with_grad_state = TracableCreateParameter.apply(  # noqa: F841
            allgather_param, dummy_param
        )
        out = dummy_param.sin()
        # Resize out dummy param, which now has the allgather data
        torch.ops.inductor.resize_storage_bytes_(dummy_param, 0)
        return out
    # Simulates the local shard of our param
    param_shard = torch.zeros(8, requires_grad=True)
    # The dummy, zero-sized allgathered param that autograd will actually compute gradients on
    dummy_param = torch.zeros(16, requires_grad=True)
    dummy_param.untyped_storage().resize_(0)
    fw_graph_cell = [None]
    compiled_f = aot_function(
        f,
        fw_compiler=make_boxed_compiler(
            partial(extract_graph, graph_cell=fw_graph_cell)
        ),
        bw_compiler=nop,
        decompositions={},
        keep_inference_input_mutations=True,
        dynamic=False,
    )
    compiled_f(dummy_param, param_shard)
    # Important stuff to point out:
    # (1) We save cat for backward (input to the sin()).
    #     While the original code was dummy_param.sin(),
    #     dummy_param actually contains the `cat` tensor due to the set_() call
    # (2) We emit a cat.resize_storage_(0) in the graph.
    #     After the set_(), cat is the actually data of dummy_param, which is what we call resize_() on
    self.assertExpectedInline(
        fw_graph_cell[0].code.strip(),
        """\
def forward(self, primals_1, primals_2):
    cat = torch.ops.aten.cat.default([primals_2, primals_2]);  primals_2 = None
    sin = torch.ops.aten.sin.default(cat)
    resize_storage_bytes_ = torch.ops.inductor.resize_storage_bytes_.default(cat, 0);  resize_storage_bytes_ = None
    set_ = torch.ops.aten.set_.source_Tensor(primals_1, cat);  primals_1 = set_ = None
    return (sin, cat)""",
    )
def test_input_mutation_storage_resize_before_set_(self):
    """Smoke test: resizing an input's storage to zero immediately before
    a set_() (both under no_grad) compiles without error."""
    def f(a):
        with torch.no_grad():
            torch.ops.inductor.resize_storage_bytes_(a, 0)
            a.set_(torch.ones(2))
    inp = torch.zeros(8, requires_grad=True)
    compiled_f = aot_function(
        f,
        fw_compiler=nop,
        bw_compiler=nop,
        decompositions={},
        keep_inference_input_mutations=True,
        dynamic=False,
    )
    compiled_f(inp)
# def test_input_mutation_storage_resize_not_supported(self):
# def f(a):
# a.mul_(2)
# torch.ops.inductor.resize_storage_bytes_(a, 0)
# return a
# inp = torch.zeros(8, requires_grad=True)
# with self.assertRaisesRegex(
# AssertionError, "the input has other mutations that we cannot"
# ):
# compiled_f = aot_function(
# f,
# fw_compiler=nop,
# bw_compiler=nop,
# decompositions={},
# keep_inference_input_mutations=True,
# dynamic=False,
# )
# out = compiled_f(inp)
def test_input_output_aliase_custom_autograd_function(self):
    """A custom autograd.Function whose forward returns its input as-is
    (output aliases input) with a custom backward must round-trip through
    AOTAutograd."""
    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x
        @staticmethod
        def backward(ctx, gx):
            return gx * 0.5
    def f(x):
        return Foo.apply(x)
    inp = [torch.ones(2, 2, requires_grad=True)]
    self.verify_aot_autograd(f, inp, test_mutation=False)
def test_input_mutation_requires_grad_detach(self):
    """Mutation through a detach()'d view of a grad-requiring input: the
    appended copy_() must neither error nor participate in autograd."""
    # Here, "a" requires grad, and gets mutated, so we append a copy_() to the end of the graph.
    # Its mutation doesn't take part in autograd though, because we mutated a detach'd view.
    # Need to make sure that this copy_() doesn't error, and doesn't participate in autograd either.
    def f(a):
        a.detach().mul_(2)
        return a + 3
    inp = [torch.ones(4, requires_grad=True)]
    self.verify_aot_autograd(f, inp, test_mutation=False)
    inp = [torch.ones(4, requires_grad=True)]
    # test_mutation=True will first do some compute on inp, so it is no longer an autograd leaf
    # by the time it becomes a graph input. Good to test both cases.
    self.verify_aot_autograd(f, inp, test_mutation=True)
def test_input_mutation_hidden_from_autograd_aliasing(self):
    """A no_grad mutation through a view of the input can be kept inside
    the graph as a copy_ back onto the input."""
    def f(a):
        a_alias = a.view(-1)
        with torch.no_grad():
            a_alias.mul_(2)
        return a + 1
    inp = [torch.ones(4, requires_grad=True)]
    # The important bit: we detected that the input mutation is safe
    # to include **inside** the graph, since it was under no_grad
    # (so all we need to do is use mark_dirty() on the input to bump the VC)
    fw_graph = self.verify_aot_autograd(
        f, inp, test_mutation=True, keep_inp_mutations=True
    )
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    view = torch.ops.aten.view.default(primals_1, [-1])
    mul = torch.ops.aten.mul.Tensor(view, 2);  view = None
    view_1 = torch.ops.aten.view.default(mul, [4]);  mul = None
    add = torch.ops.aten.add.Tensor(view_1, 1)
    copy_ = torch.ops.aten.copy_.default(primals_1, view_1);  primals_1 = view_1 = copy_ = None
    return (add,)""",
    )
def test_input_mutation_requires_grad_no_grad(self):
    """A direct no_grad mutation on a grad-requiring input stays in the
    training graph as a copy_ (no out-of-graph replay needed)."""
    def f(a):
        with torch.no_grad():
            a.mul_(2)
        return a + 3
    inp = [torch.ones(4, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(
        f, inp, test_mutation=True, keep_inp_mutations=True
    )
    # Even though the input requires_grad, we expect the keep the input mutation in the graph
    # (Even though this is a training graph!)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 2)
    add = torch.ops.aten.add.Tensor(mul, 3)
    copy_ = torch.ops.aten.copy_.default(primals_1, mul);  primals_1 = mul = copy_ = None
    return (add,)""",
    )
def test_input_mutation_requires_grad_no_grad_inference_graph(self):
    """Same as above but traced as an inference graph (arg0_1 naming): the
    no_grad mutation is kept in the graph as a copy_."""
    def f(a):
        with torch.no_grad():
            a.mul_(2)
        return a + 3
    inp = [torch.ones(4, requires_grad=True)]
    # Even though the input requires_grad, we expect the keep the input mutation in the graph
    fw_graph = self.verify_aot_autograd(
        f, inp, test_mutation=True, keep_inp_mutations=True
    )
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, arg0_1):
    mul = torch.ops.aten.mul.Tensor(arg0_1, 2)
    add = torch.ops.aten.add.Tensor(mul, 3)
    copy_ = torch.ops.aten.copy_.default(arg0_1, mul);  arg0_1 = mul = copy_ = None
    return (add,)""",
    )
def test_input_mutation_requires_grad_no_grad_detach_mixed(self):
    """Mixed mutations on one input: plain, under no_grad, and via
    detach(); only the plain one participates in gradient computation."""
    # Perform a mix of mutations on a:
    # 1 normal, 1 in no_grad, 1 on a detach'd tensor.
    # Only the first should participate in gradient computation.
    def f(a):
        a.detach().mul_(2)
        a.mul_(3)
        with torch.no_grad():
            a.mul_(4)
        return a + 5
    inp = [torch.ones(4, requires_grad=True)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
def test_input_mutation_metadata2(self):
    """A metadata mutation (transpose_) followed by a data mutation
    (mul_) on the same input, for both grad and no-grad inputs."""
    def f(a):
        a.transpose_(1, 0)
        a.mul_(2)
        return a + 1
    inp = [torch.ones(3, 3, requires_grad=True)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
def test_input_mutation_batchnorm(self):
    """Training batch norm mutates running_mean/var, which are also saved
    for backward: the saved tensors must be the cloned inputs, not the
    mutated originals."""
    def f(inpt, weight, bias, running_mean, running_var):
        # This is additionally a good test, because the input tensors that we mutate
        # are *also* saved for backwards.
        # This tests that what we save for the backward is actually cloned inputs,
        # and not the original inputs that got mutated.
        return torch._native_batch_norm_legit(
            inpt, weight, bias, running_mean, running_var, True, 0.5, 1e-5
        )
    def create_inp(req_grad):
        return [
            torch.ones(2, 5, 5, 5, requires_grad=req_grad),
            torch.ones(5, requires_grad=req_grad),
            torch.ones(5, requires_grad=req_grad),
            torch.ones(5),
            torch.ones(5),
        ]
    from torch._decomp import get_decompositions
    # This simulates what inductor does (running the fw + bw decompositions)
    decompositions = get_decompositions(
        [
            torch.ops.aten._native_batch_norm_legit_functional,
            torch.ops.aten.native_batch_norm_backward,
        ]
    )
    self.verify_aot_autograd(
        f, create_inp(True), test_mutation=True, decompositions=decompositions
    )
    self.verify_aot_autograd(
        f, create_inp(False), test_mutation=True, decompositions=decompositions
    )
def test_batchnorm_inference(self):
    """Eval-mode batch norm should compile to a forward graph that
    contains no copy_() calls (running stats are not updated when
    training=False).

    Cleanup vs. the previous version: removed an unused 5-tensor input
    list and duplicated `fw_graph_cell` / `inp` assignments that were
    dead code.
    """
    m = torch.nn.BatchNorm2d(4, 4)
    m.eval()
    fw_graph_cell = [None]
    compiled_m = aot_module(
        m,
        fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
        bw_compiler=nop,
        keep_inference_input_mutations=True,
    )
    inp = torch.ones(4, 4, 4, 4)
    with torch.no_grad():
        compiled_m(inp)
    # expectation: there are no copy_() calls in the decomposed batch norm when running under training=False (eval mode)
    code = fw_graph_cell[0].code.strip()
    self.assertTrue("copy_" not in str(code))
def test_input_output_view_simple(self):
    """An output that is purely a view of an input is regenerated outside
    the compiled graph; the traced graph only produces the view."""
    def f(a):
        return a.view(-1)
    inp = [torch.ones(2, 2, requires_grad=False).add(1)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(2, 2, requires_grad=True).add(1)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    # Outputs that alias inputs are pulled out of the graph entirely, so we don't compile anything here
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, arg0_1):
    view = torch.ops.aten.view.default(arg0_1, [-1]);  arg0_1 = None
    return (view,)""",
    )
def test_input_output_view_mutate_multiple(self):
    """Mutated inputs plus outputs that alias inputs: the graph returns
    the updated inputs; the aliased user outputs are regenerated outside
    the graph."""
    def f(a, b, c):
        a.mul_(2)
        c.mul_(3)
        return b.view(2, 2), c.view(2, 2)
    def create_inp(req_grad):
        # .add(1) makes each input a non-leaf so in-place ops are legal.
        return [
            torch.ones(2, 2, requires_grad=req_grad).add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
        ]
    self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
    fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
    # The original function returned two outputs, both of which aliased inputs.
    # We expect two outputs in the functional graph, a_updated and c_updated.
    # The actual aliased outputs themselves aren't in the compiled forward graph;
    # Instead, they're generated outside of the graph.
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2, primals_3):
    clone = torch.ops.aten.clone.default(primals_1);  primals_1 = None
    clone_1 = torch.ops.aten.clone.default(primals_3);  primals_3 = None
    mul = torch.ops.aten.mul.Tensor(clone, 2);  clone = None
    mul_1 = torch.ops.aten.mul.Tensor(clone_1, 3);  clone_1 = None
    view = torch.ops.aten.view.default(primals_2, [2, 2]);  primals_2 = None
    view_2 = torch.ops.aten.view.default(mul_1, [2, 2])
    return (mul, mul_1, view, view_2)""",
    )
def test_input_output_view_metadata_mutate_multiple(self):
    """Combination of a data mutation (b.mul_), a metadata mutation
    (c.t_), and three input-aliasing outputs: only the data mutation is
    functionalized in-graph; everything else is handled outside."""
    def f(a, b, c):
        b.mul_(3)
        c.t_()
        return a.view(2, 2), b.view(2, 2), c.view(2, 2)
    def create_inp(req_grad):
        # .add(1) makes each input a non-leaf so in-place ops are legal.
        return [
            torch.ones(2, 2, requires_grad=req_grad).add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
        ]
    self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
    fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
    # Important thing to check here: of the three inputs:
    # Only the b.mul_(3) should show up in the graph (we functionalize it and return it).
    # Everything else that does not show up in the graph includes:
    # - The metadata mutation on c (we do it outside the graph)
    # - All 3 original fw outputs, which are aliases of inputs (we regenerate them outside of the graph)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2, primals_3):
    clone = torch.ops.aten.clone.default(primals_2);  primals_2 = None
    view = torch.ops.aten.view.default(primals_3, [2, 2]);  primals_3 = None
    mul = torch.ops.aten.mul.Tensor(clone, 3);  clone = None
    t = torch.ops.aten.t.default(view);  view = None
    view_1 = torch.ops.aten.view.default(primals_1, [2, 2]);  primals_1 = None
    view_3 = torch.ops.aten.view.default(t, [2, 2])
    view_4 = torch.ops.aten.view.default(mul, [2, 2])
    return (mul, t, view_1, view_4, view_3)""",
    )
def test_input_mutation_and_output_view(self):
    """An input is mutated and the output is a view of that input: the
    graph returns the updated input; the aliased view output is
    regenerated outside the graph."""
    def f(a):
        a.add_(1)
        return a.view(-1)
    inp = [torch.ones(2, 2, requires_grad=False).add(1)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(2, 2, requires_grad=True).add(1)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    # Here, total # of outputs is 1 because:
    # - num_mutated_inps = 1 (a_updated)
    # - num_fw_outputs = 0 (the output is an alias of the input, so we move it outside the compiled fw)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1);  primals_1 = None
    add = torch.ops.aten.add.Tensor(clone, 1);  clone = None
    view_1 = torch.ops.aten.view.default(add, [-1])
    return (add, view_1)""",
    )
def test_input_mutation_output_view_multiple(self):
    """Four inputs with a metadata mutation, a data mutation, a plain
    output, an input-view output (diagonal), and an output depending on a
    mutated input — all combined in one graph."""
    def f(a, b, c, d):
        b.transpose_(1, 0)
        c.add_(1)
        return d + 1, b.diagonal(), a + c
    def create_inp(req_grad):
        # .add(1)/.view(2, 2) make the inputs non-leaves so in-place
        # mutations are legal.
        return [
            torch.arange(4, requires_grad=req_grad, dtype=torch.float32)
            .view(2, 2)
            .add(1),
            torch.arange(4, requires_grad=req_grad, dtype=torch.float32)
            .view(2, 2)
            .add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
            torch.ones(2, 2, requires_grad=req_grad).add(1),
        ]
    self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
    fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2, primals_3, primals_4):
    view = torch.ops.aten.view.default(primals_2, [2, 2]);  primals_2 = None
    clone = torch.ops.aten.clone.default(primals_3);  primals_3 = None
    transpose = torch.ops.aten.transpose.int(view, 1, 0);  view = None
    add = torch.ops.aten.add.Tensor(clone, 1);  clone = None
    add_1 = torch.ops.aten.add.Tensor(primals_4, 1);  primals_4 = None
    diagonal = torch.ops.aten.diagonal.default(transpose)
    add_2 = torch.ops.aten.add.Tensor(primals_1, add);  primals_1 = None
    return (transpose, add, add_1, diagonal, add_2)""",
    )
def test_output_aliases_intermediate_single(self):
    """Single output that is a view of a graph intermediate.

    The compiled forward returns the view; the aliasing relationship is
    reconstructed by AOTAutograd's epilogue.
    """

    def f(a):
        out = torch.mul(a, 3)
        return out.view(-1)  # output aliases an intermediate, not an input

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    # In AOTAutograd, we are obligated to make the compiled forward directly return `out`,
    # and reconstruct `out.view(-1)` as a fresh output.
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1]);  mul = None
    return (view,)""",
    )
def test_output_aliases_input_multi_output_view_should_raise_autograd_error(self):
    """Multi-output-view aliases of an input stay protected by autograd.

    Mutating such an alias, or touching its grad_fn after the base was
    mutated, must raise the usual autograd error even when compiled.
    """

    def unbind_all(a):
        return list(a.unbind(0))

    compiled_unbind = aot_function(unbind_all, nop)

    def make_input():
        # .clone() makes the tensor a non-leaf so mutation is legal in principle.
        return torch.ones(3, 3, requires_grad=True).clone()

    base1 = make_input()
    base2 = make_input()
    base3 = make_input()

    with self.assertRaisesRegex(
        RuntimeError, "Such functions do not allow the output views"
    ):
        outs1 = compiled_unbind(base1)
        # Mutating a multi-output-view alias raises in eager autograd too.
        outs1[0].mul_(2)

    with self.assertRaisesRegex(
        RuntimeError, "Such functions do not allow the output views"
    ):
        outs2 = compiled_unbind(base2)
        base2.mul_(2)
        # Mutating the base replaces the aliases' grad_fn with an error
        # node, so merely accessing grad_fn must raise.
        outs2[0].grad_fn

    with self.assertRaisesRegex(
        RuntimeError, "Such functions do not allow the output views"
    ):
        compiled_unbind(base3)
        # Detached aliases likewise poison the multi-output-view grad_fns.
        outs1[0].detach().mul_(2)
        outs2[0].grad_fn
def test_output_aliases_input_multi_output_view(self):
    """When every aliased output comes from one multi-output view of an
    input, AOTAutograd hides the aliasing from autograd; when an ordinary
    alias of the same base is also returned, it does not."""

    # Case 1: all aliased outputs come from a single unbind() call.
    def unbind_only(a):
        return list(a.unbind(0))

    x = torch.ones(3, 3, requires_grad=True)
    x_ref = torch.ones(3, 3, requires_grad=True)
    compiled_unbind = aot_function(unbind_only, nop)
    ref_outs = unbind_only(x_ref)
    test_outs = compiled_unbind(x)
    # Backward must go through CompiledFunctionBackward, not AsStridedBackward:
    # no view regeneration is needed for the pure multi-output-view case.
    # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
    self.assertTrue(
        all("CompiledFunctionBackward" in str(o.grad_fn) for o in test_outs)
    )
    sum(ref_outs).sum().backward()
    sum(test_outs).sum().backward()
    self.assertEqual(x_ref.grad, x.grad)

    # Case 2: several outputs come from a multi-output view, but they share
    # an alias set with the user-visible input "a" and with "a.view(a.shape)".
    # AOTAutograd will not hide the aliasing here; it runs its
    # "output aliases input" logic and regenerates every alias.
    def unbind_plus_view(a):
        return *list(a.unbind(0)), a.view(a.shape)

    x = torch.ones(3, 3, requires_grad=True)
    x_ref = torch.ones(3, 3, requires_grad=True)
    compiled_mixed = aot_function(unbind_plus_view, nop)
    x_ref_clone = x_ref.clone()
    x_clone = x.clone()
    ref_outs = unbind_plus_view(x_ref_clone)
    test_outs = compiled_mixed(x_clone)
    self.assertTrue(all("UnbindBackward" in str(o.grad_fn) for o in test_outs[:3]))
    # The final output is a plain view, so autograd permits mutating it.
    ref_outs[-1].mul_(2)
    test_outs[-1].mul_(2)
    # Mutate the base too; the aliased output must observe it.
    x_ref_clone.view(-1).mul_(3)
    x_clone.view(-1).mul_(3)
    # Backward through both the leaf input and the mutated output.
    (x_ref + ref_outs[-1]).sum().backward()
    (x + test_outs[-1]).sum().backward()
    self.assertEqual(x_ref.grad, x.grad)
def test_output_aliases_intermediate_multi_output_view(self):
    """Outputs aliasing a graph *intermediate* via multi-output views.

    Covers four variants (f1..f4) that progressively add ordinary aliases
    of the same intermediate, flipping AOTAutograd between the
    "hide aliasing from autograd" fast path and the intermediate-base path.
    """

    # All aliased outs are from multi-output views, so AOTAutograd will hide the aliasing from autograd.
    def f1(a):
        out = torch.mul(a, 3)
        return list(out.unbind(0))

    inp = torch.ones(3, 3, requires_grad=True)
    inp_ref = torch.ones(3, 3, requires_grad=True)
    f1_compiled = aot_function(f1, nop)

    out_ref = f1(inp_ref)
    out_test = f1_compiled(inp)
    # Assert that we get CompiledFunctionBackward in the backward graph,
    # and not AsStridedBackward. No view-regeneration necessary for this mult-output view case.
    # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
    self.assertTrue(
        all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
    )
    sum(out_ref).sum().backward()
    sum(out_test).sum().backward()
    self.assertEqual(inp_ref.grad, inp.grad)

    # All aliased outs but one are from multi-output views, so AOTAutograd will hide the aliasing from autograd.
    def f2(a):
        out = torch.mul(a, 3)
        return *list(out.unbind(0)), out

    inp = torch.ones(3, 3, requires_grad=True)
    inp_ref = torch.ones(3, 3, requires_grad=True)
    f2_compiled = aot_function(f2, nop)

    out_ref = f2(inp_ref)
    out_test = f2_compiled(inp)
    # Assert that we get CompiledFunctionBackward in the backward graph,
    # and not AsStridedBackward. No view-regeneration necessary for this mult-output view case.
    # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
    self.assertTrue(
        all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
    )
    # The last output is not from a multi-output view, so autograd will let us mutate it.
    out_ref[-1].mul_(2)
    out_test[-1].mul_(2)
    out_ref[-1].sum().backward()
    out_test[-1].sum().backward()
    self.assertEqual(inp_ref.grad, inp.grad)

    # All aliased outs but one are from multi-output views, so AOTAutograd will hide the aliasing from autograd.
    def f3(a):
        out = torch.mul(a, 3)
        return *list(out.unbind(0)), out.view(out.shape)

    inp = torch.ones(3, 3, requires_grad=True)
    inp_ref = torch.ones(3, 3, requires_grad=True)
    f3_compiled = aot_function(f3, nop)

    out_ref = f3(inp_ref)
    out_test = f3_compiled(inp)
    # Assert that we get CompiledFunctionBackward in the backward graph,
    # and not AsStridedBackward. No view-regeneration necessary for this mult-output view case.
    # See Note: [AOTAutograd: differentiable outputs that alias each other from a multi-output view call]
    self.assertTrue(
        all("CompiledFunctionBackward" in str(o.grad_fn) for o in out_test)
    )
    # The last output is not from a multi-output view, so autograd will let us mutate it.
    out_ref[-1].mul_(2)
    out_test[-1].mul_(2)
    out_ref[-1].sum().backward()
    out_test[-1].sum().backward()
    self.assertEqual(inp_ref.grad, inp.grad)

    # There are 5 outputs that all alias each other.
    # 3 of them come from multi-output views, but the other 3 are "ordinary" aliases.
    # Therefore, AOTAutograd will not attempt the multi-output-view optimization,
    # and apply the intermediate_base logic to all aliases.
    # (In theory we could probably get AOTAutograd to only apply the intermediate base
    # logic to the last 2 outputs and not the first 3. We should probably
    # just do the graph partitioning defined in this doc instead though).
    # https://docs.google.com/document/d/1DlfFq8TKbuAn2zyJxLfoW-X1qkkm5PLdHFtySo03QAk/edit
    def f4(a):
        out = torch.mul(a, 3)
        # also return the graph intermediate directly,
        # which will force AOTAutograd to do the "intermediate base" logic.
        # (Why? The user can mutate "out", which should change the autograd metadata
        # of the other aliased outputs)
        return *list(out.unbind(0)), out, out.view(out.shape)

    inp = torch.ones(3, 3, requires_grad=True)
    inp_ref = torch.ones(3, 3, requires_grad=True)
    f4_compiled = aot_function(f4, nop)

    out_ref = f4(inp_ref)
    out_test = f4_compiled(inp)
    # Mutate the last output of f4 (autograd will allow this, since it is not a multi-output view,
    # as long as *only* the non-multi-output views participate in the backward)
    # Note: We could probably try to hide **only** the multi-output views from autograd here
    # and only do the intermediate base logic for the last two aliases.
    # Longer term solution of graph partitioning is probably cleaner though (see the note).
    out_ref[-1].mul_(2)
    out_test[-1].mul_(2)

    out_ref_sum = out_ref[-1] + out_ref[-2]
    out_test_sum = out_test[-1] + out_test[-2]
    out_ref_sum.sum().backward()
    out_test_sum.sum().backward()
    self.assertEqual(inp_ref.grad, inp.grad)
def test_output_aliases_intermediate_mutation_linear(self):
    """Mutating a view-of-intermediate output must match eager when the
    compiled function runs under inductor's decompositions."""

    def plus_one_flat(x):
        return (x + 1).view(-1)

    args = [torch.ones(3, 3, requires_grad=True)]
    # use inductor's decomps (which will e.g. turn _unsafe_view() into view())
    from torch._inductor.decomposition import decompositions

    compiled = aot_function(plus_one_flat, nop, decompositions=decompositions)

    eager_out = plus_one_flat(*args)
    compiled_out = compiled(*args)
    eager_out.mul_(2)
    compiled_out.mul_(2)
    self.assertEqual(eager_out, compiled_out)
def test_output_aliases_intermediate_no_grad(self):
    """View of an intermediate that does not require grad: no intermediate
    base needs to be returned from the compiled forward."""

    def f(a, b):
        out = torch.mul(a, 3)
        # First output is an alias of an intermediate that doesn't require grad
        return out.view(-1), b.add(1)

    inp = [torch.ones(3, 3), torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3), torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    # important bit: we don't bother generating an intermediate base as an output in the graph,
    # because the intermediate base itself didn't require gradients.
    # (the only problematic case is when both the base and the aliasesed output require gradients).
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1]);  mul = None
    add = torch.ops.aten.add.Tensor(primals_2, 1);  primals_2 = None
    return (view, add)""",
    )
def test_output_aliases_intermediate_returned_multiple_times(self):
    """An intermediate returned twice plus a view of it round-trips through
    AOTAutograd for both no-grad and grad inputs."""

    def dup_outputs(a):
        base = torch.mul(a, 3)
        alias = base.view(-1)
        return base, alias, base

    for needs_grad in (False, True):
        self.verify_aot_autograd(
            dup_outputs,
            [torch.ones(3, 3, requires_grad=needs_grad)],
            test_mutation=True,
        )
def test_output_aliases_intermediate_multiple(self):
    """Two outputs that both alias the same intermediate: the compiled fw
    must also return the intermediate base (`mul`) for view regeneration."""

    def f(a):
        out = torch.mul(a, 3)
        # AOTAutograd should manually generate these two output views in the epilogue.
        return out.view(-1), out.view(-1)

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1])
    view_1 = torch.ops.aten.view.default(mul, [-1])
    return (view, view_1, mul)""",
    )
def test_output_aliases_intermediate_and_returned(self):
    """View of an intermediate returned alongside the intermediate itself:
    only the view needs epilogue regeneration."""

    def f(a):
        out = torch.mul(a, 3)
        # AOTAutograd should manually generate the first output (a view of an intermediate)
        # but not the second (which is itself the intermediate for the first)
        return out.view(-1), out

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1])
    return (view, mul)""",
    )
def test_output_aliases_intermediate_and_returned_flipped(self):
    """Same as test_output_aliases_intermediate_and_returned but with the
    intermediate returned first and its view second."""

    def f(a):
        out = torch.mul(a, 3)
        # AOTAutograd should manually generate the first output (a view of an intermediate)
        # but not the second (which is itself the intermediate for the first)
        return out, out.view(-1)

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1])
    return (mul, view)""",
    )
def test_output_aliases_intermediate_and_returned_different_grad(self):
    """As above, plus a detached alias (out[0].detach()) whose grad
    requirement differs from the base's."""

    def f(a):
        out = torch.mul(a, 3)
        # AOTAutograd should manually generate the first output (a view of an intermediate)
        # but not the second (which is itself the intermediate for the first)
        return out.view(-1), out, out[0].detach()

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1])
    select = torch.ops.aten.select.int(mul, 0, 0)
    detach = torch.ops.aten.detach.default(select);  select = None
    return (view, mul, detach)""",
    )
def test_output_aliases_intermediate_inplace_view(self):
    """Placeholder: returning an intermediate whose metadata was mutated
    in place (t_) is not verified yet."""

    def fn(a):
        tmp = torch.mul(a, 3)
        tmp.t_()
        return tmp

    # TODO: fix this test.
    # See https://github.com/pytorch/pytorch/issues/90507
    # self.verify_aot_autograd(fn, inp, test_mutation=True)
def test_output_aliases_intermediate_inplace_view_with_detach(self):
    """In-place metadata mutation followed by detach_: the detached output
    is treated as a plain (non-alias) output."""

    def f(a):
        out = torch.mul(a, 3)
        out.t_()
        out.detach_()
        # Thanks to the detach_() AOT Autograd doesn't need to do anything.
        # `out` will show up as having OutputType.non_alias,
        # and ._is_view() == False
        return out, a + 1

    inp = [torch.ones(2, 4, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(2, 4, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3)
    t = torch.ops.aten.t.default(mul);  mul = None
    add = torch.ops.aten.add.Tensor(primals_1, 1);  primals_1 = None
    return (t, add)""",
    )
def test_output_aliases_intermediate_inplace_view_and_view(self):
    """Placeholder: views taken both before and after an in-place metadata
    mutation of an intermediate; currently disabled."""

    def fn(a):
        tmp = torch.mul(a, 3)
        pre_view = tmp.unsqueeze(0)
        tmp.t_()
        post_view = tmp.unsqueeze(0)
        return pre_view, tmp, post_view

    inp = [torch.ones(2, 4, requires_grad=True)]  # noqa: F841
    # TODO: fix this test.
    # See <github issue link>
    # self.verify_aot_autograd(fn, inp, test_mutation=True)
def test_output_aliases_intermediate_multiple_mixed(self):
    """Outputs aliasing two distinct intermediates (out1 twice, out2 once):
    only out1's base is returned for regeneration."""

    def f(a):
        out1 = torch.mul(a, 3)
        out2 = torch.mul(a, 4)
        # AOTAutograd should manually generate these two output views in the epilogue.
        return out1.view(-1), out2.transpose(1, 0), out1.transpose(1, 0)

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3)
    mul_1 = torch.ops.aten.mul.Tensor(primals_1, 4);  primals_1 = None
    view = torch.ops.aten.view.default(mul, [-1])
    transpose = torch.ops.aten.transpose.int(mul_1, 1, 0);  mul_1 = None
    transpose_1 = torch.ops.aten.transpose.int(mul, 1, 0)
    return (view, transpose, transpose_1, mul)""",
    )
def test_output_all_alias_types(self):
    """Exercise every aliasing category that forces metadata returns."""
    # There are 3 types of aliasing that require us to return metadata in the compiled fw:
    # (1) outputs that are views of inputs
    # (2) outputs that are views of intermediates
    # (3) inputs that get metadata mutations
    # test all 3 of them here

    def f(a):
        a.transpose_(1, 0)  # (3) metadata mutation on the input
        tmp = a.mul(2)
        return tmp.squeeze(), tmp.transpose(1, 0), a.unsqueeze(0)  # (2), (2), (1)

    def inp_callable(req_grad):
        # .clone() makes the input a non-leaf so it can be mutated.
        x = torch.ones(1, 2, 4, requires_grad=req_grad).clone()
        return [(x,), (x,)]

    self.verify_aot_autograd(
        f, partial(inp_callable, req_grad=False), test_mutation=True
    )
    fw_graph = self.verify_aot_autograd(
        f, partial(inp_callable, req_grad=True), test_mutation=True
    )
    # TODO: make this test run with dynamic shapes so it is more meaningful
    # metadata output order: (a_updated_meta, out1_meta, out2_meta, out3_meta)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    view = torch.ops.aten.view.default(primals_1, [1, 2, 4]);  primals_1 = None
    transpose = torch.ops.aten.transpose.int(view, 1, 0);  view = None
    mul = torch.ops.aten.mul.Tensor(transpose, 2)
    squeeze = torch.ops.aten.squeeze.default(mul)
    transpose_1 = torch.ops.aten.transpose.int(mul, 1, 0)
    unsqueeze = torch.ops.aten.unsqueeze.default(transpose, 0)
    return (transpose, squeeze, transpose_1, unsqueeze, mul)""",
    )
@parametrize("req_grad", [False, True])
def test_subclass_metadata_mutation(self, req_grad):
    """Metadata mutation (transpose_) on a traceable tensor-subclass input
    is currently rejected with an explicit error."""

    def fn(a):
        a.transpose_(1, 0)
        tmp = a.mul(2)
        return tmp.transpose(1, 0)

    def make_inputs(req_grad):
        # .clone() keeps the input non-leaf so mutation is legal.
        base = torch.ones(1, 2, 4, requires_grad=req_grad).clone()
        return [(base,), (base,)]

    # See https://github.com/pytorch/pytorch/issues/114975
    with self.assertRaisesRegex(
        RuntimeError,
        "Metadata mutations are currently not allowed on tensor subclasses",
    ):
        self.verify_aot_autograd(
            fn,
            partial(make_inputs, req_grad=req_grad),
            test_mutation=True,
            make_inputs_subclasses=True,
        )
def test_input_data_and_metadata_mutation(self):
    """Both a metadata mutation (t_) and a data mutation (a[0].mul_) on the
    same input, plus an output view of it."""

    def f(a):
        a.t_()  # metadata mutation
        a[0].mul_(2)  # data mutation through a view
        return a.view(a.shape)

    inp = [torch.ones(3, 3, requires_grad=False)]
    self.verify_aot_autograd(f, inp, test_mutation=True)
    inp = [torch.ones(3, 3, requires_grad=True)]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1);  primals_1 = None
    t = torch.ops.aten.t.default(clone)
    select = torch.ops.aten.select.int(t, 0, 0);  t = None
    mul = torch.ops.aten.mul.Tensor(select, 2);  select = None
    t_1 = torch.ops.aten.t.default(clone);  clone = None
    select_scatter = torch.ops.aten.select_scatter.default(t_1, mul, 0, 0);  t_1 = mul = None
    t_2 = torch.ops.aten.t.default(select_scatter);  select_scatter = None
    t_4 = torch.ops.aten.t.default(t_2)
    t_6 = torch.ops.aten.t.default(t_2);  t_2 = None
    view_1 = torch.ops.aten.view.default(t_6, [3, 3]);  t_6 = None
    return (t_4, view_1)""",
    )
def test_view_and_inplace_view(self):
    """Metadata-only mutation (a.t_) with view outputs of both inputs;
    inference-style graph uses arg* naming since nothing requires autograd
    bookkeeping beyond the views."""

    def f(a, b):
        a.t_()  # metadata mutation only
        return b.view(b.shape), a.view(a.shape)

    def create_inp(req_grad):
        return [
            torch.ones(3, 3, requires_grad=req_grad),
            torch.ones(3, 3, requires_grad=req_grad),
        ]

    self.verify_aot_autograd(f, create_inp(False), test_mutation=True)
    fw_graph = self.verify_aot_autograd(f, create_inp(True), test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, arg0_1, arg1_1):
    t = torch.ops.aten.t.default(arg0_1);  arg0_1 = None
    view = torch.ops.aten.view.default(arg1_1, [3, 3]);  arg1_1 = None
    view_1 = torch.ops.aten.view.default(t, [3, 3])
    return (t, view, view_1)""",
    )
def test_view_detach(self):
    """A detached alias taken before an input mutation must survive the
    mutation, with and without requires_grad."""

    def fn(a):
        detached = a.detach()
        a.mul_(2)
        return a, detached

    for needs_grad in (True, False):
        self.verify_aot_autograd(
            fn,
            [torch.ones(3, 3, requires_grad=needs_grad)],
            test_mutation=True,
        )
def test_input_inplace_requires_grad_true(self):
    """requires_grad_(True) flipped inside the function: the compiled fw
    is a straight computation; the autograd-state change happens outside."""

    def f(a, b):
        a.requires_grad_(True)  # autograd-state mutation, not a data mutation
        return a.mul(3), b.mul(4)

    inp = [
        # First inp doesn't require grad, but we switch it on
        torch.ones(3, 3, requires_grad=False),
        torch.ones(3, 3, requires_grad=True),
    ]
    fw_graph = self.verify_aot_autograd(f, inp, test_mutation=True)
    self.assertExpectedInline(
        fw_graph.code.strip(),
        """\
def forward(self, primals_1, primals_2):
    mul = torch.ops.aten.mul.Tensor(primals_1, 3);  primals_1 = None
    mul_1 = torch.ops.aten.mul.Tensor(primals_2, 4);  primals_2 = None
    return (mul, mul_1)""",
    )
# Torture test: a and b become one synthetic base in the compiled graph;
# one alias gets a data mutation, the other a metadata mutation, and the
# metadata mutation must still be propagated back to the original input.
@skipIfDynamoInput("Dynamo removes runtime error")
def test_input_data_and_metadata_mutation_aliases_other_input(self):
    """Aliased inputs with mixed data/metadata mutations work through the
    synthetic-base path, but are rejected for tensor subclasses."""

    # a and b alias one another (both are views of the same base).
    def fn(a, b):
        a.mul_(2)
        b.t_()
        return a.mul(b)

    def make_inputs(req_grad):
        base = torch.ones(2, 2, requires_grad=req_grad)
        # Note: in our test, the add() is important because we need the graph
        # inputs to be non-leaves so we can mutate them.
        tmp = base.add(1)
        first = tmp[0]
        second = tmp[0]
        return [base], [first, second]

    for needs_grad in (False, True):
        self.verify_aot_autograd(
            fn, partial(make_inputs, req_grad=needs_grad), test_mutation=True
        )
    for needs_grad in (False, True):
        with self.assertRaisesRegex(
            RuntimeError,
            "Encountered aliased inputs that are mutated in the graph, but",
        ):
            self.verify_aot_autograd(
                fn,
                partial(make_inputs, req_grad=needs_grad),
                test_mutation=True,
                make_inputs_subclasses=True,
            )
# https://github.com/pytorch/pytorch/issues/106456
def test_input_mutation_noncontiguous(self):
    """Mutating a non-contiguous view input works in all four modes:
    {plain, subclass} x {no-grad, grad}."""

    def fn(a):
        a.mul_(2)
        return a + 1

    def make_inputs(req_grad):
        base = torch.ones(2, 2, requires_grad=req_grad)
        tmp = base.add(1)
        # Pass a non-contiguous column view as the compiler input.
        col = tmp[:, 0]
        return [base], [col]

    for use_subclass in (False, True):
        for needs_grad in (False, True):
            self.verify_aot_autograd(
                fn,
                partial(make_inputs, req_grad=needs_grad),
                test_mutation=True,
                make_inputs_subclasses=use_subclass,
            )
def test_backward_mutation_data(self):
    """A custom autograd Function may data-mutate a saved tensor in its
    backward; verify both grad and no-grad saved-tensor variants."""

    class MutateInBackward(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            (saved,) = ctx.saved_tensors
            # Data mutation performed during the backward pass.
            saved.mul_(2)
            return grad_output.clone()

    def fn(a, b):
        return a * MutateInBackward.apply(b)

    # Mutation on a buffer that does not require grad in the backward is allowed.
    self.verify_aot_autograd(
        fn,
        [
            torch.ones(3, 3, requires_grad=True),
            torch.ones(3, 3, requires_grad=False),
        ],
        test_mutation=True,
    )
    # And the same round-trips when the mutated tensor requires grad.
    self.verify_aot_autograd(
        fn,
        [
            torch.ones(3, 3, requires_grad=True),
            torch.ones(3, 3, requires_grad=True),
        ],
        test_mutation=True,
    )
def test_fw_bw_mutation_no_functionalization1(self):
    """With functionalization disabled, _foreach_mul_ mutations stay as
    in-place ops: two in the forward graph, one in the backward graph."""

    class FwBwMutation(torch.autograd.Function):
        @staticmethod
        def forward(ctx, a, b):
            # input mutation
            torch._foreach_mul_([b], [2])
            x = b + 1
            # intermediate mutation
            torch._foreach_mul_([x], [3])
            ctx.save_for_backward(x)
            return x * a

        @staticmethod
        def backward(ctx, grad_output):
            (x,) = ctx.saved_tensors
            # bw mutation
            torch._foreach_mul_([x], [4])
            return grad_output * x, grad_output * x

    def f(a, b):
        return FwBwMutation.apply(a, b)

    inps = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=False),
    ]
    inps_ref = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=False),
    ]

    # One-element lists used as mutable cells to capture the traced graphs.
    fw_graph = [None]
    bw_graph = [None]

    def fw_compiler(gm, example_inputs):
        fw_graph[0] = gm
        return gm

    def bw_compiler(gm, example_inputs):
        bw_graph[0] = gm
        return gm

    compiled_f = compiled_function(
        f,
        fw_compiler,
        bw_compiler,
        dynamic=False,
        partition_fn=default_partition,
        keep_inference_input_mutations=True,
        disable_functionalization=True,
    )
    out_ref = f(*inps_ref)
    out = compiled_f(*inps)
    self.assertEqual(out, out_ref)

    out_ref.sum().backward()
    out.sum().backward()
    self.assertEqual(inps_ref[0].grad, inps[0].grad)

    # important bit: there are 2 mutations in the fw
    self.assertExpectedInline(
        fw_graph[0].code.strip(),
        """\
def forward(self, primals_1, primals_2):
    _foreach_mul_ = torch.ops.aten._foreach_mul_.ScalarList([primals_2], [2]);  _foreach_mul_ = None
    add = torch.ops.aten.add.Tensor(primals_2, 1);  primals_2 = None
    _foreach_mul__1 = torch.ops.aten._foreach_mul_.ScalarList([add], [3]);  _foreach_mul__1 = None
    mul = torch.ops.aten.mul.Tensor(add, primals_1);  primals_1 = None
    return (mul, add)""",
    )

    # important bit: there is 1 mutation in the bw
    self.assertExpectedInline(
        bw_graph[0].code.strip(),
        """\
def forward(self, add, tangents_1):
    _foreach_mul__2 = torch.ops.aten._foreach_mul_.ScalarList([add], [4]);  _foreach_mul__2 = None
    mul_1 = torch.ops.aten.mul.Tensor(tangents_1, add);  tangents_1 = add = None
    return (mul_1, None)""",
    )
def test_fw_bw_mutation_no_functionalization2(self):
    """Variant of no_functionalization1 where the Function's output is the
    saved activation itself; the backward mutation on it is not traced into
    the backward graph here."""

    class FwBwMutation(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            # input mutation
            torch._foreach_mul_([x], [2])
            x = x + 1
            # intermediate mutation
            torch._foreach_mul_([x], [3])
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, grad_output):
            (x,) = ctx.saved_tensors
            # bw mutation
            torch._foreach_mul_([x], [4])
            return grad_output * x

    def f(a, b):
        out = FwBwMutation.apply(b)
        return out * a

    inps = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=False),
    ]
    inps_ref = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=False),
    ]

    # One-element lists used as mutable cells to capture the traced graphs.
    fw_graph = [None]
    bw_graph = [None]

    def fw_compiler(gm, example_inputs):
        fw_graph[0] = gm
        return gm

    def bw_compiler(gm, example_inputs):
        bw_graph[0] = gm
        return gm

    compiled_f = compiled_function(
        f,
        fw_compiler,
        bw_compiler,
        dynamic=False,
        partition_fn=default_partition,
        keep_inference_input_mutations=True,
        disable_functionalization=True,
    )
    out_ref = f(*inps_ref)
    out = compiled_f(*inps)
    self.assertEqual(out, out_ref)

    out_ref.sum().backward()
    out.sum().backward()
    self.assertEqual(inps_ref[0].grad, inps[0].grad)

    # important bit: there are 2 mutations in the fw
    # (the mutation on an activation doesn't get moved to bw)
    self.assertExpectedInline(
        fw_graph[0].code.strip(),
        """\
def forward(self, primals_1, primals_2):
    _foreach_mul_ = torch.ops.aten._foreach_mul_.ScalarList([primals_2], [2]);  _foreach_mul_ = None
    add = torch.ops.aten.add.Tensor(primals_2, 1);  primals_2 = None
    _foreach_mul__1 = torch.ops.aten._foreach_mul_.ScalarList([add], [3]);  _foreach_mul__1 = None
    mul = torch.ops.aten.mul.Tensor(add, primals_1);  primals_1 = None
    return (mul, add)""",
    )

    self.assertExpectedInline(
        bw_graph[0].code.strip(),
        """\
def forward(self, add, tangents_1):
    mul_1 = torch.ops.aten.mul.Tensor(tangents_1, add);  tangents_1 = add = None
    return (mul_1, None)""",
    )
def test_backward_mutation_metadata(self):
    """Metadata mutation (transpose_) of a saved input inside a custom
    backward is unsupported and must raise an assertion."""

    class TransposeInBackward(torch.autograd.Function):
        @staticmethod
        def forward(ctx, a, b):
            ctx.save_for_backward(b)
            return a.clone(), b.clone()

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            (saved_b,) = ctx.saved_tensors
            # Metadata mutation performed during the backward pass.
            saved_b.transpose_(1, 0)
            return grad_a.clone(), grad_b.clone()

    def fn(a, b):
        a_clone, b_clone = TransposeInBackward.apply(a, b)
        return a_clone * b_clone

    args = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=False),
    ]
    with self.assertRaisesRegex(
        AssertionError, "input that had its metadata mutated in the backward"
    ):
        self.verify_aot_autograd(fn, args, test_mutation=True)
def test_backward_mutation_on_grad_out(self):
    """A backward that mutates its incoming grad_output must still produce
    gradients matching eager execution."""

    class GradOutMutation(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            # In-place mutation of the incoming gradient.
            grad_output.mul_(2)
            return grad_output.clone()

    def fn(a, b):
        return GradOutMutation.apply(a * b)

    inputs = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=True),
    ]
    inputs_ref = [
        torch.ones(3, 3, requires_grad=True),
        torch.ones(3, 3, requires_grad=True),
    ]

    compiled = aot_function(fn, nop)
    out = compiled(*inputs)
    out.mul(2).sum().backward()

    out_ref = fn(*inputs_ref)
    out_ref.mul(2).sum().backward()

    self.assertEqual(inputs[0].grad, inputs_ref[0].grad)
    self.assertEqual(inputs[1].grad, inputs_ref[1].grad)
def test_backward_mutation_forward_inputs(self):
    """Custom op whose backward zeroes a forward input (x1): forward-time
    mutation (x2.mul_) and backward-time mutation (x1.zero_) must each land
    in the correct phase and match eager."""

    @torch.library.custom_op("_test::_clone", mutates_args={})
    def f(x: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
        return x.clone()

    def f_fake(x, x1):
        return torch.empty_like(x)

    def backward(ctx, grad):
        with torch.no_grad():
            # Backward-only side effect on a forward input.
            ctx.x1.zero_()
        return grad * 2, None

    def setup_context(ctx, inputs, output):
        (x, x1) = inputs
        ctx.x = x
        ctx.x1 = x1

    f.register_fake(f_fake)
    f.register_autograd(backward, setup_context=setup_context)

    def fn(x: torch.Tensor, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        x2.mul_(5)  # forward-time input mutation
        return torch.ops._test._clone(x, x1) + x2

    inp_x, inp_x1, inp_x2 = (
        torch.randn(3, requires_grad=True),
        torch.randn(3, requires_grad=False),
        torch.randn(3, requires_grad=False),
    )

    ref_x, ref_x1, ref_x2 = inp_x.clone(), inp_x1.clone(), inp_x2.clone()
    ref_y = fn(ref_x, ref_x1, ref_x2)

    compiled_f = aot_function(fn, nop, keep_inference_input_mutations=True)
    x, x1, x2 = inp_x.clone(), inp_x1.clone(), inp_x2.clone()
    y = compiled_f(x, x1, x2)

    # Verify mutation in forward applied and mutation in backward is not in forward
    self.assertEqual(ref_x, x)
    self.assertEqual(ref_x1, x1)
    self.assertEqual(ref_x2, x2)
    self.assertEqual(ref_y, y)

    ref_y.sum().backward()
    y.sum().backward()

    # Verify mutations in backward applied
    self.assertEqual(ref_x, x)
    self.assertEqual(ref_x1, x1)
    self.assertEqual(ref_x2, x2)
    self.assertEqual(ref_y, y)
    self.assertEqual(ref_x.grad, x.grad)
    self.assertEqual(ref_x1.grad, x1.grad)
    self.assertEqual(ref_x2.grad, x2.grad)
def test_backward_mutation_forward_inputs_create_graph(self):
    """Backward input-mutation on a requires_grad tensor combined with
    create_graph=True is unsupported and must raise."""

    @torch.library.custom_op("_test::_clone_create_graph", mutates_args={})
    def f(x: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
        return x.clone()

    def f_fake(x, x1):
        return torch.empty_like(x)

    def backward(ctx, grad):
        with torch.no_grad():
            # Backward-only side effect on a forward input that requires grad.
            ctx.x1.zero_()
        return grad * 2, None

    def setup_context(ctx, inputs, output):
        (x, x1) = inputs
        ctx.x = x
        ctx.x1 = x1

    f.register_fake(f_fake)
    f.register_autograd(backward, setup_context=setup_context)

    def fn(x: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
        return torch.ops._test._clone_create_graph(x, x1)

    inp_x, inp_x1 = (
        torch.randn(3, requires_grad=True),
        torch.randn(3, requires_grad=True),
    )

    ref_x, ref_x1 = inp_x.clone(), inp_x1.clone()
    ref_y = f(ref_x, ref_x1)
    ref_y.sum().backward()
    x, x1 = inp_x.clone(), inp_x1.clone()
    compiled_f = aot_function(fn, nop)
    y = compiled_f(x, x1)
    loss = y.sum()
    with self.assertRaisesRegex(
        RuntimeError,
        "aot_autograd does not support input mutations with requires_grad in backward for create_graph=True",
    ):
        torch.autograd.grad(loss, inp_x, create_graph=True)

    # Not checking equality of ref and x as Exception is expected
# Partially addresses https://github.com/pytorch/pytorch/issues/106457
def test_input_mutation_false_aliasing(self):
def f(a, b):
a.mul_(3)
b.mul_(2)
return a.clone().view(-1) + b.clone().view(-1)
# No overlap, contiguous
def inp_callable1(req_grad):
base = torch.ones(4, 4, requires_grad=req_grad)
x = base.add(1)
# create two views that share storage, but are actually non-overlapping
a = x[0:2]
b = x[2:4]
return [base], [a, b]
fw_graph = self.verify_aot_autograd(
f, partial(inp_callable1, req_grad=False), test_mutation=True
)
self.verify_aot_autograd(
f, partial(inp_callable1, req_grad=True), test_mutation=True
)
self.verify_aot_autograd(
f,
partial(inp_callable1, req_grad=False),
test_mutation=True,
make_inputs_subclasses=True,
)
self.verify_aot_autograd(
f,
partial(inp_callable1, req_grad=True),
test_mutation=True,
make_inputs_subclasses=True,
)
# Important characteristic: the graph takes in 2 inputs!
# That shows that we didn't try to run our complicated synthetic base logic,
# because we successfully detected false aliasing across the two inputs.
self.assertExpectedInline(
fw_graph.code.strip(),
"""\
def forward(self, arg0_1, arg1_1):
mul = torch.ops.aten.mul.Tensor(arg0_1, 3); arg0_1 = None
mul_1 = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
clone = torch.ops.aten.clone.default(mul)
view = torch.ops.aten.view.default(clone, [-1]); clone = None
clone_1 = torch.ops.aten.clone.default(mul_1)
view_1 = torch.ops.aten.view.default(clone_1, [-1]); clone_1 = None
add = torch.ops.aten.add.Tensor(view, view_1); view = view_1 = None
return (mul, mul_1, add)""",
)
# No overlap, non-contiguous: first tensor ends before second tensor start
def inp_callable2(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
a = x.as_strided((4, 4), (8, 1), storage_offset=0)
b = x.as_strided((4, 4), (8, 1), storage_offset=28)
return [base], [a, b]
# No overlap, non-contiguous: tensors are perfectly interleaved
def inp_callable3(req_grad):
base = torch.ones(4, 4, requires_grad=req_grad)
x = base.add(1)
a = x[:, 0:2]
b = x[:, 2:4]
return [base], [a, b]
# No overlap, non-contiguous
def inp_callable4(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
a = x.as_strided((4, 4), (9, 1), storage_offset=0)
b = x.as_strided((4, 4), (9, 1), storage_offset=22)
return [base], [a, b]
# No overlap, non-contiguous
def inp_callable5(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
a = x.as_strided((4, 4), (9, 1), storage_offset=0)
b = x.as_strided((4, 4), (9, 1), storage_offset=23)
return [base], [a, b]
# No overlap, non-contiguous
def inp_callable6(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
# a's last element is at offset 195 (24 total elements)
a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
# b's first element is at offset 196: no overlap
b = x[196 : 196 + a.numel()]
return [base], [a, b]
# overlap! non-contiguous
def inp_callable_overlap1(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
a = x.as_strided((4, 4), (9, 1), storage_offset=0)
b = x.as_strided((4, 4), (9, 1), storage_offset=24)
return [base], [a, b]
# overlap! non-contiguous
def inp_callable_overlap2(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
a = x.as_strided((4, 4), (9, 1), storage_offset=0)
b = x.as_strided((4, 4), (9, 1), storage_offset=25)
return [base], [a, b]
# overlap! non-contiguous
def inp_callable_overlap3(req_grad):
base = torch.ones(256, requires_grad=req_grad)
x = base.add(1)
# a's last element is at offset 195 (24 total elements)
a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
# b's first element is at offset 195: overlap!
b = x[195 : 195 + a.numel()]
return [base], [a, b]
fw_graph2 = self.verify_aot_autograd(
f, partial(inp_callable2, req_grad=False), test_mutation=True
)
fw_graph3 = self.verify_aot_autograd(
f, partial(inp_callable3, req_grad=False), test_mutation=True
)
fw_graph4 = self.verify_aot_autograd(
f, partial(inp_callable4, req_grad=False), test_mutation=True
)
fw_graph5 = self.verify_aot_autograd(
f, partial(inp_callable5, req_grad=False), test_mutation=True
)
fw_graph6 = self.verify_aot_autograd(
f, partial(inp_callable6, req_grad=False), test_mutation=True
)
fw_graph_overlap1 = self.verify_aot_autograd(
f, partial(inp_callable_overlap2, req_grad=False), test_mutation=True
)
fw_graph_overlap2 = self.verify_aot_autograd(
f, partial(inp_callable_overlap1, req_grad=False), test_mutation=True
)
# All non-overlap graphs should be the same since we detected false aliasing
self.assertEqual(str(fw_graph.code), str(fw_graph2.code))
self.assertEqual(str(fw_graph.code), str(fw_graph3.code))
self.assertEqual(str(fw_graph.code), str(fw_graph4.code))
self.assertEqual(str(fw_graph.code), str(fw_graph5.code))
self.assertEqual(str(fw_graph.code), str(fw_graph6.code))
# All overlap graphs should be the same since we detected real aliasing
self.assertNotEqual(str(fw_graph.code), str(fw_graph_overlap1.code))
self.assertNotEqual(str(fw_graph.code), str(fw_graph_overlap2.code))
self.assertTrue("as_strided_scatter" in str(fw_graph_overlap1.code))
self.assertTrue("as_strided_scatter" in str(fw_graph_overlap2.code))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_mem_leak_from_save_for_bw(self):
# See a full diagnosis at this issue: https://github.com/pytorch/pytorch/issues/94990
# Note [Detaching saved tensors in AOTAutograd]
# This program creates a ref-cycle. Long term, we should fix this ref cycle
# (since it can arise, naturally albeit rarely, from uses of autograd.Function).
# But AOTAutograd makes it more likely to show up from tracing user programs,
# so we deal with it by manually detaching the tensors that we save for backward.
# This is completely wrong and would give wrong results if we were to do double backward.
# Fortunately today, double backward is explicitly banned in AOTAutograd.
def f(a, b):
add = a + a
split = torch.functional.split(add, [4, 4], dim=1)
getitem_2 = split[1]
unsqueeze = getitem_2.unsqueeze(-1)
mul = unsqueeze * b
return (getitem_2, mul)
f_compiled = aot_function(f, nop)
inps = [
torch.ones(8, 8, device="cuda", requires_grad=True),
torch.ones(1, 4, 1, device="cuda", requires_grad=True),
]
mem_before = torch.cuda.memory_allocated()
f_compiled(*inps)
mem_after = torch.cuda.memory_allocated()
self.assertTrue(mem_after == mem_before)
def test_output_aliases_multiple_inputs_get_correct_one(self):
# a and b are aliased, but have different shapes
# The first output should view off the first input, the 2nd output should view off the 2nd input
def f(a, b):
return a.view(a.shape), b.view(b.shape)
def inp_callable(req_grad):
base = torch.ones(2, 2, requires_grad=req_grad)
# Note: in our test, the add() is important because we need the graph inputs to be non-leaves so we can mutate them.
x = base.mul(2)
inp1 = x.view(-1)
inp2 = x[0]
return [base], [inp1, inp2]
self.verify_aot_autograd(
f, partial(inp_callable, req_grad=False), test_mutation=True
)
self.verify_aot_autograd(
f, partial(inp_callable, req_grad=True), test_mutation=True
)
self.verify_aot_autograd(
f,
partial(inp_callable, req_grad=False),
test_mutation=True,
make_inputs_subclasses=True,
)
self.verify_aot_autograd(
f,
partial(inp_callable, req_grad=True),
test_mutation=True,
make_inputs_subclasses=True,
)
    def test_input_mutation_aliases_other_input(self):
        """Mutating an input that aliases another input: AOTAutograd must merge
        the two aliased views into a single synthetic base graph input and
        regenerate each view off of it (via as_strided) inside the graph."""

        def f(a, b):
            a.add_(1)
            return a + b

        def inp_callable(req_grad):
            base = torch.ones(4, 2, requires_grad=req_grad)
            # Note: in our test, the add() is important because we need the graph inputs to be non-leaves so we can mutate them.
            x = base.add(1)
            # inp1 and inp2 are two distinct views of the same row of x.
            inp1 = x[0]
            inp2 = x[0]
            return [base], [inp1, inp2]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # Important parts of the graph:
        # - the compiled graph takes in a base, and we generate a and b (the views) off of the base
        # - clone() is still in the graph, because we need to call grad() on the original (non-mutated) inputs
        # - We re-generate the views *after* the clone, to preserve view relationships.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    as_strided = torch.ops.aten.as_strided.default(clone, [2], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, add, [2], [1], 0); clone = add = None
    as_strided_2 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
    as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
    add_1 = torch.ops.aten.add.Tensor(as_strided_2, as_strided_5); as_strided_2 = as_strided_5 = None
    return (as_strided_scatter, add_1)""",
        )  # noqa: B950
    def test_input_mutation_aliases_other_input2(self):
        """Same as test_input_mutation_aliases_other_input, except one of the
        aliased inputs *is* the base tensor itself (not a strict view of it)."""

        def f(a, b):
            a.add_(1)
            return a + b

        def inp_callable(req_grad):
            base = torch.ones(2, 2, requires_grad=req_grad)
            x = base.add(1)
            inp1 = x[0]
            # Here, one of the aliased inputs is the base itself
            inp2 = x
            return [base], [inp1, inp2]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # Note the second regenerated view ([2, 2] shape) corresponds to the base.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    as_strided = torch.ops.aten.as_strided.default(clone, [2], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, add, [2], [1], 0); clone = add = None
    as_strided_2 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [1], 0)
    as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [2, 2], [2, 1], 0)
    add_1 = torch.ops.aten.add.Tensor(as_strided_2, as_strided_5); as_strided_2 = as_strided_5 = None
    return (as_strided_scatter, add_1)""",
        )  # noqa: B950
    def test_input_mutation_aliases_and_output_alias(self):
        """An input is mutated, and the output aliases the *other* input that
        shares its storage."""

        def f(a, b):
            # Since a and b are aliased, mutating a also updates b;
            # the returned view must therefore be generated off of the *updated* b.
            a.add_(1)
            return b.view(b.shape)

        def inp_callable(req_grad):
            base = torch.ones(2, 2, requires_grad=req_grad)
            x = base.add(1)
            return [base], [x.view(-1), x.view(-1)]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # The output view is regenerated *after* the as_strided_scatter (the
        # mutation write-back), so it reflects the updated value.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    as_strided = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, add, [4], [1], 0); clone = add = None
    as_strided_9 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    view_1 = torch.ops.aten.view.default(as_strided_9, [4]); as_strided_9 = None
    return (as_strided_scatter, view_1)""",
        )  # noqa: B950
    def test_input_aliased_with_mutation_output_alias(self):
        """Three inputs where the first and third alias; the aliased pair is
        mutated and also feeds an output view."""

        def f(a, b, c):
            # a and c alias
            c.mul_(2)
            # The main thing we're testing here is that
            # (1) We need to reconstruct c.view(-1) from the 3rd input to the forward
            # (2) But we need to be careful to do this *before* converting aliased inputs into synthetic bases.
            # The original fw takes in 3 args, but the compiled fw takes in only 2 args.
            return b.add(1), c.view(-1)

        def inp_callable(req_grad):
            base1 = torch.ones(2, 2, requires_grad=req_grad)
            base2 = torch.ones(2, 2, requires_grad=req_grad)
            x = base1.add(1)
            y = base2.add(1)
            # args: (a, b, c) with a and c both views of base1.
            return [base1, base2], [x.view(-1), y, x.view(-1)]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # primals_1 is the synthetic base for (a, c); primals_2 is b.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1, primals_2):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    as_strided_1 = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
    mul = torch.ops.aten.mul.Tensor(as_strided_1, 2); as_strided_1 = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, mul, [4], [1], 0); clone = mul = None
    add = torch.ops.aten.add.Tensor(primals_2, 1); primals_2 = None
    as_strided_7 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    view_1 = torch.ops.aten.view.default(as_strided_7, [-1]); as_strided_7 = None
    return (as_strided_scatter, add, view_1)""",
        )  # noqa: B950
    def test_input_metadata_mutation_aliases(self):
        """A metadata-only mutation (t_()) on one of two aliased inputs must
        not trigger synthetic-base construction: data is untouched, so the
        other alias is unaffected."""

        def f(a, b):
            # a and b alias, and we do a metadata mutation on a
            # Since we're not mutating data, then b isn't affected at all.
            # We expect aot autograd to not bother with constructing a synthetic base.
            a.t_()
            return a + b

        def inp_callable(req_grad):
            base = torch.ones(2, 2, requires_grad=req_grad)
            x = base.add(1)
            return [base], [x.view(-1), x.view(-1)]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # Expectation: fwd() takes in 2 args, and we don't construct a synthetic base.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1, primals_2):
    t = torch.ops.aten.t.default(primals_1); primals_1 = None
    add = torch.ops.aten.add.Tensor(t, primals_2); t = primals_2 = None
    return (add,)""",
        )
    def test_input_mutation_aliases_and_none_require_gradients(self):
        """Aliased+mutated inputs that do NOT require grad have no ._base, so
        AOTAutograd must reconstruct the synthetic base from the shared storage
        directly. Subclass inputs are expected to be rejected for this case."""

        def f(a, b, c):
            # a and b alias, but neither require gradients (so they don't have a _base)
            # aot autograd should construct the synthetic base from `torch.Tensor(a.storage())`
            a.mul_(2)
            return b + 1, c + 1

        def inp_callable(req_grad):
            base = torch.ones(2, 2)
            c_arg = torch.ones(2, 2, requires_grad=req_grad)
            x = base.add(1)
            # a and b are the aliased pair; c is independent.
            return [base, c_arg], [x.view(-1), x.view(-1), c_arg]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )

        with self.assertRaisesRegex(
            RuntimeError, "is a tensor subclass. This is not supported today"
        ):
            self.verify_aot_autograd(
                f,
                partial(inp_callable, req_grad=False),
                test_mutation=True,
                make_inputs_subclasses=True,
            )

        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # No clone() here: the aliased pair doesn't require grad, so the
        # original values don't need to be preserved for autograd.
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1, primals_2):
    as_strided = torch.ops.aten.as_strided.default(primals_1, [4], [1], 0)
    mul = torch.ops.aten.mul.Tensor(as_strided, 2); as_strided = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(primals_1, mul, [4], [1], 0); primals_1 = mul = None
    as_strided_3 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided_3, 1); as_strided_3 = None
    add_1 = torch.ops.aten.add.Tensor(primals_2, 1); primals_2 = None
    return (as_strided_scatter, add, add_1)""",
        )  # noqa: B950
    @skipIfDynamoInput("Fails with dynamo")
    def test_input_mutation_aliases_bases_out_of_order(self):
        # This tests our calling convention: if b and d are aliased, then the outer calling convention
        # that we send to the compiled forward becomes:
        # (b_d_base, a, c)
        # Importantly, even though a and c alias in our test, neither inputs are mutated,
        # So we don't need to do the base construction / deconstruction
        def f(a, b, c, d):
            b.add_(1)  # data mutation on b (aliases d)
            d.unsqueeze_(0)  # metadata-only mutation on d (aliases b)
            return a + c + d, b.view(-1)

        def inp_callable(req_grad):
            base1 = torch.ones(2, 2, requires_grad=req_grad)
            base2 = torch.ones(2, 2, requires_grad=req_grad)
            x1 = base1.add(1)
            x2 = base2.add(1)
            # a and c alias, b and d alias
            return [base1, base2], [x1.view(-1), x2.view(-1), x1.view(-1), x2.view(-1)]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )

        # Metadata mutation (d.unsqueeze_) is rejected for subclass inputs.
        with self.assertRaisesRegex(
            RuntimeError,
            "Metadata mutations are currently not allowed on tensor subclasses",
        ):
            self.verify_aot_autograd(
                f,
                partial(inp_callable, req_grad=False),
                test_mutation=True,
                make_inputs_subclasses=True,
            )

        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # 3 graph inputs: (b_d_base, a, c)
        # 2 returns: (b_updated, a+c+d)
        # (there are 2 original fw outs, but one is a view of b so it's not part of the graph)
        # (there are also 2 input mutations, but one is a metadata-only mutation so the compiled forward doesn't return it)
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1, primals_2, primals_3):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    as_strided = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided, 1); as_strided = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, add, [4], [1], 0); clone = add = None
    as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    unsqueeze = torch.ops.aten.unsqueeze.default(as_strided_5, 0); as_strided_5 = None
    add_1 = torch.ops.aten.add.Tensor(primals_2, primals_3); primals_2 = primals_3 = None
    add_2 = torch.ops.aten.add.Tensor(add_1, unsqueeze); add_1 = None
    as_strided_14 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    view_2 = torch.ops.aten.view.default(as_strided_14, [-1]); as_strided_14 = None
    return (as_strided_scatter, add_2, view_2, unsqueeze)""",
        )  # noqa: B950
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_synthetic_base_base_attribute_is_none(self):
def f(a, b):
a.add_(1)
return a + b
def inp_callable():
base = torch.ones(4, 4, device="cuda")
# detach() so that none of the inputs have a ._base attribute.
a = base[0].detach()
b = base[1].detach()
base2 = torch.ones(2, 2, requires_grad=True) # noqa: F841
return [base], [a, b]
self.verify_aot_autograd(f, inp_callable, test_mutation=True)
    def test_input_mutation_alias_everything(self):
        # Mondo test that tests a combination of:
        # input is mutated, that aliases another input (so we make a synthetic base)
        # an output is an alias of another output
        # an output is an alias of an intermediate
        # a and c are aliased
        def f(a, b, c):
            c.mul_(2)  # mutates c
            b.t_()  # metadata mutate b
            tmp = a + c
            out1 = tmp.view(-1)
            out2 = b.t()
            out3 = out1.unsqueeze(0)
            # out1 and out3 are aliases of an intermediate, and alias each other!
            # out2 aliases an input, so we don't return it
            return out1, out2, out3

        def inp_callable(req_grad):
            base1 = torch.ones(2, 2, requires_grad=req_grad)
            base2 = torch.ones(2, 2, requires_grad=req_grad)
            # Note: in our test, the add() is important because we need the graph inputs to be non-leaves so we can mutate them.
            base1_ = base1.add(1)
            base2_ = base2.add(1)
            a = base1_.view(-1)
            b = base2_
            c = base1_.view(-1)
            return [base1, base2], [a, b, c]

        self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=False), test_mutation=True
        )
        fw_graph = self.verify_aot_autograd(
            f, partial(inp_callable, req_grad=True), test_mutation=True
        )
        # Expected:
        # - 2 inputs in the forward: synthetic_base_a_c, b
        # - 1 output in the forward: "tmp"
        # out2 is an alias of an input, and will be generated off of b outside of the compiled fn
        # out1 and out3 are aliases of tmp, that we generate outside of the compiled function
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, primals_1, primals_2):
    clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
    view = torch.ops.aten.view.default(primals_2, [2, 2]); primals_2 = None
    as_strided_1 = torch.ops.aten.as_strided.default(clone, [4], [1], 0)
    mul = torch.ops.aten.mul.Tensor(as_strided_1, 2); as_strided_1 = None
    as_strided_scatter = torch.ops.aten.as_strided_scatter.default(clone, mul, [4], [1], 0); clone = mul = None
    as_strided_2 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    t = torch.ops.aten.t.default(view); view = None
    as_strided_5 = torch.ops.aten.as_strided.default(as_strided_scatter, [4], [1], 0)
    add = torch.ops.aten.add.Tensor(as_strided_5, as_strided_2); as_strided_5 = as_strided_2 = None
    view_1 = torch.ops.aten.view.default(add, [-1])
    t_1 = torch.ops.aten.t.default(t)
    unsqueeze = torch.ops.aten.unsqueeze.default(view_1, 0)
    return (as_strided_scatter, t, view_1, t_1, unsqueeze, add)""",
        )  # noqa: B950
    def test_dynamic_shape_output_not_in_bw_graph(self):
        """A symint forward output must not produce a tangent input in the
        backward graph."""

        def f(x):
            # Returns one tensor and one symint (x.shape[0] under dynamic=True).
            return [x + 1, x.shape[0]]

        inp = torch.ones(5, requires_grad=True)
        bw_graph_cell = [None]
        compiled_f = aot_function(
            f,
            fw_compiler=nop,
            bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
            decompositions={},
            keep_inference_input_mutations=False,
            dynamic=True,
        )
        out = compiled_f(inp)
        out[0].sum().backward()
        # The important bit: the forward fn returns 2 outputs,
        # but one of them is a symint so we should only see
        # 1 grad_output as an input to the backward graph.
        # (Otherwise, autograd will plumb a None as the value of the grad_output,
        # which causes inductor to complain).
        self.assertExpectedInline(
            bw_graph_cell[0].code.strip(),
            """\
def forward(self, tangents_1):
    return (tangents_1,)""",
        )
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [
lambda: torch.randn(5, requires_grad=True),
lambda: torch.randn(5, requires_grad=False),
]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_some_output_requires_grad_input_doesnt(self):
def f(a, b):
a_view = a.view(-1)
a_view.requires_grad_(True)
return a_view
inp = [torch.randn(3, 3), torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def test_some_outputs_dont_require_grad_view(self):
def f(a, b):
return a.detach(), b
inp = [
torch.randn(3, 3, requires_grad=True),
torch.randn(3, 3, requires_grad=True),
]
self.verify_aot_autograd(f, inp)
def test_some_outputs_dont_require_grad_non_view(self):
def f(a, b):
return a.add(1).detach(), b
inp = [
torch.randn(3, 3, requires_grad=True),
torch.randn(3, 3, requires_grad=True),
]
self.verify_aot_autograd(f, inp)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def get_graph_size(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
f = aot_function(foo, nop, get_graph_size)
with torch.set_grad_enabled(False):
f(*inps)
self.assertIsNone(graph_size)
f = aot_function(foo, nop, get_graph_size)
with torch.set_grad_enabled(True):
out = f(*inps)
self.assertIsNone(graph_size)
out.sum().backward()
self.assertTrue(graph_size > 2)
def test_output_dict(self):
def f(x):
return {"a": x, "b": x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {"a": x, "b": y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
a = torch.randn(3, requires_grad=True)
b = torch.randn(3, requires_grad=True)
def inp_callable():
inps = [{"a": a, "b": b}]
return inps, inps
self.verify_aot_autograd(f, inp_callable)
def test_module(self):
mod = nn.Sequential(nn.Linear(32, 32), nn.ReLU())
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
ref_grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
out = compiled_mod(inp)
out.sum().backward()
grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
self.assertEqual((out, grads), (ref_out, ref_grads))
def test_batchnorm(self):
mod = compiled_module(nn.BatchNorm2d(4), nop, nop)
x = torch.ones(1, 4, 2, 2)
mod(x).sum().backward()
def test_list_codegen(self):
def list_nop(f, _):
def g(inps):
return f(*inps)
g._boxed_call = True
return g
def f(a, b, c):
return a.sin() * b.cos() * c.sin()
f = aot_function(f, list_nop)
inp = [torch.randn(5, requires_grad=True) for _ in range(3)]
f(*inp).sum().backward()
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    def test_compilation_context(self, counter):
        """The AOT compilation context reports the graph id/phase
        (forward / inference / backward) seen by each compiler invocation."""

        def f(x):
            return x.sin().sin()

        count = []

        def compiler(fx_g, _):
            # Record (context name, node count) for each compiled graph.
            context = get_aot_compilation_context()
            count.append((context[0], len(fx_g.graph.nodes)))
            return fx_g

        f = aot_function(f, compiler)
        out = f(torch.randn(5, requires_grad=True))
        # Re-wrapping compiles a fresh (inference) graph for the no-grad input.
        f = aot_function(f, compiler)
        f(torch.randn(5))
        out.sum().backward()
        self.assertExpectedInline(
            str(count),
            """[(['0_forward'], 4), (['1_inference'], 4), (['0_backward'], 8)]""",
        )
def test_dupe_arg(self):
def f(x, y):
return x + y
x = torch.randn(3, 3, requires_grad=True)
self.verify_aot_autograd(f, [x, x])
def test_dupe_arg_torture(self):
def f(x, y):
x.t_()
y.unsqueeze_(0)
return x + y
x = torch.randn(3, 3, requires_grad=True).clone()
self.verify_aot_autograd(f, [x, x])
# See https://github.com/pytorch/pytorch/issues/100224
def test_dupe_arg_returned_as_output(self):
def f(a, b, a_):
a[0].add_(1)
return a_
f_compiled = aot_function(f, nop)
a = torch.ones(2)
b = torch.ones(2)
out_ref = f(a, b, a)
a2 = torch.ones(2)
b2 = torch.ones(2)
out_test = f_compiled(a2, b2, a2)
self.assertEqual(out_ref, out_test)
self.assertEqual(a, a2)
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    @patch("torch._functorch.config.debug_assert", True)
    def test_invalid_dupe_left_bias(self, counter):
        """Dedupe must kick in even when only the *first* of the duplicated
        args is metadata-mutated — and the dedupe guard must fire when the args
        stop being duplicates at runtime."""
        # This test checks that, just because only the first
        # argument did a metadata mutation, we still correctly
        # switch to strategy 2 (deduplicate)
        # See: https://github.com/pytorch/pytorch/pull/89896#discussion_r1036224447
        class F(torch.nn.Module):
            def forward(self, x, y):
                x.t_()
                return (x + y,)

        x = torch.randn(3, 3, requires_grad=True).clone()
        y = torch.randn(3, 3, requires_grad=True)
        self.verify_aot_autograd(F(), [x, x])
        # Compiled with (x, x): inputs assumed duplicated; (x, y) must trip the guard.
        fxx = aot_module_simplified(F(), (x, x), nop)
        self.assertExpectedRaisesInline(
            AssertionError,
            lambda: fxx(x, y),
            """At compilation time, graph 2 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
        )
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    @patch("torch._functorch.config.debug_assert", True)
    def test_invalid_dupe(self, counter):
        # Real-tensor variant of the dedupe guard test; see _test_invalid_dupe.
        self._test_invalid_dupe(counter, fake=False)
    # See Note: Dynamo recompilation guarding invalid grad for why this test exists
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    @patch("torch._functorch.config.debug_assert", True)
    def test_invalid_dupe_fake(self, counter):
        # Fake-tensor variant of the dedupe guard test; see _test_invalid_dupe.
        self._test_invalid_dupe(counter, fake=True)
    def _test_invalid_dupe(self, counter, fake):
        """Shared body for test_invalid_dupe / test_invalid_dupe_fake.

        Compiles the module twice — once with distinct inputs, once with a
        duplicated input — and checks that the dedupe assumption baked into
        the second compilation raises when violated at runtime.

        Args:
            counter: patched AOT_COUNTER (fixes graph ids in the message).
            fake: if True, compile against FakeTensor inputs instead of real ones.
        """

        class F(torch.nn.Module):
            def forward(self, x, y):
                # In-place metadata mutations force the dedupe strategy.
                x.unsqueeze_(0)
                y.unsqueeze_(0)
                return (x + y,)

        x = torch.randn(3, 3, requires_grad=True).clone()
        y = torch.randn(3, 3, requires_grad=True).clone()

        if fake:
            shape_env = ShapeEnv()
            fake_mode = FakeTensorMode(shape_env=shape_env)

            fake_x = fake_mode.from_tensor(x)
            fake_y = fake_mode.from_tensor(y)

        if fake:
            fxy = aot_module_simplified(F(), (fake_x, fake_y), nop)
        else:
            fxy = aot_module_simplified(F(), (x, y), nop)

        fxy(x, y)
        # Fresh inputs: the originals were metadata-mutated by the call above.
        x = torch.randn(3, 3, requires_grad=True).clone()
        y = torch.randn(3, 3, requires_grad=True).clone()
        fxy(x, x)  # is ok!

        if fake:
            fxx = aot_module_simplified(F(), (fake_x, fake_x), nop)
        else:
            fxx = aot_module_simplified(F(), (x, x), nop)

        x = torch.randn(3, 3, requires_grad=True).clone()
        y = torch.randn(3, 3, requires_grad=True).clone()
        fxx(x, x)

        # Note This should not raise! Once we have guards in place here,
        # we will have this working correctly, as it should recompile.
        x = torch.randn(3, 3, requires_grad=True).clone()
        y = torch.randn(3, 3, requires_grad=True).clone()
        self.assertExpectedRaisesInline(
            AssertionError,
            lambda: fxx(x, y),
            """At compilation time, graph 1 was compiled under the assumption that input 1 would be a duplicate of input 0, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
        )
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    @patch("torch._functorch.config.debug_assert", True)
    def test_invalid_requires_grad(self, counter):
        # Real-tensor variant of the requires_grad guard test; see _test_invalid_requires_grad.
        self._test_invalid_requires_grad(counter, fake=False)
    # See Note: Dynamo recompilation guarding invalid grad for why this test exists
    @patch("torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count)
    @patch("torch._functorch.config.debug_assert", True)
    def test_invalid_requires_grad_fake(self, counter):
        # Fake-tensor variant of the requires_grad guard test; see _test_invalid_requires_grad.
        self._test_invalid_requires_grad(counter, fake=True)
    def _test_invalid_requires_grad(self, counter, fake):
        """Shared body for test_invalid_requires_grad / ..._fake.

        A graph compiled with a non-grad input must raise (via debug_assert)
        when called with a grad-requiring tensor in that slot, while the
        grad-to-non-grad direction is allowed.

        Args:
            counter: patched AOT_COUNTER (fixes graph ids in the message).
            fake: if True, compile against FakeTensor inputs instead of real ones.
        """

        class F(torch.nn.Module):
            def forward(self, x, y):
                return (x + y,)

        x = torch.randn(3, 3, requires_grad=True)
        y = torch.randn(3, 3, requires_grad=True)
        z = torch.randn(3, 3, requires_grad=False)

        if fake:
            shape_env = ShapeEnv()
            fake_mode = FakeTensorMode(shape_env=shape_env)

            fake_x = fake_mode.from_tensor(x)
            fake_y = fake_mode.from_tensor(y)
            fake_z = fake_mode.from_tensor(z)

        if fake:
            fxy = aot_module_simplified(F(), (fake_x, fake_y), nop)
        else:
            fxy = aot_module_simplified(F(), (x, y), nop)

        # Compiled with grad inputs: calling with a non-grad input is fine.
        compare_equal_outs_and_grads(self, F(), fxy, (x, y))
        compare_equal_outs_and_grads(self, F(), fxy, (x, z))

        if fake:
            fxz = aot_module_simplified(F(), (fake_x, fake_z), nop)
        else:
            fxz = aot_module_simplified(F(), (x, z), nop)

        compare_equal_outs_and_grads(self, F(), fxz, (x, z))

        # Compiled with a non-grad input: a grad-requiring input must trip the guard.
        self.assertExpectedRaisesInline(
            AssertionError,
            lambda: fxz(x, y),
            """At compilation time, graph 1 was compiled under the assumption that input 1 would not require grad, but at runtime this was not the case. This indicates a guard bug in AOTAutograd or Dynamo, please file a bug to PyTorch.""", # noqa: B950
        )
def test_custom_autograd(self):
class CustomFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
return grad_output + 1
def f(x):
return CustomFn.apply(x)
self.verify_aot_autograd(f, [torch.randn(3)])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_autocast_disable_guard(self):
with torch._C._DisableAutocast():
x = torch.rand([4, 4]).cuda()
y = x @ x
self.assertEqual(y.dtype, torch.float32)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_nonidempotent_amp(self):
def f(self_s_emb, add_3):
einsum_2 = torch.functional.einsum("ah,th->t", self_s_emb, add_3)
log_softmax_2 = einsum_2.log_softmax(-1)
return (log_softmax_2,)
args = [
torch.rand((1, 256), dtype=torch.float32, device="cuda"),
torch.rand((30, 256), dtype=torch.float16, device="cuda"),
]
with torch.cuda.amp.autocast(enabled=True):
self.verify_aot_autograd(f, args)
args = [e.requires_grad_(True) for e in args]
with torch.cuda.amp.autocast(enabled=True):
self.verify_aot_autograd(f, args)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
@unittest.skipIf(not torch.backends.cudnn.is_available(), "CUDNN is unavailable")
@skipIfRocm # https://github.com/pytorch/pytorch/issues/96560
def test_batch_norm_amp(self):
device = "cuda"
input_dtype = torch.float16
param_dtype = torch.float32
weight, bias = (
torch.ones(64, device=device, dtype=param_dtype, requires_grad=True)
for _ in range(2)
)
running_mean, running_var = (
torch.ones(64, device=device, dtype=param_dtype) for _ in range(2)
)
def bn(x):
return torch.ops.aten.cudnn_batch_norm(
x,
weight,
bias,
running_mean,
running_var,
False,
0.1,
1e-05,
)
inp = torch.ones(
torch.Size([16, 64, 112, 112]), dtype=input_dtype, device=device
)
ref = bn(inp)
cudnn_batch_norm_decomp = torch._decomp.get_decompositions(
{torch.ops.aten.cudnn_batch_norm}
)
aot_fn = make_fx(bn, decomposition_table=cudnn_batch_norm_decomp)(inp)
res = aot_fn(inp)
for a, b in zip(ref, res):
assert torch.allclose(a, b)
def test_output_op_depending_on_symint(self):
"""
It won't be obvious from reading this test what it's testing for. We should probably make it into a more
focused unit test.
An issue with the following program was the expand op would end up depending on a symint whose proxy was
incorrectly associated with one of the grad tensors rather than input tensors. It broke partitioner logic
and the net result was aot_function failed to produce a function and threw an exception instead.
"""
inp = torch.randn(5, requires_grad=True)
def f(x):
return x.expand(x.shape)
# TODO(whc) make this work (test setup is wrong somehow)
# joint_forward_backward = create_joint_forward_backward(f)
# out = f(inp)
# joint_inputs = ([inp], [out.detach().contiguous()])
# fx_g = make_fx(joint_forward_backward)(*joint_inputs)
# TODO: assert outputs of fwd graph trace to correct symint
# e2e test that fails without symint clone fix
af = aot_function(
f,
nop,
partition_fn=partial(
min_cut_rematerialization_partition, compiler="inductor"
),
dynamic=True,
)
out = af(inp)
self.assertEqual(out, f(inp))
def test_inference_mode(self):
m = torch.nn.Linear(4, 4)
inp = torch.randn(4, 4)
aot_mod = aot_module(m, fw_compiler=nop)
with torch.inference_mode():
out_ref = m(inp)
out_test = aot_mod(inp)
self.assertEqual(out_ref, out_test)
def test_default_partitioner_saves_symints_not_tensors_for_bw(self):
"""
In this test, the important thing is that primals_1 is **only** needed in the backward
in order to grab its sizes.
We need to assert that what we save for the backward are the tensor's sizes, and not the tensor itself.
The way this test is set up, it will actually fail if we try to save the input tensor for backward.
Why?
b.masked_fill_(c, 0) has a backward that requires knowing a's sizes
b.masked_fill_(c, 0) **also** mutates a (because b and a are aliased)
The autograd engine yells at us if we save "a" for backward, and then try to mutate it.
"""
def f(a):
b = a[0]
c = torch.ones_like(b, dtype=torch.bool)
d = b.masked_fill_(c, 0)
return d
compiled_f = aot_function(f, nop, dynamic=True)
inp_ref = torch.ones(2, 2, requires_grad=True)
inp_test = torch.ones(2, 2, requires_grad=True)
out_ref = f(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
self.assertEqual(inp_ref.grad, inp_test.grad)
def test_buffer_copied_in_graph(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.nn.Buffer(torch.zeros(1))
self.w1 = torch.nn.Parameter(torch.zeros(1))
self.w2 = torch.nn.Parameter(torch.zeros(1))
def forward(self, x):
self.buf.add_(1)
return (self.w1 * x * self.w2).sum() + self.buf.sum()
model_for_eager = MyModel()
model_for_compile = copy.deepcopy(model_for_eager)
fw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=nop,
keep_inference_input_mutations=True,
)
inp_ref = torch.ones(1, requires_grad=True)
inp_test = torch.ones(1, requires_grad=True)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
self.assertExpectedInline(
fw_graph_cell[0].code.strip(),
"""\
def forward(self, primals_1, primals_2, primals_3, primals_4):
add = torch.ops.aten.add.Tensor(primals_3, 1)
mul = torch.ops.aten.mul.Tensor(primals_1, primals_4)
mul_1 = torch.ops.aten.mul.Tensor(mul, primals_2)
sum_1 = torch.ops.aten.sum.default(mul_1); mul_1 = None
sum_2 = torch.ops.aten.sum.default(add)
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
copy_ = torch.ops.aten.copy_.default(primals_3, add); primals_3 = add = copy_ = None
return (add_1, primals_1, primals_2, primals_4, mul)""",
)
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
eager_grads = [p.grad for _, p in model_for_eager.named_parameters()]
compile_grads = [p.grad for _, p in model_for_compile.named_parameters()]
self.assertEqual(eager_grads, compile_grads)
self.assertEqual(inp_ref.grad, inp_test.grad)
def test_buffer_copied_in_graph_with_different_shapes(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.nn.Buffer(torch.ones(4, 4))
self.w = torch.nn.Parameter(
torch.Tensor([[4, 5], [1, 2], [6, 7], [8, 9]])
)
def forward(self, x):
self.buf.add_(1)
return (self.w @ x).sum() + self.buf.sum()
model_for_eager = MyModel()
model_for_compile = copy.deepcopy(model_for_eager)
fw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=nop,
keep_inference_input_mutations=True,
)
inp_ref = torch.ones(2, 4, requires_grad=True)
inp_test = torch.ones(2, 4, requires_grad=True)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
self.assertExpectedInline(
fw_graph_cell[0].code.strip(),
"""\
def forward(self, primals_1, primals_2, primals_3):
add = torch.ops.aten.add.Tensor(primals_2, 1)
mm = torch.ops.aten.mm.default(primals_1, primals_3)
sum_1 = torch.ops.aten.sum.default(mm); mm = None
sum_2 = torch.ops.aten.sum.default(add)
add_1 = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
copy_ = torch.ops.aten.copy_.default(primals_2, add); primals_2 = add = copy_ = None
return (add_1, primals_1, primals_3)""",
)
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
eager_grads = [p.grad for _, p in model_for_eager.named_parameters()]
compile_grads = [p.grad for _, p in model_for_compile.named_parameters()]
self.assertEqual(eager_grads, compile_grads)
self.assertEqual(inp_ref.grad, inp_test.grad)
def test_buffer_batch_norm(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m = torch.nn.BatchNorm1d(100)
def forward(self, x):
return self.m(x)
model_for_eager = MyModel()
model_for_compile = copy.deepcopy(model_for_eager)
fw_graph_cell = [None]
bw_graph_cell = [None]
compiled_f = aot_module(
model_for_compile,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=bw_graph_cell)
),
keep_inference_input_mutations=True,
)
inp_ref = torch.ones(20, 100, requires_grad=True)
inp_test = torch.ones(20, 100, requires_grad=True)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
self.assertExpectedInline(
fw_graph_cell[0].code.strip(),
"""\
def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6):
add = torch.ops.aten.add.Tensor(primals_5, 1)
_native_batch_norm_legit_functional = torch.ops.aten._native_batch_norm_legit_functional.default(primals_6, primals_1, primals_2, primals_3, primals_4, True, 0.1, 1e-05); primals_2 = None
getitem = _native_batch_norm_legit_functional[0]
getitem_1 = _native_batch_norm_legit_functional[1]
getitem_2 = _native_batch_norm_legit_functional[2]
getitem_3 = _native_batch_norm_legit_functional[3]
getitem_4 = _native_batch_norm_legit_functional[4]; _native_batch_norm_legit_functional = None
copy_ = torch.ops.aten.copy_.default(primals_3, getitem_3); primals_3 = copy_ = None
copy__1 = torch.ops.aten.copy_.default(primals_4, getitem_4); primals_4 = copy__1 = None
copy__2 = torch.ops.aten.copy_.default(primals_5, add); primals_5 = add = copy__2 = None
return (getitem, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem_4)""", # noqa: B950
)
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
eager_grads = [p.grad for _, p in model_for_eager.named_parameters()]
compile_grads = [p.grad for _, p in model_for_compile.named_parameters()]
self.assertEqual(eager_grads, compile_grads)
self.assertExpectedInline(
bw_graph_cell[0].code.strip(),
"""\
def forward(self, primals_1, primals_6, getitem_1, getitem_2, getitem_3, getitem_4, tangents_1):
native_batch_norm_backward = torch.ops.aten.native_batch_norm_backward.default(tangents_1, primals_6, primals_1, getitem_3, getitem_4, getitem_1, getitem_2, True, 1e-05, [True, True, True]); tangents_1 = primals_6 = primals_1 = getitem_3 = getitem_4 = getitem_1 = getitem_2 = None
getitem_5 = native_batch_norm_backward[0]
getitem_6 = native_batch_norm_backward[1]
getitem_7 = native_batch_norm_backward[2]; native_batch_norm_backward = None
return (getitem_6, getitem_7, None, None, None, getitem_5)""", # noqa: B950
)
self.assertEqual(inp_ref.grad, inp_test.grad)
def test_new_inp_requires_grad_now(self):
def f(x, y):
return x.add_(y)
fw_graph_cell = [None]
bw_graph_cell = [None]
compiled_f = aot_function(
f,
fw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=fw_graph_cell)
),
bw_compiler=make_boxed_compiler(
partial(extract_graph, graph_cell=bw_graph_cell)
),
keep_inference_input_mutations=True,
)
inp_ref = (
torch.ones(20, 100, requires_grad=False),
torch.ones(20, 100, requires_grad=True),
)
inp_test = (
torch.ones(20, 100, requires_grad=False),
torch.ones(20, 100, requires_grad=True),
)
out_ref = f(*inp_ref)
out_test = compiled_f(*inp_test)
# There is no copy_ method
self.assertExpectedInline(
fw_graph_cell[0].code.strip(),
"""\
def forward(self, primals_1, primals_2):
clone = torch.ops.aten.clone.default(primals_1); primals_1 = None
add = torch.ops.aten.add.Tensor(clone, primals_2); clone = primals_2 = None
return (add, add)""",
) # noqa: B950
self.assertEqual(out_ref, out_test)
out_ref.sum().backward()
out_test.sum().backward()
self.assertExpectedInline(
bw_graph_cell[0].code.strip(),
"""\
def forward(self, tangents_1):
return (None, tangents_1)""",
) # noqa: B950
def test_real_weights_in_symbolic_mode(self):
from functorch.experimental import functionalize
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.linear(x)
return x
m = M().eval()
inp = torch.randn(2, 5)
gm = make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=True)(inp)
self.assertEqual(gm(torch.ones(2, 5)), m(torch.ones(2, 5)))
gm_functionalized = make_fx(
functionalize(
gm,
),
tracing_mode="symbolic",
_allow_non_fake_inputs=True,
)(inp)
self.assertEqual(gm_functionalized(torch.ones(2, 5)), m(torch.ones(2, 5)))
inp_count = 0
for node in gm.graph.nodes:
if node.op == "placeholder":
inp_count += 1
# No more param lifting
self.assertEqual(inp_count, 1)
inp_count = 0
for node in gm_functionalized.graph.nodes:
if node.op == "placeholder":
inp_count += 1
# No more param lifting
self.assertEqual(inp_count, 1)
with self.assertRaisesRegex(
Exception, "Please convert all Tensors to FakeTensors"
):
make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=False)(
torch.randn(2, 5)
)
def test_real_weights_in_symbolic_mode_with_inplace_ops(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer = torch.nn.Buffer(torch.ones(4, 5))
def forward(self, x):
y = self.buffer.add_(3)
y.resize_([20])
assert y.shape == self.buffer.shape
return x.sum() + self.buffer.sum()
m = M().eval()
inp = torch.randn(2, 5)
# inplace mutation on attr is not allowed
with self.assertRaisesRegex(Exception, "Can't call metadata"):
make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=True)(inp)
def _compile_and_erase_bases(self, *output_view_indices):
# Overrides _base and _view_func tensor attributes, so as to avoid the view-replay
# execution path when reconstructing views.
class NoViewReplayTensor(torch.Tensor):
@property
def _base(self):
return None
@property
def _view_func(self):
return None
# Wraps the outputs that are views of the FX graph 'g' with NoViewReplayTensor,
# since they are the only ones that will get reconstructed.
def wrapper(g, *args, **kwargs):
outs = list(g(*args, **kwargs))
for i in output_view_indices:
outs[i] = NoViewReplayTensor(outs[i])
return tuple(outs)
return lambda f: aot_function(f, fw_compiler=lambda g, _: partial(wrapper, g))
def test_output_aliases_input_view_meta_replay(self):
@self._compile_and_erase_bases(0)
def f(a):
return a.view(-1)
inp = torch.ones(2, 2, requires_grad=True)
out = f(inp)
self.assertIsNotNone(out.grad_fn)
self.assertExpectedInline(
str(out.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
def test_output_aliases_intermediate_view_meta_replay(self):
@self._compile_and_erase_bases(0, 1)
def f(a):
b = a.clone()
return b.view(-1), b.view(-1)
inp = torch.ones(2, 2, requires_grad=True)
out1, out2 = f(inp)
self.assertIsNotNone(out1.grad_fn)
self.assertExpectedInline(
str(out1.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
self.assertIsNotNone(out2.grad_fn)
self.assertExpectedInline(
str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
def test_output_aliases_output_view_meta_replay(self):
@self._compile_and_erase_bases(1)
def f(a):
b = a.add(10)
return b, b.view(-1)
inp = torch.ones(2, 2, requires_grad=True)
out1, out2 = f(inp)
self.assertEqual(out1.untyped_storage(), out2.untyped_storage())
self.assertIsNotNone(out2.grad_fn)
self.assertExpectedInline(
str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
@patch("torch._dynamo.config.assume_static_by_default", False)
def test_dynamic_output_aliases_input_view_meta_replay(self):
# - torch.compile: using it so we can have a SymInt in the FX graph.
# - Compiling with inductor, so that tensor._base isn't tracked.
#
# This should force the use of as_strided in the view reconstruction path.
# The first 2 view-replay paths won't be taken because:
# - target_functional_tensor will be symbolic (_functionalize_is_symbolic call)
# - tensor._base will be None
@torch.compile(backend="inductor")
def f(a, sz):
return a.view(sz), a.view(-1)
inp = torch.ones(2, 2, requires_grad=True)
out1, out2 = f(inp, (4,))
self.assertIsNotNone(out1.grad_fn)
self.assertExpectedInline(
str(out1.grad_fn.__class__), """<class 'AsStridedBackward0'>"""
)
self.assertIsNotNone(out2.grad_fn)
self.assertExpectedInline(
str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
def test_duplicated_arguments_on_tensor_overlap(self):
# Test whether we correctly handle duplicated arguments when changing the
# parameters, so that we take the base tensor as argument.
#
# - t0 and t1 must have storage overlap: triggers the target execution flow.
# - s0 and s1 must be equal: triggers the error in the target execution flow.
@torch.compile(dynamic=True)
def foo(t0, t1, s0, s1):
return t0.add_(s0), t1.add_(s1)
tensor = torch.rand(10)
foo(tensor, tensor[1:-1], 2, 2)
@parametrize("use_autograd", [False, True])
def test_mark_outputs_dynamic(self, use_autograd: bool):
counters.clear()
torch._dynamo.reset()
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x, y):
return torch.matmul(x, y)
@torch.compile(backend="aot_eager", fullgraph=True)
def fn2(z):
return z * 2
# 1. static
x = torch.randn(10, 10, requires_grad=use_autograd)
y = torch.randn(10, 10, requires_grad=use_autograd)
out = fn(x, y)
self.assertFalse(hasattr(out, "_dynamo_weak_dynamic_indices"))
out2 = fn2(out)
self.assertFalse(hasattr(out2, "_dynamo_weak_dynamic_indices"))
self.assertEqual(counters["aot_autograd"]["total"], 2)
counters.clear()
# 2. dynamic
x = torch.randn(20, 20)
y = torch.randn(20, 20)
out = fn(x, y)
self.assertTrue(hasattr(out, "_dynamo_weak_dynamic_indices"))
out2 = fn2(out)
self.assertTrue(hasattr(out2, "_dynamo_weak_dynamic_indices"))
self.assertEqual(counters["aot_autograd"]["total"], 2)
counters.clear()
torch._dynamo.reset()
def test_mark_activations_dynamic(self):
counters.clear()
torch._dynamo.reset()
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x, y):
out = torch.matmul(x, y)
out2 = torch.matmul(out, y)
out3 = torch.matmul(out2, y)
return torch.matmul(out3, y)
def make_assert_pack(dynamic):
def pack(activation):
assert hasattr(activation, "_dynamo_weak_dynamic_indices") == dynamic
return activation
return pack
def make_assert_unpack(dynamic):
def unpack(activation):
assert hasattr(activation, "_dynamo_weak_dynamic_indices") == dynamic
return activation
return unpack
# 1. static
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(
make_assert_pack(False), make_assert_unpack(False)
):
fn(x, y)
self.assertEqual(counters["aot_autograd"]["total"], 1)
counters.clear()
# 2. dynamic
x = torch.randn(20, 20, requires_grad=True)
y = torch.randn(20, 20, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(
make_assert_pack(True), make_assert_unpack(True)
):
fn(x, y)
self.assertEqual(counters["aot_autograd"]["total"], 1)
counters.clear()
torch._dynamo.reset()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
@torch._functorch.config.patch(saved_tensors_hooks_filtering_mode="no_static")
@torch._functorch.config.patch(recompute_views=True)
def test_saved_tensors_hooks_mutations_raise(self):
ctx = torch.autograd.graph.saved_tensors_hooks
device = "cuda"
class SAF(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gx):
(saved_x,) = ctx.saved_tensors
return gx + saved_x
def mutate(x):
return x.mul_(2)
def fn(x):
x = 2 * x
x = SAF.apply(x)
return x
def inp_fn():
x = torch.ones(2, 3, device=device, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(x, 1)
return x
with self.assertRaisesRegex(
AssertionError, "Saved tensors hooks with inputs mutations are not allowed"
):
try:
with ctx(*saved_tensors_hooks_to_gm(mutate, mutate, None, None)):
x = inp_fn()
y = torch.compile(fn, backend="aot_eager", fullgraph=True)(x)
y.sum().backward()
except torch._dynamo.exc.BackendCompilerFailed as e:
raise e.inner_exception from e
def test_mark_activations_dynamic_with_nested(self):
# The flattened tensors of the nested tensor aren't
# marked as activations, but they add some offset
# to the fw_outs. This test ensures that we handle
# that offset properly.
counters.clear()
torch._dynamo.reset()
def make_assert_pack(dynamic):
def pack(activation):
assert hasattr(activation, "_dynamo_weak_dynamic_indices") == dynamic
return activation
return pack
def make_assert_unpack(dynamic):
def unpack(activation):
assert hasattr(activation, "_dynamo_weak_dynamic_indices") == dynamic
return activation
return unpack
# 1. static
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x, y, nt):
out = torch.matmul(x, y)
return out.sum() + nt.clone()
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = torch.randn(2, 3, requires_grad=True, dtype=torch.float64)
b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64)
c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64)
nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
with torch.autograd.graph.saved_tensors_hooks(
make_assert_pack(False), make_assert_unpack(False)
):
fn(x, y, nt)
self.assertEqual(counters["aot_autograd"]["total"], 1)
counters.clear()
# 2. dynamic
x = torch.randn(20, 20, requires_grad=True)
y = torch.randn(20, 20, requires_grad=True)
a = torch.randn(2, 3, requires_grad=True, dtype=torch.float64)
b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64)
c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64)
nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
with torch.autograd.graph.saved_tensors_hooks(
make_assert_pack(True), make_assert_unpack(True)
):
fn(x, y, nt)
self.assertEqual(counters["aot_autograd"]["total"], 1)
counters.clear()
torch._dynamo.reset()
def extract_graph(fx_g, _, graph_cell):
graph_cell[0] = fx_g
return fx_g
def get_ins_outs(fx_g):
ins = []
outs = []
for n in fx_g.graph.nodes:
if n.op == "placeholder":
ins.append(n)
elif n.op == "output":
outs = tuple(n.args[0])
return ins, outs
def get_num_ins_outs(fx_g):
return tuple(len(i) for i in get_ins_outs(fx_g))
def get_fw_bw_graph(
f, inps, partitioner=min_cut_rematerialization_partition, dynamic=False
):
fw_graph_cell = [None]
bw_graph_cell = [None]
aot_function(
f,
fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=partitioner,
decompositions=default_decompositions,
dynamic=dynamic,
)(*inps).sum().backward()
return (fw_graph_cell[0], bw_graph_cell[0])
| TestAOTAutograd |
python | spyder-ide__spyder | spyder/plugins/switcher/widgets/switcher.py | {
"start": 1060,
"end": 1791
} | class ____(QObject):
"""Use with `installEventFilter` to get up/down arrow key press signal."""
sig_up_key_pressed = Signal()
sig_down_key_pressed = Signal()
sig_enter_key_pressed = Signal()
def eventFilter(self, src, e):
"""Override Qt eventFilter."""
if e.type() == QEvent.KeyPress:
if e.key() == Qt.Key_Up:
self.sig_up_key_pressed.emit()
return True
elif e.key() == Qt.Key_Down:
self.sig_down_key_pressed.emit()
return True
elif (e.key() == Qt.Key_Return):
self.sig_enter_key_pressed.emit()
return True
return super().eventFilter(src, e)
| KeyPressFilter |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 15343,
"end": 17502
} | class ____(TypedDict, total=False):
use_editable_dagster: bool
# ########################
# ##### CLI CONFIG
# ########################
def normalize_cli_config(
cli_options: Mapping[str, object], cli_context: click.Context
) -> DgRawCliConfig:
# Remove any options that weren't explicitly provided.
filtered_options = {
key: value
for key, value in cli_options.items()
if cli_context.get_parameter_source(key) != ParameterSource.DEFAULT
}
return _validate_cli_config(filtered_options)
def _validate_cli_config(cli_opts: Mapping[str, object]) -> DgRawCliConfig:
try:
for key, type_ in DgRawCliConfig.__annotations__.items():
_validate_cli_config_setting(cli_opts, key, type_)
_validate_cli_config_no_extraneous_keys(cli_opts)
except DgValidationError as e:
_raise_cli_config_validation_error(str(e))
return cast("DgRawCliConfig", cli_opts)
def _validate_cli_config_setting(cli_opts: Mapping[str, object], key: str, type_: type) -> None:
if key in cli_opts and not match_type(cli_opts[key], type_):
raise DgValidationError(f"`{key}` must be a {type_.__name__}.")
def _validate_cli_config_no_extraneous_keys(cli_opts: Mapping[str, object]) -> None:
extraneous_keys = [k for k in cli_opts.keys() if k not in DgRawCliConfig.__annotations__]
if extraneous_keys:
raise DgValidationError(f"Unrecognized fields:\n {extraneous_keys}")
def _raise_cli_config_validation_error(message: str) -> None:
raise DgError(f"Error in CLI options:\n {message}")
# ########################
# ##### FILE CONFIG
# ########################
# The Dg*FileConfig classes wrap config extracted from a config file. This may be either a dg.toml
# file or a pyproject.toml file. For `dg.toml`, the config is defined a the top level. For
# pyproject.toml the config must be mounted on the `tool.dg` section. Either way, once the config
# has been parsed and extracted into a Dg*FileConfig class, it does not matter which file type it
# was sourced from.
DgFileConfigDirectoryType = Literal["workspace", "project"]
| DgRawWorkspaceNewProjectOptions |
python | getsentry__sentry | tests/sentry/newsletter/test_dummy.py | {
"start": 169,
"end": 1617
} | class ____(TestCase):
def setUp(self) -> None:
self.newsletter = DummyNewsletter()
def test_defaults(self) -> None:
assert self.newsletter.DEFAULT_LISTS == self.newsletter.get_default_list_ids()
assert self.newsletter.DEFAULT_LIST_ID == self.newsletter.get_default_list_id()
def assert_subscriptions(self, user, count):
subscriptions = self.newsletter.get_subscriptions(user)
assert subscriptions.get("subscriptions") is not None
subscribed = [sub for sub in subscriptions["subscriptions"] if sub.subscribed]
assert len(subscribed) == count
def test_update_subscription(self) -> None:
user = self.create_user("subscriber@example.com")
self.assert_subscriptions(user, 0)
self.newsletter.create_or_update_subscription(user)
self.assert_subscriptions(user, 1)
def test_update_subscriptions(self) -> None:
user = self.create_user("subscriber@example.com")
self.assert_subscriptions(user, 0)
self.newsletter.create_or_update_subscriptions(user)
self.assert_subscriptions(user, 1)
def test_optout_email(self) -> None:
user = self.create_user("subscriber@example.com")
self.newsletter.create_or_update_subscriptions(user)
self.assert_subscriptions(user, 1)
self.newsletter.optout_email("subscriber@example.com")
self.assert_subscriptions(user, 0)
| DummyNewsletterTest |
python | astropy__astropy | astropy/coordinates/tests/test_spectral_quantity.py | {
"start": 306,
"end": 9729
} | class ____:
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_value(self, unit):
SpectralQuantity(1, unit=unit)
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_quantity(self, unit):
SpectralQuantity(1 * unit)
@pytest.mark.parametrize("unit", SPECTRAL_UNITS)
def test_init_spectralquantity(self, unit):
SpectralQuantity(SpectralQuantity(1, unit=unit))
@pytest.mark.parametrize("unit", (u.kg, u.byte))
def test_init_invalid(self, unit):
with pytest.raises(
u.UnitsError, match="SpectralQuantity instances require units"
):
SpectralQuantity(1, unit=unit)
with pytest.raises(
u.UnitsError, match="SpectralQuantity instances require units"
):
SpectralQuantity(1 * unit)
@pytest.mark.parametrize(
("unit1", "unit2"), list(zip(SPECTRAL_UNITS, SPECTRAL_UNITS))
)
def test_spectral_conversion(self, unit1, unit2):
sq1 = SpectralQuantity(1 * unit1)
sq2 = sq1.to(unit2)
sq3 = sq2.to(str(unit1)) # check that string units work
assert isinstance(sq2, SpectralQuantity)
assert isinstance(sq3, SpectralQuantity)
assert_quantity_allclose(sq1, sq3)
def test_doppler_conversion(self):
sq1 = SpectralQuantity(
1 * u.km / u.s, doppler_convention="optical", doppler_rest=500 * u.nm
)
sq2 = sq1.to(u.m / u.s)
assert_allclose(sq2.value, 1000)
sq3 = sq1.to(u.m / u.s, doppler_convention="radio")
assert_allclose(sq3.value, 999.996664)
sq4 = sq1.to(u.m / u.s, doppler_convention="relativistic")
assert_allclose(sq4.value, 999.998332)
sq5 = sq1.to(u.m / u.s, doppler_rest=499.9 * u.nm)
assert_allclose(sq5.value, 60970.685737)
val5 = sq1.to_value(u.m / u.s, doppler_rest=499.9 * u.nm)
assert_allclose(val5, 60970.685737)
def test_doppler_conversion_validation(self):
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = SpectralQuantity(1 * u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention not set, cannot convert to/from velocities",
):
sq1.to(u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention not set, cannot convert to/from velocities",
):
sq2.to(u.GHz)
with pytest.raises(
ValueError, match="doppler_rest not set, cannot convert to/from velocities"
):
sq1.to(u.km / u.s, doppler_convention="radio")
with pytest.raises(
ValueError, match="doppler_rest not set, cannot convert to/from velocities"
):
sq2.to(u.GHz, doppler_convention="radio")
with pytest.raises(
u.UnitsError,
match="Argument 'doppler_rest' to function 'to' must be in units",
):
sq1.to(u.km / u.s, doppler_convention="radio", doppler_rest=5 * u.kg)
with pytest.raises(
u.UnitsError,
match="Argument 'doppler_rest' to function 'to' must be in units",
):
sq2.to(u.GHz, doppler_convention="radio", doppler_rest=5 * u.kg)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq1.to(u.km / u.s, doppler_convention="banana", doppler_rest=5 * u.GHz)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq2.to(u.GHz, doppler_convention="banana", doppler_rest=5 * u.GHz)
with pytest.raises(ValueError, match="Original doppler_convention not set"):
sq2.to(u.km / u.s, doppler_convention="radio")
with pytest.raises(ValueError, match="Original doppler_rest not set"):
sq2.to(u.km / u.s, doppler_rest=5 * u.GHz)
def test_doppler_set_parameters(self):
sq1 = SpectralQuantity(1 * u.km / u.s)
with pytest.raises(
ValueError,
match="doppler_convention should be one of optical/radio/relativistic",
):
sq1.doppler_convention = "banana"
assert sq1.doppler_convention is None
sq1.doppler_convention = "radio"
assert sq1.doppler_convention == "radio"
with pytest.raises(
AttributeError,
match="doppler_convention has already been set, and cannot be changed",
):
sq1.doppler_convention = "optical"
assert sq1.doppler_convention == "radio"
with pytest.raises(
u.UnitsError,
match="Argument 'value' to function 'doppler_rest' must be in units",
):
sq1.doppler_rest = 5 * u.kg
sq1.doppler_rest = 5 * u.GHz
assert_quantity_allclose(sq1.doppler_rest, 5 * u.GHz)
with pytest.raises(
AttributeError,
match="doppler_rest has already been set, and cannot be changed",
):
sq1.doppler_rest = 4 * u.GHz
assert_quantity_allclose(sq1.doppler_rest, 5 * u.GHz)
def test_arithmetic(self):
# Checks for arithmetic - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity(10 * u.AA)
sq2 = sq1 * 2
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == 20
assert sq2.unit == u.AA
sq2 = sq1 / 2
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == 5
assert sq2.unit == u.AA
sq3 = SpectralQuantity(10 * u.AA)
sq3 *= 2
assert isinstance(sq3, SpectralQuantity)
assert sq3.value == 20
assert sq3.unit == u.AA
sq4 = SpectralQuantity(10 * u.AA)
sq4 /= 2
assert isinstance(sq4, SpectralQuantity)
assert sq4.value == 5
assert sq4.unit == u.AA
sq5 = SpectralQuantity(10 * u.AA)
with pytest.raises(
TypeError,
match="Cannot store the result of this operation in SpectralQuantity",
):
sq5 += 10 * u.AA
# Note different order to sq2
sq6 = SpectralQuantity(10 * u.AA)
sq6 = 2 * sq1
assert isinstance(sq6, SpectralQuantity)
assert sq6.value == 20
assert sq6.unit == u.AA
# Next, operations that should return Quantity
q1 = sq1 / u.s
assert isinstance(q1, u.Quantity) and not isinstance(q1, SpectralQuantity)
assert q1.value == 10
assert q1.unit.is_equivalent(u.AA / u.s)
q2 = sq1 / u.kg
assert isinstance(q2, u.Quantity) and not isinstance(q2, SpectralQuantity)
assert q2.value == 10
assert q2.unit.is_equivalent(u.AA / u.kg)
q3 = sq1 + 10 * u.AA
assert isinstance(q3, u.Quantity) and not isinstance(q3, SpectralQuantity)
assert q3.value == 20
assert q3.unit == u.AA
q4 = sq1 / SpectralQuantity(5 * u.AA)
assert isinstance(q4, u.Quantity) and not isinstance(q4, SpectralQuantity)
assert q4.value == 2
assert q4.unit == u.one
def test_ufuncs(self):
# Checks for ufuncs - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
for ufunc in (np.min, np.max):
sq2 = ufunc(sq1)
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == ufunc(sq1.value)
assert sq2.unit == u.AA
def test_functions(self):
# Checks for other functions - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
for func in (np.nanmin, np.nanmax):
sq2 = func(sq1)
assert isinstance(sq2, SpectralQuantity)
assert sq2.value == func(sq1.value)
assert sq2.unit == u.AA
# Next, operations that should return Quantity
for func in (np.sum,):
q3 = func(sq1)
assert isinstance(q3, u.Quantity) and not isinstance(q3, SpectralQuantity)
assert q3.value == func(sq1.value)
assert q3.unit == u.AA
@pytest.mark.xfail
def test_functions_std(self):
# np.std should return a Quantity but it returns a SpectralQuantity. We
# make this a separate xfailed test for now, but once this passes,
# np.std could also just be added to the main test_functions test.
# See https://github.com/astropy/astropy/issues/10245 for more details.
# Checks for other functions - some operations should return SpectralQuantity,
# while some should just return plain Quantity
# First, operations that should return SpectralQuantity
sq1 = SpectralQuantity([10, 20, 30] * u.AA)
q1 = np.std(sq1)
assert isinstance(q1, u.Quantity) and not isinstance(q1, SpectralQuantity)
assert q1.value == np.sum(sq1.value)
assert q1.unit == u.AA
| TestSpectralQuantity |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/asyncmy.py | {
"start": 3867,
"end": 5099
} | class ____(AsyncAdapt_dbapi_module):
def __init__(self, asyncmy: ModuleType):
super().__init__(asyncmy)
self.asyncmy = asyncmy
self.paramstyle = "format"
self._init_dbapi_attributes()
def _init_dbapi_attributes(self) -> None:
for name in (
"Warning",
"Error",
"InterfaceError",
"DataError",
"DatabaseError",
"OperationalError",
"InterfaceError",
"IntegrityError",
"ProgrammingError",
"InternalError",
"NotSupportedError",
):
setattr(self, name, getattr(self.asyncmy.errors, name))
STRING = util.symbol("STRING")
NUMBER = util.symbol("NUMBER")
BINARY = util.symbol("BINARY")
DATETIME = util.symbol("DATETIME")
TIMESTAMP = util.symbol("TIMESTAMP")
Binary = staticmethod(bytes)
def connect(self, *arg: Any, **kw: Any) -> AsyncAdapt_asyncmy_connection:
creator_fn = kw.pop("async_creator_fn", self.asyncmy.connect)
return await_(
AsyncAdapt_asyncmy_connection.create(
self,
creator_fn(*arg, **kw),
)
)
| AsyncAdapt_asyncmy_dbapi |
python | django__django | tests/model_forms/tests.py | {
"start": 3431,
"end": 3548
} | class ____(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = "__all__"
| ArticleStatusForm |
python | ansible__ansible | test/units/inventory/test_data.py | {
"start": 2419,
"end": 3438
} | class ____:
"""Recrursive visitor used to assert that an object is not tagged with, and does not contain, a TrustedAsTemplate tag."""
def __init__(self) -> None:
self.seen: set[int] = set()
def visit(self, obj: t.Any) -> None:
obj_id = id(obj)
if obj_id in self.seen:
return
self.seen.add(obj_id)
if TrustedAsTemplate.is_tagged_on(obj):
raise TrustFoundError(obj)
if isinstance(obj, (str, int, bool, types.NoneType)):
pass # expected scalar type
elif isinstance(obj, (InventoryData, Host, Group)):
self.visit(obj.__dict__)
elif isinstance(obj, c.Mapping):
for key, value in obj.items():
self.visit(key)
self.visit(value)
elif isinstance(obj, c.Iterable):
for item in obj:
self.visit(item)
else:
raise TypeError(f'Checking of {type(obj)} is not supported.') # pragma: nocover
| RecursiveChecker |
python | Textualize__textual | docs/examples/guide/input/mouse01.py | {
"start": 147,
"end": 539
} | class ____(App):
CSS_PATH = "mouse01.tcss"
def compose(self) -> ComposeResult:
yield RichLog()
yield Ball("Textual")
def on_mouse_move(self, event: events.MouseMove) -> None:
self.screen.query_one(RichLog).write(event)
self.query_one(Ball).offset = event.screen_offset - (8, 2)
if __name__ == "__main__":
app = MouseApp()
app.run()
| MouseApp |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 3425,
"end": 6332
} | class ____(DeprecatedTuple):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
dist: Optional['Distribution'] = None
def __init__(self, name, value, group):
vars(self).update(name=name, value=value, group=group)
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
def _for(self, dist):
vars(self).update(dist=dist)
return self
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints by name.
"""
msg = (
"Construction of dict of EntryPoints is deprecated in "
"favor of EntryPoints."
)
warnings.warn(msg, DeprecationWarning)
return iter((self.name, self))
def matches(self, **params):
attrs = (getattr(self, param) for param in params)
return all(map(operator.eq, params.values(), attrs))
def _key(self):
return self.name, self.value, self.group
def __lt__(self, other):
return self._key() < other._key()
def __eq__(self, other):
return self._key() == other._key()
def __setattr__(self, name, value):
raise AttributeError("EntryPoint objects are immutable.")
def __repr__(self):
return (
f'EntryPoint(name={self.name!r}, value={self.value!r}, '
f'group={self.group!r})'
)
def __hash__(self):
return hash(self._key())
| EntryPoint |
python | PyCQA__pylint | pylint/reporters/ureports/text_writer.py | {
"start": 690,
"end": 3616
} | class ____(BaseWriter):
"""Format layouts as text
(ReStructured inspiration but not totally handled yet).
"""
def __init__(self) -> None:
super().__init__()
self.list_level = 0
def visit_section(self, layout: Section) -> None:
"""Display a section as text."""
self.section += 1
self.writeln()
self.format_children(layout)
self.section -= 1
self.writeln()
def visit_evaluationsection(self, layout: EvaluationSection) -> None:
"""Display an evaluation section as a text."""
self.section += 1
self.format_children(layout)
self.section -= 1
self.writeln()
def visit_title(self, layout: Title) -> None:
title = "".join(list(self.compute_content(layout)))
self.writeln(title)
try:
self.writeln(TITLE_UNDERLINES[self.section] * len(title))
except IndexError:
print("FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT")
def visit_paragraph(self, layout: Paragraph) -> None:
"""Enter a paragraph."""
self.format_children(layout)
self.writeln()
def visit_table(self, layout: Table) -> None:
"""Display a table as text."""
table_content = self.get_table_content(layout)
# get columns width
cols_width = [0] * len(table_content[0])
for row in table_content:
for index, col in enumerate(row):
cols_width[index] = max(cols_width[index], len(col))
self.default_table(layout, table_content, cols_width)
self.writeln()
def default_table(
self, layout: Table, table_content: list[list[str]], cols_width: list[int]
) -> None:
"""Format a table."""
cols_width = [size + 1 for size in cols_width]
format_strings = " ".join(["%%-%ss"] * len(cols_width))
format_strings %= tuple(cols_width)
table_linesep = "\n+" + "+".join("-" * w for w in cols_width) + "+\n"
headsep = "\n+" + "+".join("=" * w for w in cols_width) + "+\n"
self.write(table_linesep)
split_strings = format_strings.split(" ")
for index, line in enumerate(table_content):
self.write("|")
for line_index, at_index in enumerate(line):
self.write(split_strings[line_index] % at_index)
self.write("|")
if index == 0 and layout.rheaders:
self.write(headsep)
else:
self.write(table_linesep)
def visit_verbatimtext(self, layout: VerbatimText) -> None:
"""Display a verbatim layout as text (so difficult ;)."""
self.writeln("::\n")
for line in layout.data.splitlines():
self.writeln(" " + line)
self.writeln()
def visit_text(self, layout: Text) -> None:
"""Add some text."""
self.write(f"{layout.data}")
| TextWriter |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_getitem.py | {
"start": 7803,
"end": 12995
} | class ____:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
@pytest.mark.parametrize(
"data1,data2,expected_data",
(
(
[[1, 2], [3, 4]],
[[0.5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]],
),
(
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]],
),
),
)
def test_getitem_bool_mask_duplicate_columns_mixed_dtypes(
self,
data1,
data2,
expected_data,
):
# GH#31954
df1 = DataFrame(np.array(data1))
df2 = DataFrame(np.array(data2))
df = concat([df1, df2], axis=1)
result = df[df > 2]
exdict = {i: np.array(col) for i, col in enumerate(expected_data)}
expected = DataFrame(exdict).rename(columns={2: 0, 3: 1})
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_dup_cols(self):
dups = ["A", "A", "C", "D"]
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
return df
def test_getitem_boolean_frame_unaligned_with_duplicate_columns(self, df_dup_cols):
# `df.A > 6` is a DataFrame with a different shape from df
# boolean with the duplicate raises
df = df_dup_cols
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
def test_getitem_boolean_series_with_duplicate_columns(self, df_dup_cols):
# boolean indexing
# GH#4879
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df.C > 6]
tm.assert_frame_equal(result, expected)
def test_getitem_boolean_frame_with_duplicate_columns(self, df_dup_cols):
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
# `df > 6` is a DataFrame with the same shape+alignment as df
expected = df[df > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df > 6]
tm.assert_frame_equal(result, expected)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue GH#11859
df = DataFrame()
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
def test_getitem_returns_view_when_column_is_unique_in_df(self):
# GH#45316
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
df_orig = df.copy()
view = df["b"]
view.loc[:] = 100
expected = df_orig
tm.assert_frame_equal(df, expected)
def test_getitem_frozenset_unique_in_column(self):
# GH#41062
df = DataFrame([[1, 2, 3, 4]], columns=[frozenset(["KEY"]), "B", "C", "C"])
result = df[frozenset(["KEY"])]
expected = Series([1], name=frozenset(["KEY"]))
tm.assert_series_equal(result, expected)
| TestGetitemBooleanMask |
python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {
"start": 23063,
"end": 28403
} | class ____(nn.Module):
"""
Construct the patch embeddings and position embeddings for SAM3 ViT.
Position embeddings are tiled (not interpolated) when resizing to match different input sizes.
"""
def __init__(self, config: Sam3ViTConfig):
super().__init__()
self.patch_embeddings = Sam3ViTPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(
torch.randn(1, num_patches, config.hidden_size)
) # !Remove cls token in convert weights!
self.dropout = nn.Dropout(config.hidden_dropout)
self.patch_size = config.patch_size
def _tile_position_embeddings(
self,
position_embeddings: torch.Tensor,
height: int,
width: int,
) -> torch.Tensor:
"""
Tile position embeddings to match target spatial dimensions.
Args:
position_embeddings: Shape [1, num_pretrain_patches, hidden_size]
height: Target height in patches
width: Target width in patches
Returns:
Shape [1, height * width, hidden_size]
"""
pretrain_size = int(position_embeddings.shape[1] ** 0.5)
# Skip tiling if sizes match (but always tile during tracing for consistent graph)
if not torch.jit.is_tracing() and pretrain_size == height and pretrain_size == width:
return position_embeddings.reshape(1, height * width, -1)
# Tile position embeddings to match target spatial dimensions
hidden_size = position_embeddings.shape[-1]
pos_embed = position_embeddings.reshape(1, pretrain_size, pretrain_size, hidden_size).permute(0, 3, 1, 2)
repeat_h = height // pretrain_size + 1
repeat_w = width // pretrain_size + 1
pos_embed = pos_embed.tile([1, 1, repeat_h, repeat_w])[:, :, :height, :width]
return pos_embed.permute(0, 2, 3, 1).reshape(1, height * width, hidden_size)
def forward(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
) -> torch.Tensor:
height, width = pixel_values.shape[-2:]
embeddings = self.patch_embeddings(pixel_values)
# Calculate spatial dimensions in patches
height_patches = height // self.patch_size
width_patches = width // self.patch_size
position_embeddings = self._tile_position_embeddings(
self.position_embeddings,
height_patches,
width_patches,
)
embeddings = embeddings + position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
def window_partition(hidden_state, window_size):
"""
Partition into non-overlapping windows with padding if needed.
Args:
hidden_state (`torch.Tensor`):
Input tokens with [batch_size, height, width, num_channels].
window_size (`int`):
Window size.
Returns:
`tuple(torch.FloatTensor)` comprising various elements:
- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].
- (padded_height, padded_width): padded height and width before partition
"""
batch_size, height, width, num_channels = hidden_state.shape
pad_height = (window_size - height % window_size) % window_size
pad_width = (window_size - width % window_size) % window_size
# Noop in case pad_width == 0 and pad_height == 0.
hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))
padded_height, padded_width = height + pad_height, width + pad_width
hidden_state = hidden_state.view(
batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels
)
windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows, (padded_height, padded_width)
def window_unpartition(windows, window_size, pad_height_width, height_width):
"""
Window unpartition into original sequences and removing padding.
Args:
windows (`torch.Tensor`):
Input tokens with [batch_size * num_windows, window_size, window_size, num_channels].
window_size (`int`):
Window size.
pad_height_width (`tuple[int]`):
Padded height and width (padded_height, padded_width).
height_width (`tuple[int]`):
Original height and width before padding.
Returns:
hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].
"""
padded_height, padded_width = pad_height_width
height, width = height_width
batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size)
hidden_state = windows.view(
batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1
)
hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()
hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)
# We always have height <= padded_height and width <= padded_width
hidden_state = hidden_state[:, :height, :width, :].contiguous()
return hidden_state
| Sam3ViTEmbeddings |
python | scipy__scipy | scipy/optimize/tests/test_minimize_constrained.py | {
"start": 503,
"end": 1655
} | class ____:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = None
def fun(self, x):
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
def grad(self, x):
return np.array([4*x[0]-1, 4*x[1]])
def hess(self, x):
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
return [[2*x[0], 2*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
| Maratos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.