| language stringclasses 1 | repo stringclasses 346 | path stringlengths 6–201 | class_span dict | source stringlengths 21–2.38M | target stringlengths 1–96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/gevent/subprocess.py | { "start": 17854, "end": 77384 } | class ____(object):
"""
The underlying process creation and management in this module is
handled by the Popen class. It offers a lot of flexibility so that
developers are able to handle the less common cases not covered by
the convenience functions.
.. seealso:: :class:`subprocess.Popen`
This class should have the same interface as the standard library class.
.. caution::
The default values of some arguments, notably ``buffering``, differ
between Python 2 and Python 3. For the most consistent behaviour across
versions, it's best to explicitly pass the desired values.
.. caution::
On Python 2, the ``read`` method of the ``stdout`` and ``stderr`` attributes
will not be buffered unless buffering is explicitly requested (e.g., `bufsize=-1`).
This is different than the ``read`` method of the standard library attributes,
which will buffer internally even if no buffering has been requested. This
matches the Python 3 behaviour. For portability, please explicitly request
buffering if you want ``read(n)`` to return all ``n`` bytes, making more than
one system call if needed. See `issue 1701 <https://github.com/gevent/gevent/issues/1701>`_
for more context.
.. versionchanged:: 1.2a1
Instances can now be used as context managers under Python 2.7. Previously
this was restricted to Python 3.
.. versionchanged:: 1.2a1
Instances now save the ``args`` attribute under Python 2.7. Previously this was
restricted to Python 3.
.. versionchanged:: 1.2b1
Add the ``encoding`` and ``errors`` parameters for Python 3.
.. versionchanged:: 1.3a1
Accept "path-like" objects for the *cwd* parameter on all platforms.
This was added to Python 3.6. Previously with gevent, it only worked
on POSIX platforms on 3.6.
.. versionchanged:: 1.3a1
Add the ``text`` argument as a synonym for ``universal_newlines``,
as added on Python 3.7.
.. versionchanged:: 1.3a2
Allow the same keyword arguments under Python 2 as Python 3:
``pass_fds``, ``start_new_session``, ``restore_signals``, ``encoding``
and ``errors``. Under Python 2, ``encoding`` and ``errors`` are ignored
because native handling of universal newlines is used.
.. versionchanged:: 1.3a2
Under Python 2, ``restore_signals`` defaults to ``False``. Previously it
defaulted to ``True``, the same as it did in Python 3.
.. versionchanged:: 20.6.0
Add the *group*, *extra_groups*, *user*, and *umask* arguments. These
were added to Python 3.9, but are available in any gevent version, provided
the underlying platform support is present.
.. versionchanged:: 20.12.0
On Python 2 only, if unbuffered binary communication is requested,
the ``stdin`` attribute of this object will have a ``write`` method that
actually performs internal buffering and looping, similar to the standard library.
It guarantees to write all the data given to it in a single call (but internally
it may make many system calls and/or trips around the event loop to accomplish this).
See :issue:`1711`.
.. versionchanged:: 21.12.0
Added the ``pipesize`` argument for compatibility with Python 3.10.
This is ignored on all platforms.
.. versionchanged:: 22.08.0
Added the ``process_group`` and ``check`` arguments for compatibility with
Python 3.11.
.. versionchanged:: 24.10.1
To match Python 3.13, ``stdout=STDOUT`` now raises a :exc:`ValueError`.
"""
if GenericAlias is not None:
# 3.9, annoying typing is creeping everywhere.
__class_getitem__ = classmethod(GenericAlias)
# The value returned from communicate() when there was nothing to read.
# Changes if we're in text mode or universal newlines mode.
_communicate_empty_value = b''
# pylint:disable-next=too-many-positional-arguments
def __init__(self, args,
bufsize=-1,
executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS, shell=False,
cwd=None, env=None, universal_newlines=None,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=(),
# Added in 3.6. These are kept as ivars
encoding=None, errors=None,
# Added in 3.7. Not an ivar directly.
text=None,
# Added in 3.9
group=None, extra_groups=None, user=None,
umask=-1,
# Added in 3.10, but ignored.
pipesize=-1,
# Added in 3.11
process_group=None,
# gevent additions
threadpool=None):
self.encoding = encoding
self.errors = errors
hub = get_hub()
if bufsize is None:
# Python 2 doesn't allow None at all, but Python 3 treats
# it the same as the default. We do as well.
bufsize = -1
if not isinstance(bufsize, integer_types):
raise TypeError("bufsize must be an integer")
if stdout is STDOUT:
raise ValueError("STDOUT can only be used for stderr")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if threadpool is None:
threadpool = hub.threadpool
self.threadpool = threadpool
self._waiting = False
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
# close_fds has different defaults on Py3/Py2
close_fds = True
if pass_fds and not close_fds:
import warnings
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
assert threadpool is None
self._loop = hub.loop
# Validate the combinations of text and universal_newlines
if (text is not None and universal_newlines is not None
and bool(universal_newlines) != bool(text)):
# pylint:disable=undefined-variable
raise SubprocessError('Cannot disambiguate when both text '
'and universal_newlines are supplied but '
'different. Pass one or the other.')
self.args = args # Previously this was Py3 only.
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
self.result = AsyncResult()
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
text_mode = self.encoding or self.errors or universal_newlines or text
if text_mode or universal_newlines:
# Always a native str in universal_newlines mode, even when that
# str type is bytes. Additionally, text_mode is only true under
# Python 3, so it's actually a unicode str
self._communicate_empty_value = ''
uid, gid, gids = self.__handle_uids(user, group, extra_groups)
if p2cwrite != -1:
if text_mode:
# Under Python 3, if we left on the 'b' we'd get different results
# depending on whether we used FileObjectPosix or FileObjectThread
self.stdin = FileObject(p2cwrite, 'w', bufsize,
encoding=self.encoding, errors=self.errors)
else:
self.stdin = FileObject(p2cwrite, 'wb', bufsize)
if c2pread != -1:
if universal_newlines or text_mode:
self.stdout = FileObject(c2pread, 'r', bufsize,
encoding=self.encoding, errors=self.errors)
# NOTE: Universal Newlines are broken on Windows/Py3, at least
# in some cases. This is true in the stdlib subprocess module
# as well; the following line would fix the test cases in
# test__subprocess.py that depend on python_universal_newlines,
# but would be inconsistent with the stdlib:
else:
self.stdout = FileObject(c2pread, 'rb', bufsize)
if errread != -1:
if universal_newlines or text_mode:
self.stderr = FileObject(errread, 'r', bufsize,
encoding=encoding, errors=errors)
else:
self.stderr = FileObject(errread, 'rb', bufsize)
self._closed_child_pipe_fds = False
# Convert here for the sake of all platforms. os.chdir accepts
# path-like objects natively under 3.6, but CreateProcess
# doesn't.
cwd = fsdecode(cwd) if cwd is not None else None
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals,
gid, gids, uid, umask,
start_new_session, process_group)
except:
# Cleanup if the child failed starting.
# (gevent: New in python3, but reported as gevent bug in #347.
# Note that under Py2, any error raised below will replace the
# original error so we have to use reraise)
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except OSError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os_close(fd)
except OSError:
pass
raise
def __handle_uids(self, user, group, extra_groups):
gid = None
if group is not None:
if not hasattr(os, 'setregid'):
raise ValueError("The 'group' parameter is not supported on the "
"current platform")
if isinstance(group, str):
if grp is None:
raise ValueError("The group parameter cannot be a string "
"on systems without the grp module")
gid = grp.getgrnam(group).gr_gid
elif isinstance(group, int):
gid = group
else:
raise TypeError("Group must be a string or an integer, not {}"
.format(type(group)))
if gid < 0:
raise ValueError("Group ID cannot be negative, got %s" % gid)
gids = None
if extra_groups is not None:
if not hasattr(os, 'setgroups'):
raise ValueError("The 'extra_groups' parameter is not "
"supported on the current platform")
if isinstance(extra_groups, str):
raise ValueError("Groups must be a list, not a string")
gids = []
for extra_group in extra_groups:
if isinstance(extra_group, str):
if grp is None:
raise ValueError("Items in extra_groups cannot be "
"strings on systems without the "
"grp module")
gids.append(grp.getgrnam(extra_group).gr_gid)
elif isinstance(extra_group, int):
if extra_group >= 2**64:
# This check is implicit in the C version of _Py_Gid_Converter.
#
# We actually need access to the C type ``gid_t`` to get
# its actual length. This just makes the test that was added
# for the bug pass. That's OK though, if we guess too big here,
# we should get an OverflowError from the setgroups()
# call we make. The only difference is the type of exception.
#
# See https://bugs.python.org/issue42655
raise ValueError("Item in extra_groups is too large")
gids.append(extra_group)
else:
raise TypeError("Items in extra_groups must be a string "
"or integer, not {}"
.format(type(extra_group)))
# make sure that the gids are all positive here so we can do less
# checking in the C code
for gid_check in gids:
if gid_check < 0:
raise ValueError("Group ID cannot be negative, got %s" % (gid_check,))
uid = None
if user is not None:
if not hasattr(os, 'setreuid'):
raise ValueError("The 'user' parameter is not supported on "
"the current platform")
if isinstance(user, str):
if pwd is None:
raise ValueError("The user parameter cannot be a string "
"on systems without the pwd module")
uid = pwd.getpwnam(user).pw_uid
elif isinstance(user, int):
uid = user
else:
raise TypeError("User must be a string or an integer")
if uid < 0:
raise ValueError("User ID cannot be negative, got %s" % (uid,))
return uid, gid, gids
def __repr__(self):
return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode)
def _on_child(self, watcher):
watcher.stop()
status = watcher.rstatus
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
self.returncode = os.WEXITSTATUS(status)
self.result.set(self.returncode)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
_communicating_greenlets = None
def communicate(self, input=None, timeout=None):
"""
Interact with process and return its output and error.
- Send *input* data to stdin.
- Read data from stdout and stderr, until end-of-file is reached.
- Wait for process to terminate.
The optional *input* argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr).
:keyword timeout: Under Python 2, this is a gevent extension; if
given and it expires, we will raise :exc:`TimeoutExpired`, which
extends :exc:`gevent.timeout.Timeout` (note that this only extends :exc:`BaseException`,
*not* :exc:`Exception`)
Under Python 3, this raises the standard :exc:`TimeoutExpired` exception.
.. versionchanged:: 1.1a2
Under Python 2, if the *timeout* elapses, raise the :exc:`gevent.timeout.Timeout`
exception. Previously, we silently returned.
.. versionchanged:: 1.1b5
Honor a *timeout* even if there's no way to communicate with the child
(stdin, stdout, and stderr are not pipes).
"""
if self._communicating_greenlets is None:
self._communicating_greenlets = _CommunicatingGreenlets(self, input)
greenlets = self._communicating_greenlets
# If we were given stdin=stdout=stderr=None, we have no way to
# communicate with the child, and thus no greenlets to wait
# on. This is a nonsense case, but it comes up in the test
# case for Python 3.5 (test_subprocess.py
# RunFuncTestCase.test_timeout). Instead, we go directly to
# self.wait
if not greenlets and timeout is not None:
self.wait(timeout=timeout, _raise_exc=True)
done = joinall(greenlets, timeout=timeout)
# Allow finished greenlets, if any, to raise. This takes priority over
# the timeout exception.
for greenlet in done:
greenlet.get()
if timeout is not None and len(done) != len(self._communicating_greenlets):
raise TimeoutExpired(self.args, timeout)
# Close only after we're sure that everything is done
# (there was no timeout, or there was, but everything finished).
# There should be no greenlets still running, even from a prior
# attempt. If there are, then this can raise RuntimeError: 'reentrant call'.
# So we ensure that previous greenlets are dead.
for pipe in (self.stdout, self.stderr):
if pipe:
try:
pipe.close()
except RuntimeError:
pass
self.wait()
return (None if greenlets.stdout is None else greenlets.stdout.get(),
None if greenlets.stderr is None else greenlets.stderr.get())
def poll(self):
"""Check if child process has terminated. Set and return :attr:`returncode` attribute."""
return self._internal_poll()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if self.stdin:
self.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
# JAM: gevent: If the process never terminates, this
# blocks forever.
self.wait()
def _gevent_result_wait(self, timeout=None, raise_exc=True):
result = self.result.wait(timeout=timeout)
if raise_exc and timeout is not None and not self.result.ready():
raise TimeoutExpired(self.args, timeout)
return result
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
# pylint:disable=undefined-variable
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
try:
DEVNULL
except NameError:
_devnull = object()
else:
_devnull = DEVNULL
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == _devnull:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == _devnull:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == _devnull:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
# pylint:disable=undefined-variable
return DuplicateHandle(GetCurrentProcess(),
handle, GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolute path to w9xpopen.exe"""
# pylint:disable=undefined-variable
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _filter_handle_list(self, handle_list):
"""Filter out console handles that can't be used
in lpAttributeList["handle_list"] and make sure the list
isn't empty. This also removes duplicate handles."""
# A handle with its lowest two bits set might be a special console
# handle that if passed in lpAttributeList["handle_list"], will
# cause it to fail.
# Only works on 3.7+
return list({handle for handle in handle_list
if handle & 0x3 != 0x3
or _winapi.GetFileType(handle) !=
_winapi.FILE_TYPE_CHAR})
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals,
unused_gid, unused_gids, unused_uid, unused_umask,
unused_start_new_session, unused_process_group):
"""Execute program (MS Windows version)"""
# pylint:disable=undefined-variable
assert not pass_fds, "pass_fds not supported on Windows."
if isinstance(args, str):
pass
elif isinstance(args, bytes):
if shell:
raise TypeError('bytes args is not allowed on Windows')
args = list2cmdline([args])
elif isinstance(args, PathLike):
if shell:
raise TypeError('path-like args is not allowed when '
'shell is true')
args = list2cmdline([args])
else:
args = list2cmdline(args)
if executable is not None:
executable = fsdecode(executable)
if not isinstance(args, string_types):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
elif hasattr(startupinfo, 'copy'):
# bpo-34044: Copy STARTUPINFO since it is modified below,
# so the caller can reuse it multiple times.
startupinfo = startupinfo.copy()
elif hasattr(startupinfo, '_copy'):
# When the fix was backported to Python 3.7, copy() was
# made private as _copy.
startupinfo = startupinfo._copy()
use_std_handles = -1 not in (p2cread, c2pwrite, errwrite)
if use_std_handles:
startupinfo.dwFlags |= STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if hasattr(startupinfo, 'lpAttributeList'):
# Support for Python >= 3.7
attribute_list = startupinfo.lpAttributeList
have_handle_list = bool(attribute_list and
"handle_list" in attribute_list and
attribute_list["handle_list"])
# If we were given a handle_list or need to create one
if have_handle_list or (use_std_handles and close_fds):
if attribute_list is None:
attribute_list = startupinfo.lpAttributeList = {}
handle_list = attribute_list["handle_list"] = \
list(attribute_list.get("handle_list", []))
if use_std_handles:
handle_list += [int(p2cread), int(c2pwrite), int(errwrite)]
handle_list[:] = self._filter_handle_list(handle_list)
if handle_list:
if not close_fds:
import warnings
warnings.warn("startupinfo.lpAttributeList['handle_list'] "
"overriding close_fds", RuntimeWarning)
# When using the handle_list we always request to inherit
# handles but the only handles that will be inherited are
# the ones in the handle_list
close_fds = False
if shell:
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format(comspec, args)
if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com":
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= CREATE_NEW_CONSOLE
# PyPy 2.7 7.3.6 is now producing these errors. This
# happens automatically on Posix platforms, and is built
# in to the CreateProcess call on CPython 2 & 3. It's not
# clear why we don't pick it up for free from the
# CreateProcess call on PyPy. Currently we don't test PyPy3 on Windows,
# so we don't know for sure if it's built into CreateProcess there.
if PYPY:
def _check_nul(s, err_kind=ValueError):
if not s:
return
nul = b'\0' if isinstance(s, bytes) else '\0'
if nul in s:
# PyPy 2 expects a TypeError; Python 3 raises ValueError always.
raise err_kind("argument must be a string without NUL characters")
def _check_env():
if not env:
return
for k, v in env.items():
_check_nul(k)
_check_nul(v)
if '=' in k:
raise ValueError("'=' not allowed in environment keys")
_check_nul(executable)
_check_nul(args)
_check_env()
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd, # fsdecode handled earlier
startupinfo)
# except IOError as e: # From 2.6 on, pywintypes.error was defined as IOError
# # Translate pywintypes.error to WindowsError, which is
# # a subclass of OSError. FIXME: We should really
# # translate errno using _sys_errlist (or similar), but
# # how can this be done from Python?
# raise # don't remap here
# raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
def _close(x):
if x is not None and x != -1:
if hasattr(x, 'Close'):
x.Close()
else:
_winapi.CloseHandle(x)
_close(p2cread)
_close(c2pwrite)
_close(errwrite)
if hasattr(self, '_devnull'):
os_close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp) if not hasattr(hp, 'Close') else hp
self.pid = pid
_winapi.CloseHandle(ht) if not hasattr(ht, 'Close') else ht.Close()
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
"""
# pylint:disable=undefined-variable
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
self.result.set(self.returncode)
return self.returncode
def rawlink(self, callback):
if not self.result.ready() and not self._waiting:
self._waiting = True
Greenlet.spawn(self._wait)
self.result.rawlink(linkproxy(callback, self))
# XXX unlink
def _blocking_wait(self):
# pylint:disable=undefined-variable
WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _wait(self):
self.threadpool.spawn(self._blocking_wait).rawlink(self.result)
def wait(self, timeout=None, _raise_exc=True):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
if not self._waiting:
self._waiting = True
self._wait()
return self._gevent_result_wait(timeout, _raise_exc)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
# pylint:disable=undefined-variable
# Don't terminate a process that we know has already died.
if self.returncode is not None:
return
try:
TerminateProcess(self._handle, 1)
except OSError as e:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
if e.winerror != 5:
raise
rc = GetExitCodeProcess(self._handle)
if rc == STILL_ACTIVE:
raise
self.returncode = rc
self.result.set(self.returncode)
kill = terminate
else:
#
# POSIX methods
#
def rawlink(self, callback):
# Not public documented, part of the link protocol
self.result.rawlink(linkproxy(callback, self))
# XXX unlink
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
try:
DEVNULL
except NameError:
_devnull = object()
else:
_devnull = DEVNULL
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
elif stdin == _devnull:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
elif stdout == _devnull:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = self.pipe_cloexec()
elif stderr == STDOUT: # pylint:disable=undefined-variable
if c2pwrite != -1:
errwrite = c2pwrite
else: # child's stdout is not set, use parent's stdout
errwrite = sys.__stdout__.fileno()
elif stderr == _devnull:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _remove_nonblock_flag(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK)
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
_POSSIBLE_FD_DIRS = (
'/proc/self/fd', # Linux
'/dev/fd', # BSD, including macOS
)
@classmethod
def _close_fds(cls, keep, errpipe_write):
# From the C code:
# errpipe_write is part of keep. It must be closed at
# exec(), but kept open in the child process until exec() is
# called.
for path in cls._POSSIBLE_FD_DIRS:
if os.path.isdir(path):
return cls._close_fds_from_path(path, keep, errpipe_write)
return cls._close_fds_brute_force(keep, errpipe_write)
@classmethod
def _close_fds_from_path(cls, path, keep, errpipe_write):
# path names a directory whose only entries have
# names that are ascii strings of integers in base10,
# corresponding to the fds the current process has open
try:
fds = [int(fname) for fname in os.listdir(path)]
except (ValueError, OSError):
cls._close_fds_brute_force(keep, errpipe_write)
else:
for i in keep:
if i == errpipe_write:
continue
_set_inheritable(i, True)
for fd in fds:
if fd in keep or fd < 3:
continue
try:
os_close(fd)
except:
pass
@classmethod
def _close_fds_brute_force(cls, keep, errpipe_write):
# `keep` is a set of fds, so we
# use os.closerange from 3 to min(keep)
# and then from max(keep + 1) to MAXFD and
# loop through filling in the gaps.
# Under new python versions, we need to explicitly set
# passed fds to be inheritable or they will go away on exec
# XXX: Bug: We implicitly rely on errpipe_write being the largest open
# FD so that we don't change its cloexec flag.
assert hasattr(os, 'closerange') # Added in 2.7
keep = sorted(keep)
min_keep = min(keep)
max_keep = max(keep)
os.closerange(3, min_keep)
os.closerange(max_keep + 1, MAXFD)
for i in xrange(min_keep, max_keep):
if i in keep:
_set_inheritable(i, True)
continue
try:
os_close(i)
except:
pass
# pylint:disable-next=too-many-positional-arguments
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals,
gid, gids, uid, umask,
start_new_session, process_group):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
elif isinstance(args, PathLike):
if shell:
raise TypeError('path-like args is not allowed when '
'shell is true')
args = [fsencode(args)] # os.PathLike -> [str]
else:
args = list(args)
if shell:
# On Android the default shell is at '/system/bin/sh'.
unix_shell = (
'/system/bin/sh' if hasattr(sys, 'getandroidapilevel') else '/bin/sh'
)
args = [unix_shell, "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
self._loop.install_sigchld()
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = self.pipe_cloexec()
# errpipe_write must not be in the standard io 0, 1, or 2 fd range.
low_fds_to_close = []
while errpipe_write < 3:
low_fds_to_close.append(errpipe_write)
errpipe_write = os.dup(errpipe_write)
for low_fd in low_fds_to_close:
os_close(low_fd)
try:
try:
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = fork_and_watch(self._on_child, self._loop, True, fork)
except:
if gc_was_enabled:
gc.enable()
raise
if self.pid == 0:
# Child
# In various places on the child side of things, we catch OSError
# and add attributes to it that detail where in the process we failed;
# like all exceptions until we have exec'd, this exception is pickled
# and sent to the parent to raise in the calling process.
# The parent uses this to decide how to treat that exception,
# adjusting certain information about it as needed.
#
# Python 3.11.8 --- yes, a minor patch release --- stopped
# letting the 'filename' parameter get set in the resulting
# exception for many cases. We're not quite interpreting this
# the same way the stdlib is, I'm sure, but this makes the stdlib
# tests pass.
# XXX: Technically we're doing a lot of stuff here that
# may not be safe to do before a exec(), depending on the OS.
# CPython 3 goes to great lengths to precompute a lot
# of this info before the fork and pass it all to C functions that
# try hard not to call things like malloc(). (Of course,
# CPython 2 pretty much did what we're doing.)
try:
# Close parent's pipe ends
if p2cwrite != -1:
os_close(p2cwrite)
if c2pread != -1:
os_close(c2pread)
if errread != -1:
os_close(errread)
os_close(errpipe_read)
# When duping fds, if there arises a situation
# where one of the fds is either 0, 1 or 2, it
# is possible that it is overwritten (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
_set_inheritable(c2pwrite, False)
while errwrite in (0, 1):
errwrite = os.dup(errwrite)
_set_inheritable(errwrite, False)
# Dup fds for child
def _dup2(existing, desired):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if existing == desired:
self._set_cloexec_flag(existing, False)
elif existing != -1:
os.dup2(existing, desired)
try:
self._remove_nonblock_flag(desired)
except OSError:
# Ignore EBADF, it may not actually be
# open yet.
# Tested beginning in 3.7.0b3 test_subprocess.py
pass
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
if not True:
closed = set([None])
for fd in (p2cread, c2pwrite, errwrite):
if fd not in closed and fd > 2:
os_close(fd)
closed.add(fd)
# Python 3 (with a working set_inheritable):
# We no longer manually close p2cread,
# c2pwrite, and errwrite here as
# _close_open_fds takes care when it is
# not already non-inheritable.
if cwd is not None:
try:
os.chdir(cwd)
except OSError as e:
e._failed_chdir = True
raise
# Python 3.9
if umask >= 0:
os.umask(umask)
# XXX: CPython does _Py_RestoreSignals here.
# Then setsid() based on ???
try:
if gids:
os.setgroups(gids)
if gid:
os.setregid(gid, gid)
if uid:
os.setreuid(uid, uid)
if process_group is not None:
os.setpgid(0, process_group)
except OSError as e:
e._failed_chuser = True
raise
if preexec_fn:
preexec_fn()
# Close all other fds, if asked for. This must be done
# after preexec_fn runs.
if close_fds:
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self._close_fds(fds_to_keep, errpipe_write)
if restore_signals:
# restore the documented signals back to sig_dfl;
# not all will be defined on every platform
for sig in 'SIGPIPE', 'SIGXFZ', 'SIGXFSZ':
sig = getattr(signal, sig, None)
if sig is not None:
signal.signal(sig, signal.SIG_DFL)
if start_new_session:
os.setsid()
try:
if env is None:
os.execvp(executable, args)
else:
# Python 3.6 started testing for
# bytes values in the env; it also
# started encoding strs using
# fsencode and using a lower-level
# API that takes a list of keys
# and values. We don't have access
# to that API, so we go the reverse direction.
env = {os.fsdecode(k) if isinstance(k, bytes) else k:
os.fsdecode(v) if isinstance(v, bytes) else v
for k, v in env.items()}
os.execvpe(executable, args, env)
except OSError as e:
e._failed_exec = True
raise
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
finally:
# Make sure that the process exits no matter what.
# The return code does not matter much as it won't be
# reported to the application
os._exit(1)
# Parent
self._child_created = True
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os_close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os_close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os_close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os_close(errwrite)
if devnull_fd is not None:
os_close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising exception
errpipe_read = FileObject(errpipe_read, 'rb')
data = errpipe_read.read()
finally:
try:
if hasattr(errpipe_read, 'close'):
errpipe_read.close()
else:
os_close(errpipe_read)
except OSError:
# Especially on PyPy, we sometimes see the above
# `os_close(errpipe_read)` raise an OSError.
# It's not entirely clear why, but it happens in
# InterprocessSignalTests.test_main sometimes, which must mean
# we have some sort of race condition.
pass
finally:
errpipe_read = -1
if data != b"":
self.wait()
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None and fd != -1:
os_close(fd)
if isinstance(child_exception, OSError):
child_exception.filename = executable
if hasattr(child_exception, '_failed_chdir'):
child_exception.filename = cwd
if getattr(child_exception, '_failed_chuser', False):
child_exception.filename = None
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS, _WIFSTOPPED=os.WIFSTOPPED,
_WSTOPSIG=os.WSTOPSIG):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
# (gevent: We don't have a __del__, that's in the CPython implementation.)
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
elif _WIFSTOPPED(sts):
self.returncode = -_WSTOPSIG(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self):
"""Check if child process has terminated. Returns returncode
attribute.
"""
if self.returncode is None:
if get_hub() is not getcurrent():
sig_pending = getattr(self._loop, 'sig_pending', True)
if sig_pending:
sleep(0.00001)
return self.returncode
def wait(self, timeout=None, _raise_exc=True):
"""
Wait for child process to terminate. Returns :attr:`returncode`
attribute.
:keyword timeout: The floating point number of seconds to
wait. Under Python 2, this is a gevent extension, and
we simply return if it expires. Under Python 3, if
this time elapses without finishing the process,
:exc:`TimeoutExpired` is raised.
"""
return self._gevent_result_wait(timeout, _raise_exc)
def send_signal(self, sig):
"""Send a signal to the process
"""
# Skip signalling a process that we know has already died.
if self.returncode is None:
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _with_stdout_stderr(exc, stderr):
# Prior to Python 3.5, most exceptions didn't have stdout
# and stderr attributes and can't take the stderr attribute in their
# constructor
exc.stdout = exc.output
exc.stderr = stderr
return exc
| Popen |
python | FactoryBoy__factory_boy | tests/test_django.py | { "start": 23372, "end": 29759 } | class ____(django_test.TestCase):
def tearDown(self):
super().tearDown()
for path in os.listdir(models.WITHFILE_UPLOAD_DIR):
# Remove temporary files written during tests.
os.unlink(os.path.join(models.WITHFILE_UPLOAD_DIR, path))
def test_default_build(self):
o = WithImageFactory.build()
self.assertIsNone(o.pk)
o.save()
self.assertEqual(100, o.animage.width)
self.assertEqual(100, o.animage.height)
self.assertEqual('django/example.jpg', o.animage.name)
def test_default_create(self):
o = WithImageFactory.create()
self.assertIsNotNone(o.pk)
o.save()
self.assertEqual(100, o.animage.width)
self.assertEqual(100, o.animage.height)
self.assertEqual('django/example.jpg', o.animage.name)
def test_complex_create(self):
o = WithImageFactory.create(
size=10,
animage__filename=factory.Sequence(lambda n: 'img%d.jpg' % n),
__sequence=42,
animage__width=factory.SelfAttribute('..size'),
animage__height=factory.SelfAttribute('width'),
)
self.assertIsNotNone(o.pk)
self.assertEqual('django/img42.jpg', o.animage.name)
def test_with_content(self):
o = WithImageFactory.build(animage__width=13, animage__color='red')
self.assertIsNone(o.pk)
o.save()
self.assertEqual(13, o.animage.width)
self.assertEqual(13, o.animage.height)
self.assertEqual('django/example.jpg', o.animage.name)
with Image.open(os.path.join(settings.MEDIA_ROOT, o.animage.name)) as i:
colors = i.getcolors()
# 169 pixels with rgb(254, 0, 0)
self.assertEqual([(169, (254, 0, 0))], colors)
self.assertEqual('JPEG', i.format)
def test_rgba_image(self):
o = WithImageFactory.create(
animage__palette='RGBA',
animage__format='PNG',
)
self.assertIsNotNone(o.pk)
with Image.open(os.path.join(settings.MEDIA_ROOT, o.animage.name)) as i:
self.assertEqual('RGBA', i.mode)
def test_gif(self):
o = WithImageFactory.build(animage__width=13, animage__color='blue', animage__format='GIF')
self.assertIsNone(o.pk)
o.save()
self.assertEqual(13, o.animage.width)
self.assertEqual(13, o.animage.height)
self.assertEqual('django/example.jpg', o.animage.name)
with Image.open(os.path.join(settings.MEDIA_ROOT, o.animage.name)) as i:
colors = i.convert('RGB').getcolors()
# 169 pixels with rgb(0, 0, 255)
self.assertEqual([(169, (0, 0, 255))], colors)
self.assertEqual('GIF', i.format)
def test_with_file(self):
with open(testdata.TESTIMAGE_PATH, 'rb') as f:
o = WithImageFactory.build(animage__from_file=f)
o.save()
with o.animage as f:
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertEqual('django/example.jpeg', o.animage.name)
def test_with_path(self):
o = WithImageFactory.build(animage__from_path=testdata.TESTIMAGE_PATH)
self.assertIsNone(o.pk)
with o.animage as f:
o.save()
f.seek(0)
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertEqual('django/example.jpeg', o.animage.name)
def test_with_file_empty_path(self):
with open(testdata.TESTIMAGE_PATH, 'rb') as f:
o = WithImageFactory.build(
animage__from_file=f,
animage__from_path=''
)
o.save()
with o.animage as f:
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertEqual('django/example.jpeg', o.animage.name)
def test_with_path_empty_file(self):
o = WithImageFactory.build(
animage__from_path=testdata.TESTIMAGE_PATH,
animage__from_file=None,
)
self.assertIsNone(o.pk)
with o.animage as f:
o.save()
f.seek(0)
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertEqual('django/example.jpeg', o.animage.name)
def test_error_both_file_and_path(self):
with self.assertRaises(ValueError):
WithImageFactory.build(
animage__from_file='fakefile',
animage__from_path=testdata.TESTIMAGE_PATH,
)
def test_override_filename_with_path(self):
o = WithImageFactory.build(
animage__from_path=testdata.TESTIMAGE_PATH,
animage__filename='example.foo',
)
self.assertIsNone(o.pk)
with o.animage as f:
o.save()
f.seek(0)
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertEqual('django/example.foo', o.animage.name)
def test_existing_file(self):
o1 = WithImageFactory.build(animage__from_path=testdata.TESTIMAGE_PATH)
o1.save()
with o1.animage as f:
o2 = WithImageFactory.build(animage__from_file=f)
self.assertIsNone(o2.pk)
o2.save()
with o2.animage as f:
# Image file for a 42x42 green jpeg: 301 bytes long.
self.assertEqual(301, len(f.read()))
self.assertNotEqual('django/example.jpeg', o2.animage.name)
self.assertRegex(o2.animage.name, r'django/example_\w+.jpeg')
def test_no_file(self):
o = WithImageFactory.build(animage=None)
self.assertIsNone(o.pk)
self.assertFalse(o.animage)
def _img_test_func(self):
img = Image.new('RGB', (32, 32), 'blue')
img_io = io.BytesIO()
img.save(img_io, format='JPEG')
img_io.seek(0)
return img_io
def test_with_func(self):
o = WithImageFactory.build(animage__from_func=self._img_test_func)
self.assertIsNone(o.pk)
i = Image.open(o.animage.file)
self.assertEqual('JPEG', i.format)
self.assertEqual(32, i.width)
self.assertEqual(32, i.height)
| DjangoImageFieldTestCase |
python | scikit-image__scikit-image | benchmarks/benchmark_morphology.py | { "start": 263, "end": 1645 } | class ____:
def setup(self, *args):
try:
# use a separate skeletonize_3d function on older scikit-image
if Version(skimage.__version__) < Version('0.16.0'):
self.skeletonize = morphology.skeletonize_3d
else:
self.skeletonize = morphology.skeletonize
except AttributeError:
raise NotImplementedError("3d skeletonize unavailable")
# we stack the horse data 5 times to get an example volume
self.image = np.stack(5 * [util.invert(data.horse())])
def time_skeletonize(self):
self.skeletonize(self.image)
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_skeletonize(self):
self.skeletonize(self.image)
| Skeletonize3d |
python | huggingface__transformers | examples/modular-transformers/modular_dummy_bert.py | { "start": 270, "end": 1218 } | class ____(BertModel):
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
return super().forward(input_ids, **kwargs)
| DummyBertModel |
python | pennersr__django-allauth | allauth/socialaccount/providers/flickr/views.py | { "start": 227, "end": 760 } | class ____(OAuth):
api_url = "https://api.flickr.com/services/rest"
def get_user_info(self):
default_params = {"nojsoncallback": "1", "format": "json"}
p = dict({"method": "flickr.test.login"}, **default_params)
u = self.query(self.api_url + "?" + urlencode(p)).json()
p = dict(
{"method": "flickr.people.getInfo", "user_id": u["user"]["id"]},
**default_params,
)
user = self.query(self.api_url + "?" + urlencode(p)).json()
return user
| FlickrAPI |
python | pytorch__pytorch | torch/_dynamo/eval_frame.py | { "start": 23358, "end": 38377 } | class ____:
def __init__(
self,
callback: DynamoCallback,
on_enter: Callable[[], Any] = nothing,
backend_ctx_ctor: Callable[
[], contextlib.AbstractContextManager[Any]
] = null_context,
patch_fn: Callable[[], Any] = nothing,
first_ctx: bool = False,
*,
fullgraph: bool = False,
error_on_graph_break: Optional[bool] = None,
export: bool = False,
dynamic: Optional[bool] = None,
compiler_config: Optional[Any] = None,
package: Optional[CompilePackage] = None,
hooks: Optional[Hooks] = None,
) -> None:
super().__init__()
assert callable(callback) or callback is False or callback is None
self.callback: DynamoCallback = callback
self._backend_ctx_ctor = backend_ctx_ctor
self.prior: Union[Unset, DynamoCallback] = unset
self.first_ctx = first_ctx
self.fullgraph = fullgraph
self.error_on_graph_break = error_on_graph_break
self.export = export
self._dynamic = dynamic
self.compiler_config = compiler_config
self.cleanup_fns: list[Callable[[], Any]] = []
self.enter_exit_hooks = []
self._package = package
self._hooks = hooks
patch_fn()
# Save the backends so that we can reset them during torch._dynamo.reset
backend = innermost_fn(callback, unaltered_fn_attr="_torchdynamo_orig_backend") # type: ignore[arg-type]
cached_backends.setdefault(id(backend), backend) # type: ignore[arg-type]
if dynamic is not None:
self.enter_exit_hooks.append(make_set_enable_dynamic(dynamic))
if on_enter is not nothing:
# this case is not common
def call_on_enter() -> Callable[[], None]:
on_enter()
return nothing
self.enter_exit_hooks.append(call_on_enter)
if backend_ctx_ctor is not contextlib.nullcontext:
# this case is not common
def call_backend_ctx() -> functools.partial[Optional[bool]]:
ctx = backend_ctx_ctor()
ctx.__enter__()
return functools.partial(ctx.__exit__, None, None, None)
self.enter_exit_hooks.append(call_backend_ctx)
def __enter__(self) -> None:
if config.raise_on_ctx_manager_usage:
raise RuntimeError(
"torch._dynamo.optimize(...) is used with a context manager. "
"Please refer to https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html "
"to use torch._dynamo.optimize(...) as an annotation/decorator. "
)
self.prior = set_eval_frame(None)
self.cleanup_fns = [enter() for enter in self.enter_exit_hooks]
self.prior_skip_guard_eval_unsafe = set_skip_guard_eval_unsafe(
_is_skip_guard_eval_unsafe_stance()
)
_maybe_set_eval_frame(_callback_from_stance(self.callback))
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[types.TracebackType],
) -> Optional[bool]:
assert self.prior is not unset
set_eval_frame(None)
set_skip_guard_eval_unsafe(self.prior_skip_guard_eval_unsafe)
for cleanup in self.cleanup_fns:
cleanup()
self.cleanup_fns.clear()
_maybe_set_eval_frame(_callback_from_stance(self.prior))
self.prior = unset
return None
def __call__(self, fn: Any) -> Any:
# public api for compiler config/options
def get_compiler_config() -> Any:
return self.compiler_config
from .package import DynamoCache
# If self._package is lazily initialized, we should check the dynamo cache now
if config.caching_precompile:
if self._package is not None and not self._package.is_initialized():
result = DynamoCache.load(fn)
if result is None:
# Create a fresh CompilePackage
self._package.initialize(fn, None, ignore_inlined_sources=False)
else:
try:
self._package.initialize(
fn, result.dynamo, ignore_inlined_sources=False
)
self._package.install(result.backends)
except RuntimeError:
log.warning(
"Failed to load entry from dynamo cache", exc_info=True
)
self._package.initialize(fn, None, ignore_inlined_sources=False)
fn = innermost_fn(fn)
def aot_compile(example_inputs: tuple[tuple[Any, ...], dict[str, Any]]) -> Any:
from torch._dynamo.aot_compile import aot_compile_fullgraph
if torch._inductor.config.force_disable_caches:
raise RuntimeError(
"Cannot precompile with torch._inductor.config.force_disable_caches=True; caching is required."
)
if not self.fullgraph:
raise RuntimeError(
"Graph breaks are not supported with aot compile. Please use torch.compile(fullgraph=True)."
)
if not callable(self.callback):
raise RuntimeError("aot compile requires a callable dynamo callback.")
assert self._hooks is not None
return aot_compile_fullgraph(
fn,
example_inputs,
hooks=self._hooks,
backend=innermost_fn(
self.callback, unaltered_fn_attr="_torchdynamo_orig_backend"
),
)
# add context containing GraphModule to any GraphModule forward functions
if isinstance(fn, GraphModule):
# add context containing GraphModule to any GraphModule forward functions
code_context.get_context(fn.forward.__code__)["orig_graphmodule"] = (
weakref.ref(fn)
)
# Optimize the forward method of torch.nn.Module object
if isinstance(fn, torch.nn.Module):
mod = fn
new_mod = OptimizedModule(mod, self)
# Save the function pointer to find the original callable while nesting
# of decorators.
new_mod._torchdynamo_orig_callable = mod.forward
# when compiling torch.nn.Module,
# provide public api OptimizedModule.get_compiler_config()
assert not hasattr(new_mod, "get_compiler_config")
new_mod.get_compiler_config = get_compiler_config
return new_mod
if inspect.isclass(fn):
# User has wrapped the class with compile/disable decorator. Apply
# disable to init/call method.
cls_obj = fn
cls_obj.__call__ = self(cls_obj.__call__)
if issubclass(cls_obj, torch.nn.Module):
# NN module variable tracker directly inlines the _call_impl.
cls_obj._call_impl = self(cls_obj._call_impl)
return cls_obj
assert callable(fn), (
f"A callable function is expected, but {type(fn)} is provided."
)
try:
filename = inspect.getsourcefile(fn)
except TypeError:
filename = None
if config.debug_force_nested_calls:
fn = external_utils.wrap_inline(fn)
elif config.wrap_top_frame or (
(filename is None or trace_rules.check(fn))
and (
getattr(fn, "__name__", "")
not in ["_call_impl", "_wrapped_call_impl", "_lazy_forward"]
)
and filename not in DONT_WRAP_FILES
):
# call to a builtin without a frame for us to capture
fn = external_utils.wrap_inline(fn)
def do_nothing(*arg: Any, **kwargs: Any) -> None:
pass
callback: Callable[..., Any] = do_nothing
if hasattr(self, "callback"):
callback = self.callback # type: ignore[assignment]
is_jit_tracing = torch._C._is_tracing
is_fx_symbolic_tracing = torch.fx._symbolic_trace.is_fx_symbolic_tracing
@functools.wraps(fn)
def compile_wrapper(*args: Any, **kwargs: Any) -> Any:
prior = set_eval_frame(None)
try:
# We shouldn't compile inside kernel invocation.
if tracing_context := torch._guards.TracingContext.try_get():
if (
tracing_context.fake_mode is not None
and tracing_context.fake_mode.in_kernel_invocation
):
return fn(*args, **kwargs)
# Skip nested compile - just inline the function
if is_fx_symbolic_tracing():
if config.error_on_nested_fx_trace:
raise RuntimeError(
"Detected that you are using FX to symbolically trace "
"a dynamo-optimized function. This is not supported at the moment."
)
else:
return fn(*args, **kwargs)
if is_jit_tracing():
raise RuntimeError(
"Detected that you are using FX to torch.jit.trace "
"a dynamo-optimized function. This is not supported at the moment."
)
cleanups = [enter() for enter in self.enter_exit_hooks]
prior_skip_guard_eval_unsafe = set_skip_guard_eval_unsafe(
_is_skip_guard_eval_unsafe_stance()
)
prior_error_on_graph_break = None
if not self.fullgraph and self.error_on_graph_break is not None:
prior_error_on_graph_break = _get_error_on_graph_break()
_set_error_on_graph_break(self.error_on_graph_break)
# Ensure that if an assertion occurs after graph pushes
# something onto the DynamicLayerStack then we pop it off (the
# constructed graph code isn't guarded with try/finally).
#
# This used to be a context but putting a `with` here is a noticeable
# perf regression (#126293)
saved_dynamic_layer_stack_depth = (
torch._C._functorch.get_dynamic_layer_stack_depth()
)
_maybe_set_eval_frame(_callback_from_stance(callback))
try:
return fn(*args, **kwargs)
except Unsupported as e:
if config.verbose:
raise
# strip internal tracebacks from causes
cur_exn: BaseException = e
while cur_exn.__cause__ is not None:
cur_exn.__cause__.with_traceback(None)
cur_exn = cur_exn.__cause__
# pyrefly: ignore [invalid-inheritance]
raise e.with_traceback(None) from e.__cause__ # User compiler error
except ShortenTraceback as e:
# Failures in the backend likely don't have useful
# data in the TorchDynamo frames, so we strip them out.
raise e.remove_dynamo_frames() from None # see TORCHDYNAMO_VERBOSE=1
finally:
# Restore the dynamic layer stack depth if necessary.
set_eval_frame(None)
if prior_error_on_graph_break is not None:
_set_error_on_graph_break(prior_error_on_graph_break)
torch._C._functorch.pop_dynamic_layer_stack_and_undo_to_depth(
saved_dynamic_layer_stack_depth
)
set_skip_guard_eval_unsafe(prior_skip_guard_eval_unsafe)
for cleanup in cleanups:
cleanup()
finally:
_maybe_set_eval_frame(prior)
# hooks to properly handle inlining
if self.error_on_graph_break is not None:
compile_wrapper._torchdynamo_inline = ( # type: ignore[attr-defined]
external_utils.wrap_inline_with_error_on_graph_break(
fn, self.error_on_graph_break
)
)
else:
compile_wrapper._torchdynamo_inline = fn # type: ignore[attr-defined]
# Save the function pointer so the original callable can be found when
# decorators are nested.
compile_wrapper._torchdynamo_orig_callable = fn # type: ignore[attr-defined]
# when compiling a user function instead of an nn.Module,
# provide the public api compile_wrapper.get_compiler_config()
assert not hasattr(compile_wrapper, "get_compiler_config")
compile_wrapper.get_compiler_config = get_compiler_config # type: ignore[attr-defined]
if torch._dynamo.config.enable_aot_compile:
compile_wrapper.aot_compile = aot_compile # type: ignore[attr-defined]
# If the function is called using torch._dynamo.optimize decorator, we
# should prevent any type of skipping.
if callback not in (None, False):
if not hasattr(fn, "__code__"):
raise RuntimeError(
textwrap.dedent(
"""
torch._dynamo.optimize is called on a non function object.
If this is a callable class, please wrap the relevant code into a function and optimize the
wrapper function.
>> class CallableClass:
>> def __init__(self) -> None:
>> super().__init__()
>> self.relu = torch.nn.ReLU()
>>
>> def __call__(self, x):
>> return self.relu(torch.sin(x))
>>
>> def print_hello(self):
>> print("Hello world")
>>
>> mod = CallableClass()
If you want to optimize the __call__ function and other code, wrap that up in a function
>> def wrapper_fn(x):
>> y = mod(x)
>> return y.sum()
and then optimize the wrapper_fn
>> opt_wrapper_fn = torch._dynamo.optimize(wrapper_fn)
"""
)
)
always_optimize_code_objects[fn.__code__] = True
return compile_wrapper
| _TorchDynamoContext |
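The wrapper above is what ultimately backs the public compile entry points. A minimal sketch of how it is reached, assuming a recent PyTorch install and using the debug-friendly "eager" backend (any other backend choice is an assumption, not taken from the snippet):

import torch

def fn(x):
    return torch.sin(x) + torch.cos(x)

# torch.compile routes through the dynamo context machinery shown above; the
# returned callable is the compile_wrapper that sets and restores the eval frame.
compiled_fn = torch.compile(fn, backend="eager")
print(compiled_fn(torch.randn(4)))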
python | django__django | tests/unmanaged_models/tests.py | {
"start": 1349,
"end": 2171
} | class ____(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be
created.
"""
table = Unmanaged2._meta.get_field("mm").m2m_db_table()
tables = connection.introspection.table_names()
self.assertNotIn(
table, tables, "Table '%s' should not exist, but it does." % table
)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should
be created.
"""
table = Managed1._meta.get_field("mm").m2m_db_table()
tables = connection.introspection.table_names()
self.assertIn(table, tables, "Table '%s' does not exist." % table)
| ManyToManyUnmanagedTests |
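The Managed1/Unmanaged2 models referenced by these tests are defined elsewhere in the test app. A rough sketch of what such models look like, to be placed in an installed app's models.py (names and fields here are illustrative, not the actual fixtures):

from django.db import models

class OtherUnmanaged(models.Model):
    class Meta:
        managed = False  # Django never creates or drops this table

class UnmanagedWithM2M(models.Model):
    mm = models.ManyToManyField(OtherUnmanaged)

    class Meta:
        managed = False  # the implicit M2M table is skipped too

class ManagedWithM2M(models.Model):
    mm = models.ManyToManyField(OtherUnmanaged)  # managed side: the M2M table *is* created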
python | kamyu104__LeetCode-Solutions | Python/gray-code.py | {
"start": 481,
"end": 658
} | class ____(object):
def grayCode(self, n):
"""
:type n: int
:rtype: List[int]
"""
return [i >> 1 ^ i for i in xrange(1 << n)]
| Solution2 |
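The one-liner relies on the reflected-Gray-code identity g(i) = i XOR (i >> 1); since `>>` binds tighter than `^`, `i >> 1 ^ i` is that same expression, and `xrange` makes it Python 2 only. A Python 3 equivalent with a tiny check:

def gray_code(n):
    # g(i) = i ^ (i >> 1) maps index i to the i-th reflected Gray code
    return [i ^ (i >> 1) for i in range(1 << n)]

print(gray_code(2))  # [0, 1, 3, 2] -- consecutive entries differ in exactly one bit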
python | pallets__click | src/click/types.py | {
"start": 24320,
"end": 28757
} | class ____(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or upon
first IO. The default is to be non-lazy for standard input and output
streams as well as files opened for reading, `lazy` otherwise. When opening a
file lazily for reading, it is still opened temporarily for validation, but
will not be held open until first IO. lazy is mainly useful when opening
for writing to avoid creating the file until it is needed.
Files can also be opened atomically in which case all writes go into a
separate file in the same folder and upon completion the file will
be moved over to the original location. This is useful if a file
regularly read by other users is modified.
See :ref:`file-args` for more information.
.. versionchanged:: 2.0
Added the ``atomic`` parameter.
"""
name = "filename"
envvar_list_splitter: t.ClassVar[str] = os.path.pathsep
def __init__(
self,
mode: str = "r",
encoding: str | None = None,
errors: str | None = "strict",
lazy: bool | None = None,
atomic: bool = False,
) -> None:
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def to_info_dict(self) -> dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(mode=self.mode, encoding=self.encoding)
return info_dict
def resolve_lazy_flag(self, value: str | os.PathLike[str]) -> bool:
if self.lazy is not None:
return self.lazy
if os.fspath(value) == "-":
return False
elif "w" in self.mode:
return True
return False
def convert(
self,
value: str | os.PathLike[str] | t.IO[t.Any],
param: Parameter | None,
ctx: Context | None,
) -> t.IO[t.Any]:
if _is_file_like(value):
return value
value = t.cast("str | os.PathLike[str]", value)
try:
lazy = self.resolve_lazy_flag(value)
if lazy:
lf = LazyFile(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
if ctx is not None:
ctx.call_on_close(lf.close_intelligently)
return t.cast("t.IO[t.Any]", lf)
f, should_close = open_stream(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except OSError as e:
self.fail(f"'{format_filename(value)}': {e.strerror}", param, ctx)
def shell_complete(
self, ctx: Context, param: Parameter, incomplete: str
) -> list[CompletionItem]:
"""Return a special completion marker that tells the completion
system to use the shell to provide file path completions.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
return [CompletionItem(incomplete, type="file")]
def _is_file_like(value: t.Any) -> te.TypeGuard[t.IO[t.Any]]:
return hasattr(value, "read") or hasattr(value, "write")
| File |
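A minimal command using this parameter type, assuming a standard click install; passing `-` for either argument selects stdin/stdout as described in the docstring:

import click

@click.command()
@click.argument("src", type=click.File("r"))
@click.argument("dst", type=click.File("w", lazy=True))
def copy(src, dst):
    # dst is opened lazily: the file is only created once the first write happens
    dst.write(src.read())

if __name__ == "__main__":
    copy()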
python | google__pytype | pytype/abstract/_instance_base.py | {
"start": 8972,
"end": 11414
} | class ____(SimpleValue):
"""An instance of some object."""
def __init__(
self,
cls: "_base.BaseValue | _typing.LateAnnotation",
ctx: "context.Context",
container=None,
) -> None:
super().__init__(cls.name, ctx)
self.cls = cls
self._instance_type_parameters_loaded = False
self._container = container
cls.register_instance(self)
def _load_instance_type_parameters(self) -> None:
if self._instance_type_parameters_loaded:
return
all_formal_type_parameters: "datatypes.AliasingDict[str, SimpleValue]" = (
datatypes.AliasingDict()
)
abstract_utils.parse_formal_type_parameters(
self.cls, None, all_formal_type_parameters, self._container
)
self._instance_type_parameters: (
"datatypes.AliasingDict[str, cfg.Variable]"
) = self._instance_type_parameters.copy(
aliases=all_formal_type_parameters.aliases
)
for name, param in all_formal_type_parameters.items():
if param is None:
value = self.ctx.program.NewVariable()
log.info("Initializing type param %s: %r", name, value)
self._instance_type_parameters[name] = value
else:
self._instance_type_parameters[name] = param.instantiate(
self.ctx.root_node, self._container or self
)
# We purposely set this flag at the very end so that accidentally accessing
# instance_type_parameters during loading will trigger an obvious crash due
# to infinite recursion, rather than silently returning an incomplete dict.
self._instance_type_parameters_loaded = True
@property
def full_name(self) -> str:
return self.cls.full_name
@property
def instance_type_parameters(
self,
) -> "datatypes.AliasingDict[str, cfg.Variable]":
self._load_instance_type_parameters()
return self._instance_type_parameters
def get_type_key(
self, seen: set[_base.BaseValue] | None = None
) -> "frozenset[_base.BaseValue | _typing.LateAnnotation | tuple[str, frozenset]] | type[_base.BaseValue|_typing.LateAnnotation]":
if not self._type_key and not self._instance_type_parameters_loaded:
# If we might be the middle of loading this class, don't try to access
# instance_type_parameters. We don't cache this intermediate type key
# because we want it to be overwritten by the real one.
return frozenset([self.cls])
return super().get_type_key(seen)
| Instance |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 26596,
"end": 30490
} | class ____(Transform):
r"""
Transforms an unconstrained real vector :math:`x` with length :math:`D*(D-1)/2` into the
Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower
triangular matrix with positive diagonals and unit Euclidean norm for each row.
The transform is processed as follows:
1. First we convert x into a lower triangular matrix in row order.
2. For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of
class :class:`StickBreakingTransform` to transform :math:`X_i` into a
unit Euclidean length vector using the following steps:
- Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`.
- Transforms into an unsigned domain: :math:`z_i = r_i^2`.
- Applies :math:`s_i = StickBreakingTransform(z_i)`.
- Transforms back into signed domain: :math:`y_i = sign(r_i) * \sqrt{s_i}`.
"""
domain = constraints.real_vector
codomain = constraints.corr_cholesky
bijective = True
def _call(self, x):
x = torch.tanh(x)
eps = torch.finfo(x.dtype).eps
x = x.clamp(min=-1 + eps, max=1 - eps)
r = vec_to_tril_matrix(x, diag=-1)
# apply stick-breaking on the squared values
# Note that y = sign(r) * sqrt(z * z1m_cumprod)
# = (sign(r) * sqrt(z)) * sqrt(z1m_cumprod) = r * sqrt(z1m_cumprod)
# pyrefly: ignore [unsupported-operation]
z = r**2
z1m_cumprod_sqrt = (1 - z).sqrt().cumprod(-1)
# Diagonal elements must be 1.
r = r + torch.eye(r.shape[-1], dtype=r.dtype, device=r.device)
y = r * pad(z1m_cumprod_sqrt[..., :-1], [1, 0], value=1)
return y
def _inverse(self, y):
# inverse stick-breaking
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
y_cumsum = 1 - torch.cumsum(y * y, dim=-1)
y_cumsum_shifted = pad(y_cumsum[..., :-1], [1, 0], value=1)
y_vec = tril_matrix_to_vec(y, diag=-1)
y_cumsum_vec = tril_matrix_to_vec(y_cumsum_shifted, diag=-1)
t = y_vec / (y_cumsum_vec).sqrt()
# inverse of tanh
x = (t.log1p() - t.neg().log1p()) / 2
return x
def log_abs_det_jacobian(self, x, y, intermediates=None):
# Because domain and codomain are two spaces with different dimensions, determinant of
# Jacobian is not well-defined. We return `log_abs_det_jacobian` of `x` and the
# flattened lower triangular part of `y`.
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
y1m_cumsum = 1 - (y * y).cumsum(dim=-1)
# by taking diagonal=-2, we don't need to shift z_cumprod to the right
# also works for 2 x 2 matrix
y1m_cumsum_tril = tril_matrix_to_vec(y1m_cumsum, diag=-2)
stick_breaking_logdet = 0.5 * (y1m_cumsum_tril).log().sum(-1)
tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.0)).sum(dim=-1)
return stick_breaking_logdet + tanh_logdet
def forward_shape(self, shape):
# Reshape from (..., N) to (..., D, D).
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
N = shape[-1]
D = round((0.25 + 2 * N) ** 0.5 + 0.5)
if D * (D - 1) // 2 != N:
raise ValueError("Input is not a flattened lower-diagonal number")
return shape[:-1] + (D, D)
def inverse_shape(self, shape):
# Reshape from (..., D, D) to (..., N).
if len(shape) < 2:
raise ValueError("Too few dimensions on input")
if shape[-2] != shape[-1]:
raise ValueError("Input is not square")
D = shape[-1]
N = D * (D - 1) // 2
return shape[:-2] + (N,)
| CorrCholeskyTransform |
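A quick numerical check of the shape bookkeeping above, assuming this is the CorrCholeskyTransform exposed by torch.distributions.transforms (per the target name): D = 4 needs D*(D-1)/2 = 6 unconstrained inputs, and each row of the output has unit Euclidean norm.

import torch
from torch.distributions.transforms import CorrCholeskyTransform

t = CorrCholeskyTransform()
x = torch.randn(6)      # 6 = 4 * 3 / 2 unconstrained parameters
L = t(x)                # 4 x 4 lower-triangular Cholesky factor
print(L.shape)          # torch.Size([4, 4])
print((L * L).sum(-1))  # each row norm is 1, up to floating-point error
print(torch.allclose(t.inv(L), x, atol=1e-4))  # should round-trip for typical inputs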
python | pypa__warehouse | tests/unit/admin/views/test_macaroons.py | {
"start": 3338,
"end": 4188
} | class ____:
def test_delete_succeeds_and_redirects(self, db_request, macaroon_service):
user = UserFactory.create()
db_request.user = user
_, macaroon = macaroon_service.create_macaroon(
location="test",
description="test",
scopes=[caveats.RequestUser(user_id=str(user.id))],
user_id=user.id,
)
macaroon_id = str(macaroon.id)
db_request.matchdict["macaroon_id"] = macaroon_id
db_request.route_url = pretend.call_recorder(
lambda *a, **kw: "/admin/macaroons/decode"
)
result = views.macaroon_delete(db_request)
assert result.status_code == views.HTTPSeeOther.code
assert result.location == "/admin/macaroons/decode"
assert macaroon_service.find_macaroon(macaroon_id) is None
| TestMacaroonDelete |
python | kamyu104__LeetCode-Solutions | Python/maximum-compatibility-score-sum.py | {
"start": 2187,
"end": 3314
} | class ____(object):
def maxCompatibilitySum(self, students, mentors):
"""
:type students: List[List[int]]
:type mentors: List[List[int]]
:rtype: int
"""
def popcount(n): # Time: O(logn) ~= O(1) if n is a 32-bit number
result = 0
while n:
n &= n-1
result += 1
return result
def masks(vvi):
result = []
for vi in vvi:
mask, bit = 0, 1
for i in xrange(len(vi)):
if vi[i]:
mask |= bit
bit <<= 1
result.append(mask)
return result
nums1, nums2 = masks(students), masks(mentors)
dp = [(0, 0)]*(2**len(nums2))
for mask in xrange(len(dp)):
bit = 1
for i in xrange(len(nums2)):
if (mask&bit) == 0:
dp[mask|bit] = max(dp[mask|bit], (dp[mask][0]+(len(students[0])-popcount(nums1[dp[mask][1]]^nums2[i])), dp[mask][1]+1))
bit <<= 1
return dp[-1][0]
| Solution2 |
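The DP packs "which mentors are already taken" into a bitmask; a brute-force cross-check over all assignments (fine for these tiny inputs) should agree with it. The call below uses the classic sample input, whose best pairing scores 8; note that `xrange` again makes the snippet above Python 2 only.

from itertools import permutations

def max_compat_bruteforce(students, mentors):
    best = 0
    for perm in permutations(range(len(mentors))):
        score = sum(
            sum(a == b for a, b in zip(students[i], mentors[j]))
            for i, j in enumerate(perm)
        )
        best = max(best, score)
    return best

print(max_compat_bruteforce([[1, 1, 0], [1, 0, 1], [0, 0, 1]],
                            [[1, 0, 0], [0, 0, 1], [1, 1, 0]]))  # 8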
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 40066,
"end": 40178
} | class ____(TestEnPh):
def setup_faker(self):
self.fake = Faker("tl_PH")
Faker.seed(0)
| TestTlPh |
python | tensorflow__tensorflow | tensorflow/python/distribute/multi_process_runner.py | {
"start": 45834,
"end": 46501
} | class ____(RuntimeError):
"""An error indicating there is at least one subprocess with unexpected exit.
When this is raised, a namedtuple object representing the multi-process run
result can be retrieved by
`tf.__internal__.distribute.multi_process_runner
.UnexpectedSubprocessExitError`'s
`mpr_result` attribute. See
`tf.__internal__.distribute.multi_process_runner.run` for more information.
"""
def __init__(self, msg, mpr_result):
super(UnexpectedSubprocessExitError, self).__init__(msg)
self.mpr_result = mpr_result
@tf_export(
'__internal__.distribute.multi_process_runner.NotInitializedError', v1=[])
| UnexpectedSubprocessExitError |
python | PyCQA__pylint | tests/functional/b/broad_exception/broad_exception_caught.py | {
"start": 104,
"end": 603
} | class ____(CustomBroadException):
pass
try:
__revision__ += 1
except Exception: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except BaseException: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except ValueError:
print('error')
try:
__revision__ += 1
except CustomBroadException: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except CustomNarrowException:
print('error')
| CustomNarrowException |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 19477,
"end": 20962
} | class ____(InlineProcessor):
""" Store raw inline html and return a placeholder. """
def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]:
""" Store the text of `group(1)` of a pattern and return a placeholder string. """
rawhtml = self.backslash_unescape(self.unescape(m.group(1)))
place_holder = self.md.htmlStash.store(rawhtml)
return place_holder, m.start(0), m.end(0)
def unescape(self, text: str) -> str:
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.md.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m: re.Match[str]) -> str:
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.md.serializer(value)
except Exception:
return r'\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
def backslash_unescape(self, text: str) -> str:
""" Return text with backslash escapes undone (backslashes are restored). """
try:
RE = self.md.treeprocessors['unescape'].RE
except KeyError: # pragma: no cover
return text
def _unescape(m: re.Match[str]) -> str:
return chr(int(m.group(1)))
return RE.sub(_unescape, text)
| HtmlInlineProcessor |
python | doocs__leetcode | solution/0600-0699/0621.Task Scheduler/Solution.py | {
"start": 0,
"end": 237
} | class ____:
def leastInterval(self, tasks: List[str], n: int) -> int:
cnt = Counter(tasks)
x = max(cnt.values())
s = sum(v == x for v in cnt.values())
return max(len(tasks), (x - 1) * (n + 1) + s)
| Solution |
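The closed form counts the frames forced by the most frequent task: with x occurrences of the busiest task(s) and s tasks tied at that count, the schedule needs at least (x - 1) * (n + 1) + s slots, and never fewer than len(tasks). Worked through on the usual example:

from collections import Counter

tasks, n = ["A", "A", "A", "B", "B", "B"], 2
cnt = Counter(tasks)
x = max(cnt.values())                  # 3: occurrences of the most frequent task
s = sum(v == x for v in cnt.values())  # 2: tasks tied at that maximum
print(max(len(tasks), (x - 1) * (n + 1) + s))  # 8, e.g. A B idle A B idle A B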
python | huggingface__transformers | src/transformers/utils/import_utils.py | {
"start": 79337,
"end": 100397
} | class ____:
def __init__(self, backend_requirement: str):
self.package_name, self.version_comparison, self.version = split_package_version(backend_requirement)
if self.package_name not in BACKENDS_MAPPING:
raise ValueError(
f"Backends should be defined in the BACKENDS_MAPPING. Offending backend: {self.package_name}"
)
def get_installed_version(self) -> str:
"""Return the currently installed version of the backend"""
is_available, current_version = _is_package_available(self.package_name, return_version=True)
if not is_available:
raise RuntimeError(f"Backend {self.package_name} is not available.")
return current_version
def is_satisfied(self) -> bool:
return VersionComparison.from_string(self.version_comparison)(
version.parse(self.get_installed_version()), version.parse(self.version)
)
def __repr__(self) -> str:
return f'Backend("{self.package_name}", {VersionComparison[self.version_comparison]}, "{self.version}")'
@property
def error_message(self):
return (
f"{{0}} requires the {self.package_name} library version {self.version_comparison}{self.version}. That"
f" library was not found with this version in your environment."
)
def requires(*, backends=()):
"""
This decorator enables two things:
- Attaching a `__backends` tuple to an object to see what are the necessary backends for it
to execute correctly without instantiating it
- The '@requires' string is used to dynamically import objects
"""
if not isinstance(backends, tuple):
raise TypeError("Backends should be a tuple.")
applied_backends = []
for backend in backends:
if backend in BACKENDS_MAPPING:
applied_backends.append(backend)
else:
if any(key in backend for key in ["=", "<", ">"]):
applied_backends.append(Backend(backend))
else:
raise ValueError(f"Backend should be defined in the BACKENDS_MAPPING. Offending backend: {backend}")
def inner_fn(fun):
fun.__backends = applied_backends
return fun
return inner_fn
BASE_FILE_REQUIREMENTS = {
lambda e: "modeling_" in e: ("torch",),
lambda e: e.startswith("tokenization_") and e.endswith("_fast"): ("tokenizers",),
lambda e: e.startswith("image_processing_") and e.endswith("_fast"): ("vision", "torch", "torchvision"),
lambda e: e.startswith("image_processing_"): ("vision",),
lambda e: e.startswith("video_processing_"): ("vision", "torch", "torchvision"),
}
def fetch__all__(file_content) -> list[str]:
"""
Returns the content of the __all__ variable in the file content.
Returns None if not defined, otherwise returns a list of strings.
"""
if "__all__" not in file_content:
return []
start_index = None
lines = file_content.splitlines()
for index, line in enumerate(lines):
if line.startswith("__all__"):
start_index = index
# There is no line starting with `__all__`
if start_index is None:
return []
lines = lines[start_index:]
if not lines[0].startswith("__all__"):
raise ValueError(
"fetch__all__ accepts a list of lines, with the first line being the __all__ variable declaration"
)
# __all__ is defined on a single line
if lines[0].endswith("]"):
return [obj.strip("\"' ") for obj in lines[0].split("=")[1].strip(" []").split(",")]
# __all__ is defined on multiple lines
else:
_all: list[str] = []
for __all__line_index in range(1, len(lines)):
if lines[__all__line_index].strip() == "]":
return _all
else:
_all.append(lines[__all__line_index].strip("\"', "))
return _all
@lru_cache
def create_import_structure_from_path(module_path):
"""
This method takes the path to a file/a folder and returns the import structure.
If a file is given, it will return the import structure of the parent folder.
Import structures are designed to be digestible by `_LazyModule` objects. They are
created from the __all__ definitions in each files as well as the `@require` decorators
above methods and objects.
The import structure allows explicit display of the required backends for a given object.
These backends are specified in two ways:
1. Through their `@require`, if they are exported with that decorator. This `@require` decorator
accepts a `backend` tuple kwarg mentioning which backends are required to run this object.
2. If an object is defined in a file with "default" backends, it will have, at a minimum, this
backend specified. The default backends are defined according to the filename:
- If a file is named like `modeling_*.py`, it will have a `torch` backend
- If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend
- If a file is named like `image_processing*_fast.py`, it will have a `torchvision` + `torch` backend
Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed.
Should an object be imported without its required backends being in the environment, any attempt to use the
object will raise an error mentioning which backend(s) should be added to the environment in order to use
that object.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizer'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
"""
import_structure = {}
if os.path.isfile(module_path):
module_path = os.path.dirname(module_path)
directory = module_path
adjacent_modules = []
for f in os.listdir(module_path):
if f != "__pycache__" and os.path.isdir(os.path.join(module_path, f)):
import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f))
elif not os.path.isdir(os.path.join(directory, f)):
adjacent_modules.append(f)
# We're only taking a look at files different from __init__.py
# We could theoretically require things directly from the __init__.py
# files, but this is not supported at this time.
if "__init__.py" in adjacent_modules:
adjacent_modules.remove("__init__.py")
# Modular files should not be imported
def find_substring(substring, list_):
return any(substring in x for x in list_)
if find_substring("modular_", adjacent_modules) and find_substring("modeling_", adjacent_modules):
adjacent_modules = [module for module in adjacent_modules if "modular_" not in module]
module_requirements = {}
for module_name in adjacent_modules:
# Only modules ending in `.py` are accepted here.
if not module_name.endswith(".py"):
continue
with open(os.path.join(directory, module_name), encoding="utf-8") as f:
file_content = f.read()
# Remove the .py suffix
module_name = module_name[:-3]
previous_line = ""
previous_index = 0
# Some files have some requirements by default.
# For example, any file named `modeling_xxx.py`
# should have torch as a required backend.
base_requirements = ()
for string_check, requirements in BASE_FILE_REQUIREMENTS.items():
if string_check(module_name):
base_requirements = requirements
break
# Objects that have a `@require` assigned to them will get exported
# with the backends specified in the decorator as well as the file backends.
exported_objects = set()
if "@requires" in file_content:
lines = file_content.split("\n")
for index, line in enumerate(lines):
# This allows exporting items with other decorators. We'll take a look
# at the line that follows at the same indentation level.
if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@requires"):
continue
# Skipping line enables putting whatever we want between the
# export() call and the actual class/method definition.
# This is what enables having # Copied from statements, docs, etc.
skip_line = False
if "@requires" in previous_line:
skip_line = False
# Backends are defined on the same line as export
if "backends" in previous_line:
backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0]
backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ") if b]))
# Backends are defined in the lines following export, for example such as:
# @export(
# backends=(
# "sentencepiece",
# "torch",
# )
# )
#
# or
#
# @export(
# backends=(
# "sentencepiece",
# )
# )
elif "backends" in lines[previous_index + 1]:
backends = []
for backend_line in lines[previous_index:index]:
if "backends" in backend_line:
backend_line = backend_line.split("=")[1]
if '"' in backend_line or "'" in backend_line:
if ", " in backend_line:
backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", "))
else:
backends.append(backend_line.strip("()\"', "))
# If the line is only a ')', then we reached the end of the backends and we break.
if backend_line.strip() == ")":
break
backends = tuple(backends)
# No backends are registered for export
else:
backends = ()
backends = frozenset(backends + base_requirements)
if backends not in module_requirements:
module_requirements[backends] = {}
if module_name not in module_requirements[backends]:
module_requirements[backends][module_name] = set()
if not line.startswith("class") and not line.startswith("def"):
skip_line = True
else:
start_index = 6 if line.startswith("class") else 4
object_name = line[start_index:].split("(")[0].strip(":")
module_requirements[backends][module_name].add(object_name)
exported_objects.add(object_name)
if not skip_line:
previous_line = line
previous_index = index
# All objects that are in __all__ should be exported by default.
# These objects are exported with the file backends.
if "__all__" in file_content:
for _all_object in fetch__all__(file_content):
if _all_object not in exported_objects:
backends = frozenset(base_requirements)
if backends not in module_requirements:
module_requirements[backends] = {}
if module_name not in module_requirements[backends]:
module_requirements[backends][module_name] = set()
module_requirements[backends][module_name].add(_all_object)
import_structure = {**module_requirements, **import_structure}
return import_structure
def spread_import_structure(nested_import_structure):
"""
This method takes as input an unordered import structure and brings the required backends at the top-level,
aggregating modules and objects under their required backends.
Here's an example of an input import structure at the src.transformers.models level:
{
'albert': {
frozenset(): {
'configuration_albert': {'AlbertConfig'}
},
frozenset({'tokenizers'}): {
'tokenization_albert_fast': {'AlbertTokenizer'}
},
},
'align': {
frozenset(): {
'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'processing_align': {'AlignProcessor'}
},
},
'altclip': {
frozenset(): {
'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'processing_altclip': {'AltCLIPProcessor'},
}
}
}
Here's an example of an output import structure at the src.transformers.models level:
{
frozenset({'tokenizers'}): {
'albert.tokenization_albert_fast': {'AlbertTokenizer'}
},
frozenset(): {
'albert.configuration_albert': {'AlbertConfig'},
'align.processing_align': {'AlignProcessor'},
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'altclip.processing_altclip': {'AltCLIPProcessor'}
}
}
"""
def propagate_frozenset(unordered_import_structure):
frozenset_first_import_structure = {}
for _key, _value in unordered_import_structure.items():
# If the value is not a dict but a string, no need for custom manipulation
if not isinstance(_value, dict):
frozenset_first_import_structure[_key] = _value
elif any(isinstance(v, frozenset) for v in _value):
for k, v in _value.items():
if isinstance(k, frozenset):
# Here we want to switch around _key and k to propagate k upstream if it is a frozenset
if k not in frozenset_first_import_structure:
frozenset_first_import_structure[k] = {}
if _key not in frozenset_first_import_structure[k]:
frozenset_first_import_structure[k][_key] = {}
frozenset_first_import_structure[k][_key].update(v)
else:
# If k is not a frozenset, it means that the dictionary is not "level": some keys (top-level)
# are frozensets, whereas some are not -> frozenset keys are at an unknown depth-level of the
# dictionary.
#
# We recursively propagate the frozenset for this specific dictionary so that the frozensets
# are at the top-level when we handle them.
propagated_frozenset = propagate_frozenset({k: v})
for r_k, r_v in propagated_frozenset.items():
if isinstance(_key, frozenset):
if r_k not in frozenset_first_import_structure:
frozenset_first_import_structure[r_k] = {}
if _key not in frozenset_first_import_structure[r_k]:
frozenset_first_import_structure[r_k][_key] = {}
# _key is a frozenset -> we switch around the r_k and _key
frozenset_first_import_structure[r_k][_key].update(r_v)
else:
if _key not in frozenset_first_import_structure:
frozenset_first_import_structure[_key] = {}
if r_k not in frozenset_first_import_structure[_key]:
frozenset_first_import_structure[_key][r_k] = {}
# _key is not a frozenset -> we keep the order of r_k and _key
frozenset_first_import_structure[_key][r_k].update(r_v)
else:
frozenset_first_import_structure[_key] = propagate_frozenset(_value)
return frozenset_first_import_structure
def flatten_dict(_dict, previous_key=None):
items = []
for _key, _value in _dict.items():
_key = f"{previous_key}.{_key}" if previous_key is not None else _key
if isinstance(_value, dict):
items.extend(flatten_dict(_value, _key).items())
else:
items.append((_key, _value))
return dict(items)
# The tuples contain the necessary backends. We want these first, so we propagate them up the
# import structure.
ordered_import_structure = nested_import_structure
# 6 is a number that gives us sufficient depth to go through all files and foreseeable folder depths
# while not taking too long to parse.
for i in range(6):
ordered_import_structure = propagate_frozenset(ordered_import_structure)
# We then flatten the dict so that it references a module path.
flattened_import_structure = {}
for key, value in ordered_import_structure.copy().items():
if isinstance(key, str):
del ordered_import_structure[key]
else:
flattened_import_structure[key] = flatten_dict(value)
return flattened_import_structure
@lru_cache
def define_import_structure(module_path: str, prefix: str | None = None) -> IMPORT_STRUCTURE_T:
"""
This method takes a module_path as input and creates an import structure digestible by a _LazyModule.
Here's an example of an output import structure at the src.transformers.models level:
{
frozenset({'tokenizers'}): {
'albert.tokenization_albert_fast': {'AlbertTokenizer'}
},
frozenset(): {
'albert.configuration_albert': {'AlbertConfig'},
'align.processing_align': {'AlignProcessor'},
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},
'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},
'altclip.processing_altclip': {'AltCLIPProcessor'}
}
}
The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects.
If `prefix` is not None, it will add that prefix to all keys in the returned dict.
"""
import_structure = create_import_structure_from_path(module_path)
spread_dict = spread_import_structure(import_structure)
if prefix is None:
return spread_dict
else:
spread_dict = {k: {f"{prefix}.{kk}": vv for kk, vv in v.items()} for k, v in spread_dict.items()}
return spread_dict
def clear_import_cache() -> None:
"""
Clear cached Transformers modules to allow reloading modified code.
This is useful when actively developing/modifying Transformers code.
"""
# Get all transformers modules
transformers_modules = [mod_name for mod_name in sys.modules if mod_name.startswith("transformers.")]
# Remove them from sys.modules
for mod_name in transformers_modules:
module = sys.modules[mod_name]
# Clear _LazyModule caches if applicable
if isinstance(module, _LazyModule):
module._objects = {} # Clear cached objects
del sys.modules[mod_name]
# Force reload main transformers module
if "transformers" in sys.modules:
main_module = sys.modules["transformers"]
if isinstance(main_module, _LazyModule):
main_module._objects = {} # Clear cached objects
importlib.reload(main_module)
| Backend |
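A small illustration of the `requires` decorator defined above, usable only inside this module's scope; the class name is made up, and it assumes "torch" is one of the keys in BACKENDS_MAPPING (plain backend names are stored as strings, versioned ones as Backend objects):

@requires(backends=("torch",))
class MyTorchOnlyObject:
    pass

# The decorator only records the requirement; the lazy-import machinery that
# builds the import structure reads it back later.
print(getattr(MyTorchOnlyObject, "__backends"))  # ['torch']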
python | simplejson__simplejson | simplejson/tests/test_scanstring.py | {
"start": 132,
"end": 7648
} | class ____(TestCase):
# The bytes type is intentionally not used in most of these tests
# under Python 3 because the decoder immediately coerces to str before
# calling scanstring. In Python 2 we are testing the code paths
# for both unicode and str.
#
# The reason this is done is because Python 3 would require
# entirely different code paths for parsing bytes and str.
#
def test_py_scanstring(self):
self._test_scanstring(simplejson.decoder.py_scanstring)
def test_c_scanstring(self):
if not simplejson.decoder.c_scanstring:
return
self._test_scanstring(simplejson.decoder.c_scanstring)
self.assertTrue(isinstance(simplejson.decoder.c_scanstring('""', 0)[0], str))
def _test_scanstring(self, scanstring):
if sys.maxunicode == 65535:
self.assertEqual(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 6))
else:
self.assertEqual(
scanstring(u'"z\U0001d120x"', 1, None, True),
(u'z\U0001d120x', 5))
self.assertEqual(
scanstring('"\\u007b"', 1, None, True),
(u'{', 8))
self.assertEqual(
scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
(u'A JSON payload should be an object or array, not a string.', 60))
self.assertEqual(
scanstring('["Unclosed array"', 2, None, True),
(u'Unclosed array', 17))
self.assertEqual(
scanstring('["extra comma",]', 2, None, True),
(u'extra comma', 14))
self.assertEqual(
scanstring('["double extra comma",,]', 2, None, True),
(u'double extra comma', 21))
self.assertEqual(
scanstring('["Comma after the close"],', 2, None, True),
(u'Comma after the close', 24))
self.assertEqual(
scanstring('["Extra close"]]', 2, None, True),
(u'Extra close', 14))
self.assertEqual(
scanstring('{"Extra comma": true,}', 2, None, True),
(u'Extra comma', 14))
self.assertEqual(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
(u'Extra value after close', 26))
self.assertEqual(
scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
(u'Illegal expression', 21))
self.assertEqual(
scanstring('{"Illegal invocation": alert()}', 2, None, True),
(u'Illegal invocation', 21))
self.assertEqual(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
(u'Numbers cannot have leading zeroes', 37))
self.assertEqual(
scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
(u'Numbers cannot be hex', 24))
self.assertEqual(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
(u'Too deep', 30))
self.assertEqual(
scanstring('{"Missing colon" null}', 2, None, True),
(u'Missing colon', 16))
self.assertEqual(
scanstring('{"Double colon":: null}', 2, None, True),
(u'Double colon', 15))
self.assertEqual(
scanstring('{"Comma instead of colon", null}', 2, None, True),
(u'Comma instead of colon', 25))
self.assertEqual(
scanstring('["Colon instead of comma": false]', 2, None, True),
(u'Colon instead of comma', 25))
self.assertEqual(
scanstring('["Bad value", truth]', 2, None, True),
(u'Bad value', 12))
for c in map(chr, range(0x00, 0x1f)):
self.assertEqual(
scanstring(c + '"', 0, None, False),
(c, 2))
self.assertRaises(
ValueError,
scanstring, c + '"', 0, None, True)
self.assertRaises(ValueError, scanstring, '', 0, None, True)
self.assertRaises(ValueError, scanstring, 'a', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\u', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\u0', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\u01', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\u012', 0, None, True)
self.assertRaises(ValueError, scanstring, '\\u0123', 0, None, True)
if sys.maxunicode > 65535:
self.assertRaises(ValueError,
scanstring, '\\ud834\\u"', 0, None, True)
self.assertRaises(ValueError,
scanstring, '\\ud834\\x0123"', 0, None, True)
self.assertRaises(json.JSONDecodeError, scanstring, '\\u-123"', 0, None, True)
# SJ-PT-23-01: Invalid Handling of Broken Unicode Escape Sequences
self.assertRaises(json.JSONDecodeError, scanstring, '\\u EDD"', 0, None, True)
def test_issue3623(self):
self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1,
"xxx")
self.assertRaises(UnicodeDecodeError,
json.encoder.encode_basestring_ascii, b("xx\xff"))
def test_overflow(self):
# Python 2.5 does not have maxsize, Python 3 does not have maxint
maxsize = getattr(sys, 'maxsize', getattr(sys, 'maxint', None))
assert maxsize is not None
self.assertRaises(OverflowError, json.decoder.scanstring, "xxx",
maxsize + 1)
def test_surrogates(self):
scanstring = json.decoder.scanstring
def assertScan(given, expect, test_utf8=True):
givens = [given]
if not PY3 and test_utf8:
givens.append(given.encode('utf8'))
for given in givens:
(res, count) = scanstring(given, 1, None, True)
self.assertEqual(len(given), count)
self.assertEqual(res, expect)
assertScan(
u'"z\\ud834\\u0079x"',
u'z\ud834yx')
assertScan(
u'"z\\ud834\\udd20x"',
u'z\U0001d120x')
assertScan(
u'"z\\ud834\\ud834\\udd20x"',
u'z\ud834\U0001d120x')
assertScan(
u'"z\\ud834x"',
u'z\ud834x')
assertScan(
u'"z\\udd20x"',
u'z\udd20x')
assertScan(
u'"z\ud834x"',
u'z\ud834x')
# It may look strange to join strings together, but Python is drunk.
# https://gist.github.com/etrepum/5538443
assertScan(
u'"z\\ud834\udd20x12345"',
u''.join([u'z\ud834', u'\udd20x12345']))
assertScan(
u'"z\ud834\\udd20x"',
u''.join([u'z\ud834', u'\udd20x']))
# these have different behavior given UTF8 input, because the surrogate
# pair may be joined (in maxunicode > 65535 builds)
assertScan(
u''.join([u'"z\ud834', u'\udd20x"']),
u''.join([u'z\ud834', u'\udd20x']),
test_utf8=False)
self.assertRaises(ValueError,
scanstring, u'"z\\ud83x"', 1, None, True)
self.assertRaises(ValueError,
scanstring, u'"z\\ud834\\udd2x"', 1, None, True)
| TestScanString |
python | doocs__leetcode | solution/3600-3699/3653.XOR After Range Multiplication Queries I/Solution.py | {
"start": 0,
"end": 280
} | class ____:
def xorAfterQueries(self, nums: List[int], queries: List[List[int]]) -> int:
mod = 10**9 + 7
for l, r, k, v in queries:
for idx in range(l, r + 1, k):
nums[idx] = nums[idx] * v % mod
return reduce(xor, nums)
| Solution |
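The solution as written relies on LeetCode's implicit imports (List, reduce, xor); a self-contained rerun of the same loop on a tiny input:

from functools import reduce
from operator import xor

nums, queries = [1, 1, 1], [[0, 2, 1, 4]]
MOD = 10**9 + 7
for l, r, k, v in queries:
    for idx in range(l, r + 1, k):
        nums[idx] = nums[idx] * v % MOD
print(reduce(xor, nums))  # every element becomes 4, and 4 ^ 4 ^ 4 == 4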
python | pyodide__pyodide | docs/sphinx_pyodide/sphinx_pyodide/lexers.py | {
"start": 259,
"end": 1147
} | class ____(JavascriptLexer):
tokens = {
"root": [
(
r"(pyodide)(\.)(runPython|runPythonAsync)(\()",
bygroups(
Token.Name,
Token.Operator,
Token.Name,
Token.Punctuation,
),
"python-code",
),
inherit,
],
"python-code": [
(
rf"([A-Za-z.]*)({quotemark})((?:\\\\|\\[^\\]|[^{quotemark}\\])*)({quotemark})",
bygroups(
using(JavascriptLexer),
Token.Literal.String,
using(PythonLexer),
Token.Literal.String,
),
"#pop",
)
for quotemark in ["'", '"', "`"]
]
+ [default("#pop")],
}
| PyodideLexer |
python | faif__python-patterns | patterns/structural/front_controller.py | {
"start": 1180,
"end": 1617
} | class ____:
"""front controller"""
def __init__(self) -> None:
self.dispatcher = Dispatcher()
def dispatch_request(self, request: Any) -> None:
"""
This function takes a request object and sends it to the dispatcher.
"""
if isinstance(request, Request):
self.dispatcher.dispatch(request)
else:
print("request must be a Request object")
| RequestController |
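The Dispatcher and Request classes used here live elsewhere in the pattern module; minimal stand-ins (assumptions, not the real definitions) are enough to exercise the controller, whose target name is RequestController:

class Request:
    def __init__(self, kind: str) -> None:
        self.kind = kind

class Dispatcher:
    def dispatch(self, request: "Request") -> None:
        print(f"dispatching a {request.kind} request")

# With those in scope:
#   controller = RequestController()
#   controller.dispatch_request(Request("mobile"))   # routed through the dispatcher
#   controller.dispatch_request("not a request")     # prints "request must be a Request object"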
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 94808,
"end": 95655
} | class ____:
def test_hook_with_extra_default_arg(self):
data = {}
def hook(flow, flow_run, state, foo=42):
data.update(name=hook.__name__, state=state, foo=foo)
@flow(on_completion=[hook])
def foo_flow():
pass
state = foo_flow(return_state=True)
assert data == dict(name="hook", state=state, foo=42)
def test_hook_with_bound_kwargs(self):
data = {}
def hook(flow, flow_run, state, **kwargs):
data.update(name=hook.__name__, state=state, kwargs=kwargs)
hook_with_kwargs = partial(hook, foo=42)
@flow(on_completion=[hook_with_kwargs])
def foo_flow():
pass
state = foo_flow(return_state=True)
assert data == dict(name="hook", state=state, kwargs={"foo": 42})
| TestFlowHooksWithKwargs |
python | huggingface__transformers | tests/cli/test_serve.py | {
"start": 20723,
"end": 26803
} | class ____(ServeCompletionsMixin, unittest.TestCase):
"""Tests the `generate` version of the Completions API."""
@classmethod
def setUpClass(cls):
"""Starts a server for tests to connect to."""
cls.port = 8001
cls.server = Serve(port=cls.port, non_blocking=True)
@classmethod
def tearDownClass(cls):
cls.server.kill_server()
@slow
def test_tool_call(self):
"""Tests that the tool call is correctly handled and that the payloads are correctly structured."""
# TODO: move to the mixin when CB also supports tool calls
request = {
# This model is a small model that's very eager to call tools
# TODO: this is a 4B model. Find a smaller model that's eager to call tools
"model": "Menlo/Jan-nano",
# The request should produce a tool call
"messages": [{"role": "user", "content": "Generate an image of a cat."}],
"stream": True,
"max_tokens": 50,
# Reproducibility
"temperature": 0.0,
# This tool is a copy from the tool in the original tiny-agents demo
"tools": [
{
"function": {
"name": "flux1_schnell_infer",
"parameters": {
"type": "object",
"properties": {
"prompt": {"type": "string"},
"seed": {"type": "number", "description": "numeric value between 0 and 2147483647"},
"randomize_seed": {"type": "boolean", "default": True},
"width": {
"type": "number",
"description": "numeric value between 256 and 2048",
"default": 1024,
},
"height": {
"type": "number",
"description": "numeric value between 256 and 2048",
"default": 1024,
},
"num_inference_steps": {
"type": "number",
"description": "numeric value between 1 and 16",
"default": 4,
},
},
},
"description": "Generate an image using the Flux 1 Schnell Image Generator.",
},
"type": "function",
}
],
}
all_payloads = self.run_server(request)
# The first payload should contain the role
roles = [payload.choices[0].delta.role for payload in all_payloads]
self.assertEqual(roles[0], "assistant")
self.assertTrue(all(role is None for role in roles[1:]))
# All other payloads (except the last one) should be tool call related, for this specific request
contents = [payload.choices[0].delta.content for payload in all_payloads]
self.assertTrue(all(content is None for content in contents))
# The first tool call delta should contain the tool name. The other tool call deltas should contain the tool
# arguments.
tool_calls = [payload.choices[0].delta.tool_calls[0] for payload in all_payloads[1:-1]]
first_tool_call = tool_calls[0]
self.assertEqual(first_tool_call["function"]["name"], "flux1_schnell_infer")
self.assertEqual(first_tool_call["function"]["arguments"], None)
other_tool_calls = tool_calls[1:]
self.assertTrue(all(tool_call["function"]["name"] is None for tool_call in other_tool_calls))
self.assertTrue(all(tool_call["function"]["arguments"] is not None for tool_call in other_tool_calls))
# Finally, the last payload should contain a finish reason
finish_reasons = [payload.choices[0].finish_reason for payload in all_payloads]
# TODO: I think the finish reason for a tool call is different? double check this
self.assertTrue(finish_reasons[-1] in ["stop", "length"])
self.assertTrue(all(reason is None for reason in finish_reasons[:-1]))
def _get_scheduler(serve_command):
# Defensive navigation in case any layer is renamed in the future
cbm = getattr(serve_command, "running_continuous_batching_manager", None)
assert cbm is not None, "ServeCommand has no running_continuous_batching_manager"
bp = getattr(cbm, "batch_processor", None)
assert bp is not None, "running_continuous_batching_manager has no batch_processor"
sched = getattr(bp, "scheduler", None)
assert sched is not None, "batch_processor has no scheduler"
return sched
def _call_healthcheck(base_url: str):
response = None
retries = 10
while retries > 0:
try:
response = httpx.get(f"{base_url}/health")
break
except httpx.NetworkError:
time.sleep(0.1)
retries -= 1
return response
def _open_stream_and_cancel(base_url: str, request_id: str):
with httpx.Client() as s:
with s.stream(
"POST",
f"{base_url}/v1/chat/completions",
headers={"X-Request-ID": request_id},
json={
"model": "Qwen/Qwen2.5-0.5B-Instruct",
"stream": True,
"messages": [{"role": "user", "content": "Count slowly so I can cancel you."}],
},
timeout=30,
) as resp:
assert resp.status_code == 200
wait_for_n_chunks = 3
for i, _ in enumerate(resp.iter_bytes(chunk_size=None)):
if i >= wait_for_n_chunks:
resp.close()
break
@slow # server startup time is slow on our push CI
@require_openai
| ServeCompletionsGenerateIntegrationTest |
python | pytest-dev__pytest | testing/test_assertrewrite.py | {
"start": 38755,
"end": 46209
} | class ____:
def test_sys_meta_path_munged(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_meta_path():
import sys; sys.meta_path = []"""
)
assert pytester.runpytest().ret == 0
def test_write_pyc(self, pytester: Pytester, tmp_path) -> None:
from _pytest.assertion import AssertionState
from _pytest.assertion.rewrite import _write_pyc
config = pytester.parseconfig()
state = AssertionState(config, "rewrite")
tmp_path.joinpath("source.py").touch()
source_path = str(tmp_path)
pycpath = tmp_path.joinpath("pyc")
co = compile("1", "f.py", "single")
assert _write_pyc(state, co, os.stat(source_path), pycpath)
with mock.patch.object(os, "replace", side_effect=OSError):
assert not _write_pyc(state, co, os.stat(source_path), pycpath)
def test_resources_provider_for_loader(self, pytester: Pytester) -> None:
"""
Attempts to load resources from a package should succeed normally,
even when the AssertionRewriteHook is used to load the modules.
See #366 for details.
"""
pytest.importorskip("pkg_resources")
pytester.mkpydir("testpkg")
contents = {
"testpkg/test_pkg": """
import pkg_resources
import pytest
from _pytest.assertion.rewrite import AssertionRewritingHook
def test_load_resource():
assert isinstance(__loader__, AssertionRewritingHook)
res = pkg_resources.resource_string(__name__, 'resource.txt')
res = res.decode('ascii')
assert res == 'Load me please.'
"""
}
pytester.makepyfile(**contents)
pytester.maketxtfile(**{"testpkg/resource": "Load me please."})
result = pytester.runpytest_subprocess()
result.assert_outcomes(passed=1)
def test_read_pyc(self, tmp_path: Path) -> None:
"""
Ensure that the `_read_pyc` can properly deal with corrupted pyc files.
In those circumstances it should just give up instead of generating
an exception that is propagated to the caller.
"""
import py_compile
from _pytest.assertion.rewrite import _read_pyc
source = tmp_path / "source.py"
pyc = Path(str(source) + "c")
source.write_text("def test(): pass", encoding="utf-8")
py_compile.compile(str(source), str(pyc))
contents = pyc.read_bytes()
strip_bytes = 20 # header is around 16 bytes, strip a little more
assert len(contents) > strip_bytes
pyc.write_bytes(contents[:strip_bytes])
assert _read_pyc(source, pyc) is None # no error
def test_read_pyc_success(self, tmp_path: Path, pytester: Pytester) -> None:
"""
Ensure that the _rewrite_test() -> _write_pyc() produces a pyc file
that can be properly read with _read_pyc()
"""
from _pytest.assertion import AssertionState
from _pytest.assertion.rewrite import _read_pyc
from _pytest.assertion.rewrite import _rewrite_test
from _pytest.assertion.rewrite import _write_pyc
config = pytester.parseconfig()
state = AssertionState(config, "rewrite")
fn = tmp_path / "source.py"
pyc = Path(str(fn) + "c")
fn.write_text("def test(): assert True", encoding="utf-8")
source_stat, co = _rewrite_test(fn, config)
_write_pyc(state, co, source_stat, pyc)
assert _read_pyc(fn, pyc, state.trace) is not None
def test_read_pyc_more_invalid(self, tmp_path: Path) -> None:
from _pytest.assertion.rewrite import _read_pyc
source = tmp_path / "source.py"
pyc = tmp_path / "source.pyc"
source_bytes = b"def test(): pass\n"
source.write_bytes(source_bytes)
magic = importlib.util.MAGIC_NUMBER
flags = b"\x00\x00\x00\x00"
mtime = b"\x58\x3c\xb0\x5f"
mtime_int = int.from_bytes(mtime, "little")
os.utime(source, (mtime_int, mtime_int))
size = len(source_bytes).to_bytes(4, "little")
code = marshal.dumps(compile(source_bytes, str(source), "exec"))
# Good header.
pyc.write_bytes(magic + flags + mtime + size + code)
assert _read_pyc(source, pyc, print) is not None
# Too short.
pyc.write_bytes(magic + flags + mtime)
assert _read_pyc(source, pyc, print) is None
# Bad magic.
pyc.write_bytes(b"\x12\x34\x56\x78" + flags + mtime + size + code)
assert _read_pyc(source, pyc, print) is None
# Unsupported flags.
pyc.write_bytes(magic + b"\x00\xff\x00\x00" + mtime + size + code)
assert _read_pyc(source, pyc, print) is None
# Bad mtime.
pyc.write_bytes(magic + flags + b"\x58\x3d\xb0\x5f" + size + code)
assert _read_pyc(source, pyc, print) is None
# Bad size.
pyc.write_bytes(magic + flags + mtime + b"\x99\x00\x00\x00" + code)
assert _read_pyc(source, pyc, print) is None
def test_reload_is_same_and_reloads(self, pytester: Pytester) -> None:
"""Reloading a (collected) module after change picks up the change."""
pytester.makeini(
"""
[pytest]
python_files = *.py
"""
)
pytester.makepyfile(
file="""
def reloaded():
return False
def rewrite_self():
with open(__file__, 'w', encoding='utf-8') as self:
self.write('def reloaded(): return True')
""",
test_fun="""
import sys
from importlib import reload
def test_loader():
import file
assert not file.reloaded()
file.rewrite_self()
assert sys.modules["file"] is reload(file)
assert file.reloaded()
""",
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 passed*"])
def test_get_data_support(self, pytester: Pytester) -> None:
"""Implement optional PEP302 api (#808)."""
path = pytester.mkpydir("foo")
path.joinpath("test_foo.py").write_text(
textwrap.dedent(
"""\
class Test(object):
def test_foo(self):
import pkgutil
data = pkgutil.get_data('foo.test_foo', 'data.txt')
assert data == b'Hey'
"""
),
encoding="utf-8",
)
path.joinpath("data.txt").write_text("Hey", encoding="utf-8")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_issue731(pytester: Pytester) -> None:
pytester.makepyfile(
"""
class LongReprWithBraces(object):
def __repr__(self):
return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'
def some_method(self):
return False
def test_long_repr():
obj = LongReprWithBraces()
assert obj.some_method()
"""
)
result = pytester.runpytest()
result.stdout.no_fnmatch_line("*unbalanced braces*")
| TestAssertionRewriteHookDetails |
python | numpy__numpy | numpy/ma/core.py | {
"start": 217422,
"end": 223042
} | class ____(MaskedArray):
"""
Fake a 'void' object to use for masked array with structured dtypes.
"""
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False, copy=False, subok=True):
copy = None if not copy else True
_data = np.array(data, copy=copy, subok=subok, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
@property
def _data(self):
# Make sure that the _data part is a np.void
return super()._data[()]
def __getitem__(self, indx):
"""
Get the index.
"""
m = self._mask
if isinstance(m[indx], ndarray):
# Can happen when indx is a multi-dimensional field:
# A = ma.masked_array(data=[([0,1],)], mask=[([True,
# False],)], dtype=[("A", ">i2", (2,))])
# x = A[0]; y = x["A"]; then y.mask["A"].size==2
# and we can not say masked/unmasked.
# The result is no longer mvoid!
# See also issue #6724.
return masked_array(
data=self._data[indx], mask=m[indx],
fill_value=self._fill_value[indx],
hard_mask=self._hardmask)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if m is nomask:
return str(self._data)
rdtype = _replace_dtype_fields(self._data.dtype, "O")
data_arr = super()._data
res = data_arr.astype(rdtype)
_recursive_printoption(res, self._mask, masked_print_option)
return str(res)
__repr__ = __str__
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
yield from _data
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : array_like, optional
The value to use for invalid entries. Can be scalar or
non-scalar. If latter is the case, the filled array should
be broadcastable over input array. Default is None, in
which case the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(
data=[[1.0, --, --],
[--, 1.0, --],
[--, --, 1.0]],
mask=[[False, True, True],
[ True, False, True],
[ True, True, False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray # backward compatibility
| mvoid |
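Indexing a structured masked array at a single position yields one of these objects; a short demonstration with numpy installed (the field names are arbitrary):

import numpy.ma as ma

a = ma.masked_array(data=[(1, 2.0), (3, 4.0)],
                    mask=[(False, True), (False, False)],
                    dtype=[("x", int), ("y", float)])
row = a[0]           # an mvoid scalar: one masked field, one ordinary one
print(row)           # (1, --)
print(row.tolist())  # (1, None) -- masked fields come back as None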
python | PrefectHQ__prefect | tests/server/utilities/test_database.py | {
"start": 2896,
"end": 6277
} | class ____:
async def test_write_to_Pydantic(self, session: AsyncSession):
p_model = PydanticModel(x=100)
s_model = SQLPydanticModel(data=p_model)
session.add(s_model)
await session.flush()
# clear cache
session.expire_all()
query = await session.scalars(sa.select(SQLPydanticModel))
results = query.all()
assert len(results) == 1
assert isinstance(results[0].data, PydanticModel)
assert results[0].data.y < datetime.datetime.now(datetime.timezone.utc)
async def test_write_dict_to_Pydantic(self, session: AsyncSession):
p_model = PydanticModel(x=100)
s_model = SQLPydanticModel(data=p_model.model_dump())
session.add(s_model)
await session.flush()
# clear cache
session.expire_all()
query = await session.scalars(sa.select(SQLPydanticModel))
results = query.all()
assert len(results) == 1
assert isinstance(results[0].data, PydanticModel)
async def test_nullable_Pydantic(self, session: AsyncSession):
s_model = SQLPydanticModel(data=None)
session.add(s_model)
await session.flush()
# clear cache
session.expire_all()
query = await session.scalars(sa.select(SQLPydanticModel))
results = query.all()
assert len(results) == 1
assert results[0].data is None
async def test_generic_model(self, session: AsyncSession):
p_model = PydanticModel(x=100)
s_model = SQLPydanticModel(data_list=[p_model])
session.add(s_model)
await session.flush()
# clear cache
session.expire_all()
query = await session.scalars(sa.select(SQLPydanticModel))
results = query.all()
assert len(results) == 1
assert results[0].data_list is not None
assert isinstance(results[0].data_list[0], PydanticModel)
assert results[0].data_list == [p_model]
async def test_generic_model_validates(self, session: AsyncSession):
p_model = PydanticModel(x=100)
s_model = SQLPydanticModel(data_list=p_model)
session.add(s_model)
with pytest.raises(sa.exc.StatementError, match="(validation error)"):
await session.flush()
async def test_write_to_enum_field(self, session: AsyncSession):
s_model = SQLPydanticModel(color="RED")
session.add(s_model)
await session.flush()
async def test_write_to_enum_field_is_validated(self, session: AsyncSession):
s_model = SQLPydanticModel(color="GREEN")
session.add(s_model)
with pytest.raises(sa.exc.StatementError, match="(validation error)"):
await session.flush()
async def test_enum_field_is_a_string_in_database(self, session: AsyncSession):
s_model = SQLPydanticModel(color="RED")
session.add(s_model)
await session.flush()
        # write directly to the column, since at the database level it is an arbitrary string
await session.execute(
sa.text(
f"""
UPDATE {SQLPydanticModel.__tablename__}
SET color = 'GREEN';
"""
)
)
# enum enforced by application
stmt = sa.select(SQLPydanticModel)
with pytest.raises(pydantic.ValidationError):
await session.execute(stmt)
| TestPydantic |
python | ray-project__ray | release/serve_tests/workloads/locust_utils.py | {
"start": 563,
"end": 5989
} | class ____:
def __init__(
self,
worker_type: str,
host_url: str,
token: str,
expected_num_workers: int = None,
stages: List[LocustStage] = None,
wait_for_workers_timeout_s: float = None,
data: Any = None,
master_address: str = None,
):
self.worker_type = worker_type
self.host_url = host_url
self.token = token
self.expected_num_workers = expected_num_workers
self.stages = stages
self.wait_for_workers_timeout_s = wait_for_workers_timeout_s
self.data = data
self.master_address = master_address
def run(self):
# Create a temporary file for results
import tempfile
results_file = tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".json"
)
results_file.close()
# Prepare the subprocess script
if self.worker_type == "master":
script = f"""
import sys
import json
from ray.serve._private.benchmarks.locust_utils import run_locust_master, run_locust_worker, LocustStage
stages = json.loads(sys.argv[1])
stages = [LocustStage(**stage) for stage in stages]
results = run_locust_master(
host_url="{self.host_url}",
token="{self.token}",
expected_num_workers={self.expected_num_workers},
stages=stages,
wait_for_workers_timeout_s={self.wait_for_workers_timeout_s}
)
with open("{results_file.name}", 'w') as f:
json.dump(results, f)
"""
stages = json.dumps([asdict(stage) for stage in self.stages])
cmd_args = [sys.executable, "-c", script, stages]
else:
script = f"""
import sys
import json
from ray.serve._private.benchmarks.locust_utils import run_locust_master, run_locust_worker, LocustStage
data = sys.argv[1]
results = run_locust_worker(
master_address="{self.master_address}",
host_url="{self.host_url}",
token="{self.token}",
data=data,
)
"""
data = json.dumps(self.data)
cmd_args = [sys.executable, "-c", script, data]
# Start the Locust process
self.process = subprocess.Popen(
cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
bufsize=1,
)
print(f"Started {self.worker_type} subprocess ({self.process.pid})")
try:
# Wait for the process to complete first
for line in self.process.stdout: # yields as the child prints
sys.stdout.write(line) # stream to our stdout
return_code = self.process.wait()
if return_code != 0:
# Clean up the results file on error
try:
os.unlink(results_file.name)
except OSError:
pass
raise RuntimeError(f"Subprocess failed with return code {return_code}.")
# Read the result from the results file
with open(results_file.name, "r") as f:
result_data = f.read()
if result_data:
result_data = json.loads(result_data)
stats_in_stages = [
PerformanceStats(**stage)
for stage in result_data.pop("stats_in_stages")
]
result = LocustTestResults(
**result_data, stats_in_stages=stats_in_stages
)
return result
finally:
os.unlink(results_file.name)
def run_locust_load_test(config: LocustLoadTestConfig) -> LocustTestResults:
"""Runs a Locust load test against a service.
Returns:
Performance results (e.g. throughput and latency) from the test.
Raises:
RuntimeError if any requests failed during the load test.
"""
logger.info(f"Spawning {config.num_workers} Locust worker Ray tasks.")
master_address = ray.util.get_node_ip_address()
worker_refs = []
# Start Locust workers
for i in range(config.num_workers):
locust_worker = LocustProcess.options(name=f"LocustWorker-{i}").remote(
worker_type="worker",
host_url=config.host_url,
token=config.auth_token,
master_address=master_address,
data=config.data,
)
worker_refs.append(locust_worker.run.remote())
print(f"Started worker {i}")
# Start Locust master
master_worker = LocustProcess.options(name="LocustMaster").remote(
worker_type="master",
host_url=config.host_url,
token=config.auth_token,
expected_num_workers=config.num_workers,
stages=config.stages,
wait_for_workers_timeout_s=config.wait_for_workers_timeout_s,
)
master_ref = master_worker.run.remote()
# Collect results and metrics
stats: LocustTestResults = ray.get(master_ref)
ray.get(worker_refs)
return stats
if __name__ == "__main__":
ray.init(address="auto")
results = run_locust_load_test(
LocustLoadTestConfig(
num_workers=9,
host_url="https://services-canary-pinger-aws-zugs7.cld-kvedzwag2qa8i5bj.s.anyscaleuserdata.com/info",
auth_token="v9M8jb3tBbHOGoWrg7X1fCwF8wYn7gqZR5VZ1_h4t50",
data=None,
stages=[LocustStage(duration_s=10, users=10, spawn_rate=1)],
)
)
print(results)
| LocustProcess |
python | google__pytype | pytype/directors/directors_test.py | {
"start": 3755,
"end": 10927
} | class ____(DirectorTestCase):
def test_ignore_globally(self):
self._create("", ["my-error"])
self._should_report(False, 42, error_name="my-error")
def test_ignore_one_line(self):
self._create("""
# line 2
x = 123 # type: ignore
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_ignore_one_line_mypy_style(self):
self._create("""
# line 2
x = 123 # type: ignore[arg-type]
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_utf8(self):
self._create("""
x = u"abc□def\\n"
""")
def test_ignore_extra_characters(self):
self._create("""
# line 2
x = 123 # # type: ignore
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_ignore_until_end(self):
self._create("""
# line 2
# type: ignore
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(False, 4)
def test_out_of_scope(self):
self._create("""
# type: ignore
""")
self._should_report(False, 2)
self._should_report(True, 2, filename=None) # No file.
self._should_report(True, 2, filename="some_other_file.py") # Other file.
self._should_report(False, None) # No line number.
self._should_report(False, 0) # line number 0.
def test_disable(self):
self._create("""
# line 2
x = 123 # pytype: disable=test-error
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_disable_extra_characters(self):
self._create("""
# line 2
x = 123 # # pytype: disable=test-error
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_disable_until_end(self):
self._create("""
# line 2
# pytype: disable=test-error
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(False, 4)
def test_enable_after_disable(self):
self._create("""
# line 2
# pytype: disable=test-error
# line 4
# pytype: enable=test-error
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(False, 4)
self._should_report(True, 5)
self._should_report(True, 100)
def test_enable_one_line(self):
self._create("""
# line 2
# pytype: disable=test-error
# line 4
x = 123 # pytype: enable=test-error
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(False, 4)
self._should_report(True, 5)
self._should_report(False, 6)
self._should_report(False, 100)
def test_disable_other_error(self):
self._create("""
# line 2
x = 123 # pytype: disable=test-other-error
# line 4
""")
self._should_report(True, 2)
self._should_report(True, 3)
self._should_report(False, 3, error_name="test-other-error")
self._should_report(True, 4)
def test_disable_multiple_error(self):
self._create("""
# line 2
x = 123 # pytype: disable=test-error,test-other-error
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(False, 3, error_name="test-other-error")
self._should_report(True, 4)
def test_disable_all(self):
self._create("""
# line 2
x = 123 # pytype: disable=*
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_multiple_directives(self):
self._create("""
x = 123 # sometool: directive=whatever # pytype: disable=test-error
""")
self._should_report(False, 2)
def test_error_at_line_0(self):
self._create("""
x = "foo"
# pytype: disable=attribute-error
""")
self._should_report(False, 0, error_name="attribute-error")
def test_disable_without_space(self):
self._create("""
# line 2
x = 123 # pytype:disable=test-error
# line 4
""")
self._should_report(True, 2)
self._should_report(False, 3)
self._should_report(True, 4)
def test_invalid_disable(self):
def check_warning(message_regex, text):
self._create(text)
self.assertLessEqual(1, len(self._errorlog))
error = list(self._errorlog)[0]
self.assertEqual(_TEST_FILENAME, error._filename)
self.assertEqual(1, error.line)
self.assertRegex(str(error), message_regex)
check_warning(
"Unknown pytype directive.*disalbe.*", "# pytype: disalbe=test-error"
)
check_warning(
"Invalid error name.*bad-error-name.*",
"# pytype: disable=bad-error-name",
)
check_warning("Invalid directive syntax", "# pytype: disable")
check_warning("Invalid directive syntax", "# pytype: ")
check_warning(
"Unknown pytype directive.*foo.*",
"# pytype: disable=test-error foo=bar",
)
# Spaces aren't allowed in the comma-separated value list.
check_warning(
"Invalid directive syntax",
"# pytype: disable=test-error ,test-other-error",
)
# This will actually result in two warnings: the first because the
# empty string isn't a valid error name, the second because
# test-other-error isn't a valid command. We only verify the first
# warning.
check_warning(
"Invalid error name", "# pytype: disable=test-error, test-other-error"
)
def test_type_comments(self):
self._create("""
x = None # type: int
y = None # allow extra comments # type: str
z = None # type: int # and extra comments after, too
a = None # type:int # without a space
# type: (int, float) -> str
# comment with embedded # type: should-be-discarded
""")
self.assertEqual(
{
2: "int",
3: "str",
4: "int",
5: "int",
6: "(int, float) -> str",
},
self._director.type_comments,
)
def test_strings_that_look_like_directives(self):
# Line 2 is a string, not a type comment.
# Line 4 has a string and a comment.
self._create("""
s = "# type: int"
x = None # type: float
y = "# type: int" # type: str
""")
self.assertEqual(
{
3: "float",
4: "str",
},
self._director.type_comments,
)
def test_huge_string(self):
# Tests that the director doesn't choke on this huge, repetitive file.
src = ["x = ("]
for i in range(2000):
src.append(f" 'string{i}'")
src.append(")")
self._create("\n".join(src))
def test_try(self):
self._create("""
try:
x = None # type: int
except Exception:
x = None # type: str
else:
x = None # type: float
""")
self.assertEqual(
{
3: "int",
5: "str",
7: "float",
},
self._director.type_comments,
)
| DirectorTest |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 52034,
"end": 56659
} | class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoPromptEncoderConfig):
super().__init__()
self.shared_embedding = Sam3TrackerVideoPositionalEmbedding(config)
self.mask_embed = Sam3TrackerVideoMaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
        # torch.where and expanding the labels tensor are required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
        # This is required for the ONNX export. The dtype and device need to be explicitly
        # specified, as otherwise torch.onnx.export interprets them as double
point_embedding = torch.where(
labels[..., None] != -10,
point_embedding,
torch.zeros_like(point_embedding),
)
# Add point embeddings for labels >= 0
point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.view(*boxes.shape[:2], 2, 2)
# add padding point for consistency with the original implementation
coords = torch.nn.functional.pad(coords, (0, 0, 0, 1), mode="constant", value=0)
corner_embedding = self.shared_embedding(coords, (self.input_image_size, self.input_image_size))
corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
corner_embedding[:, :, 2, :] = self.not_a_point_embed.weight.expand_as(corner_embedding[:, :, 2, :])
return corner_embedding
def forward(
self,
input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
input_labels: Optional[torch.Tensor],
input_boxes: Optional[torch.Tensor],
input_masks: Optional[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense embeddings.
Args:
points (`torch.Tensor`, *optional*):
point coordinates and labels to embed.
boxes (`torch.Tensor`, *optional*):
boxes to embed
masks (`torch.Tensor`, *optional*):
masks to embed
"""
sparse_embeddings = None
batch_size = 1
if input_points is not None:
batch_size = input_points.shape[0]
if input_labels is None:
raise ValueError("If points are provided, labels must also be provided.")
point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
sparse_embeddings = point_embeddings
if input_boxes is not None:
batch_size = input_boxes.shape[0]
box_embeddings = self._embed_boxes(input_boxes)
if sparse_embeddings is None:
sparse_embeddings = box_embeddings
else:
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
if input_masks is not None:
dense_embeddings = self.mask_embed(input_masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
| Sam3TrackerVideoPromptEncoder |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 49955,
"end": 53149
} | class ____(NonStrictDataModel):
"""
:param view: View params
:type view: View
:param destination: Storage id. This is where output files will be stored.
:type destination: str
:param model: Model id.
:type model: str
:param result: Task result. Values: 'success', 'failure'
:type result: str
:param error: Last error text
:type error: str
"""
_schema = {
"properties": {
"destination": {
"description": "Storage id. This is where output files will be stored.",
"type": ["string", "null"],
},
"error": {"description": "Last error text", "type": ["string", "null"]},
"model": {"description": "Model id.", "type": ["string", "null"]},
"result": {
"description": "Task result. Values: 'success', 'failure'",
"type": ["string", "null"],
},
"view": {
"description": "View params",
"oneOf": [{"$ref": "#/definitions/view"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self, view=None, destination=None, model=None, result=None, error=None, **kwargs
):
super(Output, self).__init__(**kwargs)
self.view = view
self.destination = destination
self.model = model
self.result = result
self.error = error
@schema_property("view")
def view(self):
return self._property_view
@view.setter
def view(self, value):
if value is None:
self._property_view = None
return
if isinstance(value, dict):
value = View.from_dict(value)
else:
self.assert_isinstance(value, "view", View)
self._property_view = value
@schema_property("destination")
def destination(self):
return self._property_destination
@destination.setter
def destination(self, value):
if value is None:
self._property_destination = None
return
self.assert_isinstance(value, "destination", six.string_types)
self._property_destination = value
@schema_property("model")
def model(self):
return self._property_model
@model.setter
def model(self, value):
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("result")
def result(self):
return self._property_result
@result.setter
def result(self, value):
if value is None:
self._property_result = None
return
self.assert_isinstance(value, "result", six.string_types)
self._property_result = value
@schema_property("error")
def error(self):
return self._property_error
@error.setter
def error(self, value):
if value is None:
self._property_error = None
return
self.assert_isinstance(value, "error", six.string_types)
self._property_error = value
| Output |
python | huggingface__transformers | src/transformers/models/phi3/modeling_phi3.py | {
"start": 13548,
"end": 15612
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Phi3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
self.mlp = Phi3MLP(config)
self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.config = config
self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
return hidden_states
@auto_docstring
| Phi3DecoderLayer |
python | doocs__leetcode | solution/2800-2899/2849.Determine if a Cell Is Reachable at a Given Time/Solution.py | {
"start": 0,
"end": 245
} | class ____:
def isReachableAtTime(self, sx: int, sy: int, fx: int, fy: int, t: int) -> bool:
if sx == fx and sy == fy:
return t != 1
dx = abs(sx - fx)
dy = abs(sy - fy)
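        # Movement is forced every second, so the minimum arrival time is the
        # Chebyshev distance max(dx, dy); any surplus time can be absorbed by
        # detouring, leaving start == target with t == 1 (handled above) as the
        # only extra special case.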
return max(dx, dy) <= t
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/ignores.py | {
"start": 383,
"end": 2790
} | class ____(SanityVersionNeutral):
"""Sanity test for sanity test ignore entries."""
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
sanity_ignore = SanityIgnoreParser.load(args)
messages: list[SanityMessage] = []
# parse errors
messages.extend(SanityMessage(
message=message,
path=sanity_ignore.relative_path,
line=line,
column=column,
confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
) for line, column, message in sanity_ignore.parse_errors)
# file not found errors
messages.extend(SanityMessage(
message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
path=sanity_ignore.relative_path,
line=line,
column=1,
confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
) for line, path in sanity_ignore.file_not_found_errors)
# conflicting ignores and skips
for test_name, ignores in sanity_ignore.ignores.items():
for ignore_path, ignore_entry in ignores.items():
skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
if not skip_line_no:
continue
for ignore_line_no in ignore_entry.values():
messages.append(SanityMessage(
message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
path=sanity_ignore.relative_path,
line=ignore_line_no,
column=1,
confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
))
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
| IgnoresTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trends.py | {
"start": 1930,
"end": 2840
} | class ____(DiscoverQueryBuilder):
def convert_aggregate_filter_to_condition(
self, aggregate_filter: AggregateFilter
) -> WhereType | None:
name = aggregate_filter.key.name
if self.params.aliases is not None and name in self.params.aliases:
return self.params.aliases[name].converter(aggregate_filter)
else:
return super().convert_aggregate_filter_to_condition(aggregate_filter)
def resolve_function(
self,
function: str,
match: Match[str] | None = None,
resolve_only=False,
overwrite_alias: str | None = None,
) -> SelectType:
if self.params.aliases is not None and function in self.params.aliases:
return self.params.aliases[function].resolved_function
else:
return super().resolve_function(function, match, resolve_only, overwrite_alias)
| TrendQueryBuilder |
python | scrapy__scrapy | tests/spiders.py | {
"start": 4184,
"end": 4495
} | class ____(SimpleSpider):
name = "asyncdef_asyncio_return"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}")
return [{"id": 1}, {"id": 2}]
| AsyncDefAsyncioReturnSpider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/comparison2.py | {
"start": 1375,
"end": 1825
} | class ____:
bar: str
def func3(x: DC1):
# This should generate an error if reportUnnecessaryComparison is enabled.
if x == 42:
...
async def func4() -> bool:
return True
async def func5() -> None:
# This should generate an error if reportUnnecessaryComparison is enabled.
if func4():
pass
def func6() -> Coroutine[Any, Any, int] | None: ...
def func7():
coro = func6()
if coro:
pass
| DC1 |
python | kamyu104__LeetCode-Solutions | Python/delete-characters-to-make-fancy-string.py | {
"start": 48,
"end": 421
} | class ____(object):
def makeFancyString(self, s):
"""
:type s: str
:rtype: str
"""
s = list(s)
cnt = j = 0
for i, c in enumerate(s):
cnt = cnt+1 if i >= 1 and c == s[i-1] else 1
if cnt < 3:
s[j] = c
j += 1
s[:] = s[:j]
return "".join(s)
| Solution |
python | django__django | tests/schema/models.py | {
"start": 694,
"end": 832
} | class ____(models.Model):
text_field = models.TextField(db_index=True)
class Meta:
apps = new_apps
| AuthorTextFieldWithIndex |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_default_row02.py | {
"start": 315,
"end": 977
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_row02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_default_row(hide_unused_rows=True)
worksheet.write("A1", "Foo")
worksheet.write("A10", "Bar")
for row in range(1, 8 + 1):
worksheet.set_row(row, 15)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | mlflow__mlflow | tests/models/test_utils.py | {
"start": 966,
"end": 23012
} | class ____(NamedTuple):
model: Any
inference_data: Any
@pytest.fixture(scope="module")
def sklearn_knn_model():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
knn_model = knn.KNeighborsClassifier()
knn_model.fit(X, y)
return ModelWithData(model=knn_model, inference_data=X)
def random_int(lo=1, hi=1000000000):
return random.randint(int(lo), int(hi))
def test_adding_libraries_to_model_default(sklearn_knn_model):
model_name = f"wheels-test-{random_int()}"
artifact_path = "model"
model_uri = f"models:/{model_name}/1"
wheeled_model_uri = f"models:/{model_name}/2"
# Log a model
with mlflow.start_run():
run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name=model_name,
)
wheeled_model_info = add_libraries_to_model(model_uri)
assert wheeled_model_info.run_id == run_id
# Verify new model version created
wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
assert wheeled_model_version.run_id == run_id
assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_new_run(sklearn_knn_model):
model_name = f"wheels-test-{random_int()}"
artifact_path = "model"
model_uri = f"models:/{model_name}/1"
wheeled_model_uri = f"models:/{model_name}/2"
# Log a model
with mlflow.start_run():
original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name=model_name,
)
with mlflow.start_run():
wheeled_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
wheeled_model_info = add_libraries_to_model(model_uri)
assert original_run_id != wheeled_run_id
assert wheeled_model_info.run_id == wheeled_run_id
# Verify new model version created
wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
assert wheeled_model_version.run_id == wheeled_run_id
assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_run_id_passed(sklearn_knn_model):
model_name = f"wheels-test-{random_int()}"
artifact_path = "model"
model_uri = f"models:/{model_name}/1"
wheeled_model_uri = f"models:/{model_name}/2"
# Log a model
with mlflow.start_run():
original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name=model_name,
)
with mlflow.start_run():
wheeled_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
wheeled_model_info = add_libraries_to_model(model_uri, run_id=wheeled_run_id)
assert original_run_id != wheeled_run_id
assert wheeled_model_info.run_id == wheeled_run_id
# Verify new model version created
wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
assert wheeled_model_version.run_id == wheeled_run_id
assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_new_model_name(sklearn_knn_model):
model_name = f"wheels-test-{random_int()}"
wheeled_model_name = f"wheels-test-{random_int()}"
artifact_path = "model"
model_uri = f"models:/{model_name}/1"
wheeled_model_uri = f"models:/{wheeled_model_name}/1"
# Log a model
with mlflow.start_run():
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name=model_name,
)
with mlflow.start_run():
new_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
wheeled_model_info = add_libraries_to_model(
model_uri, registered_model_name=wheeled_model_name
)
assert wheeled_model_info.run_id == new_run_id
# Verify new model version created
wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
assert wheeled_model_version.run_id == new_run_id
assert wheeled_model_version.name == wheeled_model_name
assert wheeled_model_name != model_name
def test_adding_libraries_to_model_when_version_source_None(sklearn_knn_model):
model_name = f"wheels-test-{random_int()}"
artifact_path = "model"
model_uri = f"models:/{model_name}/1"
# Log a model
with mlflow.start_run():
original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
mlflow.sklearn.log_model(
sklearn_knn_model.model,
name=artifact_path,
registered_model_name=model_name,
)
model_version_without_source = ModelVersion(name=model_name, version=1, creation_timestamp=124)
assert model_version_without_source.run_id is None
with mock.patch.object(
MlflowClient, "get_model_version", return_value=model_version_without_source
) as mlflow_client_mock:
wheeled_model_info = add_libraries_to_model(model_uri)
assert wheeled_model_info.run_id is not None
assert wheeled_model_info.run_id != original_run_id
mlflow_client_mock.assert_called_once_with(model_name, "1")
@pytest.mark.parametrize(
("data", "data_type"),
[
("string", DataType.string),
(np.int32(1), DataType.integer),
(np.int32(1), DataType.long),
(np.int32(1), DataType.double),
(True, DataType.boolean),
(1.0, DataType.double),
(np.float32(0.1), DataType.float),
(np.float32(0.1), DataType.double),
(np.int64(100), DataType.long),
(np.datetime64("2023-10-13 00:00:00"), DataType.datetime),
],
)
def test_enforce_datatype(data, data_type):
assert _enforce_datatype(data, data_type) == data
def test_enforce_datatype_with_errors():
with pytest.raises(MlflowException, match=r"Expected dtype to be DataType, got str"):
_enforce_datatype("string", "string")
with pytest.raises(
MlflowException, match=r"Failed to enforce schema of data `123` with dtype `string`"
):
_enforce_datatype(123, DataType.string)
def test_enforce_object():
data = {
"a": "some_sentence",
"b": b"some_bytes",
"c": ["sentence1", "sentence2"],
"d": {"str": "value", "arr": [0.1, 0.2]},
}
obj = Object(
[
Property("a", DataType.string),
Property("b", DataType.binary, required=False),
Property("c", Array(DataType.string)),
Property(
"d",
Object(
[
Property("str", DataType.string),
Property("arr", Array(DataType.double), required=False),
]
),
),
]
)
assert _enforce_object(data, obj) == data
data = {"a": "some_sentence", "c": ["sentence1", "sentence2"], "d": {"str": "some_value"}}
assert _enforce_object(data, obj) == data
def test_enforce_object_with_errors():
with pytest.raises(MlflowException, match=r"Expected data to be dictionary, got list"):
_enforce_object(["some_sentence"], Object([Property("a", DataType.string)]))
with pytest.raises(MlflowException, match=r"Expected obj to be Object, got Property"):
_enforce_object({"a": "some_sentence"}, Property("a", DataType.string))
obj = Object([Property("a", DataType.string), Property("b", DataType.string, required=False)])
with pytest.raises(MlflowException, match=r"Missing required properties: {'a'}"):
_enforce_object({}, obj)
with pytest.raises(
MlflowException, match=r"Invalid properties not defined in the schema found: {'c'}"
):
_enforce_object({"a": "some_sentence", "c": "some_sentence"}, obj)
with pytest.raises(
MlflowException,
match=r"Failed to enforce schema for key `a`. Expected type string, received type int",
):
_enforce_object({"a": 1}, obj)
def test_enforce_property():
data = "some_sentence"
prop = Property("a", DataType.string)
assert _enforce_property(data, prop) == data
data = ["some_sentence1", "some_sentence2"]
prop = Property("a", Array(DataType.string))
assert _enforce_property(data, prop) == data
prop = Property("a", Array(DataType.binary))
assert _enforce_property(data, prop) == [b"some_sentence1", b"some_sentence2"]
data = np.array([np.int32(1), np.int32(2)])
prop = Property("a", Array(DataType.integer))
assert (_enforce_property(data, prop) == data).all()
data = {
"a": "some_sentence",
"b": b"some_bytes",
"c": ["sentence1", "sentence2"],
"d": {"str": "value", "arr": [0.1, 0.2]},
}
prop = Property(
"any_name",
Object(
[
Property("a", DataType.string),
Property("b", DataType.binary, required=False),
Property("c", Array(DataType.string), required=False),
Property(
"d",
Object(
[
Property("str", DataType.string),
Property("arr", Array(DataType.double), required=False),
]
),
),
]
),
)
assert _enforce_property(data, prop) == data
data = {"a": "some_sentence", "d": {"str": "some_value"}}
assert _enforce_property(data, prop) == data
def test_enforce_property_with_errors():
with pytest.raises(
MlflowException, match=r"Failed to enforce schema of data `123` with dtype `string`"
):
_enforce_property(123, Property("a", DataType.string))
with pytest.raises(MlflowException, match=r"Missing required properties: {'a'}"):
_enforce_property(
{"b": ["some_sentence1", "some_sentence2"]},
Property(
"any_name",
Object([Property("a", DataType.string), Property("b", Array(DataType.string))]),
),
)
with pytest.raises(
MlflowException,
match=r"Failed to enforce schema for key `a`. Expected type string, received type list",
):
_enforce_property(
{"a": ["some_sentence1", "some_sentence2"]},
Property("any_name", Object([Property("a", DataType.string)])),
)
@pytest.mark.parametrize(
("data", "schema"),
[
# 1. Flat list
(["some_sentence1", "some_sentence2"], Array(DataType.string)),
# 2. Nested list
(
[
[["a", "b"], ["c", "d"]],
[["e", "f", "g"], ["h"]],
[[]],
],
Array(Array(Array(DataType.string))),
),
# 3. Array of Object
(
[
{"a": "some_sentence1", "b": "some_sentence2"},
{"a": "some_sentence3", "c": ["some_sentence4", "some_sentence5"]},
],
Array(
Object(
[
Property("a", DataType.string),
Property("b", DataType.string, required=False),
Property("c", Array(DataType.string), required=False),
]
)
),
),
# 4. Empty list
([], Array(DataType.string)),
],
)
def test_enforce_array_on_list(data, schema):
assert _enforce_array(data, schema) == data
@pytest.mark.parametrize(
("data", "schema"),
[
# 1. 1D array
(np.array(["some_sentence1", "some_sentence2"]), Array(DataType.string)),
# 2. 2D array
(
np.array(
[
["a", "b"],
["c", "d"],
]
),
Array(Array(DataType.string)),
),
# 3. Empty array
(np.array([[], []]), Array(Array(DataType.string))),
],
)
def test_enforce_array_on_numpy_array(data, schema):
assert (_enforce_array(data, schema) == data).all()
def test_enforce_array_with_errors():
with pytest.raises(MlflowException, match=r"Expected data to be list or numpy array, got str"):
_enforce_array("abc", Array(DataType.string))
with pytest.raises(MlflowException, match=r"Incompatible input types"):
_enforce_array([123, 456, 789], Array(DataType.string))
# Nested array with mixed type elements
with pytest.raises(MlflowException, match=r"Incompatible input types"):
_enforce_array([["a", "b"], [1, 2]], Array(Array(DataType.string)))
# Nested array with different nest level
with pytest.raises(MlflowException, match=r"Expected data to be list or numpy array, got str"):
_enforce_array([["a", "b"], "c"], Array(Array(DataType.string)))
    # Missing properties in Object
with pytest.raises(MlflowException, match=r"Missing required properties: {'b'}"):
_enforce_array(
[
{"a": "some_sentence1", "b": "some_sentence2"},
{"a": "some_sentence3", "c": ["some_sentence4", "some_sentence5"]},
],
Array(Object([Property("a", DataType.string), Property("b", DataType.string)])),
)
# Extra properties
with pytest.raises(
MlflowException, match=r"Invalid properties not defined in the schema found: {'c'}"
):
_enforce_array(
[
{"a": "some_sentence1", "b": "some_sentence2"},
{"a": "some_sentence3", "c": ["some_sentence4", "some_sentence5"]},
],
Array(
Object(
[Property("a", DataType.string), Property("b", DataType.string, required=False)]
)
),
)
def test_model_code_validation():
# Invalid code with dbutils
invalid_code = "dbutils.library.restartPython()\nsome_python_variable = 5"
with mock.patch("mlflow.models.utils._logger.warning") as mock_warning:
_validate_model_code_from_notebook(invalid_code)
mock_warning.assert_called_once_with(
"The model file uses 'dbutils' commands which are not supported. To ensure your "
"code functions correctly, make sure that it does not rely on these dbutils "
"commands for correctness."
)
# Code with commented magic commands displays warning
warning_code = "# dbutils.library.restartPython()\n# MAGIC %run ../wheel_installer"
with mock.patch("mlflow.models.utils._logger.warning") as mock_warning:
_validate_model_code_from_notebook(warning_code)
mock_warning.assert_called_once_with(
"The model file uses magic commands which have been commented out. To ensure your code "
"functions correctly, make sure that it does not rely on these magic commands for "
"correctness."
)
# Code with commented pip magic commands does not warn
warning_code = "# MAGIC %pip install mlflow"
with mock.patch("mlflow.models.utils._logger.warning") as mock_warning:
_validate_model_code_from_notebook(warning_code)
mock_warning.assert_not_called()
# Test valid code
valid_code = "some_valid_python_code = 'valid'"
validated_code = _validate_model_code_from_notebook(valid_code).decode("utf-8")
assert validated_code == valid_code
# Test uncommented magic commands
code_with_magic_command = (
"valid_python_code = 'valid'\n%pip install sqlparse\nvalid_python_code = 'valid'\n# Comment"
)
expected_validated_code = (
"valid_python_code = 'valid'\n# MAGIC %pip install sqlparse\nvalid_python_code = "
"'valid'\n# Comment"
)
validated_code_with_magic_command = _validate_model_code_from_notebook(
code_with_magic_command
).decode("utf-8")
assert validated_code_with_magic_command == expected_validated_code
def test_config_context():
with _config_context("tests/langchain/config.yml"):
assert mlflow.models.model_config.__mlflow_model_config__ == "tests/langchain/config.yml"
assert mlflow.models.model_config.__mlflow_model_config__ is None
def test_flatten_nested_params():
nested_params = {
"a": 1,
"b": {"c": 2, "d": {"e": 3}},
"f": {"g": {"h": 4}},
}
expected_flattened_params = {
"a": 1,
"b.c": 2,
"b.d.e": 3,
"f.g.h": 4,
}
assert _flatten_nested_params(nested_params, sep=".") == expected_flattened_params
assert _flatten_nested_params(nested_params, sep="/") == {
"a": 1,
"b/c": 2,
"b/d/e": 3,
"f/g/h": 4,
}
assert _flatten_nested_params({}) == {}
params = {"a": 1, "b": 2, "c": 3}
assert _flatten_nested_params(params) == params
params = {
"a": 1,
"b": {"c": 2, "d": {"e": 3, "f": [1, 2, 3]}, "g": "hello"},
"h": {"i": None},
}
expected_flattened_params = {
"a": 1,
"b/c": 2,
"b/d/e": 3,
"b/d/f": [1, 2, 3],
"b/g": "hello",
"h/i": None,
}
assert _flatten_nested_params(params) == expected_flattened_params
nested_params = {1: {2: {3: 4}}, "a": {"b": {"c": 5}}}
expected_flattened_params_mixed = {
"1/2/3": 4,
"a/b/c": 5,
}
assert _flatten_nested_params(nested_params) == expected_flattened_params_mixed
rag_params = {
"workspace_url": "https://e2-dogfood.staging.cloud.databricks.com",
"vector_search_endpoint_name": "dbdemos_vs_endpoint",
"vector_search_index": "monitoring.rag.databricks_docs_index",
"embedding_model_endpoint_name": "databricks-bge-large-en",
"embedding_model_query_instructions": "Represent this sentence for searching",
"llm_model": "databricks-dbrx-instruct",
"llm_prompt_template": "You are a trustful assistant for Databricks users.",
"retriever_config": {"k": 5, "use_mmr": "false"},
"llm_parameters": {"temperature": 0.01, "max_tokens": 200},
"llm_prompt_template_variables": ["chat_history", "context", "question"],
"secret_scope": "dbdemos",
"secret_key": "rag_sunish",
}
expected_rag_flattened_params = {
"workspace_url": "https://e2-dogfood.staging.cloud.databricks.com",
"vector_search_endpoint_name": "dbdemos_vs_endpoint",
"vector_search_index": "monitoring.rag.databricks_docs_index",
"embedding_model_endpoint_name": "databricks-bge-large-en",
"embedding_model_query_instructions": "Represent this sentence for searching",
"llm_model": "databricks-dbrx-instruct",
"llm_prompt_template": "You are a trustful assistant for Databricks users.",
"retriever_config/k": 5,
"retriever_config/use_mmr": "false",
"llm_parameters/temperature": 0.01,
"llm_parameters/max_tokens": 200,
"llm_prompt_template_variables": ["chat_history", "context", "question"],
"secret_scope": "dbdemos",
"secret_key": "rag_sunish",
}
assert _flatten_nested_params(rag_params) == expected_rag_flattened_params
@pytest.mark.parametrize(
("data", "target", "target_type"),
[
(pd.DataFrame([{"a": [1, 2, 3]}]), [{"a": [1, 2, 3]}], list),
(pd.DataFrame([{"a": np.array([1, 2, 3])}]), [{"a": [1, 2, 3]}], list),
(pd.DataFrame([{0: np.array(["abc"])[0]}]), ["abc"], list),
(np.array([1, 2, 3]), [1, 2, 3], list),
(np.array([123])[0], 123, int),
(np.array(["abc"])[0], "abc", str),
],
)
def test_convert_llm_input_data(data, target, target_type):
result = _convert_llm_input_data(data)
assert result == target
assert type(result) == target_type
@pytest.mark.parametrize(
("model_path", "error_message"),
[
(
"model.py",
f"The provided model path '{os.getcwd()}/model.py' does not exist. "
"Ensure the file path is valid and try again.",
),
(
"model",
f"The provided model path '{os.getcwd()}/model' does not exist. "
"Ensure the file path is valid and try again. "
f"Perhaps you meant '{os.getcwd()}/model.py'?",
),
],
)
def test_validate_and_get_model_code_path_not_found(model_path, error_message, tmp_path):
with pytest.raises(MlflowException, match=error_message):
_validate_and_get_model_code_path(model_path, tmp_path)
def test_validate_and_get_model_code_path_success(tmp_path):
# if the model file exists, return the path as is
model_path = os.path.abspath(__file__)
actual = _validate_and_get_model_code_path(model_path, tmp_path)
assert actual == model_path
def test_suppress_schema_error(monkeypatch):
schema = Schema(
[
ColSpec("double", "id"),
ColSpec("string", "name"),
]
)
monkeypatch.setenv(MLFLOW_DISABLE_SCHEMA_DETAILS.name, "true")
data = pd.DataFrame({"id": [1, 2]}, dtype="float64")
with pytest.raises(
MlflowException,
match=r"Failed to enforce model input schema. Please check your input data.",
):
_validate_prediction_input(data, None, schema, None)
def test_enforce_schema_with_missing_and_extra_columns(monkeypatch):
schema = Schema(
[
ColSpec("long", "id"),
ColSpec("string", "name"),
]
)
monkeypatch.setenv(MLFLOW_DISABLE_SCHEMA_DETAILS.name, "true")
input_data = pd.DataFrame({"id": [1, 2], "extra_col": ["mlflow", "oss"]})
with pytest.raises(
MlflowException, match=r"Input schema validation failed.*extra inputs provided"
):
_enforce_schema(input_data, schema)
| ModelWithData |
python | pypa__pip | src/pip/_internal/metadata/base.py | {
"start": 24813,
"end": 24938
} | class ____(Protocol):
location: str
def as_zipfile(self) -> zipfile.ZipFile:
raise NotImplementedError()
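    # Illustrative sketch (not part of pip's source): any object exposing a
    # ``location`` string and an ``as_zipfile()`` returning ``zipfile.ZipFile``
    # satisfies this protocol, e.g. a small wrapper around an on-disk .whl path
    # that opens the archive lazily.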
| Wheel |
python | getsentry__sentry | src/sentry/issues/analytics.py | {
"start": 78,
"end": 182
} | class ____(analytics.Event):
num_groups: int
analytics.register(IssueForecastSaved)
| IssueForecastSaved |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/tests/test_observer.py | {
"start": 1612,
"end": 9896
} | class ____:
async def test_minimal(self, mock_events_client: AsyncMock):
flow_run_id = uuid.uuid4()
pod_id = uuid.uuid4()
await _replicate_pod_event(
event={"type": "ADDED", "status": {"phase": "Running"}},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(flow_run_id),
"prefect.io/flow-run-name": "test",
},
status={"phase": "Running"},
logger=MagicMock(),
)
emitted_event = mock_events_client.emit.call_args[1]["event"]
assert emitted_event.event == "prefect.kubernetes.pod.running"
assert emitted_event.resource == Resource(
{
"prefect.resource.id": f"prefect.kubernetes.pod.{pod_id}",
"prefect.resource.name": "test",
"kubernetes.namespace": "test",
}
)
assert emitted_event.related == [
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
"prefect.resource.role": "flow-run",
"prefect.resource.name": "test",
}
)
]
async def test_deterministic_event_id(self, mock_events_client: AsyncMock):
"""Test that the event ID is deterministic"""
pod_id = uuid.uuid4()
await _replicate_pod_event(
event={"type": "ADDED", "status": {"phase": "Running"}},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(uuid.uuid4()),
"prefect.io/flow-run-name": "test-run",
},
status={"phase": "Running"},
logger=MagicMock(),
)
first_event_id = mock_events_client.emit.call_args[1]["event"].id
mock_events_client.emit.reset_mock()
# Call the function again
await _replicate_pod_event(
event={"type": "ADDED", "status": {"phase": "Running"}},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(uuid.uuid4()),
"prefect.io/flow-run-name": "test-run",
},
status={"phase": "Running"},
logger=MagicMock(),
)
second_event_id = mock_events_client.emit.call_args[1]["event"].id
assert first_event_id == second_event_id
async def test_evicted_pod(self, mock_events_client: AsyncMock):
"""Test handling of evicted pods"""
pod_id = uuid.uuid4()
await _replicate_pod_event(
event={"type": "MODIFIED"},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(uuid.uuid4()),
"prefect.io/flow-run-name": "test-run",
},
status={
"phase": "Failed",
"container_statuses": [
{"state": {"terminated": {"reason": "OOMKilled"}}}
],
},
logger=MagicMock(),
)
emitted_event = mock_events_client.emit.call_args[1]["event"]
assert emitted_event.event == "prefect.kubernetes.pod.evicted"
assert emitted_event.resource == Resource(
{
"prefect.resource.id": f"prefect.kubernetes.pod.{pod_id}",
"prefect.resource.name": "test",
"kubernetes.namespace": "test",
"kubernetes.reason": "OOMKilled",
},
)
async def test_all_related_resources(self, mock_events_client: AsyncMock):
"""Test that all possible related resources are included"""
flow_run_id = uuid.uuid4()
deployment_id = uuid.uuid4()
flow_id = uuid.uuid4()
work_pool_id = uuid.uuid4()
pod_id = uuid.uuid4()
await _replicate_pod_event(
event={"type": "ADDED"},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(flow_run_id),
"prefect.io/flow-run-name": "test-run",
"prefect.io/deployment-id": str(deployment_id),
"prefect.io/deployment-name": "test-deployment",
"prefect.io/flow-id": str(flow_id),
"prefect.io/flow-name": "test-flow",
"prefect.io/work-pool-id": str(work_pool_id),
"prefect.io/work-pool-name": "test-pool",
"prefect.io/worker-name": "test-worker",
},
status={"phase": "Running"},
logger=MagicMock(),
)
mock_events_client.emit.assert_called_once()
emitted_event = mock_events_client.emit.call_args[1]["event"]
related_resources = emitted_event.related
# Verify all related resources are present
resource_ids = {
r.model_dump()["prefect.resource.id"] for r in related_resources
}
assert resource_ids == {
f"prefect.flow-run.{flow_run_id}",
f"prefect.deployment.{deployment_id}",
f"prefect.flow.{flow_id}",
f"prefect.work-pool.{work_pool_id}",
"prefect.worker.kubernetes.test-worker",
}
resource_names = {
r.model_dump()["prefect.resource.name"] for r in related_resources
}
assert resource_names == {
"test-run",
"test-deployment",
"test-flow",
"test-pool",
"test-worker",
}
async def test_event_deduplication(
self, mock_events_client: AsyncMock, mock_orchestration_client: AsyncMock
):
"""Test that checks from existing events when receiving events on startup"""
pod_id = uuid.uuid4()
await _replicate_pod_event(
# Event types with None are received when reading current cluster state
event={"type": None},
uid=str(pod_id),
name="test",
namespace="test",
labels={"prefect.io/flow-run-id": str(uuid.uuid4())},
status={"phase": "Running"},
logger=MagicMock(),
)
# Verify the request was made with correct payload structure
mock_orchestration_client.request.assert_called_once()
call_args = mock_orchestration_client.request.call_args
assert call_args[0] == ("POST", "/events/filter")
# Verify the json payload has the correct structure: {"filter": {...}}
json_payload = call_args[1]["json"]
assert "filter" in json_payload, "Expected 'filter' key in json payload"
# Verify the nested filter contains expected fields
event_filter = json_payload["filter"]
assert "event" in event_filter, "Expected 'event' field in filter"
assert "resource" in event_filter, "Expected 'resource' field in filter"
assert "occurred" in event_filter, "Expected 'occurred' field in filter"
# Verify no event was emitted since one already existed
mock_events_client.emit.assert_not_called()
@pytest.mark.parametrize("phase", ["Pending", "Running", "Succeeded", "Failed"])
async def test_different_phases(self, mock_events_client: AsyncMock, phase: str):
"""Test handling of different pod phases"""
pod_id = uuid.uuid4()
flow_run_id = uuid.uuid4()
mock_events_client.emit.reset_mock()
await _replicate_pod_event(
event={"type": "ADDED"},
uid=str(pod_id),
name="test",
namespace="test",
labels={
"prefect.io/flow-run-id": str(flow_run_id),
"prefect.io/flow-run-name": "test-run",
},
status={"phase": phase},
logger=MagicMock(),
)
mock_events_client.emit.assert_called_once()
emitted_event = mock_events_client.emit.call_args[1]["event"]
assert emitted_event.event == f"prefect.kubernetes.pod.{phase.lower()}"
| TestReplicatePodEvent |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_basics.py | {
"start": 7078,
"end": 8149
} | class ____(unittest.TestCase):
def test_gpu_ref(self):
# this crashes
dim = 256
training_data = np.random.randint(256, size=(10000, dim // 8)).astype('uint8')
centroids = 330
def create_cpu(dim):
quantizer = faiss.IndexBinaryFlat(dim)
return faiss.IndexBinaryIVF(quantizer, dim, centroids)
def create_gpu(dim):
gpu_quantizer = faiss.index_cpu_to_all_gpus(faiss.IndexFlatL2(dim))
index = create_cpu(dim)
index.clustering_index = gpu_quantizer
index.dont_dealloc_me = gpu_quantizer
return index
index = create_gpu(dim)
index.verbose = True
index.cp.verbose = True
index.train(training_data)
def make_t(num, d, clamp=False, seed=None):
rs = None
if seed is None:
rs = np.random.RandomState(123)
else:
rs = np.random.RandomState(seed)
x = rs.rand(num, d).astype(np.float32)
if clamp:
x = (x * 255).astype('uint8').astype('float32')
return x
| TestGpuRef |
python | pytorch__pytorch | test/nn/test_parametrization.py | {
"start": 82206,
"end": 83974
} | class ____(NNTestCase):
@swap([True, False])
def test_weight_norm_parametrization(self, device):
for dtype in [torch.float, torch.bfloat16]:
input = torch.randn(3, 4, dtype=dtype, device=device)
m = nn.Linear(4, 5, dtype=dtype, device=device)
expected_output = m(input)
# add weight normalization
m = torch.nn.utils.parametrizations.weight_norm(m)
self.assertEqual(
m.parametrizations.weight.original1.size(), m.weight.size()
)
self.assertEqual(m.parametrizations.weight.original0.size(), (5, 1))
self.assertEqual(m(input), expected_output)
# remove weight norm
torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
self.assertFalse(hasattr(m, "parametrizations"))
self.assertEqual(m(input), expected_output)
# test with dim=1
m = torch.nn.utils.parametrizations.weight_norm(m, dim=1)
self.assertEqual(
m.parametrizations.weight.original1.size(), m.weight.size()
)
self.assertEqual(m.parametrizations.weight.original0.size(), (1, 4))
self.assertEqual(m(input), expected_output)
# test with dim=None
m = nn.Linear(4, 5, dtype=dtype, device=device)
expected_output = m(input)
m = torch.nn.utils.parametrizations.weight_norm(m, dim=None)
self.assertEqual(m(input), expected_output)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestNNParametrizationDevice, globals(), only_for=only_for)
instantiate_parametrized_tests(TestNNParametrization)
if __name__ == "__main__":
run_tests()
| TestNNParametrizationDevice |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 1773,
"end": 1960
} | class ____(CookiecutterException):
"""
Exception when version control is unavailable.
Raised if the version control system (git or hg) is not installed.
"""
| VCSNotInstalled |
python | realpython__materials | django-flashcards-app/source_code_final/cards/views.py | {
"start": 590,
"end": 1356
} | class ____(CardListView):
template_name = "cards/box.html"
form_class = CardCheckForm
def get_queryset(self):
return Card.objects.filter(box=self.kwargs["box_num"])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["box_number"] = self.kwargs["box_num"]
if self.object_list:
context["check_card"] = random.choice(self.object_list)
return context
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
card = get_object_or_404(Card, id=form.cleaned_data["card_id"])
card.move(form.cleaned_data["solved"])
return redirect(request.META.get("HTTP_REFERER"))
| BoxView |
python | keras-team__keras | keras/src/backend/common/backend_utils_test.py | {
"start": 1606,
"end": 2702
} | class ____(test_case.TestCase):
def test_valid_padding_without_output_padding(self):
"""Test conversion with 'valid' padding and no output padding"""
(
torch_padding,
torch_output_padding,
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
padding="valid",
output_padding=None,
)
self.assertEqual(torch_padding, 0)
self.assertEqual(torch_output_padding, 0)
def test_same_padding_without_output_padding(self):
"""Test conversion with 'same' padding and no output padding"""
(
torch_padding,
torch_output_padding,
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
padding="same",
output_padding=None,
)
self.assertEqual(torch_padding, 1)
self.assertEqual(torch_output_padding, 1)
| ConvertConvTransposePaddingArgsTorchTest |
python | PyCQA__pylint | pylint/config/callback_actions.py | {
"start": 11411,
"end": 11780
} | class ____(_XableAction):
"""Callback action for enabling a message."""
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = "--enable",
) -> None:
self._call(self.linter.enable, values, option_string)
| _EnableAction |
python | simplejson__simplejson | simplejson/tests/test_namedtuple.py | {
"start": 846,
"end": 997
} | class ____(object):
def __init__(self, *args):
self.point = Point(*args)
def _asdict(self):
return self.point._asdict()
| DuckPoint |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 45719,
"end": 46650
} | class ____(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [("<madeupword0>", 0.0), ("<madeupword1>", 0.0), ("<madeupword2>", 0.0), ("<madeupword3>", 0.0), ("<madeupword4>", 0.0), ("<madeupword5>", 0.0), ("<madeupword6>", 0.0)] # fmt: skip
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(
single="</s> $A",
pair="</s> $A </s> </s> $B",
special_tokens=[
("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
],
)
| XGLMConverter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_format13.py | {
"start": 315,
"end": 1021
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("format13.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(0, 21)
font_format = workbook.add_format()
font_format.set_font_name("B Nazanin")
font_format.set_font_family(0)
font_format.set_font_charset(178)
worksheet.write("A1", "Foo", font_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 21614,
"end": 30449
} | class ____(Request):
"""
Get all queues
:param name: Get only queues whose name matches this pattern (python regular
expression syntax)
:type name: str
:param id: List of Queue IDs used to filter results
:type id: Sequence[str]
:param tags: User-defined tags list used to filter results. Prepend '-' to tag
name to indicate exclusion
:type tags: Sequence[str]
:param system_tags: System tags list used to filter results. Prepend '-' to
system tag name to indicate exclusion
:type system_tags: Sequence[str]
:param page: Page number, returns a specific page out of the result list of
results.
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param only_fields: List of document field names (nesting is supported using
'.', e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
"""
_service = "queues"
_action = "get_all"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"id": {
"description": "List of Queue IDs used to filter results",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Get only queues whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": "List of document field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the result list of results.",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)",
"minimum": 1,
"type": ["integer", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
name: Optional[str] = None,
id: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
order_by: Optional[List[str]] = None,
search_text: Optional[str] = None,
only_fields: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(GetAllRequest, self).__init__(**kwargs)
self.name = name
self.id = id
self.tags = tags
self.system_tags = system_tags
self.page = page
self.page_size = page_size
self.order_by = order_by
self.search_text = search_text
self.only_fields = only_fields
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("id")
def id(self) -> Optional[List[str]]:
return self._property_id
@id.setter
def id(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("page")
def page(self) -> Optional[int]:
return self._property_page
@page.setter
def page(self, value: Optional[int]) -> None:
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self) -> Optional[int]:
return self._property_page_size
@page_size.setter
def page_size(self, value: Optional[int]) -> None:
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("order_by")
def order_by(self) -> Optional[List[str]]:
return self._property_order_by
@order_by.setter
def order_by(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("search_text")
def search_text(self) -> Optional[str]:
return self._property_search_text
@search_text.setter
def search_text(self, value: Optional[str]) -> None:
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("only_fields")
def only_fields(self) -> Optional[List[str]]:
return self._property_only_fields
@only_fields.setter
def only_fields(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
| GetAllRequest |
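A minimal construction sketch for the request above, assuming the clearml package is installed and exposes this module path; the queue-name pattern, tags, and ordering field are illustrative values, not documented defaults.

```python
# Hedged sketch: module path taken from the row above; field values are made up for illustration.
from clearml.backend_api.services.v2_9.queues import GetAllRequest

req = GetAllRequest(
    name=r"train.*",            # regex match on the queue name
    tags=["prod", "-debug"],    # '-' prefix excludes a tag
    page=0,
    page_size=20,
    order_by=["-last_update"],  # illustrative field name; '-' means descending
)
# The schema-backed setters validate eagerly: assigning a string to `page`
# would fail the integer isinstance assertion shown above.
```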
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 5159,
"end": 7701
} | class ____(Processor):
"""
Processor that highlights search matches in the document.
Note that this doesn't support multiline search matches yet.
The style classes 'search' and 'search.current' will be applied to the
content.
"""
_classname = "search"
_classname_current = "search.current"
def _get_search_text(self, buffer_control: BufferControl) -> str:
"""
The text we are searching for.
"""
return buffer_control.search_state.text
def apply_transformation(
self, transformation_input: TransformationInput
) -> Transformation:
(
buffer_control,
document,
lineno,
source_to_display,
fragments,
_,
_,
) = transformation_input.unpack()
search_text = self._get_search_text(buffer_control)
searchmatch_fragment = f" class:{self._classname} "
searchmatch_current_fragment = f" class:{self._classname_current} "
if search_text and not get_app().is_done:
# For each search match, replace the style string.
line_text = fragment_list_to_text(fragments)
fragments = explode_text_fragments(fragments)
if buffer_control.search_state.ignore_case():
flags = re.IGNORECASE
else:
flags = re.RegexFlag(0)
# Get cursor column.
cursor_column: int | None
if document.cursor_position_row == lineno:
cursor_column = source_to_display(document.cursor_position_col)
else:
cursor_column = None
for match in re.finditer(re.escape(search_text), line_text, flags=flags):
if cursor_column is not None:
on_cursor = match.start() <= cursor_column < match.end()
else:
on_cursor = False
for i in range(match.start(), match.end()):
old_fragment, text, *_ = fragments[i]
if on_cursor:
fragments[i] = (
old_fragment + searchmatch_current_fragment,
fragments[i][1],
)
else:
fragments[i] = (
old_fragment + searchmatch_fragment,
fragments[i][1],
)
return Transformation(fragments)
| HighlightSearchProcessor |
python | bokeh__bokeh | release/enums.py | {
"start": 773,
"end": 874
} | class ____(Enum):
PENDING = "PENDING"
STARTED = "STARTED"
COMPLETED = "COMPLETED"
| ActionState |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 804142,
"end": 804344
} | class ____(VegaLiteSchema):
"""OffsetDef schema wrapper."""
_schema = {"$ref": "#/definitions/OffsetDef"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| OffsetDef |
python | huggingface__transformers | src/transformers/models/vits/configuration_vits.py | {
"start": 814,
"end": 13892
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VitsModel`]. It is used to instantiate a VITS
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the VITS
[facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 38):
Vocabulary size of the VITS model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed to the forward method of [`VitsModel`].
hidden_size (`int`, *optional*, defaults to 192):
Dimensionality of the text encoder layers.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
window_size (`int`, *optional*, defaults to 4):
Window size for the relative positional embeddings in the attention layers of the Transformer encoder.
use_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the key, query, value projection layers in the Transformer encoder.
ffn_dim (`int`, *optional*, defaults to 768):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
ffn_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the 1D convolution layers used by the feed-forward network in the Transformer encoder.
flow_size (`int`, *optional*, defaults to 192):
Dimensionality of the flow layers.
spectrogram_bins (`int`, *optional*, defaults to 513):
Number of frequency bins in the target spectrogram.
hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
use_stochastic_duration_prediction (`bool`, *optional*, defaults to `True`):
Whether to use the stochastic duration prediction module or the regular duration predictor.
num_speakers (`int`, *optional*, defaults to 1):
Number of speakers if this is a multi-speaker model.
speaker_embedding_size (`int`, *optional*, defaults to 0):
Number of channels used by the speaker embeddings. Is zero for single-speaker models.
upsample_initial_channel (`int`, *optional*, defaults to 512):
The number of input channels into the HiFi-GAN upsampling network.
upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
A tuple of integers defining the stride of each 1D convolutional layer in the HiFi-GAN upsampling network.
The length of `upsample_rates` defines the number of convolutional layers and has to match the length of
`upsample_kernel_sizes`.
upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the HiFi-GAN upsampling
network. The length of `upsample_kernel_sizes` defines the number of convolutional layers and has to match
the length of `upsample_rates`.
resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`):
A tuple of integers defining the kernel sizes of the 1D convolutional layers in the HiFi-GAN
multi-receptive field fusion (MRF) module.
resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
HiFi-GAN multi-receptive field fusion (MRF) module.
leaky_relu_slope (`float`, *optional*, defaults to 0.1):
The angle of the negative slope used by the leaky ReLU activation.
depth_separable_channels (`int`, *optional*, defaults to 2):
Number of channels to use in each depth-separable block.
depth_separable_num_layers (`int`, *optional*, defaults to 3):
Number of convolutional layers to use in each depth-separable block.
duration_predictor_flow_bins (`int`, *optional*, defaults to 10):
            Number of channels to map using the unconstrained rational spline in the duration predictor model.
duration_predictor_tail_bound (`float`, *optional*, defaults to 5.0):
Value of the tail bin boundary when computing the unconstrained rational spline in the duration predictor
model.
duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the 1D convolution layers used in the duration predictor model.
duration_predictor_dropout (`float`, *optional*, defaults to 0.5):
The dropout ratio for the duration predictor model.
duration_predictor_num_flows (`int`, *optional*, defaults to 4):
Number of flow stages used by the duration predictor model.
duration_predictor_filter_channels (`int`, *optional*, defaults to 256):
Number of channels for the convolution layers used in the duration predictor model.
prior_encoder_num_flows (`int`, *optional*, defaults to 4):
Number of flow stages used by the prior encoder flow model.
prior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 4):
Number of WaveNet layers used by the prior encoder flow model.
posterior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 16):
Number of WaveNet layers used by the posterior encoder model.
wavenet_kernel_size (`int`, *optional*, defaults to 5):
Kernel size of the 1D convolution layers used in the WaveNet model.
wavenet_dilation_rate (`int`, *optional*, defaults to 1):
Dilation rates of the dilated 1D convolutional layers used in the WaveNet model.
wavenet_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the WaveNet layers.
speaking_rate (`float`, *optional*, defaults to 1.0):
Speaking rate. Larger values give faster synthesised speech.
noise_scale (`float`, *optional*, defaults to 0.667):
How random the speech prediction is. Larger values create more variation in the predicted speech.
noise_scale_duration (`float`, *optional*, defaults to 0.8):
How random the duration prediction is. Larger values create more variation in the predicted durations.
sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the output audio waveform is digitized, expressed in hertz (Hz).
Example:
```python
>>> from transformers import VitsModel, VitsConfig
>>> # Initializing a "facebook/mms-tts-eng" style configuration
>>> configuration = VitsConfig()
>>> # Initializing a model (with random weights) from the "facebook/mms-tts-eng" style configuration
>>> model = VitsModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vits"
def __init__(
self,
vocab_size=38,
hidden_size=192,
num_hidden_layers=6,
num_attention_heads=2,
window_size=4,
use_bias=True,
ffn_dim=768,
layerdrop=0.1,
ffn_kernel_size=3,
flow_size=192,
spectrogram_bins=513,
hidden_act="relu",
hidden_dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_stochastic_duration_prediction=True,
num_speakers=1,
speaker_embedding_size=0,
upsample_initial_channel=512,
upsample_rates=[8, 8, 2, 2],
upsample_kernel_sizes=[16, 16, 4, 4],
resblock_kernel_sizes=[3, 7, 11],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
leaky_relu_slope=0.1,
depth_separable_channels=2,
depth_separable_num_layers=3,
duration_predictor_flow_bins=10,
duration_predictor_tail_bound=5.0,
duration_predictor_kernel_size=3,
duration_predictor_dropout=0.5,
duration_predictor_num_flows=4,
duration_predictor_filter_channels=256,
prior_encoder_num_flows=4,
prior_encoder_num_wavenet_layers=4,
posterior_encoder_num_wavenet_layers=16,
wavenet_kernel_size=5,
wavenet_dilation_rate=1,
wavenet_dropout=0.0,
speaking_rate=1.0,
noise_scale=0.667,
noise_scale_duration=0.8,
sampling_rate=16_000,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.window_size = window_size
self.use_bias = use_bias
self.ffn_dim = ffn_dim
self.layerdrop = layerdrop
self.ffn_kernel_size = ffn_kernel_size
self.flow_size = flow_size
self.spectrogram_bins = spectrogram_bins
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_stochastic_duration_prediction = use_stochastic_duration_prediction
self.num_speakers = num_speakers
self.speaker_embedding_size = speaker_embedding_size
self.upsample_initial_channel = upsample_initial_channel
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.leaky_relu_slope = leaky_relu_slope
self.depth_separable_channels = depth_separable_channels
self.depth_separable_num_layers = depth_separable_num_layers
self.duration_predictor_flow_bins = duration_predictor_flow_bins
self.duration_predictor_tail_bound = duration_predictor_tail_bound
self.duration_predictor_kernel_size = duration_predictor_kernel_size
self.duration_predictor_dropout = duration_predictor_dropout
self.duration_predictor_num_flows = duration_predictor_num_flows
self.duration_predictor_filter_channels = duration_predictor_filter_channels
self.prior_encoder_num_flows = prior_encoder_num_flows
self.prior_encoder_num_wavenet_layers = prior_encoder_num_wavenet_layers
self.posterior_encoder_num_wavenet_layers = posterior_encoder_num_wavenet_layers
self.wavenet_kernel_size = wavenet_kernel_size
self.wavenet_dilation_rate = wavenet_dilation_rate
self.wavenet_dropout = wavenet_dropout
self.speaking_rate = speaking_rate
self.noise_scale = noise_scale
self.noise_scale_duration = noise_scale_duration
self.sampling_rate = sampling_rate
if len(upsample_kernel_sizes) != len(upsample_rates):
raise ValueError(
f"The length of `upsample_kernel_sizes` ({len(upsample_kernel_sizes)}) must match the length of "
f"`upsample_rates` ({len(upsample_rates)})"
)
super().__init__(**kwargs)
__all__ = ["VitsConfig"]
| VitsConfig |
python | anthropics__anthropic-sdk-python | tests/api_resources/beta/test_messages.py | {
"start": 19382,
"end": 38804
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> None:
message = await async_client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
)
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncAnthropic) -> None:
message = await async_client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
container={
"id": "id",
"skills": [
{
"skill_id": "x",
"type": "anthropic",
"version": "x",
}
],
},
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
service_tier="auto",
stop_sequences=["string"],
stream=False,
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
temperature=1,
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
top_k=5,
top_p=0.7,
betas=["string"],
)
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(BetaMessage, message, path=["response"])
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> None:
message_stream = await async_client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
)
await message_stream.response.aclose()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncAnthropic) -> None:
message_stream = await async_client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
container={
"id": "id",
"skills": [
{
"skill_id": "x",
"type": "anthropic",
"version": "x",
}
],
},
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
service_tier="auto",
stop_sequences=["string"],
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
temperature=1,
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
top_k=5,
top_p=0.7,
betas=["string"],
)
await message_stream.response.aclose()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
await stream.close()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = await response.parse()
await stream.close()
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_count_tokens(self, async_client: AsyncAnthropic) -> None:
message = await async_client.beta.messages.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
)
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_method_count_tokens_with_all_params(self, async_client: AsyncAnthropic) -> None:
message = await async_client.beta.messages.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
betas=["string"],
)
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_raw_response_count_tokens(self, async_client: AsyncAnthropic) -> None:
response = await async_client.beta.messages.with_raw_response.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
async def test_streaming_response_count_tokens(self, async_client: AsyncAnthropic) -> None:
async with async_client.beta.messages.with_streaming_response.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_pydantic_error_in_create(self, async_client: AsyncAnthropic) -> None:
class MyModel(pydantic.BaseModel):
name: str
age: int
with pytest.raises(TypeError) as exc_info:
await async_client.beta.messages.create(
max_tokens=1024,
messages=[{"role": "user", "content": "Test"}],
model="claude-sonnet-4-5-20250929",
output_format=MyModel, # type: ignore
)
error_message = str(exc_info.value)
assert "parse()" in error_message
| TestAsyncMessages |
python | modin-project__modin | modin/experimental/batch/pipeline.py | {
"start": 1318,
"end": 3667
} | class ____(object):
"""
Internal representation of a single query in a pipeline.
This object represents a single function to be pipelined in a batch pipeline.
Parameters
----------
func : Callable
The function to apply to the dataframe.
is_output : bool, default: False
Whether this query is an output query and should be passed both to the next query, and
directly to postprocessing.
repartition_after : bool, default: False
Whether to repartition after this query is computed. Currently, repartitioning is only
supported if there is 1 partition prior to repartitioning.
fan_out : bool, default: False
Whether to fan out this node. If True and only 1 partition is passed as input, the partition
is replicated `PandasQueryPipeline.num_partitions` (default: `NPartitions.get`) times, and
the function is called on each. The `reduce_fn` must also be specified.
pass_partition_id : bool, default: False
Whether to pass the numerical partition id to the query.
reduce_fn : Callable, default: None
The reduce function to apply if `fan_out` is set to True. This takes the
`PandasQueryPipeline.num_partitions` (default: `NPartitions.get`) partitions that result from
this query, and combines them into 1 partition.
output_id : int, default: None
An id to assign to this node if it is an output.
Notes
-----
`func` must be a function that is applied along an axis of the dataframe.
Use `pandas` for any module level functions inside `func` since it operates directly on
partitions.
"""
def __init__(
self,
func: Callable,
is_output: bool = False,
repartition_after: bool = False,
fan_out: bool = False,
pass_partition_id: bool = False,
reduce_fn: Optional[Callable] = None,
output_id: Optional[int] = None,
):
self.func = func
self.is_output = is_output
self.repartition_after = repartition_after
self.fan_out = fan_out
self.pass_partition_id = pass_partition_id
self.reduce_fn = reduce_fn
self.output_id = output_id
# List of sub-queries to feed into this query, if this query is an output node.
self.operators = None
| PandasQuery |
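A small sketch of building one of these query nodes directly, assuming modin is installed and this internal class is importable from the path in the row above; in normal use queries are registered through `PandasQueryPipeline` rather than constructed by hand.

```python
# Hedged sketch: the per-partition function must use pandas, per the class notes above.
from modin.experimental.batch.pipeline import PandasQuery

def add_totals(df):
    # Receives one pandas DataFrame partition and returns a new one.
    return df.assign(total=df.sum(axis=1))

node = PandasQuery(add_totals, is_output=True, output_id=1)
print(node.is_output, node.output_id)  # True 1
```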
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 12115,
"end": 13009
} | class ____(TestCase):
"""
Ensure keyword arguments passed in the `@action` decorator
are properly handled. Refs #940.
"""
def setUp(self):
class TestViewSet(viewsets.ModelViewSet):
permission_classes = []
@action(methods=['post'], detail=True, permission_classes=[permissions.AllowAny])
def custom(self, request, *args, **kwargs):
return Response({
'permission_classes': self.permission_classes
})
self.router = SimpleRouter()
self.router.register(r'test', TestViewSet, basename='test')
self.view = self.router.urls[-1].callback
def test_action_kwargs(self):
request = factory.post('/test/0/custom/')
response = self.view(request)
assert response.data == {'permission_classes': [permissions.AllowAny]}
| TestActionKeywordArgs |
python | kamyu104__LeetCode-Solutions | Python/find-the-number-of-copy-arrays.py | {
"start": 38,
"end": 548
} | class ____(object):
def countArrays(self, original, bounds):
"""
:type original: List[int]
:type bounds: List[List[int]]
:rtype: int
"""
left, right = bounds[0]
result = right-left+1
for i in xrange(1, len(original)):
diff = original[i]-original[i-1]
left = max(left+diff, bounds[i][0])
right = min(right+diff, bounds[i][1])
result = min(result, max(right-left+1, 0))
return result
| Solution |
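The snippet above targets Python 2 (note `xrange`); here is a hedged Python 3 re-statement of the same interval-intersection idea with a small worked input.

```python
def count_copy_arrays(original, bounds):
    # Track the feasible interval [lo, hi] for copy[0]; at each step, shift it
    # forward by the required difference and clip it to the current bounds.
    lo, hi = bounds[0]
    best = hi - lo + 1
    for i in range(1, len(original)):
        diff = original[i] - original[i - 1]
        lo = max(lo + diff, bounds[i][0])
        hi = min(hi + diff, bounds[i][1])
        best = min(best, max(hi - lo + 1, 0))
    return best

print(count_copy_arrays([2, 3, 4], [[1, 3], [2, 4], [3, 5]]))  # 3 -- copies may start at 1, 2 or 3
```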
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 27811,
"end": 28468
} | class ____(GradientCheckpointingLayer):
def __init__(self, c_in):
super().__init__()
self.downConv = nn.Conv1d(
in_channels=c_in,
out_channels=c_in,
kernel_size=3,
padding=1,
padding_mode="circular",
)
self.norm = nn.BatchNorm1d(c_in)
self.activation = nn.ELU()
self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.downConv(x.permute(0, 2, 1))
x = self.norm(x)
x = self.activation(x)
x = self.maxPool(x)
x = x.transpose(1, 2)
return x
| InformerConvLayer |
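A quick shape check for the distilling layer above, assuming transformers (with torch) is installed and the class is importable from the module in this row; the batch and sequence sizes are made up.

```python
import torch
from transformers.models.informer.modeling_informer import InformerConvLayer

layer = InformerConvLayer(c_in=64).eval()   # eval() so BatchNorm1d uses running statistics
x = torch.randn(2, 96, 64)                  # (batch, sequence length, channels)
with torch.no_grad():
    y = layer(x)
print(y.shape)  # torch.Size([2, 48, 64]) -- the conv keeps the length, the stride-2 max-pool halves it
```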
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_base_monitor_environment_details.py | {
"start": 4811,
"end": 6200
} | class ____(MonitorTestCase):
__test__ = False
def setUp(self) -> None:
self.login_as(user=self.user)
super().setUp()
def test_simple(self) -> None:
monitor = self._create_monitor(status=MonitorStatus.ACTIVE)
monitor_environment = self._create_monitor_environment(monitor)
monitor_environment_2 = self._create_monitor_environment(monitor, name="second")
self.get_success_response(
self.organization.slug,
monitor.slug,
monitor_environment.get_environment().name,
method="DELETE",
status_code=202,
)
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == ObjectStatus.ACTIVE
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.PENDING_DELETION
assert RegionScheduledDeletion.objects.filter(
object_id=monitor_environment.id, model_name="MonitorEnvironment"
).exists()
monitor_environment_2 = MonitorEnvironment.objects.get(id=monitor_environment_2.id)
assert monitor_environment_2.status == MonitorStatus.ACTIVE
assert not RegionScheduledDeletion.objects.filter(
object_id=monitor_environment_2.id, model_name="MonitorEnvironment"
).exists()
| BaseDeleteMonitorTest |
python | zarr-developers__zarr-python | src/zarr/codecs/sharding.py | {
"start": 1721,
"end": 2024
} | class ____(Enum):
"""
Enum for index location used by the sharding codec.
"""
start = "start"
end = "end"
def parse_index_location(data: object) -> ShardingCodecIndexLocation:
return parse_enum(data, ShardingCodecIndexLocation)
@dataclass(frozen=True)
| ShardingCodecIndexLocation |
python | huggingface__transformers | examples/pytorch/question-answering/trainer_seq2seq_qa.py | {
"start": 1018,
"end": 6616
} | class ____(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
# def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples=None,
ignore_keys: Optional[list[str]] = None,
metric_key_prefix: str = "eval",
**gen_kwargs,
) -> dict[str, float]:
gen_kwargs = gen_kwargs.copy()
# Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
# training args
if gen_kwargs.get("max_length") is None and self.args.generation_max_length is not None:
gen_kwargs["max_length"] = self.args.generation_max_length
if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
gen_kwargs["num_beams"] = self.args.generation_num_beams
self._gen_kwargs = gen_kwargs
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
try:
output = self.evaluation_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
metrics.update(output.metrics)
else:
metrics = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(metrics)
if self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(
self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
):
self._gen_kwargs = gen_kwargs.copy()
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
try:
output = self.evaluation_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
metrics = self.compute_metrics(predictions)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| QuestionAnsweringSeq2SeqTrainer |
python | jina-ai__jina | tests/integration/docarray_v2/csp/SampleRerankerExecutor/executor.py | {
"start": 547,
"end": 1446
} | class ____(Executor):
@requests(on="/rerank")
def foo(self, docs: DocList[RerankerInput], **kwargs) -> DocList[RankedOutput]:
ret = []
for doc in docs:
ret.append(
RankedOutput(
results=[
RankedObjectOutput(
id=doc.id,
index=0,
document=TextDoc(text="first result"),
relevance_score=-1,
),
RankedObjectOutput(
id=doc.id,
index=1,
document=TextDoc(text="second result"),
relevance_score=-2,
),
]
)
)
return DocList[RankedOutput](ret)
| SampleRerankerExecutor |
python | optuna__optuna | optuna/samplers/_brute_force.py | {
"start": 786,
"end": 4063
} | class ____:
# This is a class to represent the tree of search space.
# A tree node has three states:
# 1. Unexpanded. This is represented by children=None.
# 2. Leaf. This is represented by children={} and param_name=None.
# 3. Normal node. It has a param_name and non-empty children.
param_name: str | None = None
children: dict[float, "_TreeNode"] | None = None
is_running: bool = False
def expand(self, param_name: str | None, search_space: Iterable[float]) -> None:
# If the node is unexpanded, expand it.
# Otherwise, check if the node is compatible with the given search space.
if self.children is None:
# Expand the node
self.param_name = param_name
self.children = {value: _TreeNode() for value in search_space}
else:
if self.param_name != param_name:
raise ValueError(f"param_name mismatch: {self.param_name} != {param_name}")
if self.children.keys() != set(search_space):
raise ValueError(
f"search_space mismatch: {set(self.children.keys())} != {set(search_space)}"
)
def set_running(self) -> None:
self.is_running = True
def set_leaf(self) -> None:
self.expand(None, [])
def add_path(
self, params_and_search_spaces: Iterable[tuple[str, Iterable[float], float]]
) -> _TreeNode | None:
# Add a path (i.e. a list of suggested parameters in one trial) to the tree.
current_node = self
for param_name, search_space, value in params_and_search_spaces:
current_node.expand(param_name, search_space)
assert current_node.children is not None
if value not in current_node.children:
return None
current_node = current_node.children[value]
return current_node
def count_unexpanded(self, exclude_running: bool) -> int:
# Count the number of unexpanded nodes in the subtree.
if self.children is None:
return 0 if exclude_running and self.is_running else 1
else:
return sum(child.count_unexpanded(exclude_running) for child in self.children.values())
def sample_child(self, rng: np.random.RandomState, exclude_running: bool) -> float:
assert self.children is not None
# Sample an unexpanded node in the subtree uniformly, and return the first
# parameter value in the path to the node.
# Equivalently, we sample the child node with weights proportional to the number
# of unexpanded nodes in the subtree.
weights = np.array(
[child.count_unexpanded(exclude_running) for child in self.children.values()],
dtype=np.float64,
)
if any(
not value.is_running and weights[i] > 0
for i, value in enumerate(self.children.values())
):
# Prioritize picking non-running and unexpanded nodes.
for i, child in enumerate(self.children.values()):
if child.is_running:
weights[i] = 0.0
weights /= weights.sum()
return rng.choice(list(self.children.keys()), p=weights)
@experimental_class("3.1.0")
| _TreeNode |
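A short walk-through of the three node states described above, assuming this private class is importable from `optuna.samplers._brute_force`; the parameter name and values are illustrative.

```python
import numpy as np
from optuna.samplers._brute_force import _TreeNode

root = _TreeNode()
root.expand("x", [0.0, 1.0])                    # unexpanded -> normal node with two children
leaf = root.add_path([("x", [0.0, 1.0], 0.0)])  # follow the x=0.0 branch
leaf.set_leaf()                                 # that branch is now fully explored
print(root.count_unexpanded(exclude_running=False))                        # 1 (only x=1.0 remains)
print(root.sample_child(np.random.RandomState(0), exclude_running=False))  # 1.0
```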
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/idna/codec.py | {
"start": 186,
"end": 769
} | class ____(codecs.Codec):
def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return b"", 0
return encode(data), len(data)
def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return '', 0
return decode(data), len(data)
| Codec |
python | PyCQA__pylint | tests/functional/a/access/access_to_protected_members.py | {
"start": 1138,
"end": 1501
} | class ____:
"""Test for GitHub issue 1031"""
_attr = 1
def correct_access(self):
"""Demonstrates correct access"""
return type(self)._attr
def incorrect_access(self):
"""Demonstrates incorrect access"""
if self._attr == 1:
return type(INST)._protected # [protected-access]
return None
| Issue1031 |
python | pydantic__pydantic | pydantic/v1/mypy.py | {
"start": 30208,
"end": 31515
} | class ____:
def __init__(
self, name: str, is_required: bool, alias: Optional[str], has_dynamic_alias: bool, line: int, column: int
):
self.name = name
self.is_required = is_required
self.alias = alias
self.has_dynamic_alias = has_dynamic_alias
self.line = line
self.column = column
def to_var(self, info: TypeInfo, use_alias: bool) -> Var:
name = self.name
if use_alias and self.alias is not None:
name = self.alias
return Var(name, info[self.name].type)
def to_argument(self, info: TypeInfo, typed: bool, force_optional: bool, use_alias: bool) -> Argument:
if typed and info[self.name].type is not None:
type_annotation = info[self.name].type
else:
type_annotation = AnyType(TypeOfAny.explicit)
return Argument(
variable=self.to_var(info, use_alias),
type_annotation=type_annotation,
initializer=None,
kind=ARG_NAMED_OPT if force_optional or not self.is_required else ARG_NAMED,
)
def serialize(self) -> JsonDict:
return self.__dict__
@classmethod
def deserialize(cls, info: TypeInfo, data: JsonDict) -> 'PydanticModelField':
return cls(**data)
| PydanticModelField |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 10838,
"end": 11590
} | class ____(InstallationError):
"""Built metadata contains inconsistent information.
This is raised when the metadata contains values (e.g. name and version)
that do not match the information previously obtained from sdist filename,
user-supplied ``#egg=`` value, or an install requirement name.
"""
def __init__(
self, ireq: InstallRequirement, field: str, f_val: str, m_val: str
) -> None:
self.ireq = ireq
self.field = field
self.f_val = f_val
self.m_val = m_val
def __str__(self) -> str:
return (
f"Requested {self.ireq} has inconsistent {self.field}: "
f"expected {self.f_val!r}, but metadata has {self.m_val!r}"
)
| MetadataInconsistent |
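A tiny illustration of the message format produced by `__str__` above, assuming pip's internal module is importable; a plain string stands in here for the `InstallRequirement` that real callers pass.

```python
from pip._internal.exceptions import MetadataInconsistent

err = MetadataInconsistent("demo-pkg from https://example.com/demo.tar.gz", "version", "1.0", "1.1")
print(err)
# Requested demo-pkg from https://example.com/demo.tar.gz has inconsistent version: expected '1.0', but metadata has '1.1'
```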
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 31106,
"end": 35721
} | class ____(MegatronBertPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight",
"cls.predictions.decoder.bias": "cls.predictions.bias",
}
def __init__(self, config, add_binary_head=True):
r"""
add_binary_head (`bool`, *optional*, defaults to `True`):
Whether or not to add a binary head.
"""
super().__init__(config)
self.bert = MegatronBertModel(config)
self.cls = MegatronBertPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
next_sentence_label: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MegatronBertForPreTrainingOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, MegatronBertForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
>>> model = MegatronBertForPreTraining.from_pretrained("nvidia/megatron-bert-cased-345m")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return MegatronBertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| MegatronBertForPreTraining |
python | TheAlgorithms__Python | digital_image_processing/resize/resize.py | {
"start": 122,
"end": 2220
} | class ____:
"""
Simplest and fastest version of image resizing.
Source: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
"""
def __init__(self, img, dst_width: int, dst_height: int):
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0")
self.img = img
self.src_w = img.shape[1]
self.src_h = img.shape[0]
self.dst_w = dst_width
self.dst_h = dst_height
self.ratio_x = self.src_w / self.dst_w
self.ratio_y = self.src_h / self.dst_h
self.output = self.output_img = (
np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
)
def process(self):
for i in range(self.dst_h):
for j in range(self.dst_w):
self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
def get_x(self, x: int) -> int:
"""
Get parent X coordinate for destination X
:param x: Destination X coordinate
:return: Parent X coordinate based on `x ratio`
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg",
... 1), 100, 100)
>>> nn.ratio_x = 0.5
>>> nn.get_x(4)
2
"""
return int(self.ratio_x * x)
def get_y(self, y: int) -> int:
"""
Get parent Y coordinate for destination Y
:param y: Destination X coordinate
:return: Parent X coordinate based on `y ratio`
>>> nn = NearestNeighbour(imread("digital_image_processing/image_data/lena.jpg",
... 1), 100, 100)
>>> nn.ratio_y = 0.5
>>> nn.get_y(4)
2
"""
return int(self.ratio_y * y)
if __name__ == "__main__":
dst_w, dst_h = 800, 600
im = imread("image_data/lena.jpg", 1)
n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| NearestNeighbour |
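A self-contained usage sketch, assuming the class above (with its numpy import) is already defined in the session; the tiny gradient image is synthetic, avoiding the lena.jpg file the doctests reference.

```python
import numpy as np

img = np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)  # 4x4 synthetic 3-channel image
nn = NearestNeighbour(img, dst_width=8, dst_height=8)
nn.process()
assert nn.output.shape == (8, 8, 3)
# ratio_x = ratio_y = 4 / 8 = 0.5, so destination column 5 copies source column int(0.5 * 5) = 2.
```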
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_in_set_spark_optimized.py | {
"start": 2343,
"end": 8415
} | class ____(ColumnAggregateExpectation):
"""Expect each column value to be in a given set; optimized using **join** for spark backends.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
        strict (boolean or None): If True, the percentage of values in the set must strictly exceed `mostly` rather than merely meet it.
"""
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column_values.in_set.spark_optimized",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = ("column", "value_set", "mostly", "strict")
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {"mostly": 1, "strict": True, "value_set": []}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
value_set = configuration.kwargs.get("value_set") or self._get_default_value("value_set")
column = configuration.kwargs.get("column")
try:
assert column is not None, "`column` must be specified"
assert "value_set" in configuration.kwargs or value_set, "value_set is required"
assert isinstance(value_set, (list, set, dict)), (
"value_set must be a list, set, or dict"
)
if isinstance(value_set, dict):
assert "$PARAMETER" in value_set, (
'Evaluation Parameter dict for value_set kwarg must have "$PARAMETER" key.'
)
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
configuration = self.configuration
mostly = configuration["kwargs"].get("mostly")
strict = configuration["kwargs"].get("strict")
result = metrics.get("column_values.in_set.spark_optimized")
result = dict(result)
if strict is True and mostly < 1:
success = (sum(result.values()) / len(result.values())) > mostly
else:
success = (sum(result.values()) / len(result.values())) >= mostly
return {"success": success, "result": {"observed_values": result}}
examples = [
{
"data": {
"col1": ["ES", "BE", "FR", "DE", "CH"],
"col2": [1, 2, 3, 5, 8],
},
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col1",
"value_set": ["ES", "BE", "UK"],
"mostly": 0.4,
"strict": False,
},
"out": {
"success": True,
},
},
{
"title": "negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value_set": [3, 8, 1, 22, 74],
"mostly": 0.6,
"strict": True,
},
"out": {
"success": False,
},
},
{
"title": "failing_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value_set": ["ES", "BE", "UK"],
"mostly": 0.6,
"strict": True,
"catch_exceptions": True,
},
"out": {},
"error": {
"traceback_substring": "`value_set` must be of same type as `column`",
},
},
],
"only_for": ["spark"],
}
]
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"column values in set",
"experimental",
"spark optimized",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@austiezr",
"@derek-hk", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeInSetSparkOptimized().print_diagnostic_checklist()
| ExpectColumnValuesToBeInSetSparkOptimized |
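For orientation, the success rule in _validate above reduces to a fraction test against mostly, with strict switching > for >=. A plain-Python restatement (the dict of per-value flags below is made up for illustration; the Spark-side metric itself is not sketched here):

def in_set_success(per_value_flags: dict, mostly: float, strict: bool) -> bool:
    # Fraction of column values found in the value_set, compared against `mostly`.
    fraction = sum(per_value_flags.values()) / len(per_value_flags)
    return fraction > mostly if (strict and mostly < 1) else fraction >= mostly

flags = {"ES": True, "BE": True, "FR": False, "DE": False, "CH": False}  # 2 of 5 in set
assert in_set_success(flags, mostly=0.4, strict=False) is True   # 0.4 >= 0.4
assert in_set_success(flags, mostly=0.4, strict=True) is False   # 0.4 > 0.4 fails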
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_index.py | {
"start": 1027,
"end": 1102
} | class ____:
def __index__(self):
raise NotImplementedError
| Index5 |
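This fixture feeds Ruff's pylint-style check on __index__ return types. As background only (not part of the fixture), the runtime cares too: anything consuming an object as an integer goes through operator.index, which rejects non-int results:

import operator

class BadIndex:
    def __index__(self):
        return "0"  # not an int

try:
    operator.index(BadIndex())
except TypeError as exc:
    print(exc)  # __index__ returned non-int (type str)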
python | doocs__leetcode | solution/2000-2099/2085.Count Common Words With One Occurrence/Solution.py | {
"start": 0,
"end": 218
} | class ____:
def countWords(self, words1: List[str], words2: List[str]) -> int:
cnt1 = Counter(words1)
cnt2 = Counter(words2)
return sum(v == 1 and cnt2[w] == 1 for w, v in cnt1.items())
| Solution |
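LeetCode injects List and Counter into scope; the same solution is reproduced below with explicit imports plus a small example input so the snippet runs on its own:

from collections import Counter
from typing import List

class Solution:
    def countWords(self, words1: List[str], words2: List[str]) -> int:
        cnt1 = Counter(words1)
        cnt2 = Counter(words2)
        # A word counts only if it appears exactly once in *both* lists.
        return sum(v == 1 and cnt2[w] == 1 for w, v in cnt1.items())

# "leetcode" and "is" each occur exactly once in both lists, so the answer is 2.
assert Solution().countWords(
    ["leetcode", "is", "amazing", "as", "is"],
    ["amazing", "leetcode", "is"],
) == 2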
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_details.py | {
"start": 29370,
"end": 63640
} | class ____(AlertRuleDetailsBase):
method = "put"
def test_simple(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["name"] = "what"
with self.feature("organizations:incidents"), outbox_runner():
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
alert_rule.name = "what"
alert_rule.date_modified = resp.data["dateModified"]
assert resp.data == serialize(alert_rule)
assert resp.data["name"] == "what"
assert resp.data["dateModified"] > serialized_alert_rule["dateModified"]
with assume_test_silo_mode(SiloMode.CONTROL):
audit_log_entry = AuditLogEntry.objects.filter(
event=audit_log.get_event_id("ALERT_RULE_EDIT"), target_object=alert_rule.id
)
assert len(audit_log_entry) == 1
assert (
resp.renderer_context["request"].META["REMOTE_ADDR"]
== list(audit_log_entry)[0].ip_address
)
@patch("sentry.incidents.serializers.alert_rule.are_any_projects_error_upsampled")
def test_update_to_count_converts_internally_but_shows_count_on_upsampled_project(
self, mock_are_any_projects_error_upsampled
) -> None:
"""Test updating to count() converts to upsampled_count() internally but shows count() to user"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
# Mock that projects are upsampled
mock_are_any_projects_error_upsampled.return_value = True
alert_rule = self.alert_rule
serialized_alert_rule = self.get_serialized_alert_rule()
# Update to count() aggregate - should convert internally but return count() to user
serialized_alert_rule["aggregate"] = "count()"
serialized_alert_rule["dataset"] = "events"
serialized_alert_rule["name"] = "Updated to Count Rule"
with self.feature("organizations:incidents"), outbox_runner():
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
# User should see count() in response
assert resp.data["aggregate"] == "count()"
# But internally it should be stored as upsampled_count()
alert_rule.refresh_from_db()
assert (
alert_rule.snuba_query.aggregate == "upsampled_count()"
), "UPDATE should convert count() to upsampled_count() internally for upsampled projects"
@patch("sentry.incidents.serializers.alert_rule.are_any_projects_error_upsampled")
def test_update_non_aggregate_field_preserves_transparency_on_upsampled_project(
self, mock_are_any_projects_error_upsampled
) -> None:
"""Test updating non-aggregate fields maintains transparency of upsampled_count()"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
mock_are_any_projects_error_upsampled.return_value = True
# Manually set the existing alert rule to have upsampled_count() internally
self.alert_rule.snuba_query.aggregate = "upsampled_count()"
self.alert_rule.snuba_query.save()
original_aggregate = self.alert_rule.snuba_query.aggregate
alert_rule = self.alert_rule
serialized_alert_rule = self.get_serialized_alert_rule()
# Update only the name, not the aggregate
serialized_alert_rule["name"] = "Updated Name Only"
with self.feature("organizations:incidents"), outbox_runner():
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
# User should see count() even though it's stored as upsampled_count()
assert (
resp.data["aggregate"] == "count()"
), "UPDATE response should show count() to user, hiding internal upsampled_count() storage"
assert resp.data["name"] == "Updated Name Only"
# Internal storage should be unchanged
alert_rule.refresh_from_db()
assert alert_rule.snuba_query.aggregate == original_aggregate # Still upsampled_count()
def test_workflow_engine_serializer(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
ard = AlertRuleDetector.objects.get(alert_rule_id=self.alert_rule.id)
self.detector = Detector.objects.get(id=ard.detector_id)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["name"] = "what"
with (
self.feature("organizations:incidents"),
self.feature("organizations:workflow-engine-rule-serializers"),
outbox_runner(),
):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
alert_rule.name = "what"
alert_rule.date_modified = resp.data["dateModified"]
detector = Detector.objects.get(alertruledetector__alert_rule_id=alert_rule.id)
assert resp.data == serialize(detector, serializer=WorkflowEngineDetectorSerializer())
assert resp.data["name"] == "what"
assert resp.data["dateModified"] > serialized_alert_rule["dateModified"]
def test_not_updated_fields(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
existing_sub = self.alert_rule.snuba_query.subscriptions.first()
alert_rule.refresh_from_db()
# Alert rule should be exactly the same
assert resp.data == serialize(self.alert_rule)
# If the aggregate changed we'd have a new subscription, validate that
# it hasn't changed explicitly
updated_alert_rule = AlertRule.objects.get(id=self.alert_rule.id)
updated_sub = updated_alert_rule.snuba_query.subscriptions.get()
assert updated_sub.subscription_id == existing_sub.subscription_id
def test_update_trigger_label_to_unallowed_value(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"][0]["label"] = "goodbye"
with self.feature("organizations:incidents"):
resp = self.get_error_response(
self.organization.slug, alert_rule.id, status_code=400, **serialized_alert_rule
)
assert resp.data == {"nonFieldErrors": ['Trigger 1 must be labeled "critical"']}
serialized_alert_rule["triggers"][0]["label"] = "critical"
serialized_alert_rule["triggers"][1]["label"] = "goodbye"
resp = self.get_error_response(
self.organization.slug, alert_rule.id, status_code=400, **serialized_alert_rule
)
assert resp.data == {"nonFieldErrors": ['Trigger 2 must be labeled "warning"']}
def test_update_trigger_alert_threshold(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"][1]["alertThreshold"] = 125
serialized_alert_rule["name"] = "AUniqueName"
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert resp.data["name"] == "AUniqueName"
assert resp.data["triggers"][1]["alertThreshold"] == 125
def test_delete_resolve_alert_threshold(self) -> None:
# This is a test to make sure we can remove a resolveThreshold after it has been set.
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
alert_rule.update(resolve_threshold=75)
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["resolveThreshold"] = None
serialized_alert_rule["name"] = "AUniqueName"
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert resp.data["name"] == "AUniqueName"
assert resp.data["resolveThreshold"] is None
def test_update_resolve_alert_threshold(self) -> None:
# This is a test to make sure we can update a resolveThreshold after it has been set.
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
alert_rule.update(resolve_threshold=75)
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["resolveThreshold"] = 75
serialized_alert_rule["name"] = "AUniqueName"
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert resp.data["name"] == "AUniqueName"
assert resp.data["resolveThreshold"] == 75
def test_delete_trigger(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"].pop(1)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert len(resp.data["triggers"]) == 1
@mock.patch("sentry.incidents.serializers.alert_rule.dual_delete_migrated_alert_rule_trigger")
def test_dual_delete_trigger(self, mock_dual_delete: MagicMock) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"].pop(1)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert len(resp.data["triggers"]) == 1
# we test the logic for this method elsewhere, so just test that it's correctly called
assert mock_dual_delete.call_count == 1
def test_delete_trigger_dual_update_resolve(self) -> None:
"""
If there is no explicit resolve threshold on an alert rule, then we need to dual update the
comparison on the DataCondition corresponding to alert resolution.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule_dict = deepcopy(self.alert_rule_dict)
alert_rule_dict.update({"resolveThreshold": None})
alert_rule = self.new_alert_rule(data=alert_rule_dict)
serialized_alert_rule = self.get_serialized_alert_rule()
# the new resolution threshold should be the critical alert threshold
new_threshold = serialized_alert_rule["triggers"][0]["alertThreshold"]
old_threshold = serialized_alert_rule["triggers"][1]["alertThreshold"]
assert_dual_written_resolution_threshold_equals(alert_rule, old_threshold)
serialized_alert_rule["triggers"].pop(1)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert len(resp.data["triggers"]) == 1
assert_dual_written_resolution_threshold_equals(alert_rule, new_threshold)
def test_update_trigger_threshold_dual_update_resolve(self) -> None:
"""
If there is no explicit resolve threshold on an alert rule, then we need to dual update the
comparison on the DataCondition corresponding to alert resolution if trigger thresholds
are updated.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule_dict = deepcopy(self.alert_rule_dict)
alert_rule_dict.update({"resolveThreshold": None})
alert_rule = self.new_alert_rule(data=alert_rule_dict)
serialized_alert_rule = self.get_serialized_alert_rule()
# with no explicit resolve threshold, the resolution threshold follows the warning (lowest) trigger threshold
# original thresholds: critical = 200, warning = 150
old_threshold = serialized_alert_rule["triggers"][1]["alertThreshold"]
assert_dual_written_resolution_threshold_equals(alert_rule, old_threshold)
# TEST 1: if we update the critical trigger threshold, the resolve threshold shouldn't change
serialized_alert_rule["triggers"][0]["alertThreshold"] = 300
with self.feature("organizations:incidents"):
self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert_dual_written_resolution_threshold_equals(alert_rule, old_threshold)
# TEST 2: if we update the warning trigger threshold, the resolve threshold also changes
new_threshold = 100
serialized_alert_rule["triggers"][1]["alertThreshold"] = new_threshold
with self.feature("organizations:incidents"):
self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert_dual_written_resolution_threshold_equals(alert_rule, new_threshold)
def test_update_trigger_threshold_dual_update_resolve_noop(self) -> None:
"""
If there is an explicit resolve threshold on an alert rule, then updating triggers should
not affect the resolve action filter.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
serialized_alert_rule = self.get_serialized_alert_rule()
resolve_threshold = alert_rule.resolve_threshold
assert_dual_written_resolution_threshold_equals(alert_rule, resolve_threshold)
new_threshold = 125
serialized_alert_rule["triggers"][1]["alertThreshold"] = new_threshold
with self.feature("organizations:incidents"):
self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
# remains unchanged
assert_dual_written_resolution_threshold_equals(alert_rule, resolve_threshold)
def test_remove_resolve_threshold_dual_update_resolve(self) -> None:
"""
If we remove the resolve threshold from an alert rule, then we need to update the
resolve action filter according to the triggers.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
serialized_alert_rule = self.get_serialized_alert_rule()
resolve_threshold = alert_rule.resolve_threshold
assert_dual_written_resolution_threshold_equals(alert_rule, resolve_threshold)
serialized_alert_rule["resolveThreshold"] = None
new_threshold = serialized_alert_rule["triggers"][1]["alertThreshold"]
with self.feature("organizations:incidents"):
self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
# resolve threshold changes to the warning threshold
assert_dual_written_resolution_threshold_equals(alert_rule, new_threshold)
def test_dual_update_resolve_all_triggers_removed_and_recreated(self) -> None:
"""
If a PUT request is made via the API and the trigger IDs are not specified in the
request (as is usually the case), then the triggers + their actions are deleted and
recreated. Make sure that we can update the resolution threshold accordingly
in this case.
"""
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
test_params = self.valid_params.copy()
test_params["resolve_threshold"] = None
test_params["triggers"][0]["alertThreshold"] = 300
test_params["triggers"][1]["alertThreshold"] = 50
with self.feature("organizations:incidents"), outbox_runner():
self.get_success_response(self.organization.slug, self.alert_rule.id, **test_params)
# resolve threshold changes to the warning threshold
assert_dual_written_resolution_threshold_equals(self.alert_rule, 50)
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_anomaly_detection_alert_update_timeout(self, mock_seer_request: MagicMock) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
alert_rule = self.dynamic_alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
mock_seer_request.return_value = HTTPResponse(orjson.dumps({"success": True}), status=200)
data = self.get_serialized_alert_rule()
mock_seer_request.side_effect = TimeoutError
resp = self.get_error_response(
self.organization.slug,
alert_rule.id,
status_code=408,
**data,
)
assert resp.data["detail"]["message"] == "Proxied request timed out"
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_anomaly_detection_alert_update_max_retry(self, mock_seer_request: MagicMock) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
alert_rule = self.dynamic_alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
mock_seer_request.side_effect = MaxRetryError(
seer_anomaly_detection_connection_pool, SEER_ANOMALY_DETECTION_STORE_DATA_URL
)
data = self.get_serialized_alert_rule()
resp = self.get_error_response(
self.organization.slug,
alert_rule.id,
status_code=408,
**data,
)
assert resp.data["detail"]["message"] == "Proxied request timed out"
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_anomaly_detection_alert_update_other_error(self, mock_seer_request: MagicMock) -> None:
"""
Test the catch-all in case Seer returns something that we don't expect.
"""
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
alert_rule = self.dynamic_alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
mock_seer_request.side_effect = HTTPError
data = self.get_serialized_alert_rule()
resp = self.get_error_response(
self.organization.slug,
alert_rule.id,
status_code=400,
**data,
)
assert resp.data[0] == ErrorDetail(
string="Failed to send data to Seer - cannot update alert rule.", code="invalid"
)
assert mock_seer_request.call_count == 1
@with_feature("organizations:anomaly-detection-alerts")
@with_feature("organizations:incidents")
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def test_anomaly_detection_alert_update_validation_error(
self, mock_seer_request: MagicMock
) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
alert_rule = self.dynamic_alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
data = self.get_serialized_alert_rule()
data["timeWindow"] = 10
resp = self.get_error_response(
self.organization.slug,
alert_rule.id,
status_code=400,
**data,
)
assert resp.data[0] == INVALID_TIME_WINDOW
# We don't call send_historical_data_to_seer if we encounter a validation error.
assert mock_seer_request.call_count == 0
data2 = self.get_serialized_alert_rule()
data2["query"] = "is:unresolved"
resp = self.get_error_response(
self.organization.slug,
alert_rule.id,
status_code=400,
**data2,
)
assert resp.data[0] == "Dynamic alerts do not support 'is:unresolved' queries"
# We don't call send_historical_data_to_seer if we encounter a validation error.
assert mock_seer_request.call_count == 0
def test_delete_action(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"][1]["actions"].pop(1)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert len(resp.data["triggers"][1]["actions"]) == 1
# Delete the last one.
serialized_alert_rule["triggers"][1]["actions"].pop()
with self.feature("organizations:incidents"):
resp = self.get_error_response(
self.organization.slug, alert_rule.id, status_code=400, **serialized_alert_rule
)
assert resp.data == {
"nonFieldErrors": [
"Each trigger must have an associated action for this alert to fire."
]
}
@mock.patch(
"sentry.incidents.serializers.alert_rule_trigger.dual_delete_migrated_alert_rule_trigger_action"
)
def test_dual_delete_action(self, mock_dual_delete: MagicMock) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_action = serialized_alert_rule["triggers"][1]["actions"].pop(1)
action = AlertRuleTriggerAction.objects.get(id=serialized_action["id"])
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
assert len(resp.data["triggers"][1]["actions"]) == 1
# we test the logic for this method elsewhere, so just test that it's correctly called
assert mock_dual_delete.call_count == 1
assert mock_dual_delete.call_args_list[0][0][0] == action
def test_update_trigger_action_type(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
# Then we send it back with one of the actions changed:
serialized_alert_rule["triggers"][0]["actions"][0]["targetType"] = "user"
serialized_alert_rule["triggers"][0]["actions"][0]["targetIdentifier"] = self.user.id
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
# And it comes back successfully changed:
assert resp.data["triggers"][0]["actions"][0]["targetType"] == "user"
assert resp.data["triggers"][0]["actions"][0]["targetIdentifier"] == str(self.user.id)
# And make sure we still only have two triggers, the first with 1 action and the second with 2 actions
# This ensures they were updated and not new ones created, etc.
assert len(resp.data["triggers"]) == 2
assert len(resp.data["triggers"][0]["actions"]) == 1
assert len(resp.data["triggers"][1]["actions"]) == 2
def test_invalid_thresholds(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["triggers"][0]["alertThreshold"] = 50 # Invalid
serialized_alert_rule.pop("resolveThreshold")
with self.feature("organizations:incidents"):
self.get_error_response(
self.organization.slug, alert_rule.id, status_code=400, **serialized_alert_rule
)
def test_update_snapshot(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
# Archive the rule so that the endpoint 404s; without this it would 200 and the test would fail:
alert_rule.status = AlertRuleStatus.SNAPSHOT.value
alert_rule.save()
with self.feature("organizations:incidents"):
self.get_error_response(
self.organization.slug, alert_rule.id, status_code=404, **serialized_alert_rule
)
def test_no_owner(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["owner"] = None
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
alert_rule.refresh_from_db()
assert resp.data == serialize(alert_rule, self.user)
assert resp.data["owner"] is None
def test_team_permission(self) -> None:
# Test ensures you can only edit alerts owned by your team or no one.
om = self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
alert_rule.team = self.team
alert_rule.user_id = None
alert_rule.save()
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
OrganizationMemberTeam.objects.filter(
organizationmember__user_id=self.user.id,
team=self.team,
).delete()
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, alert_rule.id, **serialized_alert_rule)
assert resp.status_code == 200
self.create_team_membership(team=self.team, member=om)
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
alert_rule.refresh_from_db()
assert resp.data == serialize(alert_rule, self.user)
def test_change_name_of_existing_alert(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
test_params = self.valid_params.copy()
test_params["resolve_threshold"] = self.alert_rule.resolve_threshold
test_params.update({"name": "what"})
with self.feature("organizations:incidents"), outbox_runner():
resp = self.get_success_response(
self.organization.slug, self.alert_rule.id, **test_params
)
self.alert_rule.refresh_from_db()
self.alert_rule.name = "what"
self.alert_rule.snuba_query.refresh_from_db()
assert resp.data == serialize(self.alert_rule)
assert resp.data["name"] == "what"
# We validate that there's only been one change to the alert
with assume_test_silo_mode(SiloMode.CONTROL):
audit_log_entry = AuditLogEntry.objects.filter(
event=audit_log.get_event_id("ALERT_RULE_EDIT"), target_object=resp.data["id"]
)
assert len(audit_log_entry) == 1
def test_invalid_extrapolation_mode(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
alert_rule_dict = deepcopy(self.alert_rule_dict)
alert_rule_dict["dataset"] = "events_analytics_platform"
alert_rule_dict["alertType"] = "eap_metrics"
alert_rule_dict["extrapolation_mode"] = "server_weighted"
with self.feature("organizations:incidents"):
resp = self.get_error_response(
self.organization.slug, alert_rule.id, status_code=400, **alert_rule_dict
)
assert resp.data[0] == "Invalid extrapolation mode for this alert type."
| AlertRuleDetailsPutEndpointTest |
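The dual-write tests above all circle one rule; stated compactly below (inferred from these tests only, for threshold-above alerts, and heavily simplified relative to Sentry's real dual-write logic):

def effective_resolve_threshold(resolve_threshold, trigger_thresholds):
    # Explicit resolve threshold wins; otherwise resolution follows the lowest
    # remaining trigger threshold (the warning trigger while it exists).
    if resolve_threshold is not None:
        return resolve_threshold
    return min(trigger_thresholds)

assert effective_resolve_threshold(75, [200, 150]) == 75     # explicit value is kept
assert effective_resolve_threshold(None, [200, 150]) == 150  # follows the warning trigger
assert effective_resolve_threshold(None, [200]) == 200       # warning removed -> critical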
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 18183,
"end": 24118
} | class ____(VariableTracker):
# The ExceptionVariable corresponds to the BaseException class in Python
def __init__(
self, exc_type, args, init_kwargs=None, source=None, mutation_type=None
) -> None:
super().__init__(source=source, mutation_type=mutation_type)
self.exc_type = exc_type
self.args = args
if init_kwargs:
unimplemented(
gb_type="Keyword args passed to exception constructor",
context=f"{self} with kwargs {init_kwargs}",
explanation="Dynamo does not know how to handle keyword args passed to an exception constructor",
hints=[*graph_break_hints.SUPPORTABLE],
)
# When raising a new exception while another exception is already being
# handled, the new exception's __context__ attribute is automatically
# set to the handled exception.
self.__context__ = ConstantVariable(None)
# Set when user raised an exception from another:
# raise ... from ...
self.__cause__ = ConstantVariable(None)
# Boolean flag that controls whether the __context__ attribute is set
self.__suppress_context__ = ConstantVariable(False)
# Contains the call stack where the exception was raised. Dynamo does
# not track traceback. So, this variable is always set to None
self.__traceback__ = ConstantVariable(None)
def set_context(self, context: "ExceptionVariable"):
self.__context__ = context
def reconstruct(self, codegen: "PyCodegen"):
codegen.add_push_null(
lambda: codegen.load_import_from("builtins", self.exc_type.__name__)
)
codegen.foreach(self.args)
codegen.call_function(len(self.args), False)
def codegen_attr(name: str) -> None:
attr = getattr(self, name)
if istype(attr, ConstantVariable):
assert attr.value in (True, False, None), attr
else:
codegen.dup_top()
codegen(attr)
codegen.extend_output(codegen.rot_n(2))
codegen.store_attr(name)
codegen_attr("__context__")
codegen_attr("__cause__")
codegen_attr("__suppress_context__")
def python_type(self):
return self.exc_type
def call_setattr(
self,
tx: "InstructionTranslator",
name_var: VariableTracker,
val: VariableTracker,
):
def raise_error(msg):
raise_observed_exception(TypeError, tx, args=[ConstantVariable(msg)])
name = name_var.as_python_constant()
if name == "__context__":
self.set_context(val)
elif name == "__cause__":
if (isinstance(val, ConstantVariable) and val.value is None) or isinstance(
val,
(
variables.BuiltinVariable,
variables.ExceptionVariable,
variables.UserDefinedExceptionClassVariable,
variables.UserDefinedExceptionObjectVariable,
),
):
self.__cause__ = val
self.__suppress_context__ = variables.ConstantVariable(True)
else:
raise_error("exception cause must be None or derive from BaseException")
elif name == "__suppress_context__":
if isinstance(val, ConstantVariable) and val.value in (True, False):
self.__suppress_context__ = val
else:
raise_error("exception cause must be None or derive from BaseException")
elif name == "__traceback__":
if isinstance(val, ConstantVariable) and val.value is None:
self.__traceback__ = val
else:
unimplemented(
gb_type="Set Exception object `__traceback__` attribute to not-`None`",
context=f"call_setattr {self} {name}",
explanation="Dynamo does not support setting the attribute "
"'__traceback__' on tracked exception objects to anything "
"other than None.",
hints=[
"Avoid setting '__traceback__' on exception objects "
"within traced code, or set it to None."
],
)
else:
unimplemented(
gb_type="Unsupported attribute assignment on Exception object",
context=f"call_setattr {self} {name}",
explanation="Dynamo does not support setting the attribute "
f"'{name}' on tracked exception objects. Only `__context__`, "
"`__cause__`, `__suppress_context__`, and `__traceback__` are supported.",
hints=[*graph_break_hints.SUPPORTABLE],
)
return variables.ConstantVariable(None)
def call_method(self, tx, name, args, kwargs):
if name == "__setattr__":
return self.call_setattr(tx, *args)
elif name == "with_traceback":
[tb] = args
self.call_setattr(tx, ConstantVariable("__traceback__"), tb)
return self
else:
return super().call_method(tx, name, args, kwargs)
def var_getattr(self, tx, name):
if name == "__context__":
return self.__context__
elif name == "__cause__":
return self.__cause__
elif name == "__suppress_context__":
return self.__suppress_context__
elif name == "__traceback__":
return variables.ConstantVariable(None)
elif name == "args":
return variables.ListVariable(self.args, source=self.source)
return super().var_getattr(tx, name)
def __str__(self):
return f"{self.__class__.__name__}({self.exc_type})"
__repr__ = __str__
| ExceptionVariable |
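The __context__/__cause__/__suppress_context__ bookkeeping above mirrors CPython's own exception-chaining rules; as a plain-Python refresher of what is being emulated:

# Implicit chaining: raising inside an except block sets __context__ automatically.
try:
    try:
        raise KeyError("original")
    except KeyError:
        raise ValueError("while handling")
except ValueError as exc:
    assert isinstance(exc.__context__, KeyError)
    assert exc.__cause__ is None and exc.__suppress_context__ is False

# Explicit chaining: `raise ... from ...` sets __cause__ and suppresses the context.
try:
    try:
        raise KeyError("original")
    except KeyError as inner:
        raise ValueError("explicit") from inner
except ValueError as exc:
    assert isinstance(exc.__cause__, KeyError)
    assert exc.__suppress_context__ is True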
python | kubernetes-client__python | kubernetes/e2e_test/test_client.py | {
"start": 1603,
"end": 23885
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = base.get_e2e_configuration()
def test_pod_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'busybox-test-' + short_uuid()
pod_manifest = manifest_with_command(
name, "while true;do date;sleep 5; done")
# wait for the default service account to be created
timeout = time.time() + 30
while True:
if time.time() > timeout:
print('timeout waiting for default service account creation')
break
try:
resp = api.read_namespaced_service_account(name='default',
namespace='default')
except ApiException as e:
if (six.PY3 and e.status != HTTPStatus.NOT_FOUND) or (
six.PY3 is False and e.status != httplib.NOT_FOUND):
print('error: %s' % e)
self.fail(
msg="unexpected error getting default service account")
print('default service not found yet: %s' % e)
time.sleep(1)
continue
self.assertEqual('default', resp.metadata.name)
break
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase != 'Pending':
break
time.sleep(1)
exec_command = ['/bin/sh',
'-c',
'for i in $(seq 1 3); do date; done']
resp = stream(api.connect_get_namespaced_pod_exec, name, 'default',
command=exec_command,
stderr=False, stdin=False,
stdout=True, tty=False)
print('EXEC response : %s (%s)' % (repr(resp), type(resp)))
self.assertIsInstance(resp, str)
self.assertEqual(3, len(resp.splitlines()))
exec_command = ['/bin/sh',
'-c',
'echo -n "This is a test string" | gzip']
resp = stream(api.connect_get_namespaced_pod_exec, name, 'default',
command=exec_command,
stderr=False, stdin=False,
stdout=True, tty=False,
binary=True)
print('EXEC response : %s (%s)' % (repr(resp), type(resp)))
self.assertIsInstance(resp, bytes)
self.assertEqual("This is a test string", gzip.decompress(resp).decode('utf-8'))
exec_command = 'uptime'
resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
command=exec_command,
stderr=False, stdin=False,
stdout=True, tty=False)
print('EXEC response : %s' % repr(resp))
self.assertEqual(1, len(resp.splitlines()))
resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
command='/bin/sh',
stderr=True, stdin=True,
stdout=True, tty=False,
_preload_content=False)
resp.write_stdin("echo test string 1\n")
line = resp.readline_stdout(timeout=5)
self.assertFalse(resp.peek_stderr())
self.assertEqual("test string 1", line)
resp.write_stdin("echo test string 2 >&2\n")
line = resp.readline_stderr(timeout=5)
self.assertFalse(resp.peek_stdout())
self.assertEqual("test string 2", line)
resp.write_stdin("exit\n")
resp.update(timeout=5)
while True:
line = resp.read_channel(ERROR_CHANNEL)
if line != '':
break
time.sleep(1)
status = json.loads(line)
self.assertEqual(status['status'], 'Success')
resp.update(timeout=5)
self.assertFalse(resp.is_open())
resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
command='/bin/sh',
stderr=True, stdin=True,
stdout=True, tty=False,
binary=True,
_preload_content=False)
resp.write_stdin(b"echo test string 1\n")
line = resp.readline_stdout(timeout=5)
self.assertFalse(resp.peek_stderr())
self.assertEqual(b"test string 1", line)
resp.write_stdin(b"echo test string 2 >&2\n")
line = resp.readline_stderr(timeout=5)
self.assertFalse(resp.peek_stdout())
self.assertEqual(b"test string 2", line)
resp.write_stdin(b"exit\n")
resp.update(timeout=5)
while True:
line = resp.read_channel(ERROR_CHANNEL)
if len(line) != 0:
break
time.sleep(1)
status = json.loads(line)
self.assertEqual(status['status'], 'Success')
resp.update(timeout=5)
self.assertFalse(resp.is_open())
number_of_pods = len(api.list_pod_for_all_namespaces().items)
self.assertTrue(number_of_pods > 0)
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
def test_exit_code(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'busybox-test-' + short_uuid()
pod_manifest = manifest_with_command(
name, "while true;do date;sleep 5; done")
# wait for the default service account to be created
timeout = time.time() + 30
while True:
if time.time() > timeout:
print('timeout waiting for default service account creation')
break
try:
resp = api.read_namespaced_service_account(name='default',
namespace='default')
except ApiException as e:
if (six.PY3 and e.status != HTTPStatus.NOT_FOUND) or (
six.PY3 is False and e.status != httplib.NOT_FOUND):
print('error: %s' % e)
self.fail(
msg="unexpected error getting default service account")
print('default service not found yet: %s' % e)
time.sleep(1)
continue
self.assertEqual('default', resp.metadata.name)
break
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase == 'Running':
break
time.sleep(1)
commands_expected_values = (
(["false", 1]),
(["/bin/sh", "-c", "sleep 1; exit 3"], 3),
(["true", 0]),
(["/bin/sh", "-c", "ls /"], 0)
)
for command, value in commands_expected_values:
client = stream(
api.connect_get_namespaced_pod_exec,
name,
'default',
command=command,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False)
self.assertIsNone(client.returncode)
client.run_forever(timeout=10)
self.assertEqual(client.returncode, value)
self.assertEqual(client.returncode, value) # check idempotence
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
def test_portforward_raw(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
with open(os.path.join(os.path.dirname(__file__), 'port_server.py')) as fh:
port_server_py = fh.read()
name = 'portforward-raw-' + short_uuid()
resp = api.create_namespaced_config_map(
body={
'apiVersion': 'v1',
'kind': 'ConfigMap',
'metadata': {
'name': name,
},
'data': {
'port-server.py': port_server_py,
}
},
namespace='default',
)
resp = api.create_namespaced_pod(
body={
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': name
},
'spec': {
'containers': [
{
'name': 'port-server',
'image': 'python',
'command': [
'python', '-u', '/opt/port-server.py', '1234', '1235',
],
'volumeMounts': [
{
'name': 'port-server',
'mountPath': '/opt',
'readOnly': True,
},
],
'startupProbe': {
'tcpSocket': {
'port': 1235,
},
'periodSeconds': 1,
'failureThreshold': 30,
},
},
],
'restartPolicy': 'Never',
'volumes': [
{
'name': 'port-server',
'configMap': {
'name': name,
},
},
],
},
},
namespace='default',
)
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
timeout = time.time() + 60
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
if resp.status.phase == 'Running':
if resp.status.container_statuses[0].ready:
break
else:
self.assertEqual(resp.status.phase, 'Pending')
self.assertTrue(time.time() < timeout)
time.sleep(1)
for ix in range(10):
ix = str(ix + 1).encode()
pf = portforward(api.connect_get_namespaced_pod_portforward,
name, 'default',
ports='1234,1235,1236')
self.assertTrue(pf.connected)
sock1234 = pf.socket(1234)
sock1235 = pf.socket(1235)
sock1234.setblocking(True)
sock1235.setblocking(True)
sent1234 = b'Test ' + ix + b' port 1234 forwarding'
sent1235 = b'Test ' + ix + b' port 1235 forwarding'
sock1234.sendall(sent1234)
sock1235.sendall(sent1235)
reply1234 = b''
reply1235 = b''
timeout = time.time() + 60
while reply1234 != sent1234 or reply1235 != sent1235:
self.assertNotEqual(sock1234.fileno(), -1)
self.assertNotEqual(sock1235.fileno(), -1)
self.assertTrue(time.time() < timeout)
r, _w, _x = select.select([sock1234, sock1235], [], [], 1)
if sock1234 in r:
data = sock1234.recv(1024)
self.assertNotEqual(data, b'', 'Unexpected socket close')
reply1234 += data
self.assertTrue(sent1234.startswith(reply1234))
if sock1235 in r:
data = sock1235.recv(1024)
self.assertNotEqual(data, b'', 'Unexpected socket close')
reply1235 += data
self.assertTrue(sent1235.startswith(reply1235))
self.assertTrue(pf.connected)
sock = pf.socket(1236)
sock.setblocking(True)
self.assertEqual(sock.recv(1024), b'')
self.assertIsNotNone(pf.error(1236))
sock.close()
for sock in (sock1234, sock1235):
self.assertTrue(pf.connected)
sent = b'Another test ' + ix + b' using fileno ' + str(sock.fileno()).encode()
sock.sendall(sent)
reply = b''
timeout = time.time() + 60
while reply != sent:
self.assertNotEqual(sock.fileno(), -1)
self.assertTrue(time.time() < timeout)
r, _w, _x = select.select([sock], [], [], 1)
if r:
data = sock.recv(1024)
self.assertNotEqual(data, b'', 'Unexpected socket close')
reply += data
self.assertTrue(sent.startswith(reply))
sock.close()
time.sleep(1)
self.assertFalse(pf.connected)
self.assertIsNone(pf.error(1234))
self.assertIsNone(pf.error(1235))
resp = api.delete_namespaced_pod(name=name, namespace='default')
resp = api.delete_namespaced_config_map(name=name, namespace='default')
def test_portforward_http(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'portforward-http-' + short_uuid()
pod_manifest = {
'apiVersion': 'v1',
'kind': 'Pod',
'metadata': {
'name': name
},
'spec': {
'containers': [{
'name': 'nginx',
'image': 'nginx',
}]
}
}
resp = api.create_namespaced_pod(body=pod_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
while True:
resp = api.read_namespaced_pod(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status.phase)
if resp.status.phase != 'Pending':
break
time.sleep(1)
def kubernetes_create_connection(address, *args, **kwargs):
dns_name = address[0]
if isinstance(dns_name, bytes):
dns_name = dns_name.decode()
dns_name = dns_name.split(".")
if len(dns_name) != 3 or dns_name[2] != "kubernetes":
return socket_create_connection(address, *args, **kwargs)
pf = portforward(api.connect_get_namespaced_pod_portforward,
dns_name[0], dns_name[1], ports=str(address[1]))
return pf.socket(address[1])
socket_create_connection = socket.create_connection
try:
socket.create_connection = kubernetes_create_connection
response = urllib_request.urlopen(
'http://%s.default.kubernetes/' % name)
html = response.read().decode('utf-8')
finally:
socket.create_connection = socket_create_connection
self.assertEqual(response.code, 200)
self.assertTrue('<h1>Welcome to nginx!</h1>' in html)
resp = api.delete_namespaced_pod(name=name, body={},
namespace='default')
def test_service_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'frontend-' + short_uuid()
service_manifest = {'apiVersion': 'v1',
'kind': 'Service',
'metadata': {'labels': {'name': name},
'name': name,
'resourceversion': 'v1'},
'spec': {'ports': [{'name': 'port',
'port': 80,
'protocol': 'TCP',
'targetPort': 80}],
'selector': {'name': name}}}
resp = api.create_namespaced_service(body=service_manifest,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status)
resp = api.read_namespaced_service(name=name,
namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertTrue(resp.status)
service_manifest['spec']['ports'] = [{'name': 'new',
'port': 8080,
'protocol': 'TCP',
'targetPort': 8080}]
resp = api.patch_namespaced_service(body=service_manifest,
name=name,
namespace='default')
self.assertEqual(2, len(resp.spec.ports))
self.assertTrue(resp.status)
resp = api.delete_namespaced_service(name=name, body={},
namespace='default')
def test_replication_controller_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'frontend-' + short_uuid()
rc_manifest = {
'apiVersion': 'v1',
'kind': 'ReplicationController',
'metadata': {'labels': {'name': name},
'name': name},
'spec': {'replicas': 2,
'selector': {'name': name},
'template': {'metadata': {
'labels': {'name': name}},
'spec': {'containers': [{
'image': 'nginx',
'name': 'nginx',
'ports': [{'containerPort': 80,
'protocol': 'TCP'}]}]}}}}
resp = api.create_namespaced_replication_controller(
body=rc_manifest, namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = api.read_namespaced_replication_controller(
name=name, namespace='default')
self.assertEqual(name, resp.metadata.name)
self.assertEqual(2, resp.spec.replicas)
resp = api.delete_namespaced_replication_controller(
name=name, namespace='default', propagation_policy='Background')
def test_configmap_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
name = 'test-configmap-' + short_uuid()
test_configmap = {
"kind": "ConfigMap",
"apiVersion": "v1",
"metadata": {
"name": name,
"labels": {"e2e-tests": "true"},
},
"data": {
"config.json": "{\"command\":\"/usr/bin/mysqld_safe\"}",
"frontend.cnf": "[mysqld]\nbind-address = 10.0.0.3\nport = 3306\n"
}
}
resp = api.create_namespaced_config_map(
body=test_configmap, namespace='default'
)
self.assertEqual(name, resp.metadata.name)
resp = api.read_namespaced_config_map(
name=name, namespace='default')
self.assertEqual(name, resp.metadata.name)
json_patch_name = "json_patch_name"
json_patch_body = [{"op": "replace", "path": "/data",
"value": {"new_value": json_patch_name}}]
resp = api.patch_namespaced_config_map(
name=name, namespace='default', body=json_patch_body)
self.assertEqual(json_patch_name, resp.data["new_value"])
self.assertEqual(None, resp.data.get("config.json"))
self.assertEqual(None, resp.data.get("frontend.cnf"))
merge_patch_name = "merge_patch_name"
merge_patch_body = {"data": {"new_value": merge_patch_name}}
resp = api.patch_namespaced_config_map(
name=name, namespace='default', body=merge_patch_body)
self.assertEqual(merge_patch_name, resp.data["new_value"])
self.assertEqual(None, resp.data.get("config.json"))
self.assertEqual(None, resp.data.get("frontend.cnf"))
resp = api.delete_namespaced_config_map(
name=name, body={}, namespace='default')
resp = api.list_namespaced_config_map(
'default', pretty=True, label_selector="e2e-tests=true")
self.assertEqual([], resp.items)
def test_node_apis(self):
client = api_client.ApiClient(configuration=self.config)
api = core_v1_api.CoreV1Api(client)
for item in api.list_node().items:
node = api.read_node(name=item.metadata.name)
self.assertTrue(len(node.metadata.labels) > 0)
self.assertTrue(isinstance(node.metadata.labels, dict))
| TestClient |
python | huggingface__transformers | src/transformers/models/mlcd/modeling_mlcd.py | {
"start": 2293,
"end": 4096
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, dim: int, theta: float = 10000.0) -> None:
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:
"""
Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.
Args:
num_patches_height (int): Number of patches in the height dimension.
num_patches_width (int): Number of patches in the width dimension.
Returns:
torch.Tensor: Rotary positional embeddings for the given grid size.
"""
# Generate position IDs for height and width dimensions
hpos_ids = (
torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)
)
wpos_ids = (
torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1)
)
# Flatten and stack the position IDs
pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1)
# Generate the full rotary positional embeddings for the maximum grid size
max_grid_size = max(num_patches_height, num_patches_width)
seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
# Select and flatten the embeddings based on the position IDs
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
| MLCDRotaryEmbedding |
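A framework-free NumPy sketch of the same computation, only to make the shapes concrete (the function and argument names here are illustrative, not the model's API):

import numpy as np

def rope_2d(num_h: int, num_w: int, dim: int, theta: float = 10000.0) -> np.ndarray:
    inv_freq = 1.0 / (theta ** (np.arange(0, dim, 2) / dim))   # (dim/2,) frequencies
    hpos = np.repeat(np.arange(num_h), num_w)                  # row id of every patch
    wpos = np.tile(np.arange(num_w), num_h)                    # column id of every patch
    pos_ids = np.stack([hpos, wpos], axis=-1)                  # (num_patches, 2)
    seq = np.arange(max(num_h, num_w))
    full = np.outer(seq, inv_freq)                             # (max_grid, dim/2)
    # Gather the row/column frequencies per patch and flatten them side by side.
    return full[pos_ids].reshape(num_h * num_w, -1)            # (num_patches, dim)

emb = rope_2d(num_h=3, num_w=4, dim=8)
assert emb.shape == (3 * 4, 8)  # dim/2 = 4 frequencies per axis, two axes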
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 30719,
"end": 31674
} | class ____(ORMBaseModel):
"""An ORM representation of a block document reference."""
parent_block_document_id: UUID = Field(
default=..., description="ID of block document the reference is nested within"
)
parent_block_document: Optional[BlockDocument] = Field(
default=None, description="The block document the reference is nested within"
)
reference_block_document_id: UUID = Field(
default=..., description="ID of the nested block document"
)
reference_block_document: Optional[BlockDocument] = Field(
default=None, description="The nested block document"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
@model_validator(mode="before")
def validate_parent_and_ref_are_different(
cls, values: dict[str, Any]
) -> dict[str, Any]:
return validate_parent_and_ref_diff(values)
| BlockDocumentReference |
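validate_parent_and_ref_diff is imported from elsewhere in Prefect and not shown here; a plausible reading is that it rejects self-references. A stripped-down sketch of that kind of before-validator (the model, rule, and message below are illustrative assumptions, not Prefect's actual implementation):

from typing import Any
from uuid import UUID, uuid4

from pydantic import BaseModel, ValidationError, model_validator

class RefSketch(BaseModel):
    parent_block_document_id: UUID
    reference_block_document_id: UUID

    @model_validator(mode="before")
    @classmethod
    def _parent_and_ref_differ(cls, values: dict[str, Any]) -> dict[str, Any]:
        # Assumed rule: a block document must not reference itself.
        if values.get("parent_block_document_id") == values.get("reference_block_document_id"):
            raise ValueError("parent and reference block documents must differ")
        return values

a, b = uuid4(), uuid4()
RefSketch(parent_block_document_id=a, reference_block_document_id=b)  # accepted
try:
    RefSketch(parent_block_document_id=a, reference_block_document_id=a)
except ValidationError as exc:
    print(exc)  # rejected: parent and reference block documents must differ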
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 135222,
"end": 137439
} | class ____(TestCase):
@parametrize(
"dtype",
[
np.uint8,
np.int8,
np.int16,
np.int32,
np.int64,
np.float16,
np.float32,
np.float64,
],
)
def test_basic(self, dtype):
a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
assert_array_equal(a[idx], np.sort(a))
def test_mixed(self):
a = np.array([1, 2, 1, 3, 1, 5])
b = np.array([0, 4, 5, 6, 2, 3], dtype="datetime64[D]")
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
def test_datetime(self):
a = np.array([0, 0, 0], dtype="datetime64[D]")
b = np.array([2, 1, 0], dtype="datetime64[D]")
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0, 0, 0], dtype="timedelta64[D]")
b = np.array([2, 1, 0], dtype="timedelta64[D]")
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(["abc", "xy", "wz", "efghi", "qwst", "x"], 1000)
for u in a, b:
left = np.lexsort((u.astype("O"),))
right = np.argsort(u, kind="mergesort")
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype("O"), v)))
assert_array_equal(idx, np.lexsort((u, v.astype("O"))))
u, v = np.array(u, dtype="object"), np.array(v, dtype="object")
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0.0, 1.0, 42 * 3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
@skip(reason="dont worry about IO")
| TestLexsort |
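These tests all lean on np.lexsort's convention that the last key in the tuple is the primary sort key; spelled out with the data from test_basic:

import numpy as np

a = np.array([1, 2, 1, 3, 1, 5])  # primary key (last element of the tuple)
b = np.array([0, 4, 5, 6, 2, 3])  # secondary key, breaks ties among equal a-values
idx = np.lexsort((b, a))
# The three 1s in `a` sit at positions 0, 2, 4 and are ordered by b = 0, 5, 2 -> 0, 4, 2.
assert idx.tolist() == [0, 4, 2, 1, 3, 5]
assert a[idx].tolist() == sorted(a.tolist())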
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 12438,
"end": 13706
} | class ____(TestBaseAsyncSpiderMiddleware):
"""process_start tests for simple start"""
ITEM_TYPE = (Request, dict)
MW_SIMPLE = ProcessStartSimpleMiddleware
async def _get_processed_start(
self, *mw_classes: type[Any]
) -> AsyncIterator[Any] | None:
class TestSpider(Spider):
name = "test"
async def start(self):
for i in range(2):
yield Request(f"https://example.com/{i}", dont_filter=True)
yield {"name": "test item"}
setting = self._construct_mw_setting(*mw_classes)
self.crawler = get_crawler(
TestSpider, {"SPIDER_MIDDLEWARES_BASE": {}, "SPIDER_MIDDLEWARES": setting}
)
self.crawler.spider = self.crawler._create_spider()
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
return await self.mwman.process_start()
@deferred_f_from_coro_f
async def test_simple(self):
"""Simple mw"""
start = await self._get_processed_start(self.MW_SIMPLE)
assert isasyncgen(start)
start_list = await collect_asyncgen(start)
assert len(start_list) == self.RESULT_COUNT
assert isinstance(start_list[0], self.ITEM_TYPE)
| TestProcessStartSimple |
python | pytorch__pytorch | test/test_autograd.py | {
"start": 534303,
"end": 551182
} | class ____(TestCase):
@unittest.skipIf(not TEST_CUDA, "requires CUDA")
def test_flops_and_mem(self):
# From https://github.com/pytorch/pytorch/pull/126320
def get_act_mem(f):
out = f()
out.backward()
# Why do one forward and backward?
start_mem = torch.cuda.memory_stats()["requested_bytes.all.current"]
out = f()
cur_mem = torch.cuda.memory_stats()["requested_bytes.all.current"]
act_mem = (cur_mem - start_mem) / (1024 * 1024)
out.backward()
return act_mem
def get_bw_flops(f):
# Normalized so that a 512 square matmul returns 1
f().backward()
out = f()
# NB: FlopCounterMode is pushed onto the mode stack before CachedMode, so
# it will be able to observe whether an op is cached or not.
with FlopCounterMode(display=False) as mode:
out.backward()
return mode.get_total_flops() / (512**3 * 2)
x = torch.randn(512, 512, requires_grad=True, device="cuda")
y = torch.randn(512, 512, requires_grad=True, device="cuda")
def fn(x, y):
return torch.mm(x.cos(), y).sin().sum()
def fn_ac(x, y):
return checkpoint(fn, x, y, use_reentrant=False)
def fn_sac(x, y):
context_fn = functools.partial(
create_selective_checkpoint_contexts,
[torch.ops.aten.mm.default],
)
out = checkpoint(fn, x, y, use_reentrant=False, context_fn=context_fn)
return out
def policy_fn(ctx, op, *args, **kwargs):
if op == torch.ops.aten.mm.default:
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
def fn_sac2(x, y):
context_fn = functools.partial(
create_selective_checkpoint_contexts,
policy_fn,
)
out = checkpoint(fn, x, y, use_reentrant=False, context_fn=context_fn)
return out
def policy_fn_bool(ctx, op, *args, **kwargs):
return op == torch.ops.aten.mm.default
def fn_sac3(x, y):
context_fn = functools.partial(
create_selective_checkpoint_contexts,
policy_fn_bool,
)
out = checkpoint(fn, x, y, use_reentrant=False, context_fn=context_fn)
return out
act_mem_noac = get_act_mem(lambda: fn(x, y))
bw_flops_noac = get_bw_flops(lambda: fn(x, y))
self.assertEqual(act_mem_noac, 2.0)
self.assertEqual(bw_flops_noac, 2.0)
act_mem_ac = get_act_mem(lambda: fn_ac(x, y))
bw_flops_ac = get_bw_flops(lambda: fn_ac(x, y))
self.assertEqual(act_mem_ac, 0.0)
self.assertEqual(bw_flops_ac, 3.0)
act_mem_sac = get_act_mem(lambda: fn_sac(x, y))
bw_flops_sac = get_bw_flops(lambda: fn_sac(x, y))
self.assertEqual(act_mem_sac, 1.0)
self.assertEqual(bw_flops_sac, 2.0)
act_mem_sac2 = get_act_mem(lambda: fn_sac2(x, y))
bw_flops_sac2 = get_bw_flops(lambda: fn_sac2(x, y))
self.assertEqual(act_mem_sac2, 1.0)
self.assertEqual(bw_flops_sac2, 2.0)
act_mem_sac3 = get_act_mem(lambda: fn_sac3(x, y))
bw_flops_sac3 = get_bw_flops(lambda: fn_sac3(x, y))
self.assertEqual(act_mem_sac3, 1.0)
self.assertEqual(bw_flops_sac3, 2.0)
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_output_already_has_autograd_meta(self):
# View of tensor of non-differentiable dtype still has AutogradMeta
def fn(x, y):
return x.view(-1), y.sin().cos()
x = torch.tensor([1, 2, 3], dtype=torch.int64)
y = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts,
[torch.ops.aten.view.default],
)
out = checkpoint(fn, x, y, use_reentrant=False, context_fn=context_fn)
out[1].sum().backward()
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_subclass_dispatching_sizes(self):
# Test that we ignore ops that grab metadata like torch.ops.aten.sym_size.default
# Caching such metadata ops can be problematic when the following are satisfied:
#
# 1. size/strides are dispatched upon
# 2. our policy saves sizes
ta = torch.randn(6, 2)
class CustomSizeDynamicShapesTensor(torch.Tensor):
@staticmethod
def __new__(cls, inner):
return torch.Tensor._make_wrapper_subclass(
# TODO: right now, _make_wrapper_subclass's dynamic shape interaction is not great.
# Calling the overload that has kwargs causes us to go down the first overload path,
# which will **always** specialize sizes.
# We should probably eventually fix this so that the first overload can just handle dynamic shapes.
cls,
inner.size(),
inner.stride(),
None,
None,
inner.dtype,
inner.layout,
inner.device,
False,
inner.requires_grad,
"sizes",
)
def __init__(self, inner):
self.inner = inner
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
args_inner = torch.utils._pytree.tree_map_only(
cls, lambda x: x.inner, args
)
out_inner = func(*args_inner, **kwargs)
return torch.utils._pytree.tree_map_only(
torch.Tensor, lambda x: cls(x), out_inner
)
def policy_fn(ctx, op, *args, **kwargs):
if op is torch.ops.aten.sym_size.default:
# Silently ignored!
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
def fn(x):
# We avoid the following case
#
# saved :[4, 3], [], [], [4, 3], [4, 3], [4, 3], [12]
# forward :sum ,sum,mul, mul , mul ,view , view
# recompute :sum ,sum,mul, view , view
#
# Views save the shape of their input, so we expect the second
# view to save 12, but because during AC packing during forward
# saves the shapes of the input for metadata checks later,
# we would save the wrong shape during the recompute.
view_out = (x * x.sum()).view(-1).view(4, 3)
self.assertEqual(view_out.grad_fn._saved_self_sym_sizes, [12])
return view_out.exp()
x = torch.randn(4, 3, requires_grad=True)
x_wrapper = CustomSizeDynamicShapesTensor(x)
context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
out = checkpoint(fn, x_wrapper, use_reentrant=False, context_fn=context_fn)
out.sum().backward()
def test_bad_inputs(self):
bad_op_list1 = [2]
with self.assertRaisesRegex(
ValueError, "Expected op in `op_list` to be an OpOverload"
):
create_selective_checkpoint_contexts(bad_op_list1)
bad_op_list2 = [torch.ops.aten.sin]
with self.assertRaisesRegex(
ValueError, "update the OpOverloadPacket to a specific OpOverload"
):
create_selective_checkpoint_contexts(bad_op_list2)
with self.assertRaisesRegex(TypeError, "either a function or a list of ops."):
create_selective_checkpoint_contexts(2)
# Dynamo fails for various reasons:
# - some tests using custom op that does not implement Fake
# - dynamo is trying to trace into saved variable hooks unpack hook for some reason
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_policy_with_state(self):
# If I have a stateful callable, state is shared between the original
# forward and the recompute.
counters = []
class Policy:
def __init__(self) -> None:
self.counter = [0]
self.recompute_counter = [0]
def __call__(self, ctx, func, *args, **kwargs):
counter = self.recompute_counter if ctx.is_recompute else self.counter
counter[0] += 1
counters.append(counter[0])
if counter == 1 and func is torch.ops.aten.mm.default:
return CheckpointPolicy.MUST_SAVE
return CheckpointPolicy.PREFER_RECOMPUTE
def fn(x):
return x.sin().sin().sin()
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts,
Policy(),
allow_cache_entry_mutation=True,
)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
out.sum().backward()
# 1. counter properly reset to 0 for the recompute
# 2. due to early-stop we do not recompute the final op
self.assertEqual(counters, [1, 2, 3, 1, 2])
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_storage_lifetime(self):
from torch.utils._python_dispatch import _get_current_dispatch_mode
from torch.utils.checkpoint import (
_CachedTorchDispatchMode,
_CachingTorchDispatchMode,
)
def policy_fn(ctx, op, *args, **kwargs):
return CheckpointPolicy.MUST_SAVE
ref = None
def fn(x):
nonlocal ref
self.assertIsInstance(
_get_current_dispatch_mode(),
(_CachingTorchDispatchMode, _CachedTorchDispatchMode),
)
out = x.cos().exp()
if isinstance(_get_current_dispatch_mode(), _CachingTorchDispatchMode):
raw_val = (
_get_current_dispatch_mode()
.storage[torch.ops.aten.exp.default][0]
.val
)
# ref should've been detached
# to avoid graph -> the saved variable hooks -> recompute_context -> storage -> graph
self.assertFalse(raw_val.requires_grad)
ref = weakref.ref(raw_val)
# Careful for early-stop
return out.sin()
with disable_gc():
# Case 1: If graph goes away without backward, make sure there's no reference cycle
# keeping storage alive.
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts, policy_fn
)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
self.assertIsNotNone(ref())
del out
self.assertIsNone(ref())
# Case 2: After backward, even if retain_graph=True, the storage should go away
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts, policy_fn
)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
self.assertIsNotNone(ref())
out.sum().backward(retain_graph=True)
# The dispatch mode's storage should still be alive, but the entries should've
# been cleared.
self.assertIsNone(ref())
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_version_counter(self):
def policy_fn(ctx, op, *args, **kwargs):
if op == torch.ops.aten.sin.default:
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
def fn(x):
return x.sin().mul_(2).cos().exp()
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
# 1) Error because the output of sin is saved and mutated by mul_
with self.assertRaisesRegex(RuntimeError, "has been mutated"):
out.sum().backward()
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts,
policy_fn,
allow_cache_entry_mutation=True,
)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
# 2) No longer should be an error because of allow_cache_entry_mutation
out.sum().backward()
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_function_with_more_than_one_output(self):
# maybe there is a more systematic way:
counter = [0]
def policy_fn(ctx, op, *args, **kwargs):
if op == torch.ops.aten.var_mean.correction:
counter[0] += 1
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
# var_mean has two outputs
def fn(x):
a, b = torch.var_mean(x)
return a * b
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
x_grad = torch.autograd.grad(out.sum(), (x,))
x_grad_ref = torch.autograd.grad(fn(x).sum(), (x,))
self.assertEqual(x_grad, x_grad_ref)
self.assertEqual(counter[0], 2)
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_function_with_non_tensor_output(self):
# When SAC is enabled, the op is not computed a second time
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
counter = [0]
@torch.library.custom_op("mylib::sin_with_extra", mutates_args=())
def sin_with_extra(x: torch.Tensor) -> tuple[torch.Tensor, int]:
counter[0] += 1
return x.sin(), 2
def setup_context(ctx, inputs, output) -> torch.Tensor:
(x,) = inputs
ctx.save_for_backward(x)
def backward(ctx, grad, _unused):
(x,) = ctx.saved_tensors
return grad * x.cos()
torch.library.register_autograd(
"mylib::sin_with_extra", backward, setup_context=setup_context
)
x = torch.randn(3, requires_grad=True)
def fn(x):
return (torch.ops.mylib.sin_with_extra(x)[0] * x.sin().exp()).sin()
ops_list = [torch.ops.mylib.sin_with_extra.default]
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(
create_selective_checkpoint_contexts, ops_list
)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
x_grad = torch.autograd.grad(out.sum(), (x,))
self.assertEqual(counter[0], 1)
x_grad_ref = torch.autograd.grad(fn(x).sum(), (x,))
self.assertEqual(x_grad, x_grad_ref)
@skipIfTorchDynamo("compile tested in test/dynamo/test_activation_checkpointing.py")
def test_can_only_trigger_recompute_once(self):
# We don't support this to avoid adding extra complexity for now.
# If there's a need, we could probably do some kind of use_count tracking.
# TODO: have a nice error message here.
def policy_fn(ctx, op, *args, **kwargs):
if op == torch.ops.aten.sin.default:
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
def fn(x):
return x.sin().cos().exp()
x = torch.randn(3, requires_grad=True)
context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
out = checkpoint(fn, x, use_reentrant=False, context_fn=context_fn)
out.sum().backward(retain_graph=True)
with self.assertRaisesRegex(RuntimeError, "Trying to backward an extra time"):
out.sum().backward(retain_graph=True)
| TestSelectiveActivationCheckpoint |
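For context, here is a minimal sketch of the selective activation checkpointing pattern the tests above exercise. It assumes checkpoint, create_selective_checkpoint_contexts and CheckpointPolicy can be imported from torch.utils.checkpoint, as in the test module.

import functools

import torch
from torch.utils.checkpoint import (
    CheckpointPolicy,
    checkpoint,
    create_selective_checkpoint_contexts,
)


def policy_fn(ctx, op, *args, **kwargs):
    # Save matmul outputs during the forward pass; recompute everything else in backward.
    if op == torch.ops.aten.mm.default:
        return CheckpointPolicy.MUST_SAVE
    return CheckpointPolicy.PREFER_RECOMPUTE


def fn(x, y):
    return torch.mm(x.cos(), y).sin().sum()


x = torch.randn(64, 64, requires_grad=True)
y = torch.randn(64, 64, requires_grad=True)
context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
out = checkpoint(fn, x, y, use_reentrant=False, context_fn=context_fn)
out.backward()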
python | numba__numba | numba/tests/test_jitmethod.py | {
"start": 109,
"end": 1284
} | class ____(unittest.TestCase):
def test_bound_jit_method_with_loop_lift(self):
class Something(object):
def __init__(self, x0):
self.x0 = x0
@jit(forceobj=True)
def method(self, x):
a = np.empty(shape=5, dtype=np.float32)
x0 = self.x0
for i in range(a.shape[0]):
a[i] = x0 * x
return a
something = Something(3)
np.testing.assert_array_equal(something.method(5),
np.array([15, 15, 15, 15, 15], dtype=np.float32))
# Check that loop lifting in nopython mode was successful
[cres] = something.method.overloads.values()
jitloop = cres.lifted[0]
[loopcres] = jitloop.overloads.values()
self.assertTrue(loopcres.fndesc.native)
def test_unbound_jit_method(self):
class Something(object):
def __init__(self, x0):
self.x0 = x0
@jit(forceobj=True)
def method(self):
return self.x0
something = Something(3)
self.assertEqual(Something.method(something), 3)
| TestJITMethod |
python | coleifer__peewee | peewee.py | {
"start": 160237,
"end": 160302
} | class ____(IntegerField):
field_type = 'BIGINT'
| BigIntegerField |
python | doocs__leetcode | solution/0000-0099/0080.Remove Duplicates from Sorted Array II/Solution.py | {
"start": 0,
"end": 219
} | class ____:
def removeDuplicates(self, nums: List[int]) -> int:
k = 0
for x in nums:
if k < 2 or x != nums[k - 2]:
nums[k] = x
k += 1
return k
| Solution |
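A usage sketch of the two-pointer solution above (assuming from typing import List is in scope for the annotation):

nums = [1, 1, 1, 2, 2, 3]
k = Solution().removeDuplicates(nums)
print(k, nums[:k])  # 5 [1, 1, 2, 2, 3] -- each value kept at most twice, in place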
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/__init__.py | {
"start": 5345,
"end": 5551
} | class ____:
def __init__(self, *args, **kwargs):
output = kwargs.get("output", ["" for _ in range(10)])
self.readline = MagicMock(side_effect=[line.encode() for line in output])
| MockStdOut |
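An illustrative use of the scripted stdout above (assuming MagicMock is imported from unittest.mock, as the class requires). Each call to readline returns the next encoded line from the side_effect list:

out = MockStdOut(output=["line one", "line two"])
print(out.readline())  # b'line one'
print(out.readline())  # b'line two'
# A further call raises StopIteration once the side_effect list is exhausted.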
python | huggingface__transformers | src/transformers/models/cpmant/modeling_cpmant.py | {
"start": 6687,
"end": 8995
} | class ____(nn.Module):
def __init__(self, config: CpmAntConfig, layer_idx=None):
super().__init__()
self.layernorm_before_attention = CpmAntLayerNorm(config)
self.self_attention = CpmAntAttention(config, layer_idx=layer_idx)
if config.dropout_p:
self.dropout = torch.nn.Dropout(config.dropout_p)
else:
self.dropout = None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_bias: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
):
"""
Args:
hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
Input of transformer block (self-attention block). It can be the raw embedding of a batch of sequences.
attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
Avoid invalid areas to participate in the calculation of self-attention.
position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
Provide positional information to self-attention block.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
past_key_values (`Cache`, *optional*):
Cached past key and value projection states.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
"""
outputs = self.layernorm_before_attention(hidden_states)
outputs, attn_weights = self.self_attention(
outputs,
outputs,
attention_mask,
position_bias,
output_attentions,
past_key_values,
use_cache,
cache_position,
)
if self.dropout is not None:
outputs = self.dropout(outputs)
hidden_states = hidden_states + outputs
return hidden_states, attn_weights
| CpmAntSelfAttentionBlock |
python | kamyu104__LeetCode-Solutions | Python/recover-a-tree-from-preorder-traversal.py | {
"start": 218,
"end": 1041
} | class ____(object):
def recoverFromPreorder(self, S):
"""
:type S: str
:rtype: TreeNode
"""
i = 0
stack = []
while i < len(S):
level = 0
while i < len(S) and S[i] == '-':
level += 1
i += 1
while len(stack) > level:
stack.pop()
val = []
while i < len(S) and S[i] != '-':
val.append(S[i])
i += 1
node = TreeNode(int("".join(val)))
if stack:
if stack[-1].left is None:
stack[-1].left = node
else:
stack[-1].right = node
stack.append(node)
return stack[0]
# Time: O(n)
# Space: O(h)
# recursive solution
| Solution |
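A usage sketch for the stack-based parser above, assuming the usual LeetCode TreeNode(val) definition with .left/.right attributes is available in the same module:

root = Solution().recoverFromPreorder("1-2--3--4-5--6--7")
print(root.val, root.left.val, root.right.val)        # 1 2 5
print(root.left.left.val, root.left.right.val)        # 3 4
print(root.right.left.val, root.right.right.val)      # 6 7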
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 4486,
"end": 5296
} | class ____(Operation):
def call(self, x):
return backend.numpy.absolute(x)
def compute_output_spec(self, x):
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.absolute", "keras.ops.numpy.absolute"])
def absolute(x):
"""Compute the absolute value element-wise.
`keras.ops.abs` is a shorthand for this function.
Args:
x: Input tensor.
Returns:
An array containing the absolute value of each element in `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-1.2, 1.2])
>>> keras.ops.absolute(x)
array([1.2, 1.2], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Absolute().symbolic_call(x)
return backend.numpy.absolute(x)
| Absolute |
python | huggingface__transformers | tests/models/qwen2_vl/test_video_processing_qwen2_vl.py | {
"start": 4551,
"end": 17573
} | class ____(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = Qwen2VLVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = Qwen2VLVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_properties(self):
video_processing = self.fast_video_processing_class(**self.video_processor_dict)
self.assertTrue(hasattr(video_processing, "do_resize"))
self.assertTrue(hasattr(video_processing, "size"))
self.assertTrue(hasattr(video_processing, "do_normalize"))
self.assertTrue(hasattr(video_processing, "image_mean"))
self.assertTrue(hasattr(video_processing, "image_std"))
self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
# OVERRIDDEN BECAUSE QWEN2_VL HAS SPECIAL OUTPUT SHAPES
def test_video_processor_from_dict_with_kwargs(self):
for video_processing_class in self.video_processor_list:
video_processor = video_processing_class(**self.video_processor_dict)
self.assertEqual(video_processor.min_pixels, self.video_processor_tester.min_pixels)
self.assertEqual(video_processor.max_pixels, self.video_processor_tester.max_pixels)
video_processor = video_processing_class.from_dict(
self.video_processor_dict, min_pixels=256 * 256, max_pixels=640 * 640
)
self.assertEqual(video_processor.min_pixels, 256 * 256)
self.assertEqual(video_processor.max_pixels, 640 * 640)
def test_call_pil(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="pil"
)
# Each video is a list of PIL Images
for video in video_inputs:
self.assertIsInstance(video[0], Image.Image)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
def test_call_numpy(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random numpy tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
for video in video_inputs:
self.assertIsInstance(video, np.ndarray)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
def test_call_pytorch(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random PyTorch tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="torch"
)
for video in video_inputs:
self.assertIsInstance(video, torch.Tensor)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
self.assertEqual(
list(encoded_videos.shape),
expected_output_video_shape,
)
def test_nested_input(self):
"""Tests that the processor can work with nested list where each video is a list of arrays"""
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
# Test not batched input
video_inputs_nested = [list(video) for video in video_inputs]
encoded_videos = video_processing(video_inputs_nested[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs_nested, return_tensors="pt")[self.input_name]
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
@unittest.skip("Skip for now, the test needs adjustment fo Qwen2VL")
def test_call_numpy_4_channels(self):
for video_processing_class in self.video_processor_list:
# Test that can process videos which have an arbitrary number of channels
# Initialize video_processing
video_processor = video_processing_class(**self.video_processor_dict)
# create random numpy tensors
self.video_processor_tester.num_channels = 4
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
# Test not batched input
encoded_videos = video_processor(
video_inputs[0],
return_tensors="pt",
input_data_format="channels_last",
image_mean=(0.0, 0.0, 0.0, 0.0),
image_std=(1.0, 1.0, 1.0, 1.0),
)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
# Test batched
encoded_videos = video_processor(
video_inputs,
return_tensors="pt",
input_data_format="channels_last",
image_mean=(0.0, 0.0, 0.0, 0.0),
image_std=(1.0, 1.0, 1.0, 1.0),
)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
def test_call_sample_frames(self):
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
prev_num_frames = self.video_processor_tester.num_frames
self.video_processor_tester.num_frames = 8
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False,
return_tensors="torch",
)
# Force set sampling to False. No sampling is expected even when `num_frames` exists
video_processing.do_sample_frames = False
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
# Set sampling to True. Video frames should be sampled with `num_frames` in the output
video_processing.do_sample_frames = True
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=4)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=4)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(
[video_inputs[0]], num_frames=4
)
expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(
video_inputs, num_frames=4
)
self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
batched_metadata = metadata * len(video_inputs)
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
self.input_name
]
encoded_videos_batched = video_processing(
video_inputs, return_tensors="pt", fps=3, video_metadata=batched_metadata
)[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(
[video_inputs[0]], num_frames=6
)
expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(
video_inputs, num_frames=6
)
self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
# We should raise error when asked to sample more frames than there are in input video
with self.assertRaises(ValueError):
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=10)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=10)[
self.input_name
]
# Assign back the actual num frames in tester
self.video_processor_tester.num_frames = prev_num_frames
def test_num_frames_equal_temporal_patch_size_plus_two(self):
for video_processing_class in self.video_processor_list:
video_processor_dict = self.video_processor_dict.copy()
video_processor_dict["size"] = {"longest_edge": 5 * 28 * 28, "shortest_edge": 28 * 28}
video_processor_dict["do_sample_frames"] = False
temporal_patch_size = 3
video_processor_dict["temporal_patch_size"] = temporal_patch_size
video_processing = video_processing_class(**video_processor_dict)
n, w, h = 5, 28, 28
video_inputs = [(np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)) for _ in range(n)]
video_processed = video_processing(video_inputs, return_tensors="pt")
encoded_videos = video_processed[self.input_name]
self.assertEqual(list(encoded_videos.shape), [8, temporal_patch_size * 3 * 14 * 14])
video_grid_thw = video_processed["video_grid_thw"]
self.assertEqual(video_grid_thw.tolist(), [[2, 2, 2]])
| Qwen2VLVideoProcessingTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/gcs_to_gcs.py | {
"start": 1277,
"end": 26015
} | class ____(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:param source_objects: A list of source name of the objects to copy in the Google cloud
storage bucket. (templated)
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:param destination_object: The destination name of the object in the
destination Google Cloud Storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
The same thing applies to source objects inside source_objects.
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param replace: Whether you want to replace existing destination files or not.
:param delimiter: (Deprecated) This is used to restrict the result to only the 'files' in a given
'folder'. If source_objects = ['foo/bah/'] and delimiter = '.avro', then only the 'files' in the
folder 'foo/bah/' with '.avro' delimiter will be copied to the destination object.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param last_modified_time: When specified, the objects will be copied or moved,
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:param maximum_modified_time: When specified, the objects will be copied or moved,
only if they were modified before maximum_modified_time.
If tzinfo has not been set, UTC will be assumed.
:param is_older_than: When specified, the objects will be copied if they are older
than the specified time in seconds.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param source_object_required: Whether you want to raise an exception when the source object
doesn't exist. It doesn't have any effect when the source objects are folders or patterns.
:param exact_match: When specified, only exact match of the source object (filename) will be
copied.
:param match_glob: (Optional) filters objects based on the glob pattern given by the string (
e.g., ``'**/*.json'``)
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GCSToGCSOperator(
task_id="copy_single_file",
source_bucket="data",
source_objects=["sales/sales-2017/january.avro"],
destination_bucket="data_backup",
destination_object="copied_sales/2017/january-backup.avro",
exact_match=True,
gcp_conn_id=google_cloud_conn_id,
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. all files with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_objects=['sales/sales-2017'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
match_glob='**/*.avro',
gcp_conn_id=google_cloud_conn_id
)
Or ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. all files with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GCSToGCSOperator(
task_id="move_files",
source_bucket="data",
source_object="sales/sales-2017/*.avro",
destination_bucket="data_backup",
move_object=True,
gcp_conn_id=google_cloud_conn_id,
)
The following Operator would move all the Avro files from ``sales/sales-2019``
and ``sales/sales-2020`` folder in ``data`` bucket to the same folder in the
``data_backup`` bucket, deleting the original files in the process. ::
move_files = GCSToGCSOperator(
task_id="move_files",
source_bucket="data",
source_objects=["sales/sales-2019/*.avro", "sales/sales-2020"],
destination_bucket="data_backup",
delimiter=".avro",
move_object=True,
gcp_conn_id=google_cloud_conn_id,
)
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"source_objects",
"destination_bucket",
"destination_object",
"delimiter",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
source_bucket,
source_object=None,
source_objects=None,
destination_bucket=None,
destination_object=None,
delimiter=None,
move_object=False,
replace=True,
gcp_conn_id="google_cloud_default",
last_modified_time=None,
maximum_modified_time=None,
is_older_than=None,
impersonation_chain: str | Sequence[str] | None = None,
source_object_required=False,
exact_match=False,
match_glob: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.source_bucket = source_bucket
if source_object and WILDCARD in source_object:
warnings.warn(
"Usage of wildcard (*) in 'source_object' is deprecated, utilize 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.source_object = source_object
if source_objects and any(WILDCARD in obj for obj in source_objects):
warnings.warn(
"Usage of wildcard (*) in 'source_objects' is deprecated, utilize 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.source_objects = source_objects
self.destination_bucket = destination_bucket
self.destination_object = destination_object
if delimiter:
warnings.warn(
"Usage of 'delimiter' is deprecated, please use 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.delimiter = delimiter
self.move_object = move_object
self.replace = replace
self.gcp_conn_id = gcp_conn_id
self.last_modified_time = last_modified_time
self.maximum_modified_time = maximum_modified_time
self.is_older_than = is_older_than
self.impersonation_chain = impersonation_chain
self.source_object_required = source_object_required
self.exact_match = exact_match
self.match_glob = match_glob
def execute(self, context: Context):
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.source_objects and self.source_object:
error_msg = (
f"You can either set source_object parameter or source_objects parameter but not both. "
f"Found source_object={self.source_object} and source_objects={self.source_objects}"
)
raise AirflowException(error_msg)
if not self.source_object and not self.source_objects:
error_msg = "You must set source_object parameter or source_objects parameter. None set"
raise AirflowException(error_msg)
if self.source_objects and not all(isinstance(item, str) for item in self.source_objects):
raise AirflowException("At least, one of the `objects` in the `source_objects` is not a string")
# If source_object is set, default it to source_objects
if self.source_object:
self.source_objects = [self.source_object]
if self.destination_bucket is None:
self.log.warning(
"destination_bucket is None. Defaulting it to source_bucket (%s)", self.source_bucket
)
self.destination_bucket = self.source_bucket
# An empty source_object means to copy all files
if len(self.source_objects) == 0:
self.source_objects = [""]
# Raise exception if empty string `''` is used twice in source_object, this is to avoid double copy
if self.source_objects.count("") > 1:
raise AirflowException("You can't have two empty strings inside source_object")
# Iterate over the source_objects and do the copy
for prefix in self.source_objects:
# Check if prefix contains wildcard
if WILDCARD in prefix:
self._copy_source_with_wildcard(hook=hook, prefix=prefix)
# Now search with prefix using provided delimiter if any
else:
self._copy_source_without_wildcard(hook=hook, prefix=prefix)
def _ignore_existing_files(self, hook, prefix, **kwargs):
# list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
delimiter = kwargs.get("delimiter")
match_glob = kwargs.get("match_glob")
objects = kwargs.get("objects")
if self.destination_object is None:
existing_objects = hook.list(
self.destination_bucket, prefix=prefix, delimiter=delimiter, match_glob=match_glob
)
else:
self.log.info("Replaced destination_object with source_object prefix.")
destination_objects = hook.list(
self.destination_bucket,
prefix=self.destination_object,
delimiter=delimiter,
match_glob=match_glob,
)
existing_objects = [
dest_object.replace(self.destination_object, prefix, 1) for dest_object in destination_objects
]
objects = list(set(objects) - set(existing_objects))
if objects:
self.log.info("%s files are going to be synced: %s.", len(objects), objects)
else:
self.log.info("There are no new files to sync. Have a nice day!")
return objects
def _copy_source_without_wildcard(self, hook, prefix):
"""
List all files in source_objects, copy files to destination_object, and rename each source file.
For source_objects with no wildcard, this operator would first list
all files in source_objects, using provided delimiter if any. Then copy
files from source_objects to destination_object and rename each source
file. Note that if the flag exact_match=False, then each item in the source_objects
(or source_object itself) will be considered as a prefix for the source objects search.
Example 1:
The following Operator would copy all the files from ``a/`` folder
(i.e a/a.csv, a/b.csv, a/c.csv) in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::
copy_files = GCSToGCSOperator(
task_id="copy_files_without_wildcard",
source_bucket="data",
source_objects=["a/"],
destination_bucket="data_backup",
destination_object="b/",
gcp_conn_id=google_cloud_conn_id,
)
Example 2:
The following Operator would copy all avro files from ``a/`` folder
(i.e a/a.avro, a/b.avro, a/c.avro) in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::
copy_files = GCSToGCSOperator(
task_id="copy_files_without_wildcard",
source_bucket="data",
source_objects=["a/"],
destination_bucket="data_backup",
destination_object="b/",
delimiter=".avro",
gcp_conn_id=google_cloud_conn_id,
)
Example 3:
The following Operator would copy files (a/file_1.txt, a/file_2.csv, a/file_3.avro)
in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/file_1.txt, b/file_2.csv, b/file_3.avro) ::
copy_files = GCSToGCSOperator(
task_id="copy_files_without_wildcard",
source_bucket="data",
source_objects=["a/file_1.txt", "a/file_2.csv", "a/file_3.avro"],
destination_bucket="data_backup",
destination_object="b/",
gcp_conn_id=google_cloud_conn_id,
)
Example 4:
The following Operator would copy files corresponding to the prefix 'a/foo.txt'
(a/foo.txt, a/foo.txt.abc, a/foo.txt/subfolder/file.txt) in ``data`` bucket to
the ``b/`` folder in the ``data_backup`` bucket
(b/foo.txt, b/foo.txt.abc, b/foo.txt/subfolder/file.txt) ::
copy_files = GCSToGCSOperator(
task_id="copy_files_without_wildcard",
source_bucket="data",
source_object="a/foo.txt",
destination_bucket="data_backup",
destination_object="b/",
gcp_conn_id=google_cloud_conn_id,
)
"""
objects = hook.list(
self.source_bucket, prefix=prefix, delimiter=self.delimiter, match_glob=self.match_glob
)
objects = [obj for obj in objects if self._check_exact_match(obj, prefix)]
if not self.replace:
# If we are not replacing, ignore files that already exist in the destination bucket
objects = self._ignore_existing_files(
hook, prefix, objects=objects, delimiter=self.delimiter, match_glob=self.match_glob
)
# If objects is empty, and we have prefix, let's check if prefix is a blob
# and copy directly
if len(objects) == 0 and prefix:
if hook.exists(self.source_bucket, prefix):
self._copy_single_object(
hook=hook, source_object=prefix, destination_object=self.destination_object
)
elif self.source_object_required:
msg = f"{prefix} does not exist in bucket {self.source_bucket}"
self.log.warning(msg)
raise AirflowException(msg)
if len(objects) == 1 and objects[0][-1] != "/":
self._copy_file(hook=hook, source_object=objects[0])
elif len(objects):
self._copy_multiple_objects(hook=hook, source_objects=objects, prefix=prefix)
def _copy_file(self, hook, source_object):
destination_object = self.destination_object or source_object
if self.destination_object and self.destination_object[-1] == "/":
file_name = source_object.split("/")[-1]
destination_object += file_name
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_multiple_objects(self, hook, source_objects, prefix):
# Check whether the prefix is a root directory for all the rest of objects.
_pref = prefix.rstrip("/")
is_directory = prefix.endswith("/") or all(
obj.replace(_pref, "", 1).startswith("/") for obj in source_objects
)
if is_directory:
base_path = prefix.rstrip("/") + "/"
else:
base_path = prefix[0 : prefix.rfind("/") + 1] if "/" in prefix else ""
for source_obj in source_objects:
if not self._check_exact_match(source_obj, prefix):
continue
if self.destination_object is None:
destination_object = source_obj
else:
file_name_postfix = source_obj.replace(base_path, "", 1)
destination_object = self.destination_object.rstrip("/") + "/" + file_name_postfix
self._copy_single_object(
hook=hook, source_object=source_obj, destination_object=destination_object
)
def _check_exact_match(self, source_object: str, prefix: str) -> bool:
"""Check whether source_object's name matches the prefix according to the exact_match flag."""
if self.exact_match and (source_object != prefix or not source_object.endswith(prefix)):
return False
return True
def _copy_source_with_wildcard(self, hook, prefix):
total_wildcards = prefix.count(WILDCARD)
if total_wildcards > 1:
error_msg = (
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {prefix}."
)
raise AirflowException(error_msg)
self.log.info("Delimiter ignored because wildcard is in prefix")
prefix_, delimiter = prefix.split(WILDCARD, 1)
objects = hook.list(self.source_bucket, prefix=prefix_, delimiter=delimiter)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# match_glob = f"**/*{delimiter}" if delimiter else None
# objects = hook.list(self.source_bucket, prefix=prefix_, match_glob=match_glob)
if not self.replace:
# If we are not replacing, list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
objects = self._ignore_existing_files(hook, prefix_, delimiter=delimiter, objects=objects)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# objects = self._ignore_existing_files(hook, prefix_, match_glob=match_glob, objects=objects)
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix_, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_single_object(self, hook, source_object, destination_object):
if self.is_older_than:
# Here we check if the given object is older than the given time
# If given, last_modified_time and maximum_modified_time is ignored
if hook.is_older_than(self.source_bucket, source_object, self.is_older_than):
self.log.info("Object is older than %s seconds ago", self.is_older_than)
else:
self.log.debug("Object is not older than %s seconds ago", self.is_older_than)
return
elif self.last_modified_time and self.maximum_modified_time:
# check to see if object was modified between last_modified_time and
# maximum_modified_time
if hook.is_updated_between(
self.source_bucket, source_object, self.last_modified_time, self.maximum_modified_time
):
self.log.info(
"Object has been modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
else:
self.log.debug(
"Object was not modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
return
elif self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket, source_object, self.last_modified_time):
self.log.info("Object has been modified after %s ", self.last_modified_time)
else:
self.log.debug("Object was not modified after %s ", self.last_modified_time)
return
elif self.maximum_modified_time is not None:
# Check to see if object was modified before maximum_modified_time
if hook.is_updated_before(self.source_bucket, source_object, self.maximum_modified_time):
self.log.info("Object has been modified before %s ", self.maximum_modified_time)
else:
self.log.debug("Object was not modified before %s ", self.maximum_modified_time)
return
self.log.info(
"Executing copy of gs://%s/%s to gs://%s/%s",
self.source_bucket,
source_object,
self.destination_bucket,
destination_object,
)
hook.rewrite(self.source_bucket, source_object, self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
def get_openlineage_facets_on_complete(self, task_instance):
"""
Implement _on_complete because execute method does preprocessing on internals.
This means we won't have to normalize self.source_object and self.source_objects,
destination bucket and so on.
"""
from airflow.providers.common.compat.openlineage.facet import Dataset
from airflow.providers.google.cloud.openlineage.utils import extract_ds_name_from_gcs_path
from airflow.providers.openlineage.extractors import OperatorLineage
inputs = [extract_ds_name_from_gcs_path(path) for path in self.source_objects]
if self.destination_object is None:
outputs = inputs.copy()
else:
outputs = [extract_ds_name_from_gcs_path(self.destination_object)]
return OperatorLineage(
inputs=[
Dataset(namespace=f"gs://{self.source_bucket}", name=source) for source in sorted(set(inputs))
],
outputs=[
Dataset(namespace=f"gs://{self.destination_bucket}", name=target)
for target in sorted(set(outputs))
],
)
| GCSToGCSOperator |
python | pytorch__pytorch | torch/optim/lr_scheduler.py | {
"start": 60434,
"end": 68933
} | class ____(LRScheduler):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This scheduler reads a metrics
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Args:
optimizer (Optimizer): Wrapped optimizer.
mode (str): One of `min`, `max`. In `min` mode, lr will
be reduced when the quantity monitored has stopped
decreasing; in `max` mode it will be reduced when the
quantity monitored has stopped increasing. Default: 'min'.
factor (float): Factor by which the learning rate will be
reduced. new_lr = lr * factor. Default: 0.1.
patience (int): The number of allowed epochs with no improvement after
which the learning rate will be reduced.
For example, consider the case of having no patience (`patience = 0`).
In the first epoch, a baseline is established and is always considered good as there's no previous baseline.
In the second epoch, if the performance is worse than the baseline,
we have what is considered an intolerable epoch.
Since the count of intolerable epochs (1) is greater than the patience level (0),
the learning rate is reduced at the end of this epoch.
From the third epoch onwards, the learning rate continues to be reduced at the end of each epoch
if the performance is worse than the baseline. If the performance improves or remains the same,
the learning rate is not adjusted.
Default: 10.
threshold (float): Threshold for measuring the new optimum,
to only focus on significant changes. Default: 1e-4.
threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
dynamic_threshold = best * ( 1 + threshold ) in 'max'
mode or best * ( 1 - threshold ) in `min` mode.
In `abs` mode, dynamic_threshold = best + threshold in
`max` mode or best - threshold in `min` mode. Default: 'rel'.
cooldown (int): Number of epochs to wait before resuming
normal operation after lr has been reduced. Default: 0.
min_lr (float or list): A scalar or a list of scalars. A
lower bound on the learning rate of all param groups
or each group respectively. Default: 0.
eps (float): Minimal decay applied to lr. If the difference
between new and old lr is smaller than eps, the update is
ignored. Default: 1e-8.
Example:
>>> # xdoctest: +SKIP
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = ReduceLROnPlateau(optimizer, "min")
>>> for epoch in range(10):
>>> train(...)
>>> val_loss = validate(...)
>>> # Note that step should be called after validate()
>>> scheduler.step(val_loss)
.. image:: ../scripts/lr_scheduler_images/ReduceLROnPlateau.png
"""
def __init__(
self,
optimizer: Optimizer,
mode: Literal["min", "max"] = "min",
factor: float = 0.1,
patience: int = 10,
threshold: float = 1e-4,
threshold_mode: Literal["rel", "abs"] = "rel",
cooldown: int = 0,
min_lr: Union[list[float], float] = 0,
eps: float = 1e-8,
) -> None: # noqa: D107
if factor >= 1.0:
raise ValueError("Factor should be < 1.0.")
self.factor = factor
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
if isinstance(min_lr, (list, tuple)):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError(
f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}"
)
self.default_min_lr = None
self.min_lrs = list(min_lr)
else:
# pyrefly: ignore [bad-assignment]
self.default_min_lr = min_lr
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.patience = patience
self.cooldown = cooldown
self.eps = eps
self.last_epoch = 0
self._last_lr = _param_groups_val_list(self.optimizer, "lr")
self._init_is_better(
mode=mode, threshold=threshold, threshold_mode=threshold_mode
)
self._reset()
def _reset(self) -> None:
"""Reset num_bad_epochs counter and cooldown counter."""
self.best = self.mode_worse
self.cooldown_counter = 0
self.num_bad_epochs = 0
def step(self, metrics: SupportsFloat, epoch=None) -> None: # type: ignore[override]
"""Perform a step."""
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
if epoch is None:
epoch = self.last_epoch + 1
else:
warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning, stacklevel=2)
self.last_epoch = epoch
if self._is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self._reduce_lr(epoch)
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
self._last_lr = _param_groups_val_list(self.optimizer, "lr")
def _reduce_lr(self, epoch) -> None:
if len(self.optimizer.param_groups) != len(self.min_lrs):
if self.default_min_lr is None:
raise RuntimeError(
"The number of param groups in the `optimizer` "
f"({len(self.optimizer.param_groups)}) differs "
f"from when `ReduceLROnPlateau` was initialized "
f"({len(self.min_lrs)}), usually due to a new "
"param group being added to the optimizer. Please "
"modify the `min_lrs` field to match the length "
"of the `optimizer` param groups."
)
else:
# pyrefly: ignore [bad-assignment]
self.min_lrs = [self.default_min_lr] * len(self.optimizer.param_groups)
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group["lr"])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
_update_param_group_val(param_group, "lr", new_lr)
@property
def in_cooldown(self): # noqa: D102
return self.cooldown_counter > 0
def _is_better(self, a, best): # noqa: D102
if self.mode == "min" and self.threshold_mode == "rel":
rel_epsilon = 1.0 - self.threshold
return a < best * rel_epsilon
elif self.mode == "min" and self.threshold_mode == "abs":
return a < best - self.threshold
elif self.mode == "max" and self.threshold_mode == "rel":
rel_epsilon = self.threshold + 1.0
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + self.threshold
def _init_is_better(self, mode, threshold, threshold_mode) -> None:
if mode not in {"min", "max"}:
raise ValueError("mode " + mode + " is unknown!")
if threshold_mode not in {"rel", "abs"}:
raise ValueError("threshold mode " + threshold_mode + " is unknown!")
# the worse value for the chosen mode
if mode == "min":
self.mode_worse = inf
else: # mode == 'max':
self.mode_worse = -inf
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Load the scheduler's state."""
self.__dict__.update(state_dict)
self._init_is_better(
mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode
)
| ReduceLROnPlateau |
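A small worked example (illustrative, not from the scheduler source) of the 'rel' threshold check that _is_better applies in 'min' mode, i.e. a < best * (1.0 - threshold):

best, threshold = 0.500, 1e-4
cutoff = best * (1.0 - threshold)   # 0.49995
print(0.49990 < cutoff)  # True  -> counts as an improvement; num_bad_epochs resets to 0
print(0.49996 < cutoff)  # False -> another bad epoch; num_bad_epochs keeps climbing toward patience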
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/nodes.py | {
"start": 144,
"end": 2249
} | class ____:
__slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'
def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.comment = comment
self.anchor = anchor
def __repr__(self):
# type: () -> Any
value = self.value
# if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = f'<{len(value)} items>'
# else:
# if len(value) > 75:
# value = repr(value[:70]+' ... ')
# else:
# value = repr(value)
value = repr(value)
return _F(
'{class_name!s}(tag={self_tag!r}, value={value!s})',
class_name=self.__class__.__name__,
self_tag=self.tag,
value=value,
)
def dump(self, indent=0):
# type: (int) -> None
if isinstance(self.value, str):
sys.stdout.write(
'{}{}(tag={!r}, value={!r})\n'.format(
' ' * indent, self.__class__.__name__, self.tag, self.value
)
)
if self.comment:
sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment))
return
sys.stdout.write(
'{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag)
)
if self.comment:
sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment))
for v in self.value:
if isinstance(v, tuple):
for v1 in v:
v1.dump(indent + 1)
elif isinstance(v, Node):
v.dump(indent + 1)
else:
sys.stdout.write('Node value type? {}\n'.format(type(v)))
| Node |
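Node is the base of ruamel.yaml's representation tree (ScalarNode, SequenceNode, MappingNode all derive from it). A short sketch of how such nodes are typically obtained and inspected, assuming the upstream ruamel.yaml package rather than Spack's vendored copy:

from ruamel.yaml import YAML

yaml = YAML()
# compose() parses a document into a tree of Node instances like the class above.
root = yaml.compose("a: [1, 2]\n")
print(root.tag)  # e.g. 'tag:yaml.org,2002:map'
root.dump()      # walks the tree via the dump() method shown in the row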
python | PyCQA__pylint | tests/functional/p/postponed/postponed_evaluation_activated_with_alias.py | {
"start": 187,
"end": 342
} | class ____:
@classmethod
def from_string(cls, source) -> MyClass:
...
def validate_b(self, obj: OtherClass) -> bool:
...
| MyClass |
python | ApeWorX__ape | src/ape/utils/rpc.py | {
"start": 2033,
"end": 5248
} | class ____(CaseInsensitiveDict):
"""
A dict-like data-structure for HTTP-headers.
It is case-insensitive and appends user-agent strings
rather than overrides.
"""
def __setitem__(self, key, value):
if key.lower() != "user-agent" or not self.__contains__("user-agent"):
return super().__setitem__(key, value)
# Handle appending the user-agent (without replacing).
existing_user_agent = self.__getitem__("user-agent")
parts = [a.strip() for a in value.split(" ")]
new_parts = []
for part in parts:
if part in existing_user_agent:
# Already added.
continue
else:
new_parts.append(part)
if new_user_agent := " ".join(new_parts):
super().__setitem__(key, f"{existing_user_agent} {new_user_agent}")
def request_with_retry(
func: Callable,
min_retry_delay: int = 1_000,
retry_backoff_factor: int = 2,
max_retry_delay: int = 30_000,
max_retries: int = 10,
retry_jitter: int = 250,
is_rate_limit: Optional[Callable[[Exception], bool]] = None,
):
"""
Make a request with 429/rate-limit retry logic.
Args:
func (Callable): The function to run with rate-limit handling logic.
min_retry_delay (int): The amount of milliseconds to wait before
retrying the request. Defaults to ``1_000`` (one second).
retry_backoff_factor (int): The multiplier applied to the retry delay
after each failed attempt. Defaults to ``2``.
max_retry_delay (int): The maximum length of the retry delay.
Defaults to ``30_000`` (30 seconds).
max_retries (int): The maximum number of retries.
Defaults to ``10``.
retry_jitter (int): A random number of milliseconds up to this limit
is added to each retry delay. Defaults to ``250`` milliseconds.
is_rate_limit (Callable[[Exception], bool] | None): A custom handler
for detecting rate-limits. Defaults to checking for a 429 status
code on an HTTPError.
"""
if not is_rate_limit:
# Use default checker.
def checker(err: Exception) -> bool:
return isinstance(err, requests.HTTPError) and err.response.status_code == 429
is_rate_limit = checker
for attempt in range(max_retries):
try:
return func()
except Exception as err:
if not is_rate_limit(err):
# It was not a rate limit error. Raise whatever exception it is.
raise
else:
# We were rate-limited. Invoke retry/backoff logic.
logger.warning("Request was rate-limited. Backing-off and then retrying...")
retry_interval = min(
max_retry_delay, min_retry_delay * retry_backoff_factor**attempt
)
delay = retry_interval + randint(0, retry_jitter)
time.sleep(delay / 1000)
continue
# If we get here, we over-waited. Raise custom exception.
raise ProviderError(f"Rate limit retry-mechanism exceeded after '{max_retries}' attempts.")
| RPCHeaders |
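The request_with_retry helper above wraps any callable with exponential backoff when a rate limit (HTTP 429 by default) is detected. A hedged usage sketch follows; the import path is inferred from the row's file path (src/ape/utils/rpc.py) and the URL is a placeholder:

import requests
from ape.utils.rpc import request_with_retry  # assumed import, per the row's path

def fetch():
    resp = requests.get("https://example.com/rpc")  # placeholder endpoint
    resp.raise_for_status()  # a 429 raises HTTPError, which triggers the retry path
    return resp.json()

# Retry up to 3 times, starting at 500 ms and doubling each attempt (capped at 30 s).
data = request_with_retry(fetch, min_retry_delay=500, max_retries=3)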
python | celery__celery | celery/backends/redis.py | {
"start": 25579,
"end": 28560
} | class ____(RedisBackend):
"""Redis sentinel task result store."""
# URL looks like `sentinel://0.0.0.0:26347/3;sentinel://0.0.0.0:26348/3`
_SERVER_URI_SEPARATOR = ";"
sentinel = getattr(redis, "sentinel", None)
connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
def __init__(self, *args, **kwargs):
if self.sentinel is None:
raise ImproperlyConfigured(E_REDIS_SENTINEL_MISSING.strip())
super().__init__(*args, **kwargs)
def as_uri(self, include_password=False):
"""Return the server addresses as URIs, sanitizing the password or not."""
# Allow superclass to do work if we don't need to force sanitization
if include_password:
return super().as_uri(
include_password=include_password,
)
        # Otherwise we need to ensure that all components get sanitized
        # by passing them one by one to the `kombu` helper
uri_chunks = (
maybe_sanitize_url(chunk)
for chunk in (self.url or "").split(self._SERVER_URI_SEPARATOR)
)
# Similar to the superclass, strip the trailing slash from URIs with
# all components empty other than the scheme
return self._SERVER_URI_SEPARATOR.join(
uri[:-1] if uri.endswith(":///") else uri
for uri in uri_chunks
)
def _params_from_url(self, url, defaults):
chunks = url.split(self._SERVER_URI_SEPARATOR)
connparams = dict(defaults, hosts=[])
for chunk in chunks:
data = super()._params_from_url(
url=chunk, defaults=defaults)
connparams['hosts'].append(data)
for param in ("host", "port", "db", "password"):
connparams.pop(param)
# Adding db/password in connparams to connect to the correct instance
for param in ("db", "password"):
if connparams['hosts'] and param in connparams['hosts'][0]:
connparams[param] = connparams['hosts'][0].get(param)
return connparams
def _get_sentinel_instance(self, **params):
connparams = params.copy()
hosts = connparams.pop("hosts")
min_other_sentinels = self._transport_options.get("min_other_sentinels", 0)
sentinel_kwargs = self._transport_options.get("sentinel_kwargs", {})
sentinel_instance = self.sentinel.Sentinel(
[(cp['host'], cp['port']) for cp in hosts],
min_other_sentinels=min_other_sentinels,
sentinel_kwargs=sentinel_kwargs,
**connparams)
return sentinel_instance
def _get_pool(self, **params):
sentinel_instance = self._get_sentinel_instance(**params)
master_name = self._transport_options.get("master_name", None)
return sentinel_instance.master_for(
service_name=master_name,
redis_class=self._get_client(),
).connection_pool
| SentinelBackend |
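SentinelBackend is selected when the Celery result backend URL uses the semicolon-separated sentinel:// scheme shown in its docstring. A hedged configuration sketch, with placeholder hosts and master name:

from celery import Celery

app = Celery("tasks", broker="redis://localhost:6379/0")
# Semicolon-separated sentinel URIs, matching _SERVER_URI_SEPARATOR above.
app.conf.result_backend = "sentinel://10.0.0.1:26379/3;sentinel://10.0.0.2:26379/3"
app.conf.result_backend_transport_options = {
    "master_name": "mymaster",  # read via self._transport_options in _get_pool()
}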