| id (int64) | prompt (string) | docstring (string, nullable) |
|---|---|---|
177,974 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
import platform
The provided code snippet includes necessary dependencies for implementing the `get_closure` function. Write a Python function `def get_closure(f)` to solve the following problem:
Get a function's closure attribute
Here is the function:
def get_closure(f):
    """Get a function's closure attribute"""
    return f.func_closure | Get a function's closure attribute |
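As a brief usage sketch (not part of the row above): `func_closure` is the Python 2 spelling, so this sketch uses the Python 3 equivalent `__closure__` to show what the attribute holds.

```python
# Illustrative only: Python 3 exposes the same data as `__closure__`,
# which is what `get_closure` reads as `func_closure` on Python 2.
def make_adder(n):
    def add(x):
        return x + n
    return add

add5 = make_adder(5)
cells = add5.__closure__                   # tuple of cell objects, or None
print([c.cell_contents for c in cells])    # [5]
```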
177,975 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
import platform
The provided code snippet includes necessary dependencies for implementing the `u_format` function. Write a Python function `def u_format(s)` to solve the following problem:
{u}'abc'" --> "u'abc'" (Python 2) Accepts a string or a function, so it can be used as a decorator.
Here is the function:
def u_format(s):
    """"{u}'abc'" --> "u'abc'" (Python 2)
    Accepts a string or a function, so it can be used as a decorator."""
    return s.format(u='u') | "{u}'abc'" --> "u'abc'" (Python 2) Accepts a string or a function, so it can be used as a decorator. |
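A minimal sketch of the substitution the docstring describes, assuming the Python 2 variant shown above is in scope:

```python
# With the Python 2 variant above, "{u}" expands to the literal "u"
# prefix, letting one doctest template serve both major versions.
template = "In [1]: {u}'abc'"
print(u_format(template))   # In [1]: u'abc'
```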
177,976 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
import platform
def execfile(fname, glob=None, loc=None, compiler=None):
    loc = loc if (loc is not None) else glob
    scripttext = builtin_mod.open(fname).read() + '\n'
    # compile converts unicode filename to str assuming
    # ascii. Let's do the conversion before calling compile
    if isinstance(fname, unicode):
        filename = unicode_to_str(fname)
    else:
        filename = fname
    compiler = compiler or compile
    exec(compiler(scripttext, filename, 'exec'), glob, loc) | null |
177,977 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
def encode(u, encoding=None):
    encoding = encoding or DEFAULT_ENCODING
    return u.encode(encoding, "replace")
import platform
if sys.version_info[0] >= 3 or platform.python_implementation() == 'IronPython':
    str_to_unicode = no_code
    unicode_to_str = no_code
    str_to_bytes = encode
    bytes_to_str = decode
    cast_bytes_py2 = no_code
    cast_unicode_py2 = no_code
    buffer_to_bytes_py2 = no_code
    string_types = (str,)
    unicode_type = str
else:
    str_to_unicode = decode
    unicode_to_str = encode
    str_to_bytes = no_code
    bytes_to_str = no_code
    cast_bytes_py2 = cast_bytes
    cast_unicode_py2 = cast_unicode
    buffer_to_bytes_py2 = buffer_to_bytes
    string_types = (str, unicode)
    unicode_type = unicode
if sys.version_info[0] >= 3:
    PY3 = True
    # keep reference to builtin_mod because the kernel overrides that value
    # to forward requests to a frontend.
    builtin_mod_name = "builtins"
    import builtins as builtin_mod
    which = shutil.which
    xrange = range
    getcwd = os.getcwd
    MethodType = types.MethodType
    # Refactor print statements in doctests.
    _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE)
    # Abstract u'abc' syntax:
else:
    PY3 = False
    # keep reference to builtin_mod because the kernel overrides that value
    # to forward requests to a frontend.
    builtin_mod_name = "__builtin__"
    import __builtin__ as builtin_mod
    import re
    _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
    xrange = xrange
    getcwd = os.getcwdu
    which = _shutil_which
    # Abstract u'abc' syntax:
    if sys.platform == 'win32':
        # (win32 execfile definition elided from this snippet)
    else:
        def execfile(fname, glob=None, loc=None, compiler=None):
            if isinstance(fname, unicode):
                filename = fname.encode(sys.getfilesystemencoding())
            else:
                filename = fname
            where = [ns for ns in [glob, loc] if ns is not None]
            if compiler is None:
                builtin_mod.execfile(filename, *where)
            else:
                scripttext = builtin_mod.open(fname).read().rstrip() + '\n'
                exec(compiler(scripttext, filename, 'exec'), glob, loc) | null |
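A small sketch of how the dispatch table above is consumed on the Python 3 branch. `DEFAULT_ENCODING`, `decode`, and `no_code` are elided from the row, so stand-ins are defined here purely for illustration:

```python
DEFAULT_ENCODING = "utf-8"  # stand-in for the elided `.encoding` import

def no_code(u, encoding=None):
    # identity: no conversion needed on this branch
    return u

def decode(b, encoding=None):
    return b.decode(encoding or DEFAULT_ENCODING, "replace")

def encode(u, encoding=None):
    return u.encode(encoding or DEFAULT_ENCODING, "replace")

# On Python 3 the table resolves to str_to_bytes=encode, bytes_to_str=decode:
str_to_bytes, bytes_to_str = encode, decode
assert bytes_to_str(str_to_bytes("héllo")) == "héllo"
```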
177,978 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
import platform
The provided code snippet includes necessary dependencies for implementing the `annotate` function. Write a Python function `def annotate(**kwargs)` to solve the following problem:
Python 3 compatible function annotation for Python 2.
Here is the function:
def annotate(**kwargs):
    """Python 3 compatible function annotation for Python 2."""
    if not kwargs:
        raise ValueError('annotations must be provided as keyword arguments')
    def dec(f):
        if hasattr(f, '__annotations__'):
            for k, v in kwargs.items():
                f.__annotations__[k] = v
        else:
            f.__annotations__ = kwargs
        return f
    return dec | Python 3 compatible function annotation for Python 2. |
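A usage sketch of the decorator, assuming the `annotate` definition above:

```python
@annotate(x=int, y=int)
def add(x, y):
    return x + y

# The keyword arguments land directly in __annotations__:
print(add.__annotations__)   # {'x': <class 'int'>, 'y': <class 'int'>}
```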
177,979 | import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
import platform
The provided code snippet includes necessary dependencies for implementing the `with_metaclass` function. Write a Python function `def with_metaclass(meta, *bases)` to solve the following problem:
Create a base class with a metaclass.
Here is the function:
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    return meta("_NewBase", bases, {}) | Create a base class with a metaclass. |
177,980 |
The provided code snippet includes necessary dependencies for implementing the `import_item` function. Write a Python function `def import_item(name)` to solve the following problem:
Import and return ``bar`` given the string ``foo.bar``. Calling ``bar = import_item("foo.bar")`` is the functional equivalent of executing the code ``from foo import bar``. Parameters ---------- name : string The fully qualified name of the module/package being imported. Returns ------- mod : module object The module that was imported.
Here is the function:
def import_item(name):
    """Import and return ``bar`` given the string ``foo.bar``.
    Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
    executing the code ``from foo import bar``.
    Parameters
    ----------
    name : string
        The fully qualified name of the module/package being imported.
    Returns
    -------
    mod : module object
        The module that was imported.
    """
    parts = name.rsplit('.', 1)
    if len(parts) == 2:
        # called with 'foo.bar....'
        package, obj = parts
        module = __import__(package, fromlist=[obj])
        try:
            pak = getattr(module, obj)
        except AttributeError:
            raise ImportError('No module named %s' % obj)
        return pak
    else:
        # called with un-dotted string
        return __import__(parts[0]) | Import and return ``bar`` given the string ``foo.bar``. Calling ``bar = import_item("foo.bar")`` is the functional equivalent of executing the code ``from foo import bar``. Parameters ---------- name : string The fully qualified name of the module/package being imported. Returns ------- mod : module object The module that was imported. |
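A usage sketch covering both branches, assuming the definition above:

```python
path_mod = import_item("os.path")     # like `from os import path`
join = import_item("os.path.join")    # like `from os.path import join`
os_mod = import_item("os")            # un-dotted: plain `import os`
print(join("a", "b"))                 # a/b (or a\b on Windows)
```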
177,981 | import errno
import os
import site
import stat
import sys
import tempfile
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
import platformdirs
from .utils import deprecation
Any = object()
Optional: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `is_file_hidden_win` function. Write a Python function `def is_file_hidden_win(abs_path: str, stat_res: Optional[Any] = None) -> bool` to solve the following problem:
Is a file hidden? This only checks the file itself; it should be called in combination with checking the directory containing the file. Use is_hidden() instead to check the file and its parent directories. Parameters ---------- abs_path : unicode The absolute path to check. stat_res : os.stat_result, optional The result of calling stat() on abs_path. If not passed, this function will call stat() internally.
Here is the function:
def is_file_hidden_win(abs_path: str, stat_res: Optional[Any] = None) -> bool:
    """Is a file hidden?
    This only checks the file itself; it should be called in combination with
    checking the directory containing the file.
    Use is_hidden() instead to check the file and its parent directories.
    Parameters
    ----------
    abs_path : unicode
        The absolute path to check.
    stat_res : os.stat_result, optional
        The result of calling stat() on abs_path. If not passed, this function
        will call stat() internally.
    """
    if os.path.basename(abs_path).startswith("."):
        return True
    if stat_res is None:
        try:
            stat_res = os.stat(abs_path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                return False
            raise
    try:
        if stat_res.st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN:  # type:ignore
            return True
    except AttributeError:
        # allow AttributeError on PyPy for Windows
        # 'stat_result' object has no attribute 'st_file_attributes'
        # https://foss.heptapod.net/pypy/pypy/-/issues/3469
        warnings.warn(
            "hidden files are not detectable on this system, so no file will be marked as hidden."
        )
        pass
    return False | Is a file hidden? This only checks the file itself; it should be called in combination with checking the directory containing the file. Use is_hidden() instead to check the file and its parent directories. Parameters ---------- abs_path : unicode The absolute path to check. stat_res : os.stat_result, optional The result of calling stat() on abs_path. If not passed, this function will call stat() internally. |
177,982 | import errno
import os
import site
import stat
import sys
import tempfile
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
import platformdirs
from .utils import deprecation
UF_HIDDEN = getattr(stat, "UF_HIDDEN", 32768)
Any = object()
Optional: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `is_file_hidden_posix` function. Write a Python function `def is_file_hidden_posix(abs_path: str, stat_res: Optional[Any] = None) -> bool` to solve the following problem:
Is a file hidden? This only checks the file itself; it should be called in combination with checking the directory containing the file. Use is_hidden() instead to check the file and its parent directories. Parameters ---------- abs_path : unicode The absolute path to check. stat_res : os.stat_result, optional The result of calling stat() on abs_path. If not passed, this function will call stat() internally.
Here is the function:
def is_file_hidden_posix(abs_path: str, stat_res: Optional[Any] = None) -> bool:
    """Is a file hidden?
    This only checks the file itself; it should be called in combination with
    checking the directory containing the file.
    Use is_hidden() instead to check the file and its parent directories.
    Parameters
    ----------
    abs_path : unicode
        The absolute path to check.
    stat_res : os.stat_result, optional
        The result of calling stat() on abs_path. If not passed, this function
        will call stat() internally.
    """
    if os.path.basename(abs_path).startswith("."):
        return True
    if stat_res is None or stat.S_ISLNK(stat_res.st_mode):
        try:
            stat_res = os.stat(abs_path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                return False
            raise
    # check that dirs can be listed
    if stat.S_ISDIR(stat_res.st_mode):  # type:ignore[misc] # noqa
        # use x-access, not actual listing, in case of slow/large listings
        if not os.access(abs_path, os.X_OK | os.R_OK):
            return True
    # check UF_HIDDEN
    if getattr(stat_res, "st_flags", 0) & UF_HIDDEN:
        return True
    return False | Is a file hidden? This only checks the file itself; it should be called in combination with checking the directory containing the file. Use is_hidden() instead to check the file and its parent directories. Parameters ---------- abs_path : unicode The absolute path to check. stat_res : os.stat_result, optional The result of calling stat() on abs_path. If not passed, this function will call stat() internally. |
177,983 | import errno
import os
import site
import stat
import sys
import tempfile
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
import platformdirs
from .utils import deprecation
UF_HIDDEN = getattr(stat, "UF_HIDDEN", 32768)
def exists(path: str) -> bool:
    """Replacement for `os.path.exists` which works for host mapped volumes
    on Windows containers
    """
    try:
        os.lstat(path)
    except OSError:
        return False
    return True
The provided code snippet includes necessary dependencies for implementing the `is_hidden` function. Write a Python function `def is_hidden(abs_path: str, abs_root: str = "") -> bool` to solve the following problem:
Is a file hidden or contained in a hidden directory? This will start with the rightmost path element and work backwards to the given root to see if a path is hidden or in a hidden directory. Hidden is determined by either name starting with '.' or the UF_HIDDEN flag as reported by stat. If abs_path is the same directory as abs_root, it will be visible even if that is a hidden folder. This only checks the visibility of files and directories *within* abs_root. Parameters ---------- abs_path : unicode The absolute path to check for hidden directories. abs_root : unicode The absolute path of the root directory in which hidden directories should be checked for.
Here is the function:
def is_hidden(abs_path: str, abs_root: str = "") -> bool:
    """Is a file hidden or contained in a hidden directory?
    This will start with the rightmost path element and work backwards to the
    given root to see if a path is hidden or in a hidden directory. Hidden is
    determined by either name starting with '.' or the UF_HIDDEN flag as
    reported by stat.
    If abs_path is the same directory as abs_root, it will be visible even if
    that is a hidden folder. This only checks the visibility of files
    and directories *within* abs_root.
    Parameters
    ----------
    abs_path : unicode
        The absolute path to check for hidden directories.
    abs_root : unicode
        The absolute path of the root directory in which hidden directories
        should be checked for.
    """
    abs_path = os.path.normpath(abs_path)
    abs_root = os.path.normpath(abs_root)
    if abs_path == abs_root:
        return False
    if is_file_hidden(abs_path):
        return True
    if not abs_root:
        abs_root = abs_path.split(os.sep, 1)[0] + os.sep
    inside_root = abs_path[len(abs_root) :]
    if any(part.startswith(".") for part in inside_root.split(os.sep)):
        return True
    # check UF_HIDDEN on any location up to root.
    # is_file_hidden() already checked the file, so start from its parent dir
    path = os.path.dirname(abs_path)
    while path and path.startswith(abs_root) and path != abs_root:
        if not exists(path):
            path = os.path.dirname(path)
            continue
        try:
            # may fail on Windows junctions
            st = os.lstat(path)
        except OSError:
            return True
        if getattr(st, "st_flags", 0) & UF_HIDDEN:
            return True
        path = os.path.dirname(path)
    return False | Is a file hidden or contained in a hidden directory? This will start with the rightmost path element and work backwards to the given root to see if a path is hidden or in a hidden directory. Hidden is determined by either name starting with '.' or the UF_HIDDEN flag as reported by stat. If abs_path is the same directory as abs_root, it will be visible even if that is a hidden folder. This only checks the visibility of files and directories *within* abs_root. Parameters ---------- abs_path : unicode The absolute path to check for hidden directories. abs_root : unicode The absolute path of the root directory in which hidden directories should be checked for. |
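A usage sketch in a temporary tree, assuming the definitions above plus the platform dispatch `is_file_hidden = is_file_hidden_posix`, which is elided from this row:

```python
import os, tempfile

is_file_hidden = is_file_hidden_posix   # stand-in for the elided dispatch

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, ".secret"))
visible = os.path.join(root, "notes.txt")
hidden = os.path.join(root, ".secret", "notes.txt")
for p in (visible, hidden):
    open(p, "w").close()

print(is_hidden(visible, root))   # False
print(is_hidden(hidden, root))    # True: lives inside a dot-directory
print(is_hidden(root, root))      # False: the root itself is never hidden
```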
177,984 | import argparse
import errno
import json
import os
import site
import sys
import sysconfig
from shutil import which
from subprocess import Popen
from typing import List
from . import paths
from .version import __version__
class JupyterParser(argparse.ArgumentParser):
    """A Jupyter argument parser."""

    @property
    def epilog(self):
        """Add subcommands to epilog on request
        Avoids searching PATH for subcommands unless help output is requested.
        """
        return "Available subcommands: %s" % " ".join(list_subcommands())

    @epilog.setter
    def epilog(self, x):
        """Ignore epilog set in Parser.__init__"""
        pass

    def argcomplete(self):
        """Trigger auto-completion, if enabled"""
        try:
            import argcomplete  # type: ignore[import]
            argcomplete.autocomplete(self)
        except ImportError:
            pass
def list_subcommands() -> List[str]:
    """List all jupyter subcommands
    searches PATH for `jupyter-name`
    Returns a list of jupyter's subcommand names, without the `jupyter-` prefix.
    Nested children (e.g. jupyter-sub-subsub) are not included.
    """
    subcommand_tuples = set()
    # construct a set of `('foo', 'bar') from `jupyter-foo-bar`
    for d in _path_with_self():
        try:
            names = os.listdir(d)
        except OSError:
            continue
        for name in names:
            if name.startswith("jupyter-"):
                if sys.platform.startswith("win"):
                    # remove file-extension on Windows
                    name = os.path.splitext(name)[0]  # noqa
                subcommand_tuples.add(tuple(name.split("-")[1:]))
    # build a set of subcommand strings, excluding subcommands whose parents are defined
    subcommands = set()
    # Only include `jupyter-foo-bar` if `jupyter-foo` is not already present
    for sub_tup in subcommand_tuples:
        if not any(sub_tup[:i] in subcommand_tuples for i in range(1, len(sub_tup))):
            subcommands.add("-".join(sub_tup))
    return sorted(subcommands)
The provided code snippet includes necessary dependencies for implementing the `jupyter_parser` function. Write a Python function `def jupyter_parser() -> JupyterParser` to solve the following problem:
Create a jupyter parser object.
Here is the function:
def jupyter_parser() -> JupyterParser:
    """Create a jupyter parser object."""
    parser = JupyterParser(
        description="Jupyter: Interactive Computing",
    )
    group = parser.add_mutually_exclusive_group(required=False)
    # don't use argparse's version action because it prints to stderr on py2
    group.add_argument(
        "--version", action="store_true", help="show the versions of core jupyter packages and exit"
    )
    subcommand_action = group.add_argument(
        "subcommand", type=str, nargs="?", help="the subcommand to launch"
    )
    # For argcomplete, supply all known subcommands
    subcommand_action.completer = lambda *args, **kwargs: list_subcommands()  # type: ignore[attr-defined]
    group.add_argument("--config-dir", action="store_true", help="show Jupyter config dir")
    group.add_argument("--data-dir", action="store_true", help="show Jupyter data dir")
    group.add_argument("--runtime-dir", action="store_true", help="show Jupyter runtime dir")
    group.add_argument(
        "--paths",
        action="store_true",
        help="show all Jupyter paths. Add --json for machine-readable format.",
    )
    parser.add_argument("--json", action="store_true", help="output paths as machine-readable json")
    parser.add_argument("--debug", action="store_true", help="output debug information about paths")
    return parser | Create a jupyter parser object. |
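A sketch of parsing a typical command line with the parser above; `parse_known_args` is used so that flags the parser does not recognize can be left for a subcommand to handle:

```python
parser = jupyter_parser()
args, extra = parser.parse_known_args(["lab", "--watch"])
print(args.subcommand)   # 'lab'
print(extra)             # ['--watch'], left for the subcommand to consume
```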
177,985 | import argparse
import errno
import json
import os
import site
import sys
import sysconfig
from shutil import which
from subprocess import Popen
from typing import List
from . import paths
from .version import __version__
class Popen(Generic[AnyStr]):
args: _CMD
stdin: Optional[IO[AnyStr]]
stdout: Optional[IO[AnyStr]]
stderr: Optional[IO[AnyStr]]
pid: int
returncode: int
universal_newlines: bool
# Technically it is wrong that Popen provides __new__ instead of __init__
# but this shouldn't come up hopefully?
if sys.version_info >= (3, 7):
# text is added in 3.7
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
text: Optional[bool] = ...,
encoding: str,
errors: Optional[str] = ...,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
text: Optional[bool] = ...,
encoding: Optional[str] = ...,
errors: str,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
*,
universal_newlines: Literal[True],
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
# where the *real* keyword only args start
text: Optional[bool] = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
text: Literal[True],
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: Literal[False] = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
text: Literal[None, False] = ...,
encoding: None = ...,
errors: None = ...,
) -> Popen[bytes]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
text: Optional[bool] = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
) -> Popen[Any]: ...
else:
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
encoding: str,
errors: Optional[str] = ...,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
encoding: Optional[str] = ...,
errors: str,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
*,
universal_newlines: Literal[True],
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
# where the *real* keyword only args start
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
) -> Popen[str]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: Literal[False] = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
encoding: None = ...,
errors: None = ...,
) -> Popen[bytes]: ...
def __new__(
cls,
args: _CMD,
bufsize: int = ...,
executable: Optional[AnyPath] = ...,
stdin: Optional[_FILE] = ...,
stdout: Optional[_FILE] = ...,
stderr: Optional[_FILE] = ...,
preexec_fn: Optional[Callable[[], Any]] = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: Optional[AnyPath] = ...,
env: Optional[_ENV] = ...,
universal_newlines: bool = ...,
startupinfo: Optional[Any] = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Any = ...,
*,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
) -> Popen[Any]: ...
def poll(self) -> Optional[int]: ...
if sys.version_info >= (3, 7):
def wait(self, timeout: Optional[float] = ...) -> int: ...
else:
def wait(self, timeout: Optional[float] = ..., endtime: Optional[float] = ...) -> int: ...
# Return str/bytes
def communicate(
self,
input: Optional[AnyStr] = ...,
timeout: Optional[float] = ...,
# morally this should be optional
) -> Tuple[AnyStr, AnyStr]: ...
def send_signal(self, sig: int) -> None: ...
def terminate(self) -> None: ...
def kill(self) -> None: ...
def __enter__(self: _S) -> _S: ...
def __exit__(
self, type: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]
) -> None: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
The provided code snippet includes necessary dependencies for implementing the `_execvp` function. Write a Python function `def _execvp(cmd, argv)` to solve the following problem:
execvp, except on Windows where it uses Popen Python provides execvp on Windows, but its behavior is problematic (Python bug#9148).
Here is the function:
def _execvp(cmd, argv):
    """execvp, except on Windows where it uses Popen
    Python provides execvp on Windows, but its behavior is problematic (Python bug#9148).
    """
    if sys.platform.startswith("win"):
        # PATH is ignored when shell=False,
        # so rely on shutil.which
        cmd_path = which(cmd)
        if cmd_path is None:
            raise OSError("%r not found" % cmd, errno.ENOENT)
        p = Popen([cmd_path] + argv[1:])
        # Don't raise KeyboardInterrupt in the parent process.
        # Set this after spawning, to avoid subprocess inheriting handler.
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        p.wait()
        sys.exit(p.returncode)
    else:
        os.execvp(cmd, argv) | execvp, except on Windows where it uses Popen Python provides execvp on Windows, but its behavior is problematic (Python bug#9148). |
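A cautionary usage sketch; `jupyter-lab` is only an example command here:

```python
# Hand control to an example subcommand the way a launcher would.
# On POSIX this call does not return (the process image is replaced);
# on Windows it blocks on the child and exits with its return code.
if __name__ == "__main__":
    _execvp("jupyter-lab", ["jupyter-lab", "--no-browser"])
```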
177,986 | import argparse
import errno
import json
import os
import site
import sys
import sysconfig
from shutil import which
from subprocess import Popen
from typing import List
from . import paths
from .version import __version__
def _path_with_self():
    """Put `jupyter`'s dir at the front of PATH
    Ensures that /path/to/jupyter subcommand
    will do /path/to/jupyter-subcommand
    even if /other/jupyter-subcommand is ahead of it on PATH
    """
    path_list = (os.environ.get("PATH") or os.defpath).split(os.pathsep)
    # Insert the "scripts" directory for this Python installation
    # This allows the "jupyter" command to be relocated, while still
    # finding subcommands that have been installed in the default
    # location.
    # We put the scripts directory at the *end* of PATH, so that
    # if the user explicitly overrides a subcommand, that override
    # still takes effect.
    try:
        bindir = sysconfig.get_path("scripts")
    except KeyError:
        # The Python environment does not specify a "scripts" location
        pass
    else:
        path_list.append(bindir)
    scripts = [sys.argv[0]]
    if os.path.islink(scripts[0]):
        # include realpath, if `jupyter` is a symlink
        scripts.append(os.path.realpath(scripts[0]))
    for script in scripts:
        bindir = os.path.dirname(script)
        if os.path.isdir(bindir) and os.access(script, os.X_OK):  # only if it's a script
            # ensure executable's dir is on PATH
            # avoids missing subcommands when jupyter is run via absolute path
            path_list.insert(0, bindir)
    return path_list
The provided code snippet includes necessary dependencies for implementing the `_jupyter_abspath` function. Write a Python function `def _jupyter_abspath(subcommand)` to solve the following problem:
This method gets the abspath of a specified jupyter-subcommand with no changes on ENV.
Here is the function:
def _jupyter_abspath(subcommand):
    """This method gets the abspath of a specified jupyter-subcommand with no
    changes on ENV.
    """
    # get env PATH with self
    search_path = os.pathsep.join(_path_with_self())
    # get the abs path for the jupyter-<subcommand>
    jupyter_subcommand = f"jupyter-{subcommand}"
    abs_path = which(jupyter_subcommand, path=search_path)
    if abs_path is None:
        msg = f"\nJupyter command `{jupyter_subcommand}` not found."
        raise Exception(msg)
    if not os.access(abs_path, os.X_OK):
        msg = f"\nJupyter command `{jupyter_subcommand}` is not executable."
        raise Exception(msg)
    return abs_path | This method gets the abspath of a specified jupyter-subcommand with no changes on ENV. |
177,987 | import argparse
import errno
import json
import os
import site
import sys
import sysconfig
from shutil import which
from subprocess import Popen
from typing import List
from . import paths
from .version import __version__
class JupyterParser(argparse.ArgumentParser):
    """A Jupyter argument parser."""

    @property
    def epilog(self):
        """Add subcommands to epilog on request
        Avoids searching PATH for subcommands unless help output is requested.
        """
        return "Available subcommands: %s" % " ".join(list_subcommands())

    @epilog.setter
    def epilog(self, x):
        """Ignore epilog set in Parser.__init__"""
        pass

    def argcomplete(self):
        """Trigger auto-completion, if enabled"""
        try:
            import argcomplete  # type: ignore[import]
            argcomplete.autocomplete(self)
        except ImportError:
            pass
List = _Alias()
def get_argcomplete_cwords() -> t.Optional[t.List[str]]:
    """Get current words prior to completion point
    This is normally done in the `argcomplete.CompletionFinder` constructor,
    but is exposed here to allow `traitlets` to follow dynamic code-paths such
    as determining whether to evaluate a subcommand.
    """
    if "_ARGCOMPLETE" not in os.environ:
        return None
    comp_line = os.environ["COMP_LINE"]
    comp_point = int(os.environ["COMP_POINT"])
    # argcomplete.debug("splitting COMP_LINE for:", comp_line, comp_point)
    comp_words: t.List[str]
    try:
        (
            cword_prequote,
            cword_prefix,
            cword_suffix,
            comp_words,
            last_wordbreak_pos,
        ) = argcomplete.split_line(comp_line, comp_point)
    except ModuleNotFoundError:
        return None
    # _ARGCOMPLETE is set by the shell script to tell us where comp_words
    # should start, based on what we're completing.
    # 1: <script> [args]
    # 2: python <script> [args]
    # 3: python -m <module> [args]
    start = int(os.environ["_ARGCOMPLETE"]) - 1
    comp_words = comp_words[start:]
    # argcomplete.debug("prequote=", cword_prequote, "prefix=", cword_prefix, "suffix=", cword_suffix, "words=", comp_words, "last=", last_wordbreak_pos)
    return comp_words

def increment_argcomplete_index():
    """Assumes ``$_ARGCOMPLETE`` is set and `argcomplete` is importable
    Increment the index pointed to by ``$_ARGCOMPLETE``, which is used to
    determine which word `argcomplete` should start evaluating the command-line.
    This may be useful to "inform" `argcomplete` that we have already evaluated
    the first word as a subcommand.
    """
    try:
        os.environ["_ARGCOMPLETE"] = str(int(os.environ["_ARGCOMPLETE"]) + 1)
    except Exception:
        try:
            argcomplete.debug("Unable to increment $_ARGCOMPLETE", os.environ["_ARGCOMPLETE"])
        except (KeyError, ModuleNotFoundError):
            pass
The provided code snippet includes necessary dependencies for implementing the `_evaluate_argcomplete` function. Write a Python function `def _evaluate_argcomplete(parser: JupyterParser) -> List[str]` to solve the following problem:
If argcomplete is enabled, trigger autocomplete or return current words If the first word looks like a subcommand, return the current command that is attempting to be completed so that the subcommand can evaluate it; otherwise auto-complete using the main parser.
Here is the function:
def _evaluate_argcomplete(parser: JupyterParser) -> List[str]:
    """If argcomplete is enabled, trigger autocomplete or return current words
    If the first word looks like a subcommand, return the current command
    that is attempting to be completed so that the subcommand can evaluate it;
    otherwise auto-complete using the main parser.
    """
    try:
        # traitlets >= 5.8 provides some argcomplete support,
        # use helper methods to jump to argcomplete
        from traitlets.config.argcomplete_config import (
            get_argcomplete_cwords,
            increment_argcomplete_index,
        )
        cwords = get_argcomplete_cwords()
        if cwords and len(cwords) > 1 and not cwords[1].startswith("-"):
            # If first completion word looks like a subcommand,
            # increment word from which to start handling arguments
            increment_argcomplete_index()
            return cwords
        else:
            # Otherwise no subcommand, directly autocomplete and exit
            parser.argcomplete()
    except ImportError:
        # traitlets >= 5.8 not available, just try to complete this without
        # worrying about subcommands
        parser.argcomplete()
    msg = "Control flow should not reach end of autocomplete()"
    raise AssertionError(msg) | If argcomplete is enabled, trigger autocomplete or return current words If the first word looks like a subcommand, return the current command that is attempting to be completed so that the subcommand can evaluate it; otherwise auto-complete using the main parser. |
177,988 | import os
import re
import shutil
from datetime import datetime, timezone
from traitlets.config.loader import JSONFileConfigLoader, PyFileConfigLoader
from traitlets.log import get_logger
from .application import JupyterApp
from .paths import jupyter_config_dir, jupyter_data_dir
from .utils import ensure_dir_exists
migrations = {
    pjoin("{ipython_dir}", "nbextensions"): pjoin("{jupyter_data}", "nbextensions"),
    pjoin("{ipython_dir}", "kernels"): pjoin("{jupyter_data}", "kernels"),
    pjoin("{profile}", "nbconfig"): pjoin("{jupyter_config}", "nbconfig"),
}
custom_src_t = pjoin("{profile}", "static", "custom")
custom_dst_t = pjoin("{jupyter_config}", "custom")
config_migrations = ["notebook", "nbconvert", "qtconsole"]

def get_ipython_dir():
    """Return the IPython directory location.
    Not imported from IPython because the IPython implementation
    ensures that a writable directory exists,
    creating a temporary directory if not.
    We don't want to trigger that when checking if migration should happen.
    We only need to support the IPython < 4 behavior for migration,
    so importing for forward-compatibility and edge cases is not important.
    """
    return os.environ.get("IPYTHONDIR", os.path.expanduser("~/.ipython"))

def migrate_one(src, dst):
    """Migrate one item
    dispatches to migrate_dir/_file
    """
    log = get_logger()
    if os.path.isfile(src):
        return migrate_file(src, dst)
    elif os.path.isdir(src):
        return migrate_dir(src, dst)
    else:
        log.debug("Nothing to migrate for %s", src)
        return False

def migrate_static_custom(src, dst):
    """Migrate non-empty custom.js,css from src to dst
    src, dst are 'custom' directories containing custom.{js,css}
    """
    log = get_logger()
    migrated = False
    custom_js = pjoin(src, "custom.js")
    custom_css = pjoin(src, "custom.css")
    # check if custom_js is empty:
    custom_js_empty = True
    if os.path.isfile(custom_js):
        with open(custom_js, encoding="utf-8") as f:
            js = f.read().strip()
        for line in js.splitlines():
            if not (line.isspace() or line.strip().startswith(("/*", "*", "//"))):
                custom_js_empty = False
                break
    # check if custom_css is empty:
    custom_css_empty = True
    if os.path.isfile(custom_css):
        with open(custom_css, encoding="utf-8") as f:
            css = f.read().strip()
        custom_css_empty = css.startswith("/*") and css.endswith("*/")
    if custom_js_empty:
        log.debug("Ignoring empty %s", custom_js)
    if custom_css_empty:
        log.debug("Ignoring empty %s", custom_css)
    if custom_js_empty and custom_css_empty:
        # nothing to migrate
        return False
    ensure_dir_exists(dst)
    if not custom_js_empty or not custom_css_empty:
        ensure_dir_exists(dst)
    if not custom_js_empty and migrate_file(custom_js, pjoin(dst, "custom.js")):
        migrated = True
    if not custom_css_empty and migrate_file(custom_css, pjoin(dst, "custom.css")):
        migrated = True
    return migrated

def migrate_config(name, env):
    """Migrate a config file.
    Includes substitutions for updated configurable names.
    """
    log = get_logger()
    src_base = pjoin("{profile}", "ipython_{name}_config").format(name=name, **env)
    dst_base = pjoin("{jupyter_config}", "jupyter_{name}_config").format(name=name, **env)
    loaders = {
        ".py": PyFileConfigLoader,
        ".json": JSONFileConfigLoader,
    }
    migrated = []
    for ext in (".py", ".json"):
        src = src_base + ext
        dst = dst_base + ext
        if os.path.exists(src):
            cfg = loaders[ext](src).load_config()
            if cfg:
                if migrate_file(src, dst, substitutions=config_substitutions):
                    migrated.append(src)
            else:
                # don't migrate empty config files
                log.debug("Not migrating empty config file: %s", src)
    return migrated
class datetime(date):
min: ClassVar[datetime]
max: ClassVar[datetime]
resolution: ClassVar[timedelta]
if sys.version_info >= (3, 6):
def __new__(
cls: Type[_S],
year: int,
month: int,
day: int,
hour: int = ...,
minute: int = ...,
second: int = ...,
microsecond: int = ...,
tzinfo: Optional[_tzinfo] = ...,
*,
fold: int = ...,
) -> _S: ...
else:
def __new__(
cls: Type[_S],
year: int,
month: int,
day: int,
hour: int = ...,
minute: int = ...,
second: int = ...,
microsecond: int = ...,
tzinfo: Optional[_tzinfo] = ...,
) -> _S: ...
def year(self) -> int: ...
def month(self) -> int: ...
def day(self) -> int: ...
def hour(self) -> int: ...
def minute(self) -> int: ...
def second(self) -> int: ...
def microsecond(self) -> int: ...
def tzinfo(self) -> Optional[_tzinfo]: ...
if sys.version_info >= (3, 6):
def fold(self) -> int: ...
def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ...
def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ...
def today(cls: Type[_S]) -> _S: ...
def fromordinal(cls: Type[_S], n: int) -> _S: ...
if sys.version_info >= (3, 8):
def now(cls: Type[_S], tz: Optional[_tzinfo] = ...) -> _S: ...
else:
def now(cls: Type[_S], tz: None = ...) -> _S: ...
def now(cls, tz: _tzinfo) -> datetime: ...
def utcnow(cls: Type[_S]) -> _S: ...
if sys.version_info >= (3, 6):
def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ...
else:
def combine(cls, date: _date, time: _time) -> datetime: ...
if sys.version_info >= (3, 7):
def fromisoformat(cls: Type[_S], date_string: str) -> _S: ...
def strftime(self, fmt: _Text) -> str: ...
if sys.version_info >= (3,):
def __format__(self, fmt: str) -> str: ...
else:
def __format__(self, fmt: AnyStr) -> AnyStr: ...
def toordinal(self) -> int: ...
def timetuple(self) -> struct_time: ...
if sys.version_info >= (3, 3):
def timestamp(self) -> float: ...
def utctimetuple(self) -> struct_time: ...
def date(self) -> _date: ...
def time(self) -> _time: ...
def timetz(self) -> _time: ...
if sys.version_info >= (3, 6):
def replace(
self,
year: int = ...,
month: int = ...,
day: int = ...,
hour: int = ...,
minute: int = ...,
second: int = ...,
microsecond: int = ...,
tzinfo: Optional[_tzinfo] = ...,
*,
fold: int = ...,
) -> datetime: ...
else:
def replace(
self,
year: int = ...,
month: int = ...,
day: int = ...,
hour: int = ...,
minute: int = ...,
second: int = ...,
microsecond: int = ...,
tzinfo: Optional[_tzinfo] = ...,
) -> datetime: ...
if sys.version_info >= (3, 8):
def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ...
elif sys.version_info >= (3, 3):
def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ...
else:
def astimezone(self, tz: _tzinfo) -> datetime: ...
def ctime(self) -> str: ...
if sys.version_info >= (3, 6):
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
else:
def isoformat(self, sep: str = ...) -> str: ...
def strptime(cls, date_string: _Text, format: _Text) -> datetime: ...
def utcoffset(self) -> Optional[timedelta]: ...
def tzname(self) -> Optional[str]: ...
def dst(self) -> Optional[timedelta]: ...
def __le__(self, other: datetime) -> bool: ... # type: ignore
def __lt__(self, other: datetime) -> bool: ... # type: ignore
def __ge__(self, other: datetime) -> bool: ... # type: ignore
def __gt__(self, other: datetime) -> bool: ... # type: ignore
if sys.version_info >= (3, 8):
def __add__(self: _S, other: timedelta) -> _S: ...
def __radd__(self: _S, other: timedelta) -> _S: ...
else:
def __add__(self, other: timedelta) -> datetime: ...
def __radd__(self, other: timedelta) -> datetime: ...
def __sub__(self, other: datetime) -> timedelta: ...
def __sub__(self, other: timedelta) -> datetime: ...
def __hash__(self) -> int: ...
def weekday(self) -> int: ...
def isoweekday(self) -> int: ...
def isocalendar(self) -> Tuple[int, int, int]: ...
def jupyter_config_dir() -> str:
    """Get the Jupyter config directory for this platform and user.
    Returns JUPYTER_CONFIG_DIR if defined, otherwise the appropriate
    directory for the platform.
    """
    env = os.environ
    if env.get("JUPYTER_NO_CONFIG"):
        return _mkdtemp_once("jupyter-clean-cfg")
    if env.get("JUPYTER_CONFIG_DIR"):
        return env["JUPYTER_CONFIG_DIR"]
    if use_platform_dirs():
        return platformdirs.user_config_dir(APPNAME, appauthor=False)
    home_dir = get_home_dir()
    return pjoin(home_dir, ".jupyter")

def jupyter_data_dir() -> str:
    """Get the config directory for Jupyter data files for this platform and user.
    These are non-transient, non-configuration files.
    Returns JUPYTER_DATA_DIR if defined, else a platform-appropriate path.
    """
    env = os.environ
    if env.get("JUPYTER_DATA_DIR"):
        return env["JUPYTER_DATA_DIR"]
    if use_platform_dirs():
        return platformdirs.user_data_dir(APPNAME, appauthor=False)
    home = get_home_dir()
    if sys.platform == "darwin":
        return os.path.join(home, "Library", "Jupyter")
    elif os.name == "nt":
        appdata = os.environ.get("APPDATA", None)
        if appdata:
            return str(Path(appdata, "jupyter").resolve())
        else:
            return pjoin(jupyter_config_dir(), "data")
    else:
        # Linux, non-OS X Unix, AIX, etc.
        xdg = env.get("XDG_DATA_HOME", None)
        if not xdg:
            xdg = pjoin(home, ".local", "share")
        return pjoin(xdg, "jupyter")

def ensure_dir_exists(path, mode=0o777):
    """Ensure that a directory exists
    If it doesn't exist, try to create it, protecting against a race condition
    if another process is doing the same.
    The default permissions are determined by the current umask.
    """
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    if not os.path.isdir(path):
        raise OSError("%r exists but is not a directory" % path)
The provided code snippet includes necessary dependencies for implementing the `migrate` function. Write a Python function `def migrate()` to solve the following problem:
Migrate IPython configuration to Jupyter
Here is the function:
def migrate():
    """Migrate IPython configuration to Jupyter"""
    env = {
        "jupyter_data": jupyter_data_dir(),
        "jupyter_config": jupyter_config_dir(),
        "ipython_dir": get_ipython_dir(),
        "profile": os.path.join(get_ipython_dir(), "profile_default"),
    }
    migrated = False
    for src_t, dst_t in migrations.items():
        src = src_t.format(**env)
        dst = dst_t.format(**env)
        if os.path.exists(src) and migrate_one(src, dst):
            migrated = True
    for name in config_migrations:
        if migrate_config(name, env):
            migrated = True
    custom_src = custom_src_t.format(**env)
    custom_dst = custom_dst_t.format(**env)
    if os.path.exists(custom_src) and migrate_static_custom(custom_src, custom_dst):
        migrated = True
    # write a marker to avoid re-running migration checks
    ensure_dir_exists(env["jupyter_config"])
    with open(os.path.join(env["jupyter_config"], "migrated"), "w", encoding="utf-8") as f:
        f.write(datetime.now(tz=timezone.utc).isoformat())
    return migrated | Migrate IPython configuration to Jupyter |
177,989 | import os
import platform
import subprocess
import sys
from typing import Any, Dict, List, Optional, Union
def subs(cmd: Union[List[str], str]) -> Optional[str]:
    """
    get data from commands that we need to run outside of python
    """
    try:
        stdout = subprocess.check_output(cmd)
        return stdout.decode("utf-8", "replace").strip()
    except (OSError, subprocess.CalledProcessError):
        return None
Any = object()
Dict = _Alias()
The provided code snippet includes necessary dependencies for implementing the `get_data` function. Write a Python function `def get_data() -> Dict[str, Any]` to solve the following problem:
returns a dict of various user environment data
Here is the function:
def get_data() -> Dict[str, Any]:
    """
    returns a dict of various user environment data
    """
    env: Dict[str, Any] = {}
    env["path"] = os.environ.get("PATH")
    env["sys_path"] = sys.path
    env["sys_exe"] = sys.executable
    env["sys_version"] = sys.version
    env["platform"] = platform.platform()
    # FIXME: which on Windows?
    if sys.platform == "win32":
        env["where"] = subs(["where", "jupyter"])
        env["which"] = None
    else:
        env["which"] = subs(["which", "-a", "jupyter"])
        env["where"] = None
    env["pip"] = subs([sys.executable, "-m", "pip", "list"])
    env["conda"] = subs(["conda", "list"])
    env["conda-env"] = subs(["conda", "env", "export"])
    return env | returns a dict of various user environment data |
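A usage sketch, assuming the definitions above: dump the collected environment as JSON, the way a troubleshooting report might:

```python
import json

env = get_data()
# Drop empty entries (e.g. `conda` on systems without conda) and print.
print(json.dumps({k: v for k, v in env.items() if v}, indent=2))
```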
177,991 | import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(version: str) -> "Version"` to solve the following problem:
Parse the given version string. >>> parse('1.0.dev1') <Version('1.0.dev1')> :param version: The version string to parse. :raises InvalidVersion: When the version string is not a valid version.
Here is the function:
def parse(version: str) -> "Version":
    """Parse the given version string.
    >>> parse('1.0.dev1')
    <Version('1.0.dev1')>
    :param version: The version string to parse.
    :raises InvalidVersion: When the version string is not a valid version.
    """
    return Version(version) | Parse the given version string. >>> parse('1.0.dev1') <Version('1.0.dev1')> :param version: The version string to parse. :raises InvalidVersion: When the version string is not a valid version. |
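A usage sketch: PEP 440 ordering falls out of the parsed `Version` objects, so `parse` can be used directly as a sort key.

```python
versions = ["1.0", "1.0rc1", "1.0.post1", "1.0.dev1", "1.0a5"]
print(sorted(versions, key=parse))
# ['1.0.dev1', '1.0a5', '1.0rc1', '1.0', '1.0.post1']
```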
177,992 | import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
Union: _SpecialForm = ...
Optional: _SpecialForm = ...
class SupportsInt(Protocol, metaclass=ABCMeta):
def __int__(self) -> int:
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
def py__simple_getitem__(self, index):
def py__iter__(self, contextualized_node=None):
def py__getitem__(self, index_value_set, contextualized_node):
def _get_wrapped_value(self):
def name(self):
def infer_type_vars(self, value_set):
def _parse_letter_version(
    letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0
        # We normalize any letters to their lower case form
        letter = letter.lower()
        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"
        return letter, int(number)
    if not letter and number:
        # We assume if we are given a number, but we are not given a letter
        # then this is using the implicit post release syntax (e.g. 1.0-1)
        letter = "post"
        return letter, int(number)
    return None | null |
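A normalization sketch, assuming the definition above: spellings collapse to canonical letters, and a bare number implies a post-release.

```python
print(_parse_letter_version("alpha", None))   # ('a', 0)
print(_parse_letter_version("preview", "4"))  # ('rc', 4)
print(_parse_letter_version("", "1"))         # ('post', 1)
print(_parse_letter_version("", ""))          # None
```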
177,993 | import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
_local_version_separators = re.compile(r"[\._-]")
Optional: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `_parse_local_version` function. Write a Python function `def _parse_local_version(local: str) -> Optional[LocalType]` to solve the following problem:
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
Here is the function:
def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None | Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). |
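A sketch of the mixed alphanumeric key used for local-version ordering, assuming the definition above:

```python
print(_parse_local_version("abc.1.twelve"))   # ('abc', 1, 'twelve')
print(_parse_local_version("ubuntu-20_04"))   # ('ubuntu', 20, 4)
```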
177,994 | import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
Optional: _SpecialForm = ...
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like an iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
Infinity = InfinityType()
NegativeInfinity = NegativeInfinityType()
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we reverse the list, drop the now-leading
# zeros until we hit something non-zero, re-reverse the rest back into the
# correct order, and use that tuple as our sorting key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local | null |
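A sketch of the resulting sort key, assuming `_cmpkey` and the infinity sentinels above are in scope:

key = _cmpkey(0, (1, 0, 0), None, None, None, None)
# Trailing zeros are stripped from the release and the sentinels fill the
# missing segments: (0, (1,), Infinity, NegativeInfinity, Infinity, NegativeInfinity)
print(key)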
177,995 | import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
Union: _SpecialForm = ...
class InvalidVersion(ValueError):
"""Raised when a version string is not a valid version.
>>> Version("invalid")
Traceback (most recent call last):
...
packaging.version.InvalidVersion: Invalid version: 'invalid'
"""
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
The provided code snippet includes necessary dependencies for implementing the `canonicalize_version` function. Write a Python function `def canonicalize_version( version: Union[Version, str], *, strip_trailing_zero: bool = True ) -> str` to solve the following problem:
This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment.
Here is the function:
def canonicalize_version(
version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
"""
This is very similar to Version.__str__, but has one subtle difference
with the way it handles the release segment.
"""
if isinstance(version, str):
try:
parsed = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
else:
parsed = version
parts = []
# Epoch
if parsed.epoch != 0:
parts.append(f"{parsed.epoch}!")
# Release segment
release_segment = ".".join(str(x) for x in parsed.release)
if strip_trailing_zero:
# NB: This strips trailing '.0's to normalize
release_segment = re.sub(r"(\.0)+$", "", release_segment)
parts.append(release_segment)
# Pre-release
if parsed.pre is not None:
parts.append("".join(str(x) for x in parsed.pre))
# Post-release
if parsed.post is not None:
parts.append(f".post{parsed.post}")
# Development release
if parsed.dev is not None:
parts.append(f".dev{parsed.dev}")
# Local version segment
if parsed.local is not None:
parts.append(f"+{parsed.local}")
return "".join(parts) | This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. |
177,996 | import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)
class InvalidWheelFilename(ValueError):
"""
An invalid wheel filename was found; users should refer to PEP 427.
"""
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
FrozenSet = _Alias()
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like an iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
# that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
def interpreter(self) -> str:
return self._interpreter
def abi(self) -> str:
return self._abi
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
def parse_tag(tag: str) -> FrozenSet[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
def parse_wheel_filename(
filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
if not filename.endswith(".whl"):
raise InvalidWheelFilename(
f"Invalid wheel filename (extension must be '.whl'): {filename}"
)
filename = filename[:-4]
dashes = filename.count("-")
if dashes not in (4, 5):
raise InvalidWheelFilename(
f"Invalid wheel filename (wrong number of parts): {filename}"
)
parts = filename.split("-", dashes - 2)
name_part = parts[0]
# See PEP 427 for the rules on escaping the project name
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f"Invalid project name: {filename}")
name = canonicalize_name(name_part)
version = Version(parts[1])
if dashes == 5:
build_part = parts[2]
build_match = _build_tag_regex.match(build_part)
if build_match is None:
raise InvalidWheelFilename(
f"Invalid build number: {build_part} in '{filename}'"
)
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
build = ()
tags = parse_tag(parts[-1])
return (name, version, build, tags) | null |
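A usage sketch, assuming `parse_wheel_filename` and its dependencies above behave like the real packaging library:

name, version, build, tags = parse_wheel_filename("pip-23.0-py3-none-any.whl")
assert name == "pip" and str(version) == "23.0"
assert build == ()                        # no build tag in this filename
assert Tag("py3", "none", "any") in tags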
177,997 | import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
NormalizedName = NewType("NormalizedName", str)
class InvalidSdistFilename(ValueError):
"""
An invalid sdist filename was found; users should refer to the packaging user guide.
"""
def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like an iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
if filename.endswith(".tar.gz"):
file_stem = filename[: -len(".tar.gz")]
elif filename.endswith(".zip"):
file_stem = filename[: -len(".zip")]
else:
raise InvalidSdistFilename(
f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
f" {filename}"
)
# We are requiring a PEP 440 version, which cannot contain dashes,
# so we split on the last dash.
name_part, sep, version_part = file_stem.rpartition("-")
if not sep:
raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
name = canonicalize_name(name_part)
version = Version(version_part)
return (name, version) | null |
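A usage sketch, assuming `parse_sdist_filename` above is in scope:

name, version = parse_sdist_filename("foo-bar-1.0.tar.gz")
assert name == "foo-bar" and str(version) == "1.0"   # only the last dash separates name and version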
177,998 | import logging
import platform
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from . import _manylinux, _musllinux
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
# that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
def interpreter(self) -> str:
return self._interpreter
def abi(self) -> str:
return self._abi
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
If python_version only specifies a major version then user-provided ABIs and
the 'none' ABI will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
if not python_version:
python_version = sys.version_info[:2]
interpreter = f"cp{_version_nodot(python_version[:2])}"
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
else:
abis = list(abis)
platforms = list(platforms or platform_tags())
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def interpreter_name() -> str:
"""
Returns the name of the running interpreter.
Some implementations have a reserved, two-letter abbreviation which will
be returned when appropriate.
"""
name = sys.implementation.name
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
class Iterator(Iterable[_T_co], Protocol[_T_co]):
def __next__(self) -> _T_co: ...
def __iter__(self) -> Iterator[_T_co]: ...
The provided code snippet includes necessary dependencies for implementing the `sys_tags` function. Write a Python function `def sys_tags(*, warn: bool = False) -> Iterator[Tag]` to solve the following problem:
Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important.
Here is the function:
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
interp_name = interpreter_name()
if interp_name == "cp":
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
if interp_name == "pp":
interp = "pp3"
elif interp_name == "cp":
interp = "cp" + interpreter_version(warn=warn)
else:
interp = None
yield from compatible_tags(interpreter=interp) | Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. |
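A minimal sketch, assuming the record's helpers behave like the real packaging.tags module; the exact tags depend on the running interpreter:

for tag in list(sys_tags())[:3]:
    print(tag)  # e.g. cp311-cp311-manylinux_2_17_x86_64 on CPython 3.11 / Linux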
177,999 | import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
Any = object()
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
The provided code snippet includes necessary dependencies for implementing the `_normalize_extra_values` function. Write a Python function `def _normalize_extra_values(results: Any) -> Any` to solve the following problem:
Normalize extra values.
Here is the function:
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results | Normalize extra values. |
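A hedged sketch, assuming the parser's `Variable`, `Op`, and `Value` nodes each take a single string value:

results = [(Variable("extra"), Op("=="), Value("Foo_Bar"))]
lhs, op, rhs = _normalize_extra_values(results)[0]
print(rhs.serialize())  # -> "foo-bar" (the extra name is canonicalized)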
178,000 | import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
Union: _SpecialForm = ...
Optional: _SpecialForm = ...
List = _Alias()
MarkerAtom = Any
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single-item list
# where the single item is itself its own list. In that case we want to skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker | null |
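A minimal sketch of the formatting behavior, using plain strings to stand in for parsed marker atoms:

print(_format_marker([["a", "or", "b"], "and", "c"]))  # -> (a or b) and c
print(_format_marker([["a", "or", "b"]]))              # -> a or b (single-item lists are unwrapped)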
178,001 | import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
List = _Alias()
Dict = _Alias()
class Variable(Node):
def serialize(self) -> str:
MarkerList = List[Any]
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups) | null |
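A hedged sketch, assuming `Variable`/`Op`/`Value` take a single string and that `_eval_op` and `_normalize` behave as in the real packaging library:

markers = [(Variable("os_name"), Op("=="), Value("posix"))]
print(_evaluate_markers(markers, {"os_name": "posix"}))  # -> True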
178,002 | import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
def format_full_version(info: "sys._version_info") -> str:
Dict = _Alias()
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
} | null |
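A quick sketch, assuming `default_environment` above is in scope:

env = default_environment()
print(env["os_name"], env["python_version"])  # e.g. posix 3.11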
178,003 | import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class ParsedRequirement(NamedTuple):
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
def consume(self, name: str) -> None:
def check(self, name: str, *, peek: bool = False) -> bool:
def expect(self, name: str, *, expected: str) -> Token:
def read(self) -> Token:
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
def enclosing_tokens(self, open_token: str, close_token: str) -> Iterator[bool]:
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) | null |
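A hedged sketch, assuming `ParsedRequirement` carries the usual packaging fields (name, url, extras, specifier, marker):

req = parse_requirement("requests[security] >= 2.8.1 ; python_version > '3.6'")
print(req.name, req.extras, req.specifier)  # e.g. requests ['security'] >= 2.8.1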
178,004 | import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
MarkerList = List[Any]
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
def enclosing_tokens(self, open_token: str, close_token: str) -> Iterator[bool]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield open_position is not None
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected closing {close_token}",
span_start=open_position,
)
self.read()
def parse_marker(source: str) -> MarkerList:
return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) | null |
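A minimal sketch, assuming `parse_marker` above is in scope; it returns the nested list/tuple structure that `_evaluate_markers` consumes:

marker = parse_marker("python_version >= '3.8' and os_name == 'posix'")
# roughly: [(Variable 'python_version', Op '>=', Value '3.8'), 'and', (Variable 'os_name', Op '==', Value 'posix')]
print(marker)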
178,005 | import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
class Version(_BaseVersion):
def __init__(self, version: str) -> None:
def __repr__(self) -> str:
def __str__(self) -> str:
def epoch(self) -> int:
def release(self) -> Tuple[int, ...]:
def pre(self) -> Optional[Tuple[str, int]]:
def post(self) -> Optional[int]:
def dev(self) -> Optional[int]:
def local(self) -> Optional[str]:
def public(self) -> str:
def base_version(self) -> str:
def is_prerelease(self) -> bool:
def is_postrelease(self) -> bool:
def is_devrelease(self) -> bool:
def major(self) -> int:
def minor(self) -> int:
def micro(self) -> int:
def _coerce_version(version: UnparsedVersion) -> Version:
if not isinstance(version, Version):
version = Version(version)
return version | null |
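A quick sketch, assuming `_coerce_version` above is in scope:

v = _coerce_version("1.0")        # strings are parsed into Version objects
assert isinstance(v, Version)
assert _coerce_version(v) is v    # Version instances pass through unchanged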
178,006 | import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
List = _Alias()
def _version_split(version: str) -> List[str]:
result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result | null |
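A usage sketch, assuming `_version_split` above is in scope:

assert _version_split("1.2rc1") == ["1", "2", "rc1"]   # the pre-release suffix is split off
assert _version_split("1.2.3") == ["1", "2", "3"]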
178,007 | import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
) | null |
178,008 | import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
List = _Alias()
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like an iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) | null |
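A usage sketch, assuming `_pad_version` above is in scope; the shorter release segment is zero-padded so both sides compare position-by-position:

left, right = _pad_version(["1", "2"], ["1", "2", "0", "dev1"])
assert left == ["1", "2", "0"]
assert right == ["1", "2", "0", "dev1"]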
178,009 | from __future__ import print_function, absolute_import
import importlib
import sys
import warnings
from xml.etree.ElementTree import ParseError
from xml.etree.ElementTree import TreeBuilder as _TreeBuilder
from xml.etree.ElementTree import parse as _parse
from xml.etree.ElementTree import tostring
from .common import PY3
from .common import (
DTDForbidden,
EntitiesForbidden,
ExternalReferenceForbidden,
_generate_etree_functions,
)
The provided code snippet includes necessary dependencies for implementing the `_get_py3_cls` function. Write a Python function `def _get_py3_cls()` to solve the following problem:
Python 3.3 hides the pure Python code but defusedxml requires it. The code is based on test.support.import_fresh_module().
Here is the function:
def _get_py3_cls():
"""Python 3.3 hides the pure Python code but defusedxml requires it.
The code is based on test.support.import_fresh_module().
"""
pymodname = "xml.etree.ElementTree"
cmodname = "_elementtree"
pymod = sys.modules.pop(pymodname, None)
cmod = sys.modules.pop(cmodname, None)
sys.modules[cmodname] = None
try:
pure_pymod = importlib.import_module(pymodname)
finally:
# restore module
sys.modules[pymodname] = pymod
if cmod is not None:
sys.modules[cmodname] = cmod
else:
sys.modules.pop(cmodname, None)
# restore attribute on original package
etree_pkg = sys.modules["xml.etree"]
if pymod is not None:
etree_pkg.ElementTree = pymod
elif hasattr(etree_pkg, "ElementTree"):
del etree_pkg.ElementTree
_XMLParser = pure_pymod.XMLParser
_iterparse = pure_pymod.iterparse
# patch pure module to use ParseError from C extension
pure_pymod.ParseError = ParseError
return _XMLParser, _iterparse | Python 3.3 hides the pure Python code but defusedxml requires it. The code is based on test.support.import_fresh_module(). |
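A sketch of how the two returned classes are typically consumed at import time, modeled on defusedxml.ElementTree (the subclass body is elided here):
_XMLParser, _iterparse = _get_py3_cls()

class DefusedXMLParser(_XMLParser):
    ...  # defused handlers (forbid_dtd / forbid_entities / forbid_external) go here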
178,010 | from __future__ import print_function, absolute_import
import threading
import warnings
from lxml import etree as _etree
from .common import DTDForbidden, EntitiesForbidden, NotSupportedError
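# Note: `_parser_tls` (a thread-local parser registry) and the `LXML3`
# version flag used below are defined in the full defusedxml.lxml module
# and are elided from this snippet.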
getDefaultParser = _parser_tls.getDefaultParser
def check_docinfo(elementtree, forbid_dtd=False, forbid_entities=True):
"""Check docinfo of an element tree for DTD and entity declarations
The check for entity declarations needs lxml 3 or newer. lxml 2.x does
not support dtd.iterentities().
"""
docinfo = elementtree.docinfo
if docinfo.doctype:
if forbid_dtd:
raise DTDForbidden(docinfo.doctype, docinfo.system_url, docinfo.public_id)
if forbid_entities and not LXML3:
# lxml < 3 has no iterentities()
raise NotSupportedError("Unable to check for entity declarations " "in lxml 2.x")
if forbid_entities:
for dtd in docinfo.internalDTD, docinfo.externalDTD:
if dtd is None:
continue
for entity in dtd.iterentities():
raise EntitiesForbidden(entity.name, entity.content, None, None, None, None)
def parse(source, parser=None, base_url=None, forbid_dtd=False, forbid_entities=True):
if parser is None:
parser = getDefaultParser()
elementtree = _etree.parse(source, parser, base_url=base_url)
check_docinfo(elementtree, forbid_dtd, forbid_entities)
return elementtree | null |
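A minimal usage sketch (assumed, not from the source): feeding a document that carries a DOCTYPE while forbid_dtd=True makes check_docinfo raise.
from io import BytesIO

evil = b"<?xml version='1.0'?><!DOCTYPE root []><root/>"
try:
    parse(BytesIO(evil), forbid_dtd=True)
except DTDForbidden as exc:
    print("blocked:", exc)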
178,011 | from __future__ import print_function, absolute_import
import threading
import warnings
from lxml import etree as _etree
from .common import DTDForbidden, EntitiesForbidden, NotSupportedError
getDefaultParser = _parser_tls.getDefaultParser
def check_docinfo(elementtree, forbid_dtd=False, forbid_entities=True):
"""Check docinfo of an element tree for DTD and entity declarations
The check for entity declarations needs lxml 3 or newer. lxml 2.x does
not support dtd.iterentities().
"""
docinfo = elementtree.docinfo
if docinfo.doctype:
if forbid_dtd:
raise DTDForbidden(docinfo.doctype, docinfo.system_url, docinfo.public_id)
if forbid_entities and not LXML3:
# lxml < 3 has no iterentities()
raise NotSupportedError("Unable to check for entity declarations " "in lxml 2.x")
if forbid_entities:
for dtd in docinfo.internalDTD, docinfo.externalDTD:
if dtd is None:
continue
for entity in dtd.iterentities():
raise EntitiesForbidden(entity.name, entity.content, None, None, None, None)
def fromstring(text, parser=None, base_url=None, forbid_dtd=False, forbid_entities=True):
if parser is None:
parser = getDefaultParser()
rootelement = _etree.fromstring(text, parser, base_url=base_url)
elementtree = rootelement.getroottree()
check_docinfo(elementtree, forbid_dtd, forbid_entities)
return rootelement | null |
178,012 | from __future__ import print_function, absolute_import
import threading
import warnings
from lxml import etree as _etree
from .common import DTDForbidden, EntitiesForbidden, NotSupportedError
class NotSupportedError(DefusedXmlException):
"""The operation is not supported"""
def iterparse(*args, **kwargs):
raise NotSupportedError("defused lxml.etree.iterparse not available") | null |
178,013 | from __future__ import print_function, absolute_import
from xml.dom.minidom import _do_pulldom_parse
from . import expatbuilder as _expatbuilder
from . import pulldom as _pulldom
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse( file, parser=None, bufsize=None, forbid_dtd=False, forbid_entities=True, forbid_external=True )` to solve the following problem:
Parse a file into a DOM by filename or file object.
Here is the function:
def parse(
file, parser=None, bufsize=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
return _expatbuilder.parse(
file,
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external,
)
else:
return _do_pulldom_parse(
_pulldom.parse,
(file,),
{
"parser": parser,
"bufsize": bufsize,
"forbid_dtd": forbid_dtd,
"forbid_entities": forbid_entities,
"forbid_external": forbid_external,
},
) | Parse a file into a DOM by filename or file object. |
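A usage sketch; the file name is hypothetical. With the default flags, entity declarations are rejected while ordinary documents parse normally.
dom = parse("notes.xml")  # hypothetical file; forbid_entities=True by default
print(dom.documentElement.tagName)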
178,014 | from __future__ import print_function, absolute_import
from xml.dom.minidom import _do_pulldom_parse
from . import expatbuilder as _expatbuilder
from . import pulldom as _pulldom
The provided code snippet includes necessary dependencies for implementing the `parseString` function. Write a Python function `def parseString( string, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True )` to solve the following problem:
Parse a file into a DOM from a string.
Here is the function:
def parseString(
string, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
"""Parse a file into a DOM from a string."""
if parser is None:
return _expatbuilder.parseString(
string,
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external,
)
else:
return _do_pulldom_parse(
_pulldom.parseString,
(string,),
{
"parser": parser,
"forbid_dtd": forbid_dtd,
"forbid_entities": forbid_entities,
"forbid_external": forbid_external,
},
) | Parse a file into a DOM from a string. |
178,015 | from __future__ import print_function, absolute_import
import io
from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden, PY3
def defused_gzip_decode(data, limit=None):
"""gzip encoded data -> unencoded data
Decode data using the gzip content encoding as described in RFC 1952
"""
if not gzip: # pragma: no cover
raise NotImplementedError
if limit is None:
limit = MAX_DATA
f = io.BytesIO(data)
gzf = gzip.GzipFile(mode="rb", fileobj=f)
try:
if limit < 0: # no limit
decoded = gzf.read()
else:
decoded = gzf.read(limit + 1)
except IOError: # pragma: no cover
raise ValueError("invalid data")
f.close()
gzf.close()
if limit >= 0 and len(decoded) > limit:
raise ValueError("max gzipped payload length exceeded")
return decoded
class DefusedGzipDecodedResponse(gzip.GzipFile if gzip else object):
"""a file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
"""
def __init__(self, response, limit=None):
# response doesn't support tell() and read(), required by
# GzipFile
if not gzip: # pragma: no cover
raise NotImplementedError
self.limit = limit = limit if limit is not None else MAX_DATA
if limit < 0: # no limit
data = response.read()
self.readlength = None
else:
data = response.read(limit + 1)
self.readlength = 0
if limit >= 0 and len(data) > limit:
raise ValueError("max payload length exceeded")
self.stringio = io.BytesIO(data)
gzip.GzipFile.__init__(self, mode="rb", fileobj=self.stringio)
def read(self, n):
if self.limit >= 0:
left = self.limit - self.readlength
n = min(n, left + 1)
data = gzip.GzipFile.read(self, n)
self.readlength += len(data)
if self.readlength > self.limit:
raise ValueError("max payload length exceeded")
return data
else:
return gzip.GzipFile.read(self, n)
def close(self):
gzip.GzipFile.close(self)
self.stringio.close()
class DefusedExpatParser(ExpatParser):
def __init__(self, target, forbid_dtd=False, forbid_entities=True, forbid_external=True):
ExpatParser.__init__(self, target)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
parser = self._parser
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
if self.forbid_external:
parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(
self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def gzip_decode(data: str, max_decode: int = ...) -> str: ...
class GzipDecodedResponse(GzipFile):
stringio: StringIO[Any]
def __init__(self, response: HTTPResponse) -> None: ...
def close(self): ...
def monkey_patch():
xmlrpc_client.FastParser = DefusedExpatParser
xmlrpc_client.GzipDecodedResponse = DefusedGzipDecodedResponse
xmlrpc_client.gzip_decode = defused_gzip_decode
if xmlrpc_server:
xmlrpc_server.gzip_decode = defused_gzip_decode | null |
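A sketch of the intended workflow: patch the stdlib client process-wide, use xmlrpc.client as usual, then restore the originals with the companion unmonkey_patch shown in the next snippet.
monkey_patch()
try:
    pass  # talk to an XML-RPC endpoint here; DTDs and entity declarations are now rejected
finally:
    unmonkey_patch()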
178,016 | from __future__ import print_function, absolute_import
import io
from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden, PY3
def gzip_decode(data: str, max_decode: int = ...) -> str: ...
class GzipDecodedResponse(GzipFile):
stringio: StringIO[Any]
def __init__(self, response: HTTPResponse) -> None: ...
def close(self): ...
def unmonkey_patch():
xmlrpc_client.FastParser = None
xmlrpc_client.GzipDecodedResponse = _OrigGzipDecodedResponse
xmlrpc_client.gzip_decode = _orig_gzip_decode
if xmlrpc_server:
xmlrpc_server.gzip_decode = _orig_gzip_decode | null |
178,017 | from __future__ import print_function, absolute_import
from xml.dom.expatbuilder import ExpatBuilder as _ExpatBuilder
from xml.dom.expatbuilder import Namespaces as _Namespaces
from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden
class DefusedExpatBuilder(_ExpatBuilder):
"""Defused document builder"""
def __init__(
self, options=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
_ExpatBuilder.__init__(self, options)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(
self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def install(self, parser):
_ExpatBuilder.install(self, parser)
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
# if self._options.entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
if self.forbid_external:
parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
class DefusedExpatBuilderNS(_Namespaces, DefusedExpatBuilder):
"""Defused document builder that supports namespaces."""
def install(self, parser):
DefusedExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = self.start_namespace_decl_handler
def reset(self):
DefusedExpatBuilder.reset(self)
self._initNamespaces()
The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(file, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True)` to solve the following problem:
Parse a document, returning the resulting Document node. 'file' may be either a file name or an open file object.
Here is the function:
def parse(file, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
build_builder = DefusedExpatBuilderNS
else:
build_builder = DefusedExpatBuilder
builder = build_builder(
forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external
)
if isinstance(file, str):
fp = open(file, "rb")
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result | Parse a document, returning the resulting Document node. 'file' may be either a file name or an open file object. |
178,018 | from __future__ import print_function, absolute_import
from xml.dom.expatbuilder import ExpatBuilder as _ExpatBuilder
from xml.dom.expatbuilder import Namespaces as _Namespaces
from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden
class DefusedExpatBuilder(_ExpatBuilder):
"""Defused document builder"""
def __init__(
self, options=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
_ExpatBuilder.__init__(self, options)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
self.forbid_external = forbid_external
def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def defused_entity_decl(
self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) # pragma: no cover
def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def install(self, parser):
_ExpatBuilder.install(self, parser)
if self.forbid_dtd:
parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
if self.forbid_entities:
# if self._options.entities:
parser.EntityDeclHandler = self.defused_entity_decl
parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
if self.forbid_external:
parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
class DefusedExpatBuilderNS(_Namespaces, DefusedExpatBuilder):
"""Defused document builder that supports namespaces."""
def install(self, parser):
DefusedExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = self.start_namespace_decl_handler
def reset(self):
DefusedExpatBuilder.reset(self)
self._initNamespaces()
The provided code snippet includes necessary dependencies for implementing the `parseString` function. Write a Python function `def parseString( string, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True )` to solve the following problem:
Parse a document from a string, returning the resulting Document node.
Here is the function:
def parseString(
string, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
build_builder = DefusedExpatBuilderNS
else:
build_builder = DefusedExpatBuilder
builder = build_builder(
forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external
)
return builder.parseString(string) | Parse a document from a string, returning the resulting Document node. |
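A minimal sketch (assumed usage): with the default forbid_entities=True, an internal entity declaration triggers EntitiesForbidden even when DTDs themselves are allowed.
xml = "<!DOCTYPE root [<!ENTITY e 'v'>]><root>&e;</root>"
try:
    parseString(xml, forbid_dtd=False)
except EntitiesForbidden as exc:
    print("blocked:", exc)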
178,019 | from __future__ import print_function, absolute_import
from xml.dom.pulldom import parse as _parse
from xml.dom.pulldom import parseString as _parseString
from .sax import make_parser
def make_parser(parser_list=[]):
return expatreader.create_parser()
def parse(
stream_or_string,
parser=None,
bufsize=None,
forbid_dtd=False,
forbid_entities=True,
forbid_external=True,
):
if parser is None:
parser = make_parser()
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
return _parse(stream_or_string, parser, bufsize) | null |
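A pull-parsing sketch (assumed usage): the returned stream yields (event, node) pairs just like xml.dom.pulldom.
from io import StringIO
from xml.dom.pulldom import START_ELEMENT

for event, node in parse(StringIO("<root><item/></root>")):
    if event == START_ELEMENT:
        print(node.tagName)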
178,020 | from __future__ import print_function, absolute_import
from xml.dom.pulldom import parse as _parse
from xml.dom.pulldom import parseString as _parseString
from .sax import make_parser
def make_parser(parser_list=[]):
return expatreader.create_parser()
def parseString(
string, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
if parser is None:
parser = make_parser()
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
return _parseString(string, parser) | null |
178,021 | import sys
import xml.parsers.expat
def _apply_defusing(defused_mod):
assert defused_mod is sys.modules[defused_mod.__name__]
stdlib_name = defused_mod.__origin__
__import__(stdlib_name, {}, {}, ["*"])
stdlib_mod = sys.modules[stdlib_name]
stdlib_names = set(dir(stdlib_mod))
for name, obj in vars(defused_mod).items():
if name.startswith("_") or name not in stdlib_names:
continue
setattr(stdlib_mod, name, obj)
return stdlib_mod | null |
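A sketch of how this helper is driven (defusedxml's defuse_stdlib calls it; each defused module carries an __origin__ attribute naming its stdlib counterpart):
import defusedxml.ElementTree
stdlib_et = _apply_defusing(defusedxml.ElementTree)
# xml.etree.ElementTree's parse/fromstring now point at the defused versions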
178,022 | import sys
import xml.parsers.expat
The provided code snippet includes necessary dependencies for implementing the `_generate_etree_functions` function. Write a Python function `def _generate_etree_functions(DefusedXMLParser, _TreeBuilder, _parse, _iterparse)` to solve the following problem:
Factory for functions needed by etree, dependent on whether cElementTree or ElementTree is used.
Here is the function:
def _generate_etree_functions(DefusedXMLParser, _TreeBuilder, _parse, _iterparse):
"""Factory for functions needed by etree, dependent on whether
cElementTree or ElementTree is used."""
def parse(source, parser=None, forbid_dtd=False, forbid_entities=True, forbid_external=True):
if parser is None:
parser = DefusedXMLParser(
target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external,
)
return _parse(source, parser)
def iterparse(
source,
events=None,
parser=None,
forbid_dtd=False,
forbid_entities=True,
forbid_external=True,
):
if parser is None:
parser = DefusedXMLParser(
target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external,
)
return _iterparse(source, events, parser)
def fromstring(text, forbid_dtd=False, forbid_entities=True, forbid_external=True):
parser = DefusedXMLParser(
target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external,
)
parser.feed(text)
return parser.close()
return parse, iterparse, fromstring | Factory for functions needed by etree, dependent on whether cElementTree or ElementTree is used. |
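A sketch of how the factory output is bound at module level, assuming DefusedXMLParser, _TreeBuilder, _parse and _iterparse are in scope as in the etree wrapper; the XML alias mirrors ElementTree's convention and is an assumption here.
parse, iterparse, fromstring = _generate_etree_functions(
    DefusedXMLParser, _TreeBuilder, _parse, _iterparse
)
XML = fromstring  # assumed alias, matching xml.etree.ElementTree.XML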
178,023 | from __future__ import print_function, absolute_import
from xml.sax import InputSource as _InputSource
from xml.sax import ErrorHandler as _ErrorHandler
from . import expatreader
def parse(
source,
handler,
errorHandler=_ErrorHandler(),
forbid_dtd=False,
forbid_entities=True,
forbid_external=True,
):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
parser.parse(source)
def make_parser(parser_list=[]):
return expatreader.create_parser()
class BytesIO(BufferedIOBase, BinaryIO):
def __init__(self, initial_bytes: bytes = ...) -> None: ...
# BytesIO does not contain a "name" field. This workaround is necessary
# to allow BytesIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def __enter__(self: _T) -> _T: ...
def getvalue(self) -> bytes: ...
def getbuffer(self) -> memoryview: ...
if sys.version_info >= (3, 7):
def read1(self, __size: Optional[int] = ...) -> bytes: ...
else:
def read1(self, __size: Optional[int]) -> bytes: ... # type: ignore
def parseString(
string,
handler,
errorHandler=_ErrorHandler(),
forbid_dtd=False,
forbid_entities=True,
forbid_external=True,
):
from io import BytesIO
if errorHandler is None:
errorHandler = _ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.forbid_dtd = forbid_dtd
parser.forbid_entities = forbid_entities
parser.forbid_external = forbid_external
inpsrc = _InputSource()
inpsrc.setByteStream(BytesIO(string))
parser.parse(inpsrc) | null |
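A usage sketch (the handler class is hypothetical): counting start-element events while parsing a byte string with the defused reader.
from xml.sax.handler import ContentHandler

class Counter(ContentHandler):
    def __init__(self):
        super().__init__()
        self.count = 0

    def startElement(self, name, attrs):
        self.count += 1

h = Counter()
parseString(b"<root><a/><b/></root>", h)
print(h.count)  # 3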
178,024 | import glob
import os
import shutil
import sys
import sysconfig
import winreg
import tempfile
if sys.stdout is None:
sys.stdout = sys.stderr
sys.stderr = Tee(sys.stderr)
sys.stdout = Tee(sys.stdout)
verbose = 1
root_key_name = "Software\\Python\\PythonCore\\" + sys.winver
try:
# When this script is run from inside the bdist_wininst installer,
# file_created() and directory_created() are additional builtin
# functions which write lines to Python23\pywin32-install.log. This is
# a list of actions for the uninstaller, the format is inspired by what
# the Wise installer also creates.
file_created
is_bdist_wininst = True
except NameError:
is_bdist_wininst = False # we know what it is not - but not what it is :)
def file_created(file):
pass
def directory_created(directory):
pass
def get_root_hkey():
try:
winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE, root_key_name, 0, winreg.KEY_CREATE_SUB_KEY
)
return winreg.HKEY_LOCAL_MACHINE
except OSError:
# Either not exist, or no permissions to create subkey means
# must be HKCU
return winreg.HKEY_CURRENT_USER
try:
create_shortcut
except NameError:
# Create a function with the same signature as create_shortcut provided
# by bdist_wininst
def create_shortcut(
path, description, filename, arguments="", workdir="", iconpath="", iconindex=0
):
import pythoncom
from win32com.shell import shell
ilink = pythoncom.CoCreateInstance(
shell.CLSID_ShellLink,
None,
pythoncom.CLSCTX_INPROC_SERVER,
shell.IID_IShellLink,
)
ilink.SetPath(path)
ilink.SetDescription(description)
if arguments:
ilink.SetArguments(arguments)
if workdir:
ilink.SetWorkingDirectory(workdir)
if iconpath or iconindex:
ilink.SetIconLocation(iconpath, iconindex)
# now save it.
ipf = ilink.QueryInterface(pythoncom.IID_IPersistFile)
ipf.Save(filename, 0)
# Support the same list of "path names" as bdist_wininst.
def CopyTo(desc, src, dest):
import win32api
import win32con
while 1:
try:
win32api.CopyFile(src, dest, 0)
return
except win32api.error as details:
if details.winerror == 5: # access denied - user not admin.
raise
if silent:
# Running silent mode - just re-raise the error.
raise
full_desc = (
"Error %s\n\n"
"If you have any Python applications running, "
"please close them now\nand select 'Retry'\n\n%s"
% (desc, details.strerror)
)
rc = win32api.MessageBox(
0, full_desc, "Installation Error", win32con.MB_ABORTRETRYIGNORE
)
if rc == win32con.IDABORT:
raise
elif rc == win32con.IDIGNORE:
return
# else retry - around we go again.
def LoadSystemModule(lib_dir, modname):
# See if this is a debug build.
import importlib.machinery
import importlib.util
suffix = "_d" if "_d.pyd" in importlib.machinery.EXTENSION_SUFFIXES else ""
filename = "%s%d%d%s.dll" % (
modname,
sys.version_info[0],
sys.version_info[1],
suffix,
)
filename = os.path.join(lib_dir, "pywin32_system32", filename)
loader = importlib.machinery.ExtensionFileLoader(modname, filename)
spec = importlib.machinery.ModuleSpec(name=modname, loader=loader, origin=filename)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
def RegisterCOMObjects(register=True):
import win32com.server.register
if register:
func = win32com.server.register.RegisterClasses
else:
func = win32com.server.register.UnregisterClasses
flags = {}
if not verbose:
flags["quiet"] = 1
for module, klass_name in com_modules:
__import__(module)
mod = sys.modules[module]
flags["finalize_register"] = getattr(mod, "DllRegisterServer", None)
flags["finalize_unregister"] = getattr(mod, "DllUnregisterServer", None)
klass = getattr(mod, klass_name)
func(klass, **flags)
def RegisterHelpFile(register=True, lib_dir=None):
if lib_dir is None:
lib_dir = sysconfig.get_paths()["platlib"]
if register:
# Register the .chm help file.
chm_file = os.path.join(lib_dir, "PyWin32.chm")
if os.path.isfile(chm_file):
# This isn't recursive, so if 'Help' doesn't exist, we croak
SetPyKeyVal("Help", None, None)
SetPyKeyVal("Help\\Pythonwin Reference", None, chm_file)
return chm_file
else:
print("NOTE: PyWin32.chm can not be located, so has not " "been registered")
else:
UnsetPyKeyVal("Help\\Pythonwin Reference", None, delete_key=True)
return None
def RegisterPythonwin(register=True, lib_dir=None):
"""Add (or remove) Pythonwin to context menu for python scripts.
??? Should probably also add Edit command for pys files also.
Also need to remove these keys on uninstall, but there's no function
like file_created to add registry entries to uninstall log ???
"""
import os
if lib_dir is None:
lib_dir = sysconfig.get_paths()["platlib"]
classes_root = get_root_hkey()
## Installer executable doesn't seem to pass anything to postinstall script indicating if it's a debug build,
pythonwin_exe = os.path.join(lib_dir, "Pythonwin", "Pythonwin.exe")
pythonwin_edit_command = pythonwin_exe + ' -edit "%1"'
keys_vals = [
(
"Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\Pythonwin.exe",
"",
pythonwin_exe,
),
(
"Software\\Classes\\Python.File\\shell\\Edit with Pythonwin",
"command",
pythonwin_edit_command,
),
(
"Software\\Classes\\Python.NoConFile\\shell\\Edit with Pythonwin",
"command",
pythonwin_edit_command,
),
]
try:
if register:
for key, sub_key, val in keys_vals:
## Since winreg only uses the character Api functions, this can fail if Python
## is installed to a path containing non-ascii characters
hkey = winreg.CreateKey(classes_root, key)
if sub_key:
hkey = winreg.CreateKey(hkey, sub_key)
winreg.SetValueEx(hkey, None, 0, winreg.REG_SZ, val)
hkey.Close()
else:
for key, sub_key, val in keys_vals:
try:
if sub_key:
hkey = winreg.OpenKey(classes_root, key)
winreg.DeleteKey(hkey, sub_key)
hkey.Close()
winreg.DeleteKey(classes_root, key)
except OSError as why:
winerror = getattr(why, "winerror", why.errno)
if winerror != 2: # file not found
raise
finally:
# tell windows about the change
from win32com.shell import shell, shellcon
shell.SHChangeNotify(
shellcon.SHCNE_ASSOCCHANGED, shellcon.SHCNF_IDLIST, None, None
)
def get_shortcuts_folder():
if get_root_hkey() == winreg.HKEY_LOCAL_MACHINE:
try:
fldr = get_special_folder_path("CSIDL_COMMON_PROGRAMS")
except OSError:
# No CSIDL_COMMON_PROGRAMS on this platform
fldr = get_special_folder_path("CSIDL_PROGRAMS")
else:
# non-admin install - always goes in this user's start menu.
fldr = get_special_folder_path("CSIDL_PROGRAMS")
try:
install_group = winreg.QueryValue(
get_root_hkey(), root_key_name + "\\InstallPath\\InstallGroup"
)
except OSError:
vi = sys.version_info
install_group = "Python %d.%d" % (vi[0], vi[1])
return os.path.join(fldr, install_group)
def get_system_dir():
import win32api # we assume this exists.
try:
import pythoncom
import win32process
from win32com.shell import shell, shellcon
try:
if win32process.IsWow64Process():
return shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_SYSTEMX86)
return shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_SYSTEM)
except (pythoncom.com_error, win32process.error):
return win32api.GetSystemDirectory()
except ImportError:
return win32api.GetSystemDirectory()
def fixup_dbi():
# We used to have a dbi.pyd with our .pyd files, but now have a .py file.
# If the user didn't uninstall, they will find the .pyd which will cause
# problems - so handle that.
import win32api
import win32con
pyd_name = os.path.join(os.path.dirname(win32api.__file__), "dbi.pyd")
pyd_d_name = os.path.join(os.path.dirname(win32api.__file__), "dbi_d.pyd")
py_name = os.path.join(os.path.dirname(win32con.__file__), "dbi.py")
for this_pyd in (pyd_name, pyd_d_name):
this_dest = this_pyd + ".old"
if os.path.isfile(this_pyd) and os.path.isfile(py_name):
try:
if os.path.isfile(this_dest):
print(
"Old dbi '%s' already exists - deleting '%s'"
% (this_dest, this_pyd)
)
os.remove(this_pyd)
else:
os.rename(this_pyd, this_dest)
print("renamed '%s'->'%s.old'" % (this_pyd, this_pyd))
file_created(this_pyd + ".old")
except os.error as exc:
print("FAILED to rename '%s': %s" % (this_pyd, exc))
def install(lib_dir):
import traceback
# The .pth file is now installed as a regular file.
# Create the .pth file in the site-packages dir, and use only relative paths
# We used to write a .pth directly to sys.prefix - clobber it.
if os.path.isfile(os.path.join(sys.prefix, "pywin32.pth")):
os.unlink(os.path.join(sys.prefix, "pywin32.pth"))
# The .pth may be new and therefore not loaded in this session.
# Setup the paths just in case.
for name in "win32 win32\\lib Pythonwin".split():
sys.path.append(os.path.join(lib_dir, name))
# It is possible people with old versions installed will still have
# pywintypes and pythoncom registered. We no longer need this, and stale
# entries hurt us.
for name in "pythoncom pywintypes".split():
keyname = "Software\\Python\\PythonCore\\" + sys.winver + "\\Modules\\" + name
for root in winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER:
try:
winreg.DeleteKey(root, keyname + "\\Debug")
except WindowsError:
pass
try:
winreg.DeleteKey(root, keyname)
except WindowsError:
pass
LoadSystemModule(lib_dir, "pywintypes")
LoadSystemModule(lib_dir, "pythoncom")
import win32api
# and now we can get the system directory:
files = glob.glob(os.path.join(lib_dir, "pywin32_system32\\*.*"))
if not files:
raise RuntimeError("No system files to copy!!")
# Try the system32 directory first - if that fails due to "access denied",
# it implies a non-admin user, and we use sys.prefix
for dest_dir in [get_system_dir(), sys.prefix]:
# and copy some files over there
worked = 0
try:
for fname in files:
base = os.path.basename(fname)
dst = os.path.join(dest_dir, base)
CopyTo("installing %s" % base, fname, dst)
if verbose:
print("Copied %s to %s" % (base, dst))
# Register the files with the uninstaller
file_created(dst)
worked = 1
# Nuke any other versions that may exist - having
# duplicates causes major headaches.
bad_dest_dirs = [
os.path.join(sys.prefix, "Library\\bin"),
os.path.join(sys.prefix, "Lib\\site-packages\\win32"),
]
if dest_dir != sys.prefix:
bad_dest_dirs.append(sys.prefix)
for bad_dest_dir in bad_dest_dirs:
bad_fname = os.path.join(bad_dest_dir, base)
if os.path.exists(bad_fname):
# let exceptions go here - delete must succeed
os.unlink(bad_fname)
if worked:
break
except win32api.error as details:
if details.winerror == 5:
# access denied - user not admin - try sys.prefix dir,
# but first check that a version doesn't already exist
# in that place - otherwise that one will still get used!
if os.path.exists(dst):
msg = (
"The file '%s' exists, but can not be replaced "
"due to insufficient permissions. You must "
"reinstall this software as an Administrator" % dst
)
print(msg)
raise RuntimeError(msg)
continue
raise
else:
raise RuntimeError(
"You don't have enough permissions to install the system files"
)
# Pythonwin 'compiles' config files - record them for uninstall.
pywin_dir = os.path.join(lib_dir, "Pythonwin", "pywin")
for fname in glob.glob(os.path.join(pywin_dir, "*.cfg")):
file_created(fname[:-1] + "c") # .cfg->.cfc
# Register our demo COM objects.
try:
try:
RegisterCOMObjects()
except win32api.error as details:
if details.winerror != 5: # ERROR_ACCESS_DENIED
raise
print("You do not have the permissions to install COM objects.")
print("The sample COM objects were not registered.")
except Exception:
print("FAILED to register the Python COM objects")
traceback.print_exc()
# There may be no main Python key in HKCU if, eg, an admin installed
# python itself.
winreg.CreateKey(get_root_hkey(), root_key_name)
chm_file = None
try:
chm_file = RegisterHelpFile(True, lib_dir)
except Exception:
print("Failed to register help file")
traceback.print_exc()
else:
if verbose:
print("Registered help file")
# misc other fixups.
fixup_dbi()
# Register Pythonwin in context menu
try:
RegisterPythonwin(True, lib_dir)
except Exception:
print("Failed to register pythonwin as editor")
traceback.print_exc()
else:
if verbose:
print("Pythonwin has been registered in context menu")
# Create the win32com\gen_py directory.
make_dir = os.path.join(lib_dir, "win32com", "gen_py")
if not os.path.isdir(make_dir):
if verbose:
print("Creating directory %s" % (make_dir,))
directory_created(make_dir)
os.mkdir(make_dir)
try:
# create shortcuts
# CSIDL_COMMON_PROGRAMS is only available on NT/2000/XP, and
# will fail there if the user has no admin rights.
fldr = get_shortcuts_folder()
# If the group doesn't exist, then we don't make shortcuts - it's
# possible that this isn't a "normal" install.
if os.path.isdir(fldr):
dst = os.path.join(fldr, "PythonWin.lnk")
create_shortcut(
os.path.join(lib_dir, "Pythonwin\\Pythonwin.exe"),
"The Pythonwin IDE",
dst,
"",
sys.prefix,
)
file_created(dst)
if verbose:
print("Shortcut for Pythonwin created")
# And the docs.
if chm_file:
dst = os.path.join(fldr, "Python for Windows Documentation.lnk")
doc = "Documentation for the PyWin32 extensions"
create_shortcut(chm_file, doc, dst)
file_created(dst)
if verbose:
print("Shortcut to documentation created")
else:
if verbose:
print("Can't install shortcuts - %r is not a folder" % (fldr,))
except Exception as details:
print(details)
# importing win32com.client ensures the gen_py dir created - not strictly
# necessary to do now, but this makes the installation "complete"
try:
import win32com.client # noqa
except ImportError:
# Don't let this error sound fatal
pass
print("The pywin32 extensions were successfully installed.")
if is_bdist_wininst:
# Open a web page with info about the .exe installers being deprecated.
import webbrowser
try:
webbrowser.open("https://mhammond.github.io/pywin32_installers.html")
except webbrowser.Error:
print("Please visit https://mhammond.github.io/pywin32_installers.html") | null |
178,025 | import glob
import os
import shutil
import sys
import sysconfig
import tempfile
if sys.stdout is None:
sys.stdout = sys.stderr
sys.stderr = Tee(sys.stderr)
sys.stdout = Tee(sys.stdout)
verbose = 1
def LoadSystemModule(lib_dir, modname):
# See if this is a debug build.
import importlib.machinery
import importlib.util
suffix = "_d" if "_d.pyd" in importlib.machinery.EXTENSION_SUFFIXES else ""
filename = "%s%d%d%s.dll" % (
modname,
sys.version_info[0],
sys.version_info[1],
suffix,
)
filename = os.path.join(lib_dir, "pywin32_system32", filename)
loader = importlib.machinery.ExtensionFileLoader(modname, filename)
spec = importlib.machinery.ModuleSpec(name=modname, loader=loader, origin=filename)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
def RegisterCOMObjects(register=True):
import win32com.server.register
if register:
func = win32com.server.register.RegisterClasses
else:
func = win32com.server.register.UnregisterClasses
flags = {}
if not verbose:
flags["quiet"] = 1
for module, klass_name in com_modules:
__import__(module)
mod = sys.modules[module]
flags["finalize_register"] = getattr(mod, "DllRegisterServer", None)
flags["finalize_unregister"] = getattr(mod, "DllUnregisterServer", None)
klass = getattr(mod, klass_name)
func(klass, **flags)
def RegisterHelpFile(register=True, lib_dir=None):
if lib_dir is None:
lib_dir = sysconfig.get_paths()["platlib"]
if register:
# Register the .chm help file.
chm_file = os.path.join(lib_dir, "PyWin32.chm")
if os.path.isfile(chm_file):
# This isn't recursive, so if 'Help' doesn't exist, we croak
SetPyKeyVal("Help", None, None)
SetPyKeyVal("Help\\Pythonwin Reference", None, chm_file)
return chm_file
else:
print("NOTE: PyWin32.chm can not be located, so has not " "been registered")
else:
UnsetPyKeyVal("Help\\Pythonwin Reference", None, delete_key=True)
return None
def RegisterPythonwin(register=True, lib_dir=None):
"""Add (or remove) Pythonwin to context menu for python scripts.
??? Should probably also add Edit command for pys files also.
Also need to remove these keys on uninstall, but there's no function
like file_created to add registry entries to uninstall log ???
"""
import os
if lib_dir is None:
lib_dir = sysconfig.get_paths()["platlib"]
classes_root = get_root_hkey()
## Installer executable doesn't seem to pass anything to postinstall script indicating if it's a debug build,
pythonwin_exe = os.path.join(lib_dir, "Pythonwin", "Pythonwin.exe")
pythonwin_edit_command = pythonwin_exe + ' -edit "%1"'
keys_vals = [
(
"Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\Pythonwin.exe",
"",
pythonwin_exe,
),
(
"Software\\Classes\\Python.File\\shell\\Edit with Pythonwin",
"command",
pythonwin_edit_command,
),
(
"Software\\Classes\\Python.NoConFile\\shell\\Edit with Pythonwin",
"command",
pythonwin_edit_command,
),
]
try:
if register:
for key, sub_key, val in keys_vals:
## Since winreg only uses the character Api functions, this can fail if Python
## is installed to a path containing non-ascii characters
hkey = winreg.CreateKey(classes_root, key)
if sub_key:
hkey = winreg.CreateKey(hkey, sub_key)
winreg.SetValueEx(hkey, None, 0, winreg.REG_SZ, val)
hkey.Close()
else:
for key, sub_key, val in keys_vals:
try:
if sub_key:
hkey = winreg.OpenKey(classes_root, key)
winreg.DeleteKey(hkey, sub_key)
hkey.Close()
winreg.DeleteKey(classes_root, key)
except OSError as why:
winerror = getattr(why, "winerror", why.errno)
if winerror != 2: # file not found
raise
finally:
# tell windows about the change
from win32com.shell import shell, shellcon
shell.SHChangeNotify(
shellcon.SHCNE_ASSOCCHANGED, shellcon.SHCNF_IDLIST, None, None
)
def get_shortcuts_folder():
if get_root_hkey() == winreg.HKEY_LOCAL_MACHINE:
try:
fldr = get_special_folder_path("CSIDL_COMMON_PROGRAMS")
except OSError:
# No CSIDL_COMMON_PROGRAMS on this platform
fldr = get_special_folder_path("CSIDL_PROGRAMS")
else:
# non-admin install - always goes in this user's start menu.
fldr = get_special_folder_path("CSIDL_PROGRAMS")
try:
install_group = winreg.QueryValue(
get_root_hkey(), root_key_name + "\\InstallPath\\InstallGroup"
)
except OSError:
vi = sys.version_info
install_group = "Python %d.%d" % (vi[0], vi[1])
return os.path.join(fldr, install_group)
def get_system_dir():
import win32api # we assume this exists.
try:
import pythoncom
import win32process
from win32com.shell import shell, shellcon
try:
if win32process.IsWow64Process():
return shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_SYSTEMX86)
return shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_SYSTEM)
except (pythoncom.com_error, win32process.error):
return win32api.GetSystemDirectory()
except ImportError:
return win32api.GetSystemDirectory()
def uninstall(lib_dir):
# First ensure our system modules are loaded from pywin32_system, so
# we can remove the ones we copied...
LoadSystemModule(lib_dir, "pywintypes")
LoadSystemModule(lib_dir, "pythoncom")
try:
RegisterCOMObjects(False)
except Exception as why:
print("Failed to unregister COM objects: %s" % (why,))
try:
RegisterHelpFile(False, lib_dir)
except Exception as why:
print("Failed to unregister help file: %s" % (why,))
else:
if verbose:
print("Unregistered help file")
try:
RegisterPythonwin(False, lib_dir)
except Exception as why:
print("Failed to unregister Pythonwin: %s" % (why,))
else:
if verbose:
print("Unregistered Pythonwin")
try:
# remove gen_py directory.
gen_dir = os.path.join(lib_dir, "win32com", "gen_py")
if os.path.isdir(gen_dir):
shutil.rmtree(gen_dir)
if verbose:
print("Removed directory %s" % (gen_dir,))
# Remove pythonwin compiled "config" files.
pywin_dir = os.path.join(lib_dir, "Pythonwin", "pywin")
for fname in glob.glob(os.path.join(pywin_dir, "*.cfc")):
os.remove(fname)
# The dbi.pyd.old files we may have created.
try:
os.remove(os.path.join(lib_dir, "win32", "dbi.pyd.old"))
except os.error:
pass
try:
os.remove(os.path.join(lib_dir, "win32", "dbi_d.pyd.old"))
except os.error:
pass
except Exception as why:
print("Failed to remove misc files: %s" % (why,))
try:
fldr = get_shortcuts_folder()
for link in ("PythonWin.lnk", "Python for Windows Documentation.lnk"):
fqlink = os.path.join(fldr, link)
if os.path.isfile(fqlink):
os.remove(fqlink)
if verbose:
print("Removed %s" % (link,))
except Exception as why:
print("Failed to remove shortcuts: %s" % (why,))
# Now remove the system32 files.
files = glob.glob(os.path.join(lib_dir, "pywin32_system32\\*.*"))
# Try the system32 directory first - if that fails due to "access denied",
# it implies a non-admin user, and we use sys.prefix
try:
for dest_dir in [get_system_dir(), sys.prefix]:
# and copy some files over there
worked = 0
for fname in files:
base = os.path.basename(fname)
dst = os.path.join(dest_dir, base)
if os.path.isfile(dst):
try:
os.remove(dst)
worked = 1
if verbose:
print("Removed file %s" % (dst))
except Exception:
print("FAILED to remove %s" % (dst,))
if worked:
break
except Exception as why:
print("FAILED to remove system files: %s" % (why,)) | null |
178,026 | import glob
import argparse
import os
import shutil
import sys
import sysconfig
import tempfile
def verify_destination(location):
if not os.path.isdir(location):
raise argparse.ArgumentTypeError('Path "{}" does not exist!'.format(location))
return location | null |
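A usage sketch: because the function raises argparse.ArgumentTypeError, it slots directly into add_argument as a type (the argument name is hypothetical).
ap = argparse.ArgumentParser()
ap.add_argument("destination", type=verify_destination)
args = ap.parse_args(["."])  # "." always exists, so validation passes
print(args.destination)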
178,027 | from flask import Flask, render_template, request, redirect
import os
import pandas as pd
import matplotlib.pyplot as plt
import base64
from io import BytesIO
from wordcloud import WordCloud
import snscrape.modules.twitter as sntwitter
from tqdm.notebook import tqdm_notebook
import datetime
import re
from textblob import TextBlob
from scrape import scrape_tweets
from sentiment import analyze_sentiment
def home():
return render_template('index.html') | null |
178,028 | from flask import Flask, render_template, request, redirect
import os
import pandas as pd
import matplotlib.pyplot as plt
import base64
from io import BytesIO
from wordcloud import WordCloud
import snscrape.modules.twitter as sntwitter
from tqdm.notebook import tqdm_notebook
import datetime
import re
from textblob import TextBlob
from scrape import scrape_tweets
from sentiment import analyze_sentiment
class BytesIO(BufferedIOBase, BinaryIO):
def __init__(self, initial_bytes: bytes = ...) -> None: ...
# BytesIO does not contain a "name" field. This workaround is necessary
# to allow BytesIO sub-classes to add this field, as it is defined
# as a read-only property on IO[].
name: Any
def __enter__(self: _T) -> _T: ...
def getvalue(self) -> bytes: ...
def getbuffer(self) -> memoryview: ...
if sys.version_info >= (3, 7):
def read1(self, __size: Optional[int] = ...) -> bytes: ...
else:
def read1(self, __size: Optional[int]) -> bytes: ... # type: ignore
def scrape_tweets(text, username, since, until, count, retweet, replies):
# Define a function to search for tweets using snscrape
def search(text, username, since, until, retweet, replies):
global filename
q = text
if username != '':
q += f" from:{username}"
if until == '':
until = datetime.datetime.strftime(datetime.date.today(), '%Y-%m-%d')
q += f" until:{until}"
if since == '':
since = datetime.datetime.strftime(datetime.datetime.strptime(until, '%Y-%m-%d') - datetime.timedelta(days=365), '%Y-%m-%d')
q += f" since:{since}"
if retweet == 'y':
q += f" exclude:retweets"
if replies == 'y':
q += f" exclude:replies"
if username != '' and text != '':
filename = f"{since}_{until}_{username}_{text}.csv"
elif username != "":
filename = f"{since}_{until}_{username}.csv"
else:
filename = f"{since}_{until}_{text}.csv"
print(filename)
return q
q = search(text,username,since,until,retweet,replies)
# Creating list to append tweet data
tweets_list1 = []
# Using TwitterSearchScraper to scrape data and append tweets to list
if count == -1:
for i, tweet in enumerate(tqdm_notebook(sntwitter.TwitterSearchScraper(q).get_items())):
# Check if tweet is in English
if tweet.lang == 'en':
tweets_list1.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.lang, tweet.hashtags, tweet.replyCount, tweet.retweetCount, tweet.likeCount, tweet.quoteCount, tweet.media])
else:
with tqdm_notebook(total=count) as pbar:
for i, tweet in enumerate(sntwitter.TwitterSearchScraper(q).get_items()):
if i >= count:
break
# Check if tweet is in English
if tweet.lang == 'en':
tweets_list1.append([tweet.date, tweet.id, tweet.content, tweet.user.username, tweet.lang, tweet.hashtags, tweet.replyCount, tweet.retweetCount, tweet.likeCount, tweet.quoteCount, tweet.media])
pbar.update(1)
# Creating a dataframe from the tweets list above
df = pd.DataFrame(tweets_list1, columns=['DateTime', 'TweetId', 'Text', 'Username', 'Language', 'Hashtags', 'ReplyCount', 'RetweetCount', 'LikeCount', 'QuoteCount', 'Media'])
'''# Save the DataFrame with the scraped tweets to an Excel file
df.to_csv(f'{filename}.csv', index=False)'''
# Return the DataFrame with the scraped tweets
return df, filename
def analyze_sentiment(tweets, filename):
# Creating a dataframe from the tweets list
df = pd.DataFrame(tweets, columns=['DateTime', 'TweetId', 'Text', 'Username', 'Language', 'Hashtags', 'ReplyCount', 'RetweetCount', 'LikeCount', 'QuoteCount', 'Media'])
# Clean the tweets
df['clean_text'] = df['Text'].apply(clean_tweet)
# Apply sentiment analysis
df['sentiment_polarity'], df['sentiment_subjectivity'] = zip(*df['clean_text'].apply(get_sentiment))
# Map polarity values to sentiment labels
df['sentiment'] = df['sentiment_polarity'].apply(get_sentiment_label)
df.to_csv(f'{filename}', index=False)
# Get sentiment counts
sentiment_counts = df['sentiment'].value_counts()
# Create a bar plot of the sentiment counts
plt.bar(sentiment_counts.index, sentiment_counts.values)
# Set the plot title and axis labels
plt.title('Sentiment Analysis Results')
plt.xlabel('Sentiment')
plt.ylabel('Count')
# Return the sentiment counts as a dictionary
return sentiment_counts.to_dict()
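`clean_tweet`, `get_sentiment`, and `get_sentiment_label` are called above but not defined in this snippet; a minimal sketch of plausible implementations, using the `re` and `TextBlob` imports already listed (these are assumptions, not the original helpers):
def clean_tweet(text):
    # hypothetical helper: drop mentions, links and extra whitespace
    text = re.sub(r"@\w+|https?://\S+", "", str(text))
    return re.sub(r"\s+", " ", text).strip()

def get_sentiment(text):
    # hypothetical helper: return a (polarity, subjectivity) pair via TextBlob
    sentiment = TextBlob(text).sentiment
    return sentiment.polarity, sentiment.subjectivity

def get_sentiment_label(polarity):
    # hypothetical helper: map polarity to the labels used in the plots
    if polarity > 0:
        return 'positive'
    if polarity < 0:
        return 'negative'
    return 'neutral'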
def result():
if request.method == 'POST':
# Get the form data
text = request.form['text']
username = request.form['username']
since = request.form['since']
until = request.form['until']
count = request.form['count']
retweet = request.form.get('retweet','n')
replies = request.form.get('replies','n')
# Scrape the tweets
tweets,filename = scrape_tweets(text, username, since, until, int(count), retweet, replies)
# Analyze the sentiment
sentiment_counts = analyze_sentiment(tweets, filename)
# Read the CSV file into a pandas DataFrame
data = pd.read_csv(f'{filename}')
first_five = data.head()
last_five = data.tail()
count_df = data.shape[0]
sentiment_counts = data['sentiment'].value_counts()
# Create a figure with three subplots
fig, axs = plt.subplots(1, 4, figsize=(20,5))
# Create a bar chart of the sentiment counts in the first subplot
axs[0].bar(sentiment_counts.index, sentiment_counts.values)
axs[0].set_title('Sentiment Analysis Results')
axs[0].set_xlabel('Sentiment')
axs[0].set_ylabel('Count')
# Generate a word cloud from the positive tweets in the second subplot
positive_df = data[data['sentiment'] == 'positive']
text = ' '.join(positive_df['clean_text'].tolist())
wordcloud_p = WordCloud(width=800, height=800, background_color='white').generate(text)
axs[1].imshow(wordcloud_p, interpolation='bilinear')
axs[1].set_title('Positive Word Cloud')
axs[1].axis('off')
# Generate a word cloud from the negative tweets in the third subplot
negative_df = data[data['sentiment'] == 'negative']
text = ' '.join(negative_df['clean_text'].tolist())
wordcloud_n = WordCloud(width=800, height=800, background_color='white').generate(text)
axs[2].imshow(wordcloud_n, interpolation='bilinear')
axs[2].set_title('Negative Word Cloud')
axs[2].axis('off')
# Create a pie chart of the sentiment counts
axs[3].pie(sentiment_counts.values, labels=sentiment_counts.index, autopct='%1.1f%%')
axs[3].set_title('Sentiment Analysis Results')
# Save the figure as a base64-encoded string
buffer = BytesIO()
plt.savefig(buffer, format='png')
buffer.seek(0)
plot_url = base64.b64encode(buffer.getvalue()).decode()
# Render the result template with the sentiment counts and the URLs of the bar chart, pie chart, and word cloud
return render_template('result.html', first_five=first_five, last_five=last_five, sentiment_counts=sentiment_counts, result=count_df, plot_url=plot_url)
else:
return redirect('/') | null |
178,029 | from setuptools import setup, find_packages
from typing import List
HYPEN_E_DOT='-e .'
List = _Alias()
def get_requirements(file_path:str)->List[str]:
requirements=[]
with open(file_path) as file_obj:
requirements=file_obj.readlines()
requirements=[req.replace("\n","") for req in requirements]
if HYPEN_E_DOT in requirements:
requirements.remove(HYPEN_E_DOT)
return requirements | null |
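A usage sketch of how the helper typically feeds setup(); all metadata values are placeholders.
setup(
    name="example_pkg",
    version="0.0.1",
    packages=find_packages(),
    install_requires=get_requirements("requirements.txt"),
)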
178,030 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.Heart.logger import logging
from sklearn.metrics import accuracy_score
from src.Heart.exception import customexception
import os
import sys
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
# My older explanation for this was that the "AddCounter" process
# forced the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine, object, instance, None,
inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
"""
Return virtual memory size in bytes of the running python.
"""
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[22])
except Exception:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
self.error_message = error_message
_,_,exc_tb = error_details.exc_info()
self.lineno=exc_tb.tb_lineno
self.file_name=exc_tb.tb_frame.f_code.co_filename
def __str__(self):
return "Error occured in python script name [{0}] line number [{1}] error message [{2}]".format(
self.file_name, self.lineno, str(self.error_message))
def save_object(file_path, obj):
try:
dir_path = os.path.dirname(file_path)
os.makedirs(dir_path, exist_ok=True)
with open(file_path, "wb") as file_obj:
pickle.dump(obj, file_obj)
except Exception as e:
raise customexception(e, sys) | null |
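A usage sketch; the path mirrors the Artifacts/Preprocessor.pkl convention used elsewhere in this project, and the object is a stand-in for a fitted transformer.
preprocessor = {"scaler": "demo"}  # stand-in for a fitted transformer
save_object(os.path.join("Artifacts", "Preprocessor.pkl"), preprocessor)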
178,031 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.Heart.logger import logging
from sklearn.metrics import accuracy_score
from src.Heart.exception import customexception
import sys
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
import logging
logging.basicConfig(level = logging.INFO,
filename = LOG_FILEPATH,
format = "[%(asctime)s] %(lineno)d %(name)s - %(levelname)s - %(message)s")
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
self.error_message = error_message
_,_,exc_tb = error_details.exc_info()
self.lineno=exc_tb.tb_lineno
self.file_name=exc_tb.tb_frame.f_code.co_filename
def __str__(self):
return "Error occured in python script name [{0}] line number [{1}] error message [{2}]".format(
self.file_name, self.lineno, str(self.error_message))
def evaluate_model(X_train, y_train, X_test, y_test, models):
try:
report = {}
for model_name, model in models.items():
model.fit(X_train, y_train)
y_test_pred = model.predict(X_test)
test_model_score = accuracy_score(y_test, y_test_pred)
report[model_name] = test_model_score
return report
except Exception as e:
logging.info('Exception occurred during model training')
raise customexception(e, sys) | null |
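A self-contained usage sketch (the model choices and data are illustrative, not from the source):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
models = {
    "LogisticRegression": LogisticRegression(max_iter=1000),
    "DecisionTree": DecisionTreeClassifier(),
}
report = evaluate_model(X_train, y_train, X_test, y_test, models)
best_name = max(report, key=report.get)  # model with the highest test accuracy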
178,032 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.Heart.logger import logging
from sklearn.metrics import accuracy_score
from src.Heart.exception import customexception
import sys
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
import logging
logging.basicConfig(level = logging.INFO,
filename = LOG_FILEPATH,
format = "[%(asctime)s] %(lineno)d %(name)s - %(levelname)s - %(message)s")
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
self.error_message = error_message
_,_,exc_tb = error_details.exc_info()
self.lineno=exc_tb.tb_lineno
self.file_name=exc_tb.tb_frame.f_code.co_filename
def __str__(self):
return "Error occured in python script name [{0}] line number [{1}] error message [{2}]".format(
self.file_name, self.lineno, str(self.error_message))
def load_object(file_path):
try:
with open(file_path,'rb') as file_obj:
return pickle.load(file_obj)
except Exception as e:
logging.info('Exception occurred in load_object function utils')
raise customexception(e,sys) | null |
178,033 | from flask import Flask, request, render_template
from src.Heart.pipeline.Prediction_pipeline import CustomData, PredictPipeline
class PredictPipeline:
def __init__(self):
pass
def predict(self,features):
try:
preprocessor_path=os.path.join("Artifacts","Preprocessor.pkl")
model_path=os.path.join("Artifacts","Model.pkl")
preprocessor=load_object(preprocessor_path)
model=load_object(model_path)
scaled_data=preprocessor.transform(features)
pred=model.predict(scaled_data)
return pred
except Exception as e:
raise customexception(e,sys)
class CustomData:
def __init__(self,
age:int,
sex:int,
cp:int,
trestbps:int,
chol:int,
fbs:int,
restecg:int,
thalach:int,
exang:int,
oldpeak:float,
slope:int,
ca:int,
thal:int):
self.age = age
self.sex = sex
self.cp = cp
self.trestbps = trestbps
self.chol = chol
self.fbs = fbs
self.restecg = restecg
self.thalach = thalach
self.exang = exang
self.oldpeak = oldpeak
self.slope = slope
self.ca = ca
self.thal = thal
def get_data_as_dataframe(self):
try:
custom_data_input_dict = {
'age':[self.age],
'sex':[self.sex],
'cp':[self.cp],
'trestbps':[self.trestbps],
'chol':[self.chol],
'fbs':[self.fbs],
'restecg':[self.restecg],
'thalach':[self.thalach],
'exang':[self.exang],
'oldpeak':[self.oldpeak],
'slope':[self.slope],
'ca':[self.ca],
'thal':[self.thal]
}
df = pd.DataFrame(custom_data_input_dict)
print(df)
logging.info('Dataframe Gathered')
return df
except Exception as e:
            logging.info('Exception occurred in prediction pipeline')
raise customexception(e,sys)
def home():
if request.method == "POST":
try:
# Validate and convert form data to CustomData object
data = CustomData(
age=request.form.get("age"),
sex=request.form.get("sex"),
cp=(request.form.get("cp")),
trestbps=(request.form.get("trestbps")),
chol=(request.form.get("chol")),
fbs=request.form.get("fbs"),
restecg=request.form.get("restecg"),
thalach=(request.form.get("thalach")),
exang=request.form.get("exang"),
oldpeak=request.form.get("oldpeak"),
slope=request.form.get("slope"),
ca=request.form.get("ca"),
thal=(request.form.get("thal"))
)
final_data = data.get_data_as_dataframe()
# Make prediction
predict_pipeline = PredictPipeline()
pred = predict_pipeline.predict(final_data)
result = round(pred[0], 2)
return render_template("result.html", final_result=result)
except Exception as e:
# Handle exceptions gracefully
error_message = f"Error during prediction: {str(e)}"
return render_template("error.html", error_message=error_message)
else:
# Render the initial page
return render_template("index.html") | null |
178,035 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.DiamondPricePrediction.logger import logging
from src.DiamondPricePrediction.exception import customexception
from sklearn.metrics import r2_score, mean_absolute_error,mean_squared_error
import os
import sys
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
def memusage(processName="python", instance=0):
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
else:
def memusage():
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
def __str__(self):
def save_object(file_path, obj):
try:
dir_path = os.path.dirname(file_path)
os.makedirs(dir_path, exist_ok=True)
with open(file_path, "wb") as file_obj:
pickle.dump(obj, file_obj)
except Exception as e:
raise customexception(e, sys) | null |
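A round-trip sketch pairing save_object with the load_object helper defined in the neighbouring utils rows (the artifact path is illustrative):
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
save_object("Artifacts/preprocessor.pkl", scaler)     # creates Artifacts/ if missing
restored = load_object("Artifacts/preprocessor.pkl")  # unpickles the same object
assert isinstance(restored, StandardScaler)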
178,036 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.DiamondPricePrediction.logger import logging
from src.DiamondPricePrediction.exception import customexception
from sklearn.metrics import r2_score, mean_absolute_error,mean_squared_error
import sys
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
import logging
logging.basicConfig(level = logging.INFO,
filename = LOG_FILEPATH,
format = "[%(asctime)s] %(lineno)d %(name)s - %(levelname)s - %(message)s")
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
def __str__(self):
def evaluate_model(X_train,y_train,X_test,y_test,models):
try:
report = {}
        for model_name, model in models.items():
            # Train model
            model.fit(X_train, y_train)
            # Predict on the testing data
            y_test_pred = model.predict(X_test)
            # Get the R2 score on the test data
            test_model_score = r2_score(y_test, y_test_pred)
            report[model_name] = test_model_score
return report
except Exception as e:
        logging.info('Exception occurred during model training')
raise customexception(e,sys) | null |
178,037 | import os
import sys
import pickle
import numpy as np
import pandas as pd
from src.DiamondPricePrediction.logger import logging
from src.DiamondPricePrediction.exception import customexception
from sklearn.metrics import r2_score, mean_absolute_error,mean_squared_error
import sys
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
import logging
logging.basicConfig(level = logging.INFO,
filename = LOG_FILEPATH,
format = "[%(asctime)s] %(lineno)d %(name)s - %(levelname)s - %(message)s")
class customexception(Exception):
def __init__(self,error_message,error_details:sys):
self.error_message = error_message
_,_,exc_tb = error_details.exc_info()
self.lineno=exc_tb.tb_lineno
self.file_name=exc_tb.tb_frame.f_code.co_filename
def __str__(self):
return "Error occured in python script name [{0}] line number [{1}] error message [{2}]".format(
self.file_name, self.lineno, str(self.error_message))
def load_object(file_path):
try:
with open(file_path,'rb') as file_obj:
return pickle.load(file_obj)
except Exception as e:
        logging.info('Exception occurred in load_object function utils')
raise customexception(e,sys) | null |
178,038 | from src.DiamondPricePrediction.pipelines.Prediction_Pipeline import CustomData,PredictPipeline
from flask import Flask,request,render_template,jsonify
class PredictPipeline:
def __init__(self):
pass
def predict(self,features):
try:
preprocessor_path=os.path.join("Artifacts","preprocessor.pkl")
model_path=os.path.join("Artifacts","model.pkl")
preprocessor=load_object(preprocessor_path)
model=load_object(model_path)
scaled_data=preprocessor.transform(features)
pred=model.predict(scaled_data)
return pred
except Exception as e:
raise customexception(e,sys)
class CustomData:
def __init__(self,
carat:float,
depth:float,
table:float,
x:float,
y:float,
z:float,
cut:str,
color:str,
clarity:str):
self.carat=carat
self.depth=depth
self.table=table
self.x=x
self.y=y
self.z=z
self.cut = cut
self.color = color
self.clarity = clarity
def get_data_as_dataframe(self):
try:
custom_data_input_dict = {
'carat':[self.carat],
'depth':[self.depth],
'table':[self.table],
'x':[self.x],
'y':[self.y],
'z':[self.z],
'cut':[self.cut],
'color':[self.color],
'clarity':[self.clarity]
}
df = pd.DataFrame(custom_data_input_dict)
logging.info('Dataframe Gathered')
return df
except Exception as e:
            logging.info('Exception occurred in prediction pipeline')
raise customexception(e,sys)
def predict_datapoint():
if request.method == "GET":
return render_template("form.html")
else:
data=CustomData(
carat=float(request.form.get('carat')),
depth = float(request.form.get('depth')),
table = float(request.form.get('table')),
x = float(request.form.get('x')),
y = float(request.form.get('y')),
z = float(request.form.get('z')),
cut = request.form.get('cut'),
color= request.form.get('color'),
clarity = request.form.get('clarity')
)
# this is my final data
final_data=data.get_data_as_dataframe()
predict_pipeline=PredictPipeline()
pred=predict_pipeline.predict(final_data)
result=round(pred[0],2)
return render_template("result.html",final_result=result) | null |
178,039 | from chain_retriever import *
from utils import *
def read_root():
return {"Welcome": "to the Codebasics FAQs API"} | null |
178,040 | from chain_retriever import *
from utils import *
def retrieve_qa(querybody: QueryBody):
# Use the QA retriever to get the QA chain
qa_chain = qa_retriever.get_qa_chain(
temperature=querybody.temperature,
max_output_tokens=querybody.max_output_tokens,
)
# You might need to adapt this part to how you want to use the qa_chain
response = qa_chain(querybody.query)
return {"response": response} | null |
178,041 | import streamlit as st
import keras
import tensorflow as tf
import requests
import numpy as np
import nltk
import spacy
from nltk.corpus import stopwords
from tqdm import tqdm
import pandas as pd
import pycountry
from keras.preprocessing.text import one_hot,Tokenizer
from keras.utils import pad_sequences
import datetime
def fetch_country_code(name):
code = pycountry.countries.get(name=name).alpha_2
return code.lower() | null |
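Note that pycountry.countries.get(name=...) returns None when the exact official name is not found, so the lookup above raises AttributeError on near-misses; a defensive variant (the fallback code is illustrative):
def fetch_country_code_safe(name, default="us"):
    country = pycountry.countries.get(name=name)
    if country is None:
        try:
            # Fall back to pycountry's fuzzy search before giving up.
            country = pycountry.countries.search_fuzzy(name)[0]
        except LookupError:
            return default
    return country.alpha_2.lower()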
178,042 | import streamlit as st
import keras
import tensorflow as tf
import requests
import numpy as np
import nltk
import spacy
from nltk.corpus import stopwords
from tqdm import tqdm
import pandas as pd
import pycountry
from keras.preprocessing.text import one_hot,Tokenizer
from keras.utils import pad_sequences
import datetime
def fetch_news(name, date_from, date_to):
headers = {
"User-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.200"
}
news = []
global final
# code = fetch_country_code(country_name)
code = 'us'
query = name
url = 'https://newsapi.org/v2/everything?q={}&from={}&to=()&language=en&sortBy=popularity&apiKey=a1e91cf9073f4b85aa784fe5f37e6294'.format(query, date_from, date_to)
data = requests.get(url, headers=headers).json()
# print('------------------------------------------------------------------------------------------------------------------')
tot_res = data['totalResults']
ls_name = []
ls_title = []
ls_desc = []
ls_url = []
ls_img_url = []
publish_date = []
ls_content = []
print(url, tot_res)
range_ = tot_res // 100
for i in tqdm(range(1, range_)):
try:
next_page_url = 'https://newsapi.org/v2/everything?q={}&from={}&to={}&language=en&page={}&sortBy=popularity&apiKey=a1e91cf9073f4b85aa784fe5f37e6294'.format(query, date_from, date_to, str(i))
for j in data['articles']:
name = j['source']['name']
title = j['title']
desc = j['description']
url = j['url']
img_url = j['urlToImage']
date = j['publishedAt']
content = j['content']
ls_name.append(name)
ls_title.append(title)
ls_desc.append(desc)
ls_url.append(url)
ls_img_url.append(img_url)
ls_content.append(content)
publish_date.append(date)
data = requests.get(next_page_url, headers=headers).json()
except:
pass
dic = ({
'name': ls_name,
'title': ls_title,
'description': ls_desc,
'content': ls_content,
'url': ls_url,
'img_url': ls_img_url,
'Date': publish_date
})
final = pd.DataFrame(dic)
final.to_csv('news.csv', index=False) | null |
178,043 | import streamlit as st
import keras
import tensorflow as tf
import requests
import numpy as np
import nltk
import spacy
from nltk.corpus import stopwords
from tqdm import tqdm
import pandas as pd
import pycountry
from keras.preprocessing.text import one_hot,Tokenizer
from keras.utils import pad_sequences
import datetime
nlp = spacy.load('en_core_web_sm')
from nltk.corpus.reader import WordListCorpusReader
from nltk.corpus.util import LazyCorpusLoader
stopwords: WordListCorpusReader = LazyCorpusLoader(
    "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
)
def preprocess():
df = pd.read_csv('news.csv')
print(df)
df['tags'] = df['title'] + df['description'] + df['content']
df['tags'] = df['tags'].astype('str')
#Lowercasing
df['tags'] = df['tags'].str.lower()
    #Removing Contractions (e.g. "don't" -> "do not")
    import contractions
    def remove_contractions(text):
        return " ".join([contractions.fix(word.text) for word in nlp(text)])
    df['tags'] = df['tags'].apply(remove_contractions)
# Removing HTML tags
import re
def remove_html(text):
pattern = re.compile('<.*?>')
return pattern.sub(r'', text)
df['tags'] = df['tags'].apply(remove_html)
#Remove @
def remove_at_the_rate(text):
ls = []
new = []
ls = nlp(text)
for word in ls:
if word.text != "@":
new.append(word.text)
return ' '.join(new)
df['tags'] = df['tags'].apply(remove_at_the_rate)
#Removing URL
import re
def remove_url(text):
pattern = re.compile(r'https?://\S+|www\.\S+')
return pattern.sub(r'', text)
df['tags']= df['tags'].apply(remove_url)
    #Remove punctuation
import string
punc = string.punctuation
def remove_punc(text):
return text.translate(str.maketrans('', '', punc))
df['tags']= df['tags'].apply(remove_punc)
# Removing stop words
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
def remove_stop_words(text):
ls = []
new = []
ls = nlp(text)
for word in ls:
if word.text not in stopwords:
new.append(word.text)
return ' '.join(new)
df['tags'] = df['tags'].apply(remove_stop_words)
    def lemmatize(text):
        return " ".join([word.lemma_ for word in nlp(text)])
    df['tags'] = df['tags'].apply(lemmatize)
def is_alpha(string):
ls = string.split()
new = []
# print(ls)
for word in ls:
            if word.isalpha():
new.append(word)
return ' '.join(new)
df['tags'] = df['tags'].apply(is_alpha)
df.to_csv('preprocessed.csv', index=False) | null |
178,044 | import streamlit as st
import keras
import tensorflow as tf
import requests
import numpy as np
import nltk
import spacy
from nltk.corpus import stopwords
from tqdm import tqdm
import pandas as pd
import pycountry
from keras.preprocessing.text import one_hot,Tokenizer
from keras.utils import pad_sequences
import datetime
model = tf.keras.models.load_model('News_Classification_bidirectional_lstm_model.keras')
def predict():
preprocessed = pd.read_csv('preprocessed.csv')
train_df = pd.read_csv('train_transformed.csv')
news = pd.read_csv('news.csv')
tok = Tokenizer()
tok.fit_on_texts(train_df['tags'])
max_len = 100
encd_news = tok.texts_to_sequences(preprocessed['tags'])
embd_dim = 200
pad_news = pad_sequences(maxlen = max_len, padding='pre', sequences=encd_news)
y_pred = model.predict([pad_news], 1024)
y_pred = np.argmax(y_pred, axis=1)
y_pred = y_pred.T
# print(y_pred)
dic = {
'predictions': y_pred
}
pred = pd.DataFrame(dic)
predicitions_merged = pd.concat([news, pred], axis=1)
print(predicitions_merged)
print(predicitions_merged.duplicated().sum())
predicitions_merged['predictions'] = predicitions_merged['predictions'].astype('str')
# print(type(predicitions_merged.iloc[0,7]))
print(predicitions_merged.duplicated().sum())
predicitions_merged['predictions'].replace(to_replace=['0', '1', '2', '3'],value=['World', 'Sports', 'Business', 'Sci-Fi/Tech'], inplace=True)
predicitions_merged.drop_duplicates(inplace=True)
predicitions_merged.fillna('N/A', inplace=True)
predicitions_merged.to_csv('predictions.csv', index=False) | null |
178,045 | import streamlit as st
import keras
import tensorflow as tf
import requests
import numpy as np
import nltk
import spacy
from nltk.corpus import stopwords
from tqdm import tqdm
import pandas as pd
import pycountry
from keras.preprocessing.text import one_hot,Tokenizer
from keras.utils import pad_sequences
import datetime
st.markdown("<h1 style='text-align: center; color: white;'>Multi-Class News Classifier</h1>", unsafe_allow_html=True)
def display():
bar = st.progress(25)
predicitions_merged = pd.read_csv('predictions.csv')
tabs_titles = ['World', 'Sports', 'Business', 'Sci-Fi/Tech']
tab1, tab2, tab3, tab4 = st.tabs(tabs_titles)
with tab1:
for i in range(predicitions_merged.shape[0]):
if predicitions_merged.iloc[i, 7] == 'World':
st.markdown("<h4 style='text-align: center; color: white;'>{}</h4>".format(predicitions_merged.iloc[i, 1]), unsafe_allow_html=True)
st.divider()
col1, col2, col3 = st.columns([5, 5, 3])
with col1:
try:
img_url = predicitions_merged.iloc[i, 5]
image = st.image(img_url)
except:
pass
with col2:
desc = predicitions_merged.iloc[i, 2]
st.write(desc)
with col3:
date = predicitions_merged.iloc[i, 6]
date = date[:10]
with st.expander('Published'):
st.write(date)
with st.expander('Source'):
st.write(predicitions_merged.iloc[i, 0])
with st.expander('Description'):
content = predicitions_merged.iloc[i, 2]
st.write(content)
with st.expander('Content'):
content = predicitions_merged.iloc[i, 3]
st.write(content)
with st.expander('Website'):
url = predicitions_merged.iloc[i, 4]
st.write(url)
with tab2:
bar.progress(50)
for i in range((predicitions_merged.shape[0])):
if predicitions_merged.iloc[i, 7] == 'Sports':
st.markdown("<h4 style='text-align: center; color: white;'>{}</h4>".format(predicitions_merged.iloc[i, 1]), unsafe_allow_html=True)
st.divider()
col1, col2, col3 = st.columns([5, 5, 3])
with col1:
try:
img_url = predicitions_merged.iloc[i, 5]
image = st.image(img_url)
except:
pass
with col2:
desc = predicitions_merged.iloc[i, 2]
st.write(desc)
with col3:
date = predicitions_merged.iloc[i, 6]
date = date[:10]
with st.expander('Published'):
st.write(date)
with st.expander('Source'):
st.write(predicitions_merged.iloc[i, 0])
with st.expander('Description'):
content = predicitions_merged.iloc[i, 2]
st.write(content)
with st.expander('Content'):
content = predicitions_merged.iloc[i, 3]
st.write(content)
with st.expander('Website'):
url = predicitions_merged.iloc[i, 4]
st.write(url)
with tab3:
bar.progress(75)
for i in range(predicitions_merged.shape[0]):
if predicitions_merged.iloc[i, 7] == 'Business':
st.markdown("<h4 style='text-align: center; color: white;'>{}</h4>".format(predicitions_merged.iloc[i, 1]), unsafe_allow_html=True)
st.divider()
col1, col2, col3 = st.columns([5, 5, 3])
                with col1:
                    try:
                        img_url = predicitions_merged.iloc[i, 5]
                        image = st.image(img_url)
                    except:
                        pass
with col2:
desc = predicitions_merged.iloc[i, 2]
st.write(desc)
with col3:
date = predicitions_merged.iloc[i, 6]
date = date[:10]
with st.expander('Published'):
st.write(date)
with st.expander('Source'):
st.write(predicitions_merged.iloc[i, 0])
with st.expander('Description'):
content = predicitions_merged.iloc[i, 2]
st.write(content)
with st.expander('Content'):
content = predicitions_merged.iloc[i, 3]
st.write(content)
with st.expander('Website'):
url = predicitions_merged.iloc[i, 4]
st.write(url)
with tab4:
bar.progress(100)
for i in range(predicitions_merged.shape[0]):
if predicitions_merged.iloc[i, 7] == 'Sci-Fi/Tech':
st.markdown("<h4 style='text-align: center; color: white;'>{}</h4>".format(predicitions_merged.iloc[i, 1]), unsafe_allow_html=True)
st.divider()
col1, col2, col3 = st.columns([5, 5, 3])
                with col1:
                    try:
                        img_url = predicitions_merged.iloc[i, 5]
                        image = st.image(img_url)
                    except:
                        pass
with col2:
desc = predicitions_merged.iloc[i, 2]
st.write(desc)
with col3:
date = predicitions_merged.iloc[i, 6]
date = date[:10]
with st.expander('Published'):
st.write(date)
with st.expander('Source'):
st.write(predicitions_merged.iloc[i, 0])
with st.expander('Description'):
content = predicitions_merged.iloc[i, 2]
st.write(content)
with st.expander('Content'):
content = predicitions_merged.iloc[i, 3]
st.write(content)
with st.expander('Website'):
url = predicitions_merged.iloc[i, 4]
                        st.write(url)
if st.button('Fetch!'):
    fetch_news(name, date_from, date_to)
    preprocess()
    predict()
    display() | null
178,046 | from keyword_extract import extract_keywords
import re
import spacy
nlp = spacy.load("en_core_web_sm")
def extract_entities(text):
doc = nlp(text)
entities = {
"name": [],
"location": [],
"skills": [],
"keywords": []
}
for ent in doc.ents:
if ent.label_ == "PERSON":
entities["name"].append(ent.text)
elif ent.label_ == "GPE":
entities["location"].append(ent.text)
elif ent.label_ == "SKILLS":
entities["skills"].append(ent.text)
return entities | null |
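en_core_web_sm ships no "SKILLS" entity label, so that branch never fires with the stock pipeline; a hedged sketch that adds one through spaCy's EntityRuler (the skill list is illustrative):
ruler = nlp.add_pipe("entity_ruler", before="ner")
ruler.add_patterns([{"label": "SKILLS", "pattern": s}
                    for s in ["Python", "SQL", "Machine Learning"]])
doc = nlp("Python and SQL developer based in Berlin.")
# doc.ents now mixes SKILLS spans with built-in labels such as PERSON and GPE.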
178,047 | from keyword_extract import extract_keywords
import re
import spacy
def extract_phone_numbers(text):
phone_numbers = re.findall(r'\b\d{3}[-.\s]?\d{3}[-.\s]?\d{4}\b', text)
return phone_numbers | null |
178,048 | from keyword_extract import extract_keywords
import re
import spacy
def extract_emails(text):
emails = re.findall(r'\S+@\S+', text)
return emails | null |
178,049 | from keyword_extract import extract_keywords
import re
import spacy
nlp = spacy.load("en_core_web_sm")
def extract_keywords(text):
# Process the text with spaCy
doc = nlp(text)
# Extract keywords based on relevant criteria (e.g., noun phrases)
keywords = [chunk.text for chunk in doc.noun_chunks]
return keywords
def extract_information(text):
extracted_entities = {
"name": [],
"location": [],
"skills": [],
"keywords": []
}
doc = nlp(text)
# Extract skills using specific rules or patterns
skills_list = ["Python", "Machine Learning", "Data Analysis", "SQL", "Latex"]
for token in doc:
if token.text in skills_list:
extracted_entities["skills"].append(token.text)
# Extract location entities (modify as per your requirements)
for ent in doc.ents:
if ent.label_ == "GPE":
extracted_entities["location"].append(ent.text)
# Extract keywords using your preferred technique (e.g., YAKE)
extracted_entities["keywords"] = extract_keywords(text)
return extracted_entities | null |
178,050 | from flask import Flask, render_template, request, redirect, url_for
from ocr_engine import OCREngine
from pdf_extractor import extract_text_from_pdf
from utils.helper import allowed_file, save_uploaded_file
from keyword_extract import extract_keywords
from yake import KeywordExtractor
import os
import spacy
import yake
import pytesseract      # used by OCREngine.extract_text
import fitz             # PyMuPDF, used by extract_text_from_pdf
from PIL import Image
nlp = spacy.load("en_core_web_sm")  # used by extract_keywords below
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
if not os.path.exists(app.config['UPLOAD_FOLDER']):
os.makedirs(app.config['UPLOAD_FOLDER'])
kw_extractor = yake.KeywordExtractor(lan="en", n=3, dedupLim=0.9, top=20)
class OCREngine:
    def extract_text(self, file_path):
try:
image = Image.open(file_path)
text = pytesseract.image_to_string(image)
return text
except Exception as e:
return str(e)
def extract_text_from_pdf(pdf_path):
try:
pdf_document = fitz.open(pdf_path)
text = ""
for page_num in range(pdf_document.page_count):
page = pdf_document[page_num]
text += page.get_text()
return text
except Exception as e:
return str(e)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in {'pdf', 'png', 'jpg', 'jpeg', 'gif'}
def save_uploaded_file(file):
if file and allowed_file(file.filename):
filename = file.filename
file.save(os.path.join('uploads', filename))
return filename
else:
return None
def extract_keywords(text):
# Process the text with spaCy
doc = nlp(text)
# Extract keywords based on relevant criteria (e.g., noun phrases)
keywords = [chunk.text for chunk in doc.noun_chunks]
return keywords
def index():
if request.method == 'POST':
file = request.files['file']
file_type = request.form['file-type']
if file and allowed_file(file.filename):
filename = save_uploaded_file(file)
if filename:
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
extracted_text = ""
if file_type == 'image':
ocr = OCREngine()
extracted_text = ocr.extract_text(file_path)
elif file_type == 'pdf':
extracted_text = extract_text_from_pdf(file_path)
# Extract keywords using YAKE
keywords = kw_extractor.extract_keywords(extracted_text)
#keywords = custom_kw_extractor.extract_keywords(extracted_text)
# Extract the keyword text from YAKE results
extracted_keywords = [keyword[0] for keyword in keywords]
return render_template('index.html', extracted_text=extracted_text, extracted_keywords=extracted_keywords)
# Show the home page for both GET and POST requests
return render_template('index.html', extracted_text=None) | null |
178,051 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
import socket
from pytorch_lightning.plugins.environments import ClusterEnvironment,SLURMEnvironment
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
"-n",
"--name",
type=str,
const=True,
default="",
nargs="?",
help="postfix for logdir",
)
parser.add_argument(
"-r",
"--resume",
type=str,
const=True,
default="",
nargs="?",
help="resume from logdir or checkpoint in logdir",
)
parser.add_argument(
"-b",
"--base",
nargs="*",
metavar="base_config.yaml",
help="paths to base configs. Loaded from left-to-right. "
"Parameters can be overwritten or added with command-line options of the form `--key value`.",
default=["configs/stable-diffusion/v1-inference-inpaint.yaml"],
)
parser.add_argument(
"-t",
"--train",
type=str2bool,
const=True,
default=True,
nargs="?",
help="train",
)
parser.add_argument(
"--no-test",
type=str2bool,
const=True,
default=False,
nargs="?",
help="disable test",
)
parser.add_argument(
"-p",
"--project",
help="name of new or path to existing project"
)
parser.add_argument(
"-d",
"--debug",
type=str2bool,
nargs="?",
const=True,
default=False,
help="enable post-mortem debugging",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=23,
help="seed for seed_everything",
)
parser.add_argument(
"-f",
"--postfix",
type=str,
default="",
help="post-postfix for default name",
)
parser.add_argument(
"-l",
"--logdir",
type=str,
default="logs",
help="directory for logging dat shit",
)
parser.add_argument(
"--pretrained_model",
type=str,
default="",
help="path to pretrained model",
)
parser.add_argument(
"--scale_lr",
type=str2bool,
nargs="?",
const=True,
default=True,
help="scale base-lr by ngpu * batch_size * n_accumulate",
)
parser.add_argument(
"--train_from_scratch",
type=str2bool,
nargs="?",
const=True,
default=False,
help="Train from scratch",
)
return parser | null |
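A usage sketch showing how a launcher script would typically consume this parser together with PyTorch Lightning's own flags (treat the pairing as an assumption here):
parser = get_parser()
parser = Trainer.add_argparse_args(parser)  # merge Lightning's trainer flags
opt, unknown = parser.parse_known_args()    # tolerate extra `--key value` config overrides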
178,052 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
import socket
from pytorch_lightning.plugins.environments import ClusterEnvironment,SLURMEnvironment
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k)) | null |
178,053 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset, IterableDataset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
import socket
from pytorch_lightning.plugins.environments import ClusterEnvironment,SLURMEnvironment
class Txt2ImgIterableBaseDataset(IterableDataset):
'''
Define an interface to make the IterableDatasets for text2img data chainable
'''
def __init__(self, num_records=0, valid_ids=None, size=256):
super().__init__()
self.num_records = num_records
self.valid_ids = valid_ids
self.sample_ids = valid_ids
self.size = size
print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')
def __len__(self):
return self.num_records
def __iter__(self):
pass
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
if isinstance(dataset, Txt2ImgIterableBaseDataset):
split_size = dataset.num_records // worker_info.num_workers
# reset num_records to the true number to retain reliable length information
dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
current_id = np.random.choice(len(np.random.get_state()[1]), 1)
return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
else:
return np.random.seed(np.random.get_state()[1][0] + worker_id) | null |
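worker_init_fn is meant to be handed to a DataLoader so each worker gets its own shard of an iterable dataset plus a distinct numpy seed; a minimal hookup (the dataset variable is assumed):
loader = DataLoader(
    dataset,                        # e.g. a Txt2ImgIterableBaseDataset subclass
    batch_size=4,
    num_workers=8,
    worker_init_fn=worker_init_fn,  # shards sample_ids and reseeds numpy per worker
)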
178,054 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
import socket
from pytorch_lightning.plugins.environments import ClusterEnvironment,SLURMEnvironment
def melk(*args, **kwargs):
# run all checkpoint hooks
if trainer.global_rank == 0:
print("Summoning checkpoint.")
ckpt_path = os.path.join(ckptdir, "last.ckpt")
trainer.save_checkpoint(ckpt_path) | null |
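melk reads like a checkpoint-on-signal hook: it closes over the enclosing trainer and ckptdir, and its *args signature matches a POSIX signal handler; a wiring sketch under that assumption:
import signal
signal.signal(signal.SIGUSR1, melk)  # `kill -USR1 <pid>` then dumps last.ckpt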
178,055 | import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
import socket
from pytorch_lightning.plugins.environments import ClusterEnvironment,SLURMEnvironment
def divein(*args, **kwargs):
if trainer.global_rank == 0:
        import pudb
        pudb.set_trace()
178,056 | import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import torchvision
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import clip
from torchvision.transforms import Resize
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ()) | null |
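chunk yields fixed-size tuples until the iterator is exhausted, ending with a shorter tail; for example:
batches = list(chunk(range(7), 3))
# [(0, 1, 2), (3, 4, 5), (6,)]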
178,057 | import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import torchvision
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import clip
from torchvision.transforms import Resize
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model | null |
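A typical invocation of load_model_from_config (both paths are illustrative):
config = OmegaConf.load("configs/stable-diffusion/v1-inference.yaml")
model = load_model_from_config(config, "models/ldm/stable-diffusion-v1/model.ckpt", verbose=True)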
178,058 | import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import torchvision
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import clip
from torchvision.transforms import Resize
def put_watermark(img, wm_encoder=None):
if wm_encoder is not None:
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
img = wm_encoder.encode(img, 'dwtDct')
img = Image.fromarray(img[:, :, ::-1])
return img | null |
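put_watermark expects an already configured encoder; the Stable Diffusion scripts build one roughly like this (treat the exact setup as a sketch):
wm_encoder = WatermarkEncoder()
wm_encoder.set_watermark('bytes', "StableDiffusionV1".encode('utf-8'))
img = put_watermark(img, wm_encoder)  # embeds an invisible dwtDct watermark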
178,059 | import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import torchvision
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import clip
from torchvision.transforms import Resize
safety_model_id = "CompVis/stable-diffusion-safety-checker"  # model id used by the upstream txt2img script
safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
def numpy_to_pil(images):
"""
Convert a numpy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
pil_images = [Image.fromarray(image) for image in images]
return pil_images
def load_replacement(x):
try:
hwc = x.shape
y = Image.open("assets/rick.jpeg").convert("RGB").resize((hwc[1], hwc[0]))
y = (np.array(y)/255.0).astype(x.dtype)
assert y.shape == x.shape
return y
except Exception:
return x
def check_safety(x_image):
safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values)
assert x_checked_image.shape[0] == len(has_nsfw_concept)
for i in range(len(has_nsfw_concept)):
if has_nsfw_concept[i]:
x_checked_image[i] = load_replacement(x_checked_image[i])
return x_checked_image, has_nsfw_concept | null |
178,060 | import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import torchvision
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import clip
from torchvision.transforms import Resize
def get_tensor(normalize=True, toTensor=True):
transform_list = []
if toTensor:
transform_list += [torchvision.transforms.ToTensor()]
if normalize:
transform_list += [torchvision.transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return torchvision.transforms.Compose(transform_list) | null |
178,103 | import math
import torch as th
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `convert_module_to_f16` function. Write a Python function `def convert_module_to_f16(l)` to solve the following problem:
Convert primitive modules to float16.
Here is the function:
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half() | Convert primitive modules to float16. |
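Because it only touches primitive layers, the converter is applied through nn.Module.apply, which visits every submodule; a usage sketch (the model is illustrative):
model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
model.apply(convert_module_to_f16)  # weights and biases of each Linear are now float16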
178,111 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import os
from io import BytesIO
import json
import logging
import base64
from sys import prefix
import threading
import random
from turtle import left, right
import numpy as np
from typing import Callable, List, Tuple, Union
from PIL import Image,ImageDraw
import torch.utils.data as data
import json
import time
import cv2
import torch
import torchvision
import torch.nn.functional as F
import torchvision.transforms as T
import copy
import math
from functools import partial
import albumentations as A
import bezier
def bbox_process(bbox):
x_min = int(bbox[0])
y_min = int(bbox[1])
x_max = x_min + int(bbox[2])
y_max = y_min + int(bbox[3])
return list(map(int, [x_min, y_min, x_max, y_max])) | null |
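bbox_process converts a COCO-style [x, y, w, h] box into integer [x_min, y_min, x_max, y_max] corners:
assert bbox_process([10, 20, 30, 40]) == [10, 20, 40, 60]  # xywh -> xyxy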
178,112 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import os
from io import BytesIO
import json
import logging
import base64
from sys import prefix
import threading
import random
from turtle import left, right
import numpy as np
from typing import Callable, List, Tuple, Union
from PIL import Image,ImageDraw
import torch.utils.data as data
import json
import time
import cv2
import torch
import torchvision
import torch.nn.functional as F
import torchvision.transforms as T
import copy
import math
from functools import partial
import albumentations as A
import bezier
def get_tensor(normalize=True, toTensor=True):
transform_list = []
if toTensor:
transform_list += [torchvision.transforms.ToTensor()]
if normalize:
transform_list += [torchvision.transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return torchvision.transforms.Compose(transform_list) | null |
178,113 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import os
from io import BytesIO
import json
import logging
import base64
from sys import prefix
import threading
import random
from turtle import left, right
import numpy as np
from typing import Callable, List, Tuple, Union
from PIL import Image,ImageDraw
import torch.utils.data as data
import json
import time
import cv2
import torch
import torchvision
import torch.nn.functional as F
import torchvision.transforms as T
import copy
import math
from functools import partial
import albumentations as A
import bezier
def get_tensor_clip(normalize=True, toTensor=True):
transform_list = []
if toTensor:
transform_list += [torchvision.transforms.ToTensor()]
if normalize:
transform_list += [torchvision.transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))]
return torchvision.transforms.Compose(transform_list) | null |
178,115 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from torchvision.transforms import Resize
import math
import time
import random
from torch.autograd import Variable
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self | Overwrite model.train with this function to make sure train/eval mode does not change anymore. |
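In the LDM codebase this function is assigned over a frozen submodule's train attribute so the parent's recursive .train() calls cannot flip it back to training mode; a sketch (the encoder is illustrative):
encoder = encoder.eval()        # freeze dropout/batch-norm behaviour
encoder.train = disabled_train  # shadow nn.Module.train on this instance
for p in encoder.parameters():
    p.requires_grad = False
# encoder.train(True) is now a no-op, so eval mode sticks during training.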
178,116 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from torchvision.transforms import Resize
import math
import time
import random
from torch.autograd import Variable
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2 | null |
178,117 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import clip
from torch.hub import load_state_dict_from_url  # needed by fid_inception_v3 below
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
def _inception_v3(*args, **kwargs):
"""Wraps `torchvision.models.inception_v3`
    Skips default weight initialization if supported by torchvision version.
See https://github.com/mseitzer/pytorch-fid/issues/28.
"""
try:
version = tuple(map(int, torchvision.__version__.split('.')[:2]))
except ValueError:
# Just a caution against weird version strings
version = (0,)
if version >= (0, 6):
kwargs['init_weights'] = False
return torchvision.models.inception_v3(*args, **kwargs)
class FIDInceptionA(torchvision.models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
The provided code snippet includes necessary dependencies for implementing the `fid_inception_v3` function. Write a Python function `def fid_inception_v3()` to solve the following problem:
Build pretrained Inception model for FID computation The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model.
Here is the function:
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = _inception_v3(num_classes=1008,
aux_logits=False,
pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
inception.load_state_dict(state_dict)
return inception | Build pretrained Inception model for FID computation The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model. |
178,118 | import os
import pathlib
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
import torch
import torchvision.transforms as TF
from PIL import Image
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
import clip
from inception import InceptionV3
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
    -- mu2 : The sample mean over activations, precalculated on a
              representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
              representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1)
+ np.trace(sigma2) - 2 * tr_covmean)
def compute_statistics_of_path(path, model, batch_size, dims, device,
num_workers=1):
if path.endswith('.npz'):
with np.load(path) as f:
m, s = f['mu'][:], f['sigma'][:]
else:
path = pathlib.Path(path)
files = sorted([file for ext in IMAGE_EXTENSIONS
for file in path.glob('*.{}'.format(ext))])
m, s = calculate_activation_statistics(files, model, batch_size,
dims, device, num_workers)
return m, s
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1,   # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
def __init__(self,
output_blocks=(DEFAULT_BLOCK_INDEX,),
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.clip_model, self.preprocess = clip.load("ViT-B/32", device="cuda")
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, \
'Last possible output block index is 3'
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = _inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1))
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
# print(x.shape)
image_features = self.clip_model.encode_image(x.squeeze(1))
# print(image_features.shape)
outp=[image_features.unsqueeze(2).unsqueeze(3)]
# print(outp[0].shape)
# if self.resize_input:
# x = F.interpolate(x,
# size=(299, 299),
# mode='bilinear',
# align_corners=False)
# if self.normalize_input:
# x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
# for idx, block in enumerate(self.blocks):
# x = block(x)
# if idx in self.output_blocks:
# outp.append(x)
# if idx == self.last_needed_block:
# break
return outp
The provided code snippet includes necessary dependencies for implementing the `calculate_fid_given_paths` function. Write a Python function `def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1)` to solve the following problem:
Calculates the FID of two paths
Here is the function:
def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1):
"""Calculates the FID of two paths"""
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,
dims, device, num_workers)
m2, s2 = compute_statistics_of_path(paths[1], model, batch_size,
dims, device, num_workers)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value | Calculates the FID of two paths |
178,120 | import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from PIL import Image
from torch.nn.functional import adaptive_avg_pool2d
import pickle
from scipy.stats import multivariate_normal
from sklearn import mixture
from inception import InceptionV3
import clip
model, preprocess = clip.load("ViT-B/32", device="cuda")
def _compute_statistics_of_path(path, model, batch_size, dims, cuda, pca_path, gmm_path, output_file):
class InceptionV3(nn.Module):
def __init__(self,
output_blocks=(DEFAULT_BLOCK_INDEX,),
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
def forward(self, inp):
def calculate_fid_given_paths(paths, batch_size, cuda, dims, pca_path, gmm_path, output_file):
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, dims, cuda, pca_path, gmm_path, output_file)
return 777 | null |
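`_compute_statistics_of_path` is left unimplemented in the snippet above (and the function body is clearly a placeholder). In the usual FID pipeline, the core of that step is just the mean and covariance of the pooled features; a minimal sketch, assuming `act` is an (N, dims) numpy array of activations (Inception features, or the CLIP embeddings used in this variant) and ignoring the pca/gmm arguments:
import numpy as np
def compute_activation_statistics(act):
    # act: (N, dims) array, one pooled feature vector per image.
    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma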
178,122 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import torch
from tqdm import tqdm
from PIL import Image
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
IMAGE_TOKEN_INDEX = -200
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
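A small worked example may clarify how `tokenizer_image_token` interleaves image placeholders with text ids. The stub tokenizer and the `<|image|>` placeholder string below are hypothetical stand-ins for illustration only (the real `DEFAULT_IMAGE_TOKEN` and tokenizer come from the mplug_owl2 package):
from types import SimpleNamespace
DEFAULT_IMAGE_TOKEN = "<|image|>"  # assumed placeholder string for this sketch
class StubTokenizer:
    # Hypothetical stand-in: BOS id 1, one dummy id (>= 100) per word.
    bos_token_id = 1
    def __call__(self, text):
        return SimpleNamespace(
            input_ids=[self.bos_token_id] + [100 + i for i, _ in enumerate(text.split())])
tok = StubTokenizer()
ids = tokenizer_image_token("describe <|image|> briefly", tok)
# -> [1, 100, -200, 100]: BOS, "describe", the image slot (-200), "briefly"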
def collate_fn(batches, tokenizer):
questions = [_['question'] for _ in batches]
questions_origin = [_['question_origin'] for _ in batches]
annotations = [_['annotation'] for _ in batches]
image_names = [_['image_name'] for _ in batches]
categories = [_['category'] for _ in batches]
image_tensor = [_['image_tensor'] for _ in batches]
input_ids = []
for input_text in questions:
input_ids.append(tokenizer_image_token(input_text, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').tolist())
input_tokens_max_length = max([len(x) for x in input_ids])
pad_token_id = tokenizer.pad_token_id
input_ids = [([pad_token_id] * (input_tokens_max_length - len(_)) + _) for _ in input_ids] # pad on the left
input_ids = torch.LongTensor(input_ids)
attention_mask = 1 - input_ids.eq(pad_token_id).long()
image_tensor = torch.cat(image_tensor, dim=0)
return questions_origin, image_tensor, input_ids, attention_mask, annotations, categories, image_names | null |
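The left padding in `collate_fn` is deliberate: decoder-only models generate from the rightmost position, so shorter prompts are padded on the left and the attention mask is derived from the pad id. A minimal illustration with a hypothetical pad id of 0:
import torch
pad_token_id = 0
batch = [[1, 5, 6, 7], [1, 9]]
max_len = max(len(x) for x in batch)
padded = [[pad_token_id] * (max_len - len(x)) + x for x in batch]  # pad on the left
input_ids = torch.LongTensor(padded)
attention_mask = 1 - input_ids.eq(pad_token_id).long()
# input_ids      -> [[1, 5, 6, 7], [0, 0, 1, 9]]
# attention_mask -> [[1, 1, 1, 1], [0, 0, 1, 1]]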
178,123 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import torch
from tqdm import tqdm
from PIL import Image
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from vqa import VQA
from vqa_eval import VQAEval
IMAGE_TOKEN_INDEX = -200
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
def collate_fn(batches, tokenizer):
questions = [_['question'] for _ in batches]
question_ids = [_['question_id'] for _ in batches]
annotations = [_['annotation'] for _ in batches]
image_tensor = [_['image_tensor'] for _ in batches]
input_ids = []
for input_text in questions:
input_ids.append(tokenizer_image_token(input_text, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').tolist())
input_tokens_max_length = max([len(x) for x in input_ids])
pad_token_id = tokenizer.pad_token_id
input_ids = [([pad_token_id] * (input_tokens_max_length - len(_)) + _) for _ in input_ids] # pad on the left
input_ids = torch.LongTensor(input_ids)
attention_mask = 1 - input_ids.eq(pad_token_id).long()
image_tensor = torch.cat(image_tensor, dim=0)
return question_ids, image_tensor, input_ids, attention_mask, annotations | null |
178,124 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
import torch
from pycocoevalcap.eval import COCOEvalCap
from pycocotools.coco import COCO
from tqdm import tqdm
from PIL import Image
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
IMAGE_TOKEN_INDEX = -200
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
def collate_fn(inputs, tokenizer):
image_ids = [_['image_id'] for _ in inputs]
image_tensor = [_['image_tensor'] for _ in inputs]
input_texts = [_['input_text'] for _ in inputs]
input_ids = []
for input_text in input_texts:
input_ids.append(tokenizer_image_token(input_text, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').tolist())
input_tokens_max_length = max([len(x) for x in input_ids])
pad_token_id = tokenizer.pad_token_id
input_ids = [([pad_token_id] * (input_tokens_max_length - len(_)) + _) for _ in input_ids] # pad on the left
input_ids = torch.LongTensor(input_ids)
attention_mask = 1 - input_ids.eq(pad_token_id).long()
image_tensor = torch.cat(image_tensor, dim=0)
return image_ids, image_tensor, input_ids, attention_mask | null |
178,125 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import torch
from tqdm import tqdm
from PIL import Image
import pandas as pd
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
def mapping_to_annotation(results, raw_annotation):
outputs = []
for result in results:
index, prediction = result['index'], result['prediction']
row_df = raw_annotation[raw_annotation['index'] == index].squeeze().to_dict()
output = {
"index": index,
"image": row_df['image'],
"question": row_df['question'],
"answer": row_df.get('answer', None),
"options": [y for y in [row_df.get(x, None) for x in 'ABCD'] if isinstance(y, str)],
"prediction": prediction,
"l2-category": row_df['l2-category'] if 'l2-category' in row_df else None
}
outputs.append(output)
return outputs | null |
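The `raw_annotation[raw_annotation['index'] == index].squeeze().to_dict()` pattern pulls a single annotation row out of the DataFrame as a plain dict: the boolean filter yields a one-row frame, `.squeeze()` collapses it to a Series, and `.to_dict()` keys the values by column. A small illustration with a hypothetical two-row annotation table:
import pandas as pd
raw_annotation = pd.DataFrame([
    {"index": 0, "image": "a.jpg", "question": "What color?", "answer": "red"},
    {"index": 1, "image": "b.jpg", "question": "How many?", "answer": "3"},
])
row = raw_annotation[raw_annotation["index"] == 1].squeeze().to_dict()
# -> {'index': 1, 'image': 'b.jpg', 'question': 'How many?', 'answer': '3'}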
178,126 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import torch
from tqdm import tqdm
from PIL import Image
import pandas as pd
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
def generate_submission_file(results, raw_annotation):
outputs = []
for result in results:
index, prediction = result['index'], result['prediction']
row_df = raw_annotation[raw_annotation['index'] == index].squeeze().to_dict()
output = {
"index": index,
"question": row_df['question'],
"prediction": prediction,
"A": row_df.get('A', None),
"B": row_df.get('B', None),
"C": row_df.get('C', None),
"D": row_df.get('D', None),
}
outputs.append(output)
return outputs | null |
178,127 | import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional
import torch
from tqdm import tqdm
from PIL import Image
import pandas as pd
from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from mplug_owl2.conversation import conv_templates, SeparatorStyle
from mplug_owl2.model.builder import load_pretrained_model
from mplug_owl2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
IMAGE_TOKEN_INDEX = -200
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
def collate_fn(batches, tokenizer):
questions = [_['question'] for _ in batches]
indices = [_['index'] for _ in batches]
image_tensor = [_['image_tensor'] for _ in batches]
input_ids = []
for input_text in questions:
input_ids.append(tokenizer_image_token(input_text, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').tolist())
input_tokens_max_length = max([len(x) for x in input_ids])
pad_token_id = tokenizer.pad_token_id
input_ids = [([pad_token_id] * (input_tokens_max_length - len(_)) + _) for _ in input_ids] # pad on the left
input_ids = torch.LongTensor(input_ids)
attention_mask = 1 - input_ids.eq(pad_token_id).long()
image_tensor = torch.cat(image_tensor, dim=0)
return image_tensor, input_ids, attention_mask, indices | null |