# repo: pytorch/pytorch
# path: torch/utils/cpp_extension.py
class BuildExtension(build_ext):
"""
A custom :mod:`setuptools` build extension.
This :class:`setuptools.build_ext` subclass takes care of passing the
minimum required compiler flags (e.g. ``-std=c++17``) as well as mixed
C++/CUDA/SYCL compilation (and support for CUDA/SYCL files in general).
When using :class:`BuildExtension`, it is allowed to supply a dictionary
for ``extra_compile_args`` (rather than the usual list) that maps from
languages/compilers (the only expected values are ``cxx``, ``nvcc`` or
``sycl``) to a list of additional compiler flags to supply to the compiler.
This makes it possible to supply different flags to the C++, CUDA and SYCL
compiler during mixed compilation.
``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we
attempt to build using the Ninja backend. Ninja greatly speeds up
compilation compared to the standard ``setuptools.build_ext``.
Falls back to the standard distutils backend if Ninja is not available.
.. note::
By default, the Ninja backend uses #CPUS + 2 workers to build the
extension. This may use up too many resources on some systems. One
can control the number of workers by setting the `MAX_JOBS` environment
variable to a non-negative number.
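Example (a minimal illustrative sketch of the per-compiler ``extra_compile_args``
dict described above; the flag values are placeholders, not recommendations):
    >>> # xdoctest: +SKIP
    >>> from setuptools import setup
    >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
    >>> setup(
    ...     name='example',
    ...     ext_modules=[
    ...         CUDAExtension(
    ...             name='example',
    ...             sources=['example.cpp', 'example_kernel.cu'],
    ...             extra_compile_args={'cxx': ['-O3'],
    ...                                 'nvcc': ['-O3']})
    ...     ],
    ...     cmdclass={'build_ext': BuildExtension})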
"""
@classmethod
def with_options(cls, **options):
"""Return a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options."""
class cls_with_options(cls): # type: ignore[misc, valid-type]
def __init__(self, *args, **kwargs) -> None:
kwargs.update(options)
super().__init__(*args, **kwargs)
return cls_with_options
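# Illustrative usage (not part of the original source): with_options lets a
# setup.py pin constructor defaults for every instance setuptools creates, e.g.
#   cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)}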
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False)
self.use_ninja = kwargs.get('use_ninja', True)
if self.use_ninja:
# Test if we can use ninja. Fall back otherwise.
msg = ('Attempted to use ninja as the BuildExtension backend but '
'%s. Falling back to using the slow distutils backend.')
if not is_ninja_available():
logger.warning(msg, 'we could not find ninja.')
self.use_ninja = False
def finalize_options(self) -> None:
super().finalize_options()
if self.use_ninja:
self.force = True
def build_extensions(self) -> None:
compiler_name, compiler_version = self._check_abi()
cuda_ext = False
sycl_ext = False
extension_iter = iter(self.extensions)
extension = next(extension_iter, None)
while not (cuda_ext and sycl_ext) and extension:
for source in extension.sources:
_, ext = os.path.splitext(source)
if ext == '.cu':
cuda_ext = True
elif ext == '.sycl':
sycl_ext = True
# This check accounts for the case where CUDA and SYCL sources
# are mixed in the same extension. We can stop checking sources
# once both are found or there are no more sources.
if cuda_ext and sycl_ext:
break
extension = next(extension_iter, None)
if sycl_ext:
if not self.use_ninja:
raise AssertionError("ninja is required to build sycl extensions.")
if cuda_ext and not IS_HIP_EXTENSION:
_check_cuda_version(compiler_name, compiler_version)
for extension in self.extensions:
# Ensure at least an empty list of flags for 'cxx', 'nvcc' and 'sycl' when
# extra_compile_args is a dict. Otherwise, default torch flags do
# not get passed. Necessary when only one of 'cxx', 'nvcc' or 'sycl' is
# passed to extra_compile_args in CUDAExtension or SyclExtension, i.e.
# CUDAExtension(..., extra_compile_args={'cxx': [...]})
# or
# CUDAExtension(..., extra_compile_args={'nvcc': [...]})
if isinstance(extension.extra_compile_args, dict):
for ext in ['cxx', 'nvcc', 'sycl']:
if ext not in extension.extra_compile_args:
extension.extra_compile_args[ext] = []
self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H')
if IS_HIP_EXTENSION:
self._hipify_compile_flags(extension)
if extension.py_limited_api:
# compile any extension that has passed in py_limited_api to the
# Extension constructor with the Py_LIMITED_API flag set to our
# min supported CPython version.
# See https://docs.python.org/3/c-api/stable.html#c.Py_LIMITED_API
self._add_compile_flag(extension, f'-DPy_LIMITED_API={min_supported_cpython}')
self._define_torch_extension_name(extension)
if 'nvcc_dlink' in extension.extra_compile_args:
if not self.use_ninja:
raise AssertionError(
f"With dlink=True, ninja is required to build cuda extension {extension.name}."
)
# Register .cu, .cuh, .hip, .mm and .sycl as valid source extensions.
# NOTE: At the moment .sycl is not a standard extension for SYCL sources
# supported by compilers. Here we introduce a torch-level convention that SYCL
# sources should use the .sycl file extension.
self.compiler.src_extensions += ['.cu', '.cuh', '.hip', '.sycl']
if torch.backends.mps.is_built():
self.compiler.src_extensions += ['.mm']
# Save the original _compile method for later.
if self.compiler.compiler_type == 'msvc':
self.compiler._cpp_extensions += ['.cu', '.cuh']
original_compile = self.compiler.compile
original_spawn = self.compiler.spawn
else:
original_compile = self.compiler._compile
def append_std17_if_no_std_present(cflags) -> None:
# NVCC does not allow multiple -std to be passed, so we avoid
# overriding the option if the user explicitly passed it.
cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}='
cpp_flag_prefix = cpp_format_prefix.format('std')
cpp_flag = cpp_flag_prefix + 'c++17'
if not any(flag.startswith(cpp_flag_prefix) for flag in cflags):
cflags.append(cpp_flag)
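# Illustrative behavior (not part of the original source):
#   ['-O2']        -> ['-O2', '-std=c++17']   (or '/std:c++17' with MSVC)
#   ['-std=c++14'] -> left untouched; the user's explicit -std wins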
def unix_cuda_flags(cflags):
cflags = (COMMON_NVCC_FLAGS +
['--compiler-options', "'-fPIC'"] +
cflags + _get_cuda_arch_flags(cflags))
# NVCC does not allow multiple -ccbin/--compiler-bindir to be passed, so we avoid
# overriding the option if the user explicitly passed it.
_ccbin = os.getenv("CC")
if (
_ccbin is not None
and not any(flag.startswith(('-ccbin', '--compiler-bindir')) for flag in cflags)
):
cflags.extend(['-ccbin', _ccbin])
return cflags
def convert_to_absolute_paths_inplace(paths) -> None:
# Helper function. See Note [Absolute include_dirs]
if paths is not None:
for i in range(len(paths)):
if not os.path.isabs(paths[i]):
paths[i] = os.path.abspath(paths[i])
def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None:
# Copy before we make any modifications.
cflags = copy.deepcopy(extra_postargs)
try:
original_compiler = self.compiler.compiler_so
if _is_cuda_file(src):
nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')]
self.compiler.set_executable('compiler_so', nvcc)
if isinstance(cflags, dict):
cflags = cflags['nvcc']
if IS_HIP_EXTENSION:
cflags = COMMON_HIPCC_FLAGS + cflags + _get_rocm_arch_flags(cflags)
else:
cflags = unix_cuda_flags(cflags)
elif isinstance(cflags, dict):
cflags = cflags['cxx']
if IS_HIP_EXTENSION:
cflags = COMMON_HIP_FLAGS + cflags
append_std17_if_no_std_present(cflags)
original_compile(obj, src, ext, cc_args, cflags, pp_opts)
finally:
# Put the original compiler back in place.
self.compiler.set_executable('compiler_so', original_compiler)
def unix_wrap_ninja_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
r"""Compiles sources by outputting a ninja file and running it."""
# NB: I copied some lines from self.compiler (which is an instance
# of distutils.UnixCCompiler). See the following link.
# https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567 # codespell:ignore
# This can be fragile, but a lot of other repos also do this
# (see https://github.com/search?q=_setup_compile&type=Code)
# so it is probably OK; we'll also get CI signal if/when
# we update our python version (which is when distutils can be
# upgraded)
# Use absolute path for output_dir so that the object file paths
# (`objects`) get generated with absolute paths.
# pyrefly: ignore [no-matching-overload]
output_dir = os.path.abspath(output_dir)
# See Note [Absolute include_dirs]
convert_to_absolute_paths_inplace(self.compiler.include_dirs)
_, objects, extra_postargs, pp_opts, _ = \
self.compiler._setup_compile(output_dir, macros,
include_dirs, sources,
depends, extra_postargs)
common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs)
extra_cc_cflags = self.compiler.compiler_so[1:]
with_cuda = any(map(_is_cuda_file, sources))
with_sycl = any(map(_is_sycl_file, sources))
# extra_postargs can be either:
# - a dict mapping cxx/nvcc/sycl to extra flags
# - a list of extra flags.
if isinstance(extra_postargs, dict):
post_cflags = extra_postargs['cxx']
else:
post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
post_cflags = COMMON_HIP_FLAGS + post_cflags
append_std17_if_no_std_present(post_cflags)
cuda_post_cflags = None
cuda_cflags = None
if with_cuda:
cuda_cflags = common_cflags
if isinstance(extra_postargs, dict):
cuda_post_cflags = extra_postargs['nvcc']
else:
cuda_post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags)
cuda_post_cflags = COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_post_cflags
else:
cuda_post_cflags = unix_cuda_flags(cuda_post_cflags)
append_std17_if_no_std_present(cuda_post_cflags)
cuda_cflags = [shlex.quote(f) for f in cuda_cflags]
cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags]
if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs:
cuda_dlink_post_cflags = unix_cuda_flags(extra_postargs['nvcc_dlink'])
cuda_dlink_post_cflags = [shlex.quote(f) for f in cuda_dlink_post_cflags]
else:
cuda_dlink_post_cflags = None
sycl_post_cflags = None
sycl_cflags = None
sycl_dlink_post_cflags = None
if with_sycl:
sycl_cflags = extra_cc_cflags + common_cflags + _COMMON_SYCL_FLAGS
if isinstance(extra_postargs, dict):
sycl_post_cflags = extra_postargs['sycl']
else:
sycl_post_cflags = list(extra_postargs)
_append_sycl_targets_if_missing(sycl_post_cflags)
append_std17_if_no_std_present(sycl_cflags)
_append_sycl_std_if_no_std_present(sycl_cflags)
host_cflags = extra_cc_cflags + common_cflags + post_cflags
append_std17_if_no_std_present(host_cflags)
# Escape quoted arguments so they pass through the SYCL compiler intact.
icpx_version = _get_icpx_version()
if int(icpx_version) >= 20250200:
host_cflags = [item.replace('"', '\\"') for item in host_cflags]
else:
host_cflags = [item.replace('"', '\\\\"') for item in host_cflags]
host_cflags = ' '.join(host_cflags)
# Note the order: shlex.quote the sycl_cflags first, then apply
# _wrap_sycl_host_flags. The reason is that the SYCL host flags are quoted,
# space-containing strings that are passed on to the SYCL compiler.
sycl_cflags = [shlex.quote(f) for f in sycl_cflags]
sycl_cflags += _wrap_sycl_host_flags(host_cflags)
sycl_dlink_post_cflags = _SYCL_DLINK_FLAGS.copy()
sycl_dlink_post_cflags += _get_sycl_device_flags(sycl_post_cflags)
sycl_post_cflags = [shlex.quote(f) for f in sycl_post_cflags]
_write_ninja_file_and_compile_objects(
sources=sources,
objects=objects,
cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags],
post_cflags=[shlex.quote(f) for f in post_cflags],
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=sycl_cflags,
sycl_post_cflags=sycl_post_cflags,
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
build_directory=output_dir,
verbose=True,
with_cuda=with_cuda,
with_sycl=with_sycl)
# Return *all* object filenames, not just the ones we just built.
return objects
def win_cuda_flags(cflags):
return (COMMON_NVCC_FLAGS +
cflags + _get_cuda_arch_flags(cflags))
def win_hip_flags(cflags):
return (COMMON_HIPCC_FLAGS + COMMON_HIP_FLAGS + cflags + _get_rocm_arch_flags(cflags))
def win_wrap_single_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
self.cflags = copy.deepcopy(extra_postargs)
extra_postargs = None
def spawn(cmd):
# Using regex to match src, obj and include files
src_regex = re.compile('/T(p|c)(.*)')
src_list = [
m.group(2) for m in (src_regex.match(elem) for elem in cmd)
if m
]
obj_regex = re.compile('/Fo(.*)') # codespell:ignore
obj_list = [
m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
if m
]
include_regex = re.compile(r'((\-|\/)I.*)')
include_list = [
m.group(1)
for m in (include_regex.match(elem) for elem in cmd) if m
]
if len(src_list) >= 1 and len(obj_list) >= 1:
src = src_list[0]
obj = obj_list[0]
if _is_cuda_file(src):
if IS_HIP_EXTENSION:
nvcc = _get_hipcc_path()
else:
nvcc = _join_cuda_home('bin', 'nvcc')
if isinstance(self.cflags, dict):
cflags = self.cflags['nvcc']
elif isinstance(self.cflags, list):
cflags = self.cflags
else:
cflags = []
if IS_HIP_EXTENSION:
cflags = win_hip_flags(cflags)
else:
cflags = win_cuda_flags(cflags) + ['-std=c++17', '--use-local-env']
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags
for flag in COMMON_MSVC_FLAGS:
cflags = ['-Xcompiler', flag] + cflags
cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags
elif isinstance(self.cflags, dict):
cflags = COMMON_MSVC_FLAGS + self.cflags['cxx']
append_std17_if_no_std_present(cflags)
cmd += cflags
elif isinstance(self.cflags, list):
cflags = COMMON_MSVC_FLAGS + self.cflags
append_std17_if_no_std_present(cflags)
cmd += cflags
return original_spawn(cmd)
try:
self.compiler.spawn = spawn
return original_compile(sources, output_dir, macros,
include_dirs, debug, extra_preargs,
extra_postargs, depends)
finally:
self.compiler.spawn = original_spawn
def win_wrap_ninja_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
is_standalone=False):
if not self.compiler.initialized:
self.compiler.initialize()
# pyrefly: ignore [no-matching-overload]
output_dir = os.path.abspath(output_dir)
# Note [Absolute include_dirs]
# Convert relative path in self.compiler.include_dirs to absolute path if any.
# For ninja build, the build location is not local, but instead, the build happens
# in a script-created build folder. Thus, relative paths lose their correctness.
# To be consistent with jit extension, we allow user to enter relative include_dirs
# in setuptools.setup, and we convert the relative path to absolute path here.
convert_to_absolute_paths_inplace(self.compiler.include_dirs)
_, objects, extra_postargs, pp_opts, _ = \
self.compiler._setup_compile(output_dir, macros,
include_dirs, sources,
depends, extra_postargs)
# Replace spaces with \ when using hipcc (hipcc passes includes to clang without quotes, so clang treats a space in an include path as the start of a new argument).
if IS_HIP_EXTENSION:
pp_opts = ["-I{}".format(s[2:].replace(" ", "\\")) if s.startswith('-I') else s for s in pp_opts]
common_cflags = extra_preargs or []
cflags = []
if debug:
cflags.extend(self.compiler.compile_options_debug)
else:
cflags.extend(self.compiler.compile_options)
cflags = cflags + common_cflags + pp_opts + COMMON_MSVC_FLAGS
if IS_HIP_EXTENSION:
_set_hipcc_runtime_lib(is_standalone, debug)
common_cflags.extend(COMMON_HIP_FLAGS)
else:
common_cflags.extend(COMMON_MSVC_FLAGS)
with_cuda = any(map(_is_cuda_file, sources))
# extra_postargs can be either:
# - a dict mapping cxx/nvcc to extra flags
# - a list of extra flags.
if isinstance(extra_postargs, dict):
post_cflags = extra_postargs['cxx']
else:
post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
post_cflags = COMMON_HIP_FLAGS + post_cflags
append_std17_if_no_std_present(post_cflags)
cuda_post_cflags = None
cuda_cflags = None
if with_cuda:
cuda_cflags = ['-std=c++17']
for common_cflag in common_cflags:
cuda_cflags.append('-Xcompiler')
cuda_cflags.append(common_cflag)
if not IS_HIP_EXTENSION:
cuda_cflags.append('--use-local-env')
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cuda_cflags.append('-Xcudafe')
cuda_cflags.append('--diag_suppress=' + ignore_warning)
cuda_cflags.extend(pp_opts)
if isinstance(extra_postargs, dict):
cuda_post_cflags = extra_postargs['nvcc']
else:
cuda_post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
cuda_post_cflags = win_hip_flags(cuda_post_cflags)
else:
cuda_post_cflags = win_cuda_flags(cuda_post_cflags)
cflags = _nt_quote_args(cflags)
post_cflags = _nt_quote_args(post_cflags)
if with_cuda:
cuda_cflags = _nt_quote_args(cuda_cflags)
cuda_post_cflags = _nt_quote_args(cuda_post_cflags)
if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs:
cuda_dlink_post_cflags = win_cuda_flags(extra_postargs['nvcc_dlink'])
else:
cuda_dlink_post_cflags = None
_write_ninja_file_and_compile_objects(
sources=sources,
objects=objects,
cflags=cflags,
post_cflags=post_cflags,
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=None,
sycl_post_cflags=None,
sycl_dlink_post_cflags=None,
build_directory=output_dir,
verbose=True,
with_cuda=with_cuda,
with_sycl=False)
# Return *all* object filenames, not just the ones we just built.
return objects
# Monkey-patch the _compile or compile method.
# https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511 # codespell:ignore
if self.compiler.compiler_type == 'msvc':
if self.use_ninja:
self.compiler.compile = win_wrap_ninja_compile
else:
self.compiler.compile = win_wrap_single_compile
else:
if self.use_ninja:
self.compiler.compile = unix_wrap_ninja_compile
else:
self.compiler._compile = unix_wrap_single_compile
build_ext.build_extensions(self)
def get_ext_filename(self, ext_name):
# Get the original shared library name. For Python 3, this name will be
# suffixed with "<SOABI>.so", where <SOABI> will be something like
# cpython-37m-x86_64-linux-gnu.
ext_filename = super().get_ext_filename(ext_name)
# If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI
# component. This makes building shared libraries with setuptools that
# aren't Python modules nicer.
if self.no_python_abi_suffix:
# The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"].
ext_filename_parts = ext_filename.split('.')
# Omit the second to last element.
without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
ext_filename = '.'.join(without_abi)
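# e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"] -> "my_extension.so" (illustrative).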
return ext_filename
def _check_abi(self) -> tuple[str, TorchVersion]:
# On some platforms, like Windows, compiler_cxx is not available.
if hasattr(self.compiler, 'compiler_cxx'):
compiler = self.compiler.compiler_cxx[0]
else:
compiler = get_cxx_compiler()
_, version = get_compiler_abi_compatibility_and_version(compiler)
# Warn user if VC env is activated but `DISTUTILS_USE_SDK` is not set.
if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ:
msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set. '
       'This may lead to multiple activations of the VC env. '
       'Please set `DISTUTILS_USE_SDK=1` and try again.')
raise UserWarning(msg)
return compiler, version
def _add_compile_flag(self, extension, flag) -> None:
extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args)
if isinstance(extension.extra_compile_args, dict):
for args in extension.extra_compile_args.values():
args.append(flag)
else:
extension.extra_compile_args.append(flag)
# Simple hipify, replace the first occurrence of CUDA with HIP
# in flags starting with "-" and containing "CUDA", but exclude -I flags
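# e.g. "-DENABLE_CUDA_KERNELS=1" -> "-DENABLE_HIP_KERNELS=1" (illustrative; the
# value to the right of '=' is left untouched).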
def _hipify_compile_flags(self, extension) -> None:
if isinstance(extension.extra_compile_args, dict) and 'nvcc' in extension.extra_compile_args:
modified_flags = []
for flag in extension.extra_compile_args['nvcc']:
if flag.startswith("-") and "CUDA" in flag and not flag.startswith("-I"):
# check/split flag into flag and value
parts = flag.split("=", 1)
if len(parts) == 2:
flag_part, value_part = parts
# replace first instance of "CUDA" with "HIP" only in the flag and not the flag value
modified_flag_part = flag_part.replace("CUDA", "HIP", 1)
modified_flag = f"{modified_flag_part}={value_part}"
else:
# replace first instance of "CUDA" with "HIP" in the flag
modified_flag = flag.replace("CUDA", "HIP", 1)
modified_flags.append(modified_flag)
logger.info('Modified flag: %s -> %s', flag, modified_flag)
else:
modified_flags.append(flag)
extension.extra_compile_args['nvcc'] = modified_flags
def _define_torch_extension_name(self, extension) -> None:
# pybind11 doesn't support dots in module names, so in order to support
# extensions inside packages like torch._C, we take the last part of the
# string as the library name
names = extension.name.split('.')
name = names[-1]
define = f'-DTORCH_EXTENSION_NAME={name}'
self._add_compile_flag(extension, define)
def CppExtension(name, sources, *args, **kwargs):
"""
Create a :class:`setuptools.Extension` for C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a C++ extension.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor. A full list of arguments can be found at
https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Contrary to CPython setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hexcode of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from setuptools import setup
>>> from torch.utils.cpp_extension import BuildExtension, CppExtension
>>> setup(
... name='extension',
... ext_modules=[
... CppExtension(
... name='extension',
... sources=['extension.cpp'],
... extra_compile_args=['-g'],
... extra_link_args=['-Wl,--no-as-needed', '-lm'])
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
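A ``py_limited_api`` sketch (illustrative; assumes the extension only uses
libtorch APIs and avoids libtorch_python, as warned above):
    >>> # xdoctest: +SKIP
    >>> CppExtension(
    ...     name='extension',
    ...     sources=['extension.cpp'],
    ...     py_limited_api=True)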
"""
include_dirs = kwargs.get('include_dirs', [])
include_dirs += include_paths()
kwargs['include_dirs'] = include_dirs
library_dirs = kwargs.get('library_dirs', [])
library_dirs += library_paths()
kwargs['library_dirs'] = library_dirs
libraries = kwargs.get('libraries', [])
libraries.append('c10')
libraries.append('torch')
libraries.append('torch_cpu')
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append('torch_python')
if IS_WINDOWS:
libraries.append("sleef")
kwargs['libraries'] = libraries
kwargs['language'] = 'c++'
return setuptools.Extension(name, sources, *args, **kwargs)
def CUDAExtension(name, sources, *args, **kwargs):
"""
Create a :class:`setuptools.Extension` for CUDA/C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a CUDA/C++
extension. This includes the CUDA include path, library path and runtime
library.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor. A full list of arguments can be found at
https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Contrary to CPython setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hexcode of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from setuptools import setup
>>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
>>> setup(
... name='cuda_extension',
... ext_modules=[
... CUDAExtension(
... name='cuda_extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... extra_compile_args={'cxx': ['-g'],
... 'nvcc': ['-O2']},
... extra_link_args=['-Wl,--no-as-needed', '-lcuda'])
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
Compute capabilities:
By default the extension will be compiled to run on all archs of the cards visible during the
building process of the extension, plus PTX. If down the road a new card is installed the
extension may need to be recompiled. If a visible card has a compute capability (CC) that's
newer than the newest version for which your nvcc can build fully-compiled binaries, PyTorch
will make nvcc fall back to building kernels with the newest version of PTX your nvcc does
support (see below for details on PTX).
You can override the default behavior using `TORCH_CUDA_ARCH_LIST` to explicitly specify which
CCs you want the extension to support:
``TORCH_CUDA_ARCH_LIST="6.1 8.6" python build_my_extension.py``
``TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python build_my_extension.py``
The +PTX option causes extension kernel binaries to include PTX instructions for the specified
CC. PTX is an intermediate representation that allows kernels to runtime-compile for any CC >=
the specified CC (for example, 8.6+PTX generates PTX that can runtime-compile for any GPU with
CC >= 8.6). This improves your binary's forward compatibility. However, relying on older PTX to
provide forward compat by runtime-compiling for newer CCs can modestly reduce performance on
those newer CCs. If you know exact CC(s) of the GPUs you want to target, you're always better
off specifying them individually. For example, if you want your extension to run on 8.0 and 8.6,
"8.0+PTX" would work functionally because it includes PTX that can runtime-compile for 8.6, but
"8.0 8.6" would be better.
Note that while it's possible to include all supported archs, the more archs get included the
slower the building process will be, as it will build a separate kernel image for each arch.
Note that CUDA-11.5 nvcc will hit an internal compiler error while parsing torch/extension.h on Windows.
To work around the issue, move the python binding logic to a pure C++ file.
Example use:
#include <ATen/ATen.h>
at::Tensor SigmoidAlphaBlendForwardCuda(....)
Instead of:
#include <torch/extension.h>
torch::Tensor SigmoidAlphaBlendForwardCuda(...)
Currently open issue for nvcc bug: https://github.com/pytorch/pytorch/issues/69460
Complete workaround code example: https://github.com/facebookresearch/pytorch3d/commit/cb170ac024a949f1f9614ffe6af1c38d972f7d48
Relocatable device code linking:
If you want to reference device symbols across compilation units (across object files),
the object files need to be built with `relocatable device code` (-rdc=true or -dc).
An exception to this rule is "dynamic parallelism" (nested kernel launches) which is not used a lot anymore.
`Relocatable device code` is less optimized so it needs to be used only on object files that need it.
Using `-dlto` (Device Link Time Optimization) at the device code compilation step and `dlink` step
helps reduce the potential performance degradation of `-rdc`.
Note that it needs to be used at both steps to be useful.
If you have `rdc` objects you need to have an extra `-dlink` (device linking) step before the CPU symbol linking step.
There is also a case where `-dlink` is used without `-rdc`:
when an extension is linked against a static lib containing rdc-compiled objects
like the [NVSHMEM library](https://developer.nvidia.com/nvshmem).
Note: Ninja is required to build a CUDA Extension with RDC linking.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> CUDAExtension(
... name='cuda_extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... dlink=True,
... dlink_libraries=["dlink_lib"],
... extra_compile_args={'cxx': ['-g'],
... 'nvcc': ['-O2', '-rdc=true']})
"""
library_dirs = kwargs.get('library_dirs', [])
library_dirs += library_paths(device_type="cuda")
kwargs['library_dirs'] = library_dirs
libraries = kwargs.get('libraries', [])
libraries.append('c10')
libraries.append('torch')
libraries.append('torch_cpu')
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append('torch_python')
if IS_HIP_EXTENSION:
libraries.append('amdhip64')
libraries.append('c10_hip')
libraries.append('torch_hip')
else:
libraries.append('cudart')
libraries.append('c10_cuda')
libraries.append('torch_cuda')
kwargs['libraries'] = libraries
include_dirs = kwargs.get('include_dirs', [])
if IS_HIP_EXTENSION:
from .hipify import hipify_python
build_dir = os.getcwd()
hipify_result = hipify_python.hipify(
project_directory=build_dir,
output_directory=build_dir,
header_include_dirs=include_dirs,
includes=[os.path.join(build_dir, '*')], # limit scope to build_dir only
extra_files=[os.path.abspath(s) for s in sources],
show_detailed=True,
is_pytorch_extension=True,
hipify_extra_files_only=True, # don't hipify everything in includes path
)
hipified_sources = set()
for source in sources:
s_abs = os.path.abspath(source)
hipified_s_abs = (hipify_result[s_abs].hipified_path if (s_abs in hipify_result and
hipify_result[s_abs].hipified_path is not None) else s_abs)
# setup() arguments must *always* be /-separated paths relative to the setup.py directory,
# *never* absolute paths
hipified_sources.add(os.path.relpath(hipified_s_abs, build_dir))
sources = list(hipified_sources)
include_dirs += include_paths(device_type="cuda")
kwargs['include_dirs'] = include_dirs
kwargs['language'] = 'c++'
dlink_libraries = kwargs.get('dlink_libraries', [])
dlink = kwargs.get('dlink', False) or dlink_libraries
if dlink:
extra_compile_args = kwargs.get('extra_compile_args', {})
extra_compile_args_dlink = extra_compile_args.get('nvcc_dlink', [])
extra_compile_args_dlink += ['-dlink']
extra_compile_args_dlink += [f'-L{x}' for x in library_dirs]
extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries]
if (torch.version.cuda is not None) and TorchVersion(torch.version.cuda) >= '11.2':
extra_compile_args_dlink += ['-dlto'] # Device Link Time Optimization started from cuda 11.2
extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink
kwargs['extra_compile_args'] = extra_compile_args
return setuptools.Extension(name, sources, *args, **kwargs)
def SyclExtension(name, sources, *args, **kwargs):
r"""
Creates a :class:`setuptools.Extension` for SYCL/C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a SYCL/C++
extension.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor.
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Contrary to CPython setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hexcode of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from torch.utils.cpp_extension import BuildExtension, SyclExtension
>>> setup(
... name='xpu_extension',
... ext_modules=[
... SyclExtension(
... name='xpu_extension',
... sources=['extension.cpp', 'extension_kernel.cpp'],
... extra_compile_args={'cxx': ['-g', '-std=c++20', '-fPIC']})
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
By default the extension will be compiled to run on all archs of the cards visible during the
building process of the extension. If down the road a new card is installed the
extension may need to be recompiled. You can override the default behavior using
`TORCH_XPU_ARCH_LIST` to explicitly specify which device architectures you want the extension
to support:
``TORCH_XPU_ARCH_LIST="pvc,xe-lpg" python build_my_extension.py``
Note that while it's possible to include all supported archs, the more archs get included the
slower the building process will be, as it will build a separate kernel image for each arch.
Note: Ninja is required to build SyclExtension.
"""
library_dirs = kwargs.get("library_dirs", [])
library_dirs += library_paths()
kwargs["library_dirs"] = library_dirs
libraries = kwargs.get("libraries", [])
libraries.append("c10")
libraries.append("c10_xpu")
libraries.append("torch")
libraries.append("torch_cpu")
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append("torch_python")
libraries.append("torch_xpu")
kwargs["libraries"] = libraries
include_dirs = kwargs.get("include_dirs", [])
include_dirs += include_paths()
kwargs["include_dirs"] = include_dirs
kwargs["language"] = "c++"
return setuptools.Extension(name, sources, *args, **kwargs)
def include_paths(device_type: str = "cpu", torch_include_dirs=True) -> list[str]:
"""
Get the include paths required to build a C++ or CUDA or SYCL extension.
Args:
device_type: Defaults to "cpu".
torch_include_dirs: If ``True`` (default), include torch's own include directories.
Returns:
A list of include path strings.
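Example (illustrative):
    >>> # xdoctest: +SKIP
    >>> from torch.utils.cpp_extension import include_paths
    >>> paths = include_paths(device_type="cuda")  # torch + CUDA include dirs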
"""
paths = []
lib_include = os.path.join(_TORCH_PATH, 'include')
if torch_include_dirs:
paths.extend([
lib_include,
# Remove this once torch/torch.h is officially no longer supported for C++ extensions.
os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'),
])
if device_type == "cuda" and IS_HIP_EXTENSION:
paths.append(os.path.join(lib_include, 'THH'))
paths.append(_join_rocm_home('include'))
elif device_type == "cuda":
cuda_home_include = _join_cuda_home('include')
# if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home.
# but gcc doesn't like having /usr/include passed explicitly
if cuda_home_include != '/usr/include':
paths.append(cuda_home_include)
# Support CUDA_INC_PATH env variable supported by CMake files
if (cuda_inc_path := os.environ.get("CUDA_INC_PATH", None)) and \
cuda_inc_path != '/usr/include':
paths.append(cuda_inc_path)
if CUDNN_HOME is not None:
paths.append(os.path.join(CUDNN_HOME, 'include'))
elif device_type == "xpu":
paths.append(_join_sycl_home('include'))
paths.append(_join_sycl_home('include', 'sycl'))
return paths
def library_paths(device_type: str = "cpu", torch_include_dirs: bool = True, cross_target_platform: str | None = None) -> list[str]:
"""
Get the library paths required to build a C++, CUDA or SYCL extension.
Args:
    device_type: Defaults to "cpu".
    torch_include_dirs: If ``True`` (default), add ``TORCH_LIB_PATH`` so the
        extension links against libtorch.
    cross_target_platform: If set to "windows", use the Windows CUDA library
        layout for cross-compilation (requires ``WINDOWS_CUDA_HOME``).
Returns:
A list of library path strings.
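Example (illustrative):
    >>> # xdoctest: +SKIP
    >>> from torch.utils.cpp_extension import library_paths
    >>> paths = library_paths(device_type="cuda")  # torch lib dir + CUDA lib dir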
"""
paths = []
if torch_include_dirs:
# We need to link against libtorch.so
paths.extend([TORCH_LIB_PATH])
if device_type == "cuda" and IS_HIP_EXTENSION:
lib_dir = 'lib'
paths.append(_join_rocm_home(lib_dir))
if HIP_HOME is not None:
paths.append(os.path.join(HIP_HOME, 'lib'))
elif device_type == "cuda":
if cross_target_platform == "windows":
lib_dir = os.path.join('lib', 'x64')
if WINDOWS_CUDA_HOME is None:
raise RuntimeError("Need to set WINDOWS_CUDA_HOME for windows cross-compilation")
paths.append(os.path.join(WINDOWS_CUDA_HOME, lib_dir))
else:
if IS_WINDOWS:
lib_dir = os.path.join('lib', 'x64')
else:
lib_dir = 'lib64'
if (not os.path.exists(_join_cuda_home(lib_dir)) and
os.path.exists(_join_cuda_home('lib'))):
# 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955)
# Note that it's also possible both don't exist (see
# _find_cuda_home) - in that case we stay with 'lib64'.
lib_dir = 'lib'
paths.append(_join_cuda_home(lib_dir))
if CUDNN_HOME is not None:
paths.append(os.path.join(CUDNN_HOME, lib_dir))
elif device_type == "xpu":
if IS_WINDOWS:
lib_dir = os.path.join('lib', 'x64')
else:
lib_dir = 'lib64'
if (not os.path.exists(_join_sycl_home(lib_dir)) and
os.path.exists(_join_sycl_home('lib'))):
lib_dir = 'lib'
paths.append(_join_sycl_home(lib_dir))
return paths
def load(name,
sources: str | list[str],
extra_cflags=None,
extra_cuda_cflags=None,
extra_sycl_cflags=None,
extra_ldflags=None,
extra_include_paths=None,
build_directory=None,
verbose=False,
with_cuda: bool | None = None,
with_sycl: bool | None = None,
is_python_module=True,
is_standalone=False,
keep_intermediates=True):
"""
Load a PyTorch C++ extension just-in-time (JIT).
To load an extension, a Ninja build file is emitted, which is used to
compile the given sources into a dynamic library. This library is
subsequently loaded into the current Python process as a module and
returned from this function, ready for use.
By default, the directory to which the build file is emitted and the
resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
``<tmp>`` is the temporary folder on the current platform and ``<name>``
the name of the extension. This location can be overridden in two ways.
First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
into subfolders of this directory. Second, if the ``build_directory``
argument to this function is supplied, it overrides the entire path, i.e.
the library will be compiled into that folder directly.
To compile the sources, the default system compiler (``c++``) is used,
which can be overridden by setting the ``CXX`` environment variable. To pass
additional arguments to the compilation process, ``extra_cflags`` or
``extra_ldflags`` can be provided. For example, to compile your extension
with optimizations, pass ``extra_cflags=['-O3']``. You can also use
``extra_cflags`` to pass further include directories.
CUDA support with mixed compilation is provided. Simply pass CUDA source
files (``.cu`` or ``.cuh``) along with other sources. Such files will be
detected and compiled with nvcc rather than the C++ compiler. This includes
passing the CUDA lib64 directory as a library directory, and linking
``cudart``. You can pass additional flags to nvcc via
``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various
heuristics for finding the CUDA install directory are used, which usually
work fine. If not, setting the ``CUDA_HOME`` environment variable is the
safest option.
SYCL support with mixed compilation is provided. Simply pass SYCL source
files (``.sycl``) along with other sources. Such files will be detected
and compiled with the SYCL compiler (such as the Intel DPC++ Compiler) rather
than the C++ compiler. You can pass additional flags to the SYCL compiler
via ``extra_sycl_cflags``, just like with ``extra_cflags`` for C++. The
SYCL compiler is expected to be found via the system PATH environment
variable.
Args:
name: The name of the extension to build. This MUST be the same as the
name of the pybind11 module!
sources: A list of relative or absolute paths to C++ source files.
extra_cflags: optional list of compiler flags to forward to the build.
extra_cuda_cflags: optional list of compiler flags to forward to nvcc
when building CUDA sources.
extra_sycl_cflags: optional list of compiler flags to forward to SYCL
compiler when building SYCL sources.
extra_ldflags: optional list of linker flags to forward to the build.
extra_include_paths: optional list of include directories to forward
to the build.
build_directory: optional path to use as build workspace.
verbose: If ``True``, turns on verbose logging of load steps.
with_cuda: Determines whether CUDA headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on the existence of ``.cu`` or
``.cuh`` in ``sources``. Set it to ``True`` to force CUDA headers
and libraries to be included.
with_sycl: Determines whether SYCL headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on the existence of ``.sycl`` in
``sources``. Set it to ``True`` to force SYCL headers and
libraries to be included.
is_python_module: If ``True`` (default), imports the produced shared
library as a Python module. If ``False``, behavior depends on
``is_standalone``.
is_standalone: If ``False`` (default) loads the constructed extension
into the process as a plain dynamic library. If ``True``, build a
standalone executable.
Returns:
If ``is_python_module`` is ``True``:
Returns the loaded PyTorch extension as a Python module.
If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``:
Returns nothing. (The shared library is loaded into the process as
a side effect.)
If ``is_standalone`` is ``True``:
    Returns the path to the executable. (On Windows, TORCH_LIB_PATH is
added to the PATH environment variable as a side effect.)
Example:
>>> # xdoctest: +SKIP
>>> from torch.utils.cpp_extension import load
>>> module = load(
... name='extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... extra_cflags=['-O2'],
... verbose=True)
"""
return _jit_compile(
name,
[sources] if isinstance(sources, str) else sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory or _get_build_directory(name, verbose),
verbose,
with_cuda,
with_sycl,
is_python_module,
is_standalone,
keep_intermediates=keep_intermediates)
@deprecated("PyBind11 ABI handling is internal to PyBind11; this will be removed after PyTorch 2.9.0")
def _get_pybind11_abi_build_flags() -> list[str]:
return []
def check_compiler_is_gcc(compiler) -> bool:
if not IS_LINUX:
return False
env = os.environ.copy()
env['LC_ALL'] = 'C' # Don't localize output
try:
version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
except Exception:
try:
version_string = subprocess.check_output([compiler, '--version'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
except Exception:
return False
# Check for GCC by verifying both COLLECT_GCC and gcc version string are present
# This works for c++, g++, gcc, and versioned variants like g++-13
pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
has_collect_gcc = pattern.search(version_string) is not None
if has_collect_gcc and 'gcc version' in version_string:
return True
return False
def _check_and_build_extension_h_precompiler_headers(
extra_cflags,
extra_include_paths,
is_standalone=False) -> None:
r'''
Precompiled Headers (PCH) can pre-build the same headers and reduce build time for pytorch load_inline modules.
GCC official manual: https://gcc.gnu.org/onlinedocs/gcc-4.0.4/gcc/Precompiled-Headers.html
PCH only works when the built pch file (header.h.gch) and the build target have the same build parameters. So we need
to add a signature file to record the PCH file's parameters. If the build parameters (signature) change, the PCH
file should be rebuilt.
Note:
1. Windows and macOS have different PCH mechanisms. We only support Linux currently.
2. It only works on GCC/G++.
'''
if not IS_LINUX:
return
compiler = get_cxx_compiler()
b_is_gcc = check_compiler_is_gcc(compiler)
if b_is_gcc is False:
return
head_file = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h')
head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
def listToString(s):
# initialize an empty string
string = ""
if s is None:
return string
# traverse the list
for element in s:
string += (element + ' ')
# return string
return string
def format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags, torch_include_dirs, extra_cflags, extra_include_paths):
return re.sub(
r"[ \n]+",
" ",
f"""
{compiler} -x c++-header {head_file} -o {head_file_pch} {torch_include_dirs} {extra_include_paths} {extra_cflags} {common_cflags}
""",
).strip()
def command_to_signature(cmd):
signature = cmd.replace(' ', '_')
return signature
def check_pch_signature_in_file(file_path, signature):
b_exist = os.path.isfile(file_path)
if b_exist is False:
return False
with open(file_path) as file:
# read all content of a file
content = file.read()
# check if string present in a file
return signature == content
def _create_if_not_exist(path_dir) -> None:
if not os.path.exists(path_dir):
try:
Path(path_dir).mkdir(parents=True, exist_ok=True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise RuntimeError(f"Fail to create path {path_dir}") from exc
def write_pch_signature_to_file(file_path, pch_sign) -> None:
_create_if_not_exist(os.path.dirname(file_path))
with open(file_path, "w") as f:
f.write(pch_sign)
f.close()
def build_precompile_header(pch_cmd) -> None:
try:
subprocess.check_output(shlex.split(pch_cmd), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Compile PreCompile Header fail, command: {pch_cmd}") from e
extra_cflags_str = listToString(extra_cflags)
extra_include_paths_str = " ".join(
[f"-I{include}" for include in extra_include_paths] if extra_include_paths else []
)
lib_include = os.path.join(_TORCH_PATH, 'include')
torch_include_dirs = [
f"-I {lib_include}",
# Python.h
"-I {}".format(sysconfig.get_path("include")),
# torch/all.h
"-I {}".format(os.path.join(lib_include, 'torch', 'csrc', 'api', 'include')),
]
torch_include_dirs_str = listToString(torch_include_dirs)
common_cflags = []
if not is_standalone:
common_cflags += ['-DTORCH_API_INCLUDE_EXTENSION_H']
common_cflags += ['-std=c++17', '-fPIC']
common_cflags_str = listToString(common_cflags)
pch_cmd = format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags_str, torch_include_dirs_str, extra_cflags_str, extra_include_paths_str)
pch_sign = command_to_signature(pch_cmd)
if os.path.isfile(head_file_pch) is not True:
build_precompile_header(pch_cmd)
write_pch_signature_to_file(head_file_signature, pch_sign)
else:
b_same_sign = check_pch_signature_in_file(head_file_signature, pch_sign)
if b_same_sign is False:
build_precompile_header(pch_cmd)
write_pch_signature_to_file(head_file_signature, pch_sign)
def remove_extension_h_precompiler_headers() -> None:
def _remove_if_file_exists(path_file) -> None:
if os.path.exists(path_file):
os.remove(path_file)
head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
_remove_if_file_exists(head_file_pch)
_remove_if_file_exists(head_file_signature)
def load_inline(name,
cpp_sources,
cuda_sources=None,
sycl_sources=None,
functions=None,
extra_cflags=None,
extra_cuda_cflags=None,
extra_sycl_cflags=None,
extra_ldflags=None,
extra_include_paths=None,
build_directory=None,
verbose=False,
with_cuda=None,
with_sycl=None,
is_python_module=True,
with_pytorch_error_handling=True,
keep_intermediates=True,
use_pch=False,
no_implicit_headers=False):
r'''
Load a PyTorch C++ extension just-in-time (JIT) from string sources.
This function behaves exactly like :func:`load`, but takes its sources as
strings rather than filenames. These strings are stored to files in the
build directory, after which the behavior of :func:`load_inline` is
identical to :func:`load`.
See `the
tests <https://github.com/pytorch/pytorch/blob/master/test/test_cpp_extensions_jit.py>`_
for good examples of using this function.
Sources may omit two required parts of a typical non-inline C++ extension:
the necessary header includes, as well as the (pybind11) binding code. More
precisely, strings passed to ``cpp_sources`` are first concatenated into a
single ``.cpp`` file. This file is then prepended with ``#include
<torch/extension.h>``.
Furthermore, if the ``functions`` argument is supplied, bindings will be
automatically generated for each function specified. ``functions`` can
either be a list of function names, or a dictionary mapping from function
names to docstrings. If a list is given, the name of each function is used
as its docstring.
The sources in ``cuda_sources`` are concatenated into a separate ``.cu``
file and prepended with ``torch/types.h``, ``cuda.h`` and
``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled
separately, but ultimately linked into a single library. Note that no
bindings are generated for functions in ``cuda_sources`` per se. To bind
to a CUDA kernel, you must create a C++ function that calls it, and either
declare or define this C++ function in one of the ``cpp_sources`` (and
include its name in ``functions``).
The sources in ``sycl_sources`` are concatenated into a separate ``.sycl``
file and prepended with ``torch/types.h`` and ``sycl/sycl.hpp`` includes.
The ``.cpp`` and ``.sycl`` files are compiled separately, but ultimately
linked into a single library. Note that no bindings are generated for
functions in ``sycl_sources`` per se. To bind to a SYCL kernel, you must
create a C++ function that calls it, and either declare or define this
C++ function in one of the ``cpp_sources`` (and include its name
in ``functions``).
See :func:`load` for a description of arguments omitted below.
Args:
cpp_sources: A string, or list of strings, containing C++ source code.
cuda_sources: A string, or list of strings, containing CUDA source code.
sycl_sources: A string, or list of strings, containing SYCL source code.
functions: A list of function names for which to generate function
bindings. If a dictionary is given, it should map function names to
docstrings (which are otherwise just the function names).
with_cuda: Determines whether CUDA headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on whether ``cuda_sources`` is
provided. Set it to ``True`` to force CUDA headers
and libraries to be included.
with_sycl: Determines whether SYCL headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on whether ``sycl_sources`` is
provided. Set it to ``True`` to force SYCL headers
and libraries to be included.
with_pytorch_error_handling: Determines whether pytorch error and
warning macros are handled by pytorch instead of pybind. To do
this, each function ``foo`` is called via an intermediary ``_safe_foo``
function. This redirection might cause issues in obscure cases
of cpp. This flag should be set to ``False`` when this redirect
causes issues.
no_implicit_headers: If ``True``, skips automatically adding headers, most notably
``#include <torch/extension.h>`` and ``#include <torch/types.h>`` lines.
Use this option to improve cold start times when you
already include the necessary headers in your source code. Default: ``False``.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from torch.utils.cpp_extension import load_inline
>>> source = """
at::Tensor sin_add(at::Tensor x, at::Tensor y) {
return x.sin() + y.sin();
}
"""
>>> module = load_inline(name='inline_extension',
... cpp_sources=[source],
... functions=['sin_add'])
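>>> # Illustrative: ``functions`` may also map names to docstrings, e.g.
>>> # functions={'sin_add': 'adds the sines of two tensors'}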
.. note::
Since load_inline will just-in-time compile the source code, please ensure
that you have the right toolchains installed in the runtime. For example,
when loading C++, make sure a C++ compiler is available. If you're loading
a CUDA extension, you will need to additionally install the corresponding CUDA
toolkit (nvcc and any other dependencies your code has). Compiler toolchains
are not included when you install torch and must be additionally installed.
During compiling, by default, the Ninja backend uses #CPUS + 2 workers to build
the extension. This may use up too many resources on some systems. One
can control the number of workers by setting the `MAX_JOBS` environment
variable to a non-negative number.
'''
build_directory = build_directory or _get_build_directory(name, verbose)
if isinstance(cpp_sources, str):
cpp_sources = [cpp_sources]
cuda_sources = cuda_sources or []
if isinstance(cuda_sources, str):
cuda_sources = [cuda_sources]
sycl_sources = sycl_sources or []
if isinstance(sycl_sources, str):
sycl_sources = [sycl_sources]
if not no_implicit_headers:
cpp_sources.insert(0, '#include <torch/extension.h>')
if use_pch is True:
# Using PreCompile Header('torch/extension.h') to reduce compile time.
_check_and_build_extension_h_precompiler_headers(extra_cflags, extra_include_paths)
else:
remove_extension_h_precompiler_headers()
# If `functions` is supplied, we create the pybind11 bindings for the user.
# Here, `functions` is (or becomes, after some processing) a map from
# function names to function docstrings.
if functions is not None:
module_def = []
module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {')
if isinstance(functions, str):
functions = [functions]
if isinstance(functions, list):
# Make the function docstring the same as the function name.
functions = {f: f for f in functions}
elif not isinstance(functions, dict):
raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}")
for function_name, docstring in functions.items():
if with_pytorch_error_handling:
module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");')
else:
module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");')
module_def.append('}')
cpp_sources += module_def
cpp_source_path = os.path.join(build_directory, 'main.cpp')
_maybe_write(cpp_source_path, "\n".join(cpp_sources))
sources = [cpp_source_path]
if cuda_sources:
if not no_implicit_headers:
cuda_sources.insert(0, '#include <torch/types.h>')
cuda_sources.insert(1, '#include <cuda.h>')
cuda_sources.insert(2, '#include <cuda_runtime.h>')
cuda_source_path = os.path.join(build_directory, 'cuda.cu')
_maybe_write(cuda_source_path, "\n".join(cuda_sources))
sources.append(cuda_source_path)
if sycl_sources:
if not no_implicit_headers:
sycl_sources.insert(0, '#include <torch/types.h>')
sycl_sources.insert(1, '#include <sycl/sycl.hpp>')
sycl_source_path = os.path.join(build_directory, 'sycl.sycl')
_maybe_write(sycl_source_path, "\n".join(sycl_sources))
sources.append(sycl_source_path)
return _jit_compile(
name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory,
verbose,
with_cuda,
with_sycl,
is_python_module,
is_standalone=False,
keep_intermediates=keep_intermediates)
def _jit_compile(name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory: str,
verbose: bool,
with_cuda: bool | None,
with_sycl: bool | None,
is_python_module,
is_standalone,
keep_intermediates=True) -> types.ModuleType | str:
if is_python_module and is_standalone:
raise ValueError("`is_python_module` and `is_standalone` are mutually exclusive.")
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
with_cudnn = any('cudnn' in f for f in extra_ldflags or [])
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
old_version = JIT_EXTENSION_VERSIONER.get_version(name)
version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
name,
sources,
build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths],
build_directory=build_directory,
with_cuda=with_cuda,
with_sycl=with_sycl,
is_python_module=is_python_module,
is_standalone=is_standalone,
)
if version > 0:
if version != old_version and verbose:
logger.info('The input conditions for extension module %s have changed.', name)
logger.info('Bumping to version %s and re-building as %s_v%s...', version, name, version)
name = f'{name}_v{version}'
baton = FileBaton(os.path.join(build_directory, 'lock'))
if baton.try_acquire():
try:
if version != old_version:
from .hipify import hipify_python
from .hipify.hipify_python import GeneratedFileCleaner
with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx:
if IS_HIP_EXTENSION and (with_cuda or with_cudnn):
hipify_result = hipify_python.hipify(
project_directory=build_directory,
output_directory=build_directory,
header_include_dirs=(extra_include_paths if extra_include_paths is not None else []),
extra_files=[os.path.abspath(s) for s in sources],
ignores=[_join_rocm_home('*'), os.path.join(_TORCH_PATH, '*')], # no need to hipify ROCm or PyTorch headers
show_detailed=verbose,
show_progress=verbose,
is_pytorch_extension=True,
clean_ctx=clean_ctx
)
hipified_sources = set()
for source in sources:
s_abs = os.path.abspath(source)
hipified_sources.add(hipify_result[s_abs].hipified_path if s_abs in hipify_result else s_abs)
sources = list(hipified_sources)
_write_ninja_file_and_build_library(
name=name,
sources=sources,
extra_cflags=extra_cflags or [],
extra_cuda_cflags=extra_cuda_cflags or [],
extra_sycl_cflags=extra_sycl_cflags or [],
extra_ldflags=extra_ldflags or [],
extra_include_paths=extra_include_paths or [],
build_directory=build_directory,
verbose=verbose,
with_cuda=with_cuda,
with_sycl=with_sycl,
is_standalone=is_standalone)
elif verbose:
logger.debug('No modifications detected for re-loaded extension module %s, skipping build step...', name)
finally:
baton.release()
else:
baton.wait()
if verbose:
logger.info('Loading extension module %s...', name)
if is_standalone:
return _get_exec_path(name, build_directory)
return _import_module_from_library(name, build_directory, is_python_module)
def _get_hipcc_path():
if IS_WINDOWS:
# mypy thinks ROCM_VERSION is None but it will never be None here
hipcc_exe = 'hipcc.exe' if ROCM_VERSION >= (6, 4) else 'hipcc.bat' # type: ignore[operator]
return _join_rocm_home('bin', hipcc_exe)
else:
return _join_rocm_home('bin', 'hipcc')
def _write_ninja_file_and_compile_objects(
sources: list[str],
objects,
cflags,
post_cflags,
cuda_cflags,
cuda_post_cflags,
cuda_dlink_post_cflags,
sycl_cflags,
sycl_post_cflags,
sycl_dlink_post_cflags,
build_directory: str,
verbose: bool,
with_cuda: bool | None,
with_sycl: bool | None) -> None:
verify_ninja_availability()
compiler = get_cxx_compiler()
get_compiler_abi_compatibility_and_version(compiler)
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
build_file_path = os.path.join(build_directory, 'build.ninja')
if verbose:
logger.debug('Emitting ninja build file %s...', build_file_path)
# Create build_directory if it does not exist
if not os.path.exists(build_directory):
if verbose:
logger.debug('Creating directory %s...', build_directory)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
_write_ninja_file(
path=build_file_path,
cflags=cflags,
post_cflags=post_cflags,
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=sycl_cflags,
sycl_post_cflags=sycl_post_cflags,
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
sources=sources,
objects=objects,
ldflags=None,
library_target=None,
with_cuda=with_cuda,
with_sycl=with_sycl)
if verbose:
logger.info('Compiling objects...')
_run_ninja_build(
build_directory,
verbose,
# It would be better if we could tell users the name of the extension
# that failed to build but there isn't a good way to get it here.
error_prefix='Error compiling objects for extension')
def _write_ninja_file_and_build_library(
name,
sources: list[str],
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory: str,
verbose: bool,
with_cuda: bool | None,
with_sycl: bool | None,
is_standalone: bool = False) -> None:
verify_ninja_availability()
compiler = get_cxx_compiler()
get_compiler_abi_compatibility_and_version(compiler)
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
extra_ldflags = _prepare_ldflags(
extra_ldflags or [],
with_cuda,
verbose,
is_standalone)
build_file_path = os.path.join(build_directory, 'build.ninja')
if verbose:
logger.debug('Emitting ninja build file %s...', build_file_path)
# Create build_directory if it does not exist
if not os.path.exists(build_directory):
if verbose:
logger.debug('Creating directory %s...', build_directory)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
# NOTE: Emitting a new ninja build file does not cause re-compilation if
# the sources did not change, so it's ok to re-emit (and it's fast).
_write_ninja_file_to_build_library(
path=build_file_path,
name=name,
sources=sources,
extra_cflags=extra_cflags or [],
extra_cuda_cflags=extra_cuda_cflags or [],
extra_sycl_cflags=extra_sycl_cflags or [],
extra_ldflags=extra_ldflags or [],
extra_include_paths=extra_include_paths or [],
with_cuda=with_cuda,
with_sycl=with_sycl,
is_standalone=is_standalone)
if verbose:
logger.info('Building extension module %s...', name)
_run_ninja_build(
build_directory,
verbose,
error_prefix=f"Error building extension '{name}'")
def is_ninja_available() -> bool:
"""Return ``True`` if the `ninja <https://ninja-build.org/>`_ build system is available on the system, ``False`` otherwise."""
try:
subprocess.check_output(['ninja', '--version'])
except Exception:
return False
else:
return True
def verify_ninja_availability() -> None:
"""Raise ``RuntimeError`` if `ninja <https://ninja-build.org/>`_ build system is not available on the system, does nothing otherwise."""
if not is_ninja_available():
raise RuntimeError("Ninja is required to load C++ extensions (pip install ninja to get it)")
def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone):
if IS_WINDOWS:
python_lib_path = os.path.join(sys.base_exec_prefix, 'libs')
extra_ldflags.append('c10.lib')
if with_cuda:
extra_ldflags.append('c10_hip.lib' if IS_HIP_EXTENSION else 'c10_cuda.lib')
extra_ldflags.append('torch_cpu.lib')
if with_cuda:
extra_ldflags.append('torch_hip.lib' if IS_HIP_EXTENSION else 'torch_cuda.lib')
# /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it.
# Related issue: https://github.com/pytorch/pytorch/issues/31611
extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ')
extra_ldflags.append('torch.lib')
extra_ldflags.append(f'/LIBPATH:{TORCH_LIB_PATH}')
if not is_standalone:
extra_ldflags.append('torch_python.lib')
extra_ldflags.append(f'/LIBPATH:{python_lib_path}')
else:
extra_ldflags.append(f'-L{TORCH_LIB_PATH}')
extra_ldflags.append('-lc10')
if with_cuda:
extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda')
extra_ldflags.append('-ltorch_cpu')
if with_cuda:
extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda')
extra_ldflags.append('-ltorch')
if not is_standalone:
extra_ldflags.append('-ltorch_python')
if is_standalone:
extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}")
if with_cuda:
if verbose:
logger.info('Detected CUDA files, patching ldflags')
if IS_WINDOWS and not IS_HIP_EXTENSION:
extra_ldflags.append(f'/LIBPATH:{_join_cuda_home("lib", "x64")}')
extra_ldflags.append('cudart.lib')
if CUDNN_HOME is not None:
extra_ldflags.append(f'/LIBPATH:{os.path.join(CUDNN_HOME, "lib", "x64")}')
elif not IS_HIP_EXTENSION:
extra_lib_dir = "lib64"
if (not os.path.exists(_join_cuda_home(extra_lib_dir)) and
os.path.exists(_join_cuda_home("lib"))):
# 64-bit CUDA may be installed in "lib"
# Note that it's also possible both don't exist (see _find_cuda_home) - in that case we stay with "lib64"
extra_lib_dir = "lib"
extra_ldflags.append(f'-L{_join_cuda_home(extra_lib_dir)}')
extra_ldflags.append('-lcudart')
if CUDNN_HOME is not None:
extra_ldflags.append(f'-L{os.path.join(CUDNN_HOME, "lib64")}')
elif IS_HIP_EXTENSION:
if IS_WINDOWS:
extra_ldflags.append(f'/LIBPATH:{_join_rocm_home("lib")}')
extra_ldflags.append('amdhip64.lib')
else:
extra_ldflags.append(f'-L{_join_rocm_home("lib")}')
extra_ldflags.append('-lamdhip64')
return extra_ldflags
def _get_cuda_arch_flags(cflags: list[str] | None = None) -> list[str]:
"""
Determine CUDA arch flags to use.
For an arch, say "6.1", the added compile flag will be
``-gencode=arch=compute_61,code=sm_61``.
For an added "+PTX", an additional
``-gencode=arch=compute_xx,code=compute_xx`` is added.
See select_compute_arch.cmake for corresponding named and supported arches
when building with CMake.
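    Example (illustrative only; the exact flags depend on ``TORCH_CUDA_ARCH_LIST``
    and on the GPUs visible at build time)::

        >>> os.environ['TORCH_CUDA_ARCH_LIST'] = '8.0;8.6+PTX'
        >>> _get_cuda_arch_flags()  # doctest: +SKIP
        ['-gencode=arch=compute_80,code=sm_80',
         '-gencode=arch=compute_86,code=compute_86',
         '-gencode=arch=compute_86,code=sm_86']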
"""
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`)
if cflags is not None:
for flag in cflags:
if 'TORCH_EXTENSION_NAME' in flag:
continue
if 'arch' in flag:
return []
# Note: keep combined names ("arch1+arch2") above single names, otherwise
# string replacement may not do the right thing
named_arches = collections.OrderedDict([
('Kepler+Tesla', '3.7'),
('Kepler', '3.5+PTX'),
('Maxwell+Tegra', '5.3'),
('Maxwell', '5.0;5.2+PTX'),
('Pascal', '6.0;6.1+PTX'),
('Volta+Tegra', '7.2'),
('Volta', '7.0+PTX'),
('Turing', '7.5+PTX'),
('Ampere+Tegra', '8.7'),
('Ampere', '8.0;8.6+PTX'),
('Ada', '8.9+PTX'),
('Hopper', '9.0+PTX'),
('Blackwell+Tegra', '11.0'),
('Blackwell', '10.0;10.3;12.0;12.1+PTX'),
])
supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2',
'7.0', '7.2', '7.5', '8.0', '8.6', '8.7', '8.9', '9.0', '9.0a',
'10.0', '10.0a', '11.0', '11.0a', '10.3', '10.3a', '12.0',
'12.0a', '12.1', '12.1a']
valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches]
# The default is sm_30 for CUDA 9.x and 10.x
# First check for an env var (same as used by the main setup.py)
# Can be one or more architectures, e.g. "6.1" or "3.5;5.2;6.0;6.1;7.0+PTX"
# See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
# If not given or set as native, determine what's best for the GPU / CUDA version that can be found
if not _arch_list or _arch_list == "native":
arch_list = []
# the assumption is that the extension should run on any of the currently visible cards,
# which could be of different types - therefore all archs for visible cards should be included
for i in range(torch.cuda.device_count()):
capability = torch.cuda.get_device_capability(i)
supported_sm = [int("".join(re.findall(r"\d+", arch.split('_')[1])))
for arch in torch.cuda.get_arch_list() if 'sm_' in arch]
max_supported_sm = max((sm // 10, sm % 10) for sm in supported_sm)
# Capability of the device may be higher than what's supported by the user's
# NVCC, causing compilation error. User's NVCC is expected to match the one
# used to build pytorch, so we use the maximum supported capability of pytorch
# to clamp the capability.
capability = min(max_supported_sm, capability)
arch = f'{capability[0]}.{capability[1]}'
if arch not in arch_list:
arch_list.append(arch)
arch_list = sorted(arch_list)
arch_list[-1] += '+PTX'
if not _arch_list:
# Only log on rank 0 in distributed settings to avoid spam
if not torch.distributed.is_available() or not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
arch_list_str = ';'.join(arch_list)
logger.debug(
"TORCH_CUDA_ARCH_LIST is not set, using TORCH_CUDA_ARCH_LIST='%s' "
"for visible GPU architectures. Set os.environ['TORCH_CUDA_ARCH_LIST'] to override.",
arch_list_str)
else:
# Deal with lists that are ' ' separated (only deal with ';' after)
_arch_list = _arch_list.replace(' ', ';')
# Expand named arches
for named_arch, archival in named_arches.items():
_arch_list = _arch_list.replace(named_arch, archival)
arch_list = _arch_list.split(';')
flags = []
for arch in arch_list:
if arch not in valid_arch_strings:
raise ValueError(f"Unknown CUDA arch ({arch}) or GPU not supported")
else:
# Handle both single and double-digit architecture versions
version = arch.split('+')[0] # Remove "+PTX" if present
major, minor = version.split('.')
num = f"{major}{minor}"
flags.append(f'-gencode=arch=compute_{num},code=sm_{num}')
if arch.endswith('+PTX'):
flags.append(f'-gencode=arch=compute_{num},code=compute_{num}')
return sorted(set(flags))
def _get_rocm_arch_flags(cflags: list[str] | None = None) -> list[str]:
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`). If user also specified -fgpu-rdc or -fno-gpu-rdc, we
# assume they know what they're doing. Otherwise, we force -fno-gpu-rdc default.
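    # Illustrative example (the arch list below is hypothetical): with
    # PYTORCH_ROCM_ARCH="gfx90a;gfx942" and no user-provided arch flags, this
    # returns ['--offload-arch=gfx90a', '--offload-arch=gfx942', '-fno-gpu-rdc'].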
has_gpu_rdc_flag = False
if cflags is not None:
has_custom_flags = False
for flag in cflags:
if 'amdgpu-target' in flag or 'offload-arch' in flag:
has_custom_flags = True
elif 'gpu-rdc' in flag:
has_gpu_rdc_flag = True
if has_custom_flags:
return [] if has_gpu_rdc_flag else ['-fno-gpu-rdc']
# Use same defaults as used for building PyTorch
# Allow env var to override, just like during initial cmake build.
_archs = os.environ.get('PYTORCH_ROCM_ARCH', None)
if not _archs:
archFlags = torch._C._cuda_getArchFlags()
if archFlags:
archs = archFlags.split()
else:
archs = []
else:
archs = _archs.replace(' ', ';').split(';')
flags = [f'--offload-arch={arch}' for arch in archs]
flags += [] if has_gpu_rdc_flag else ['-fno-gpu-rdc']
return flags
def _get_build_directory(name: str, verbose: bool) -> str:
"""
Get the build directory for an extension.
Args:
name: The name of the extension
verbose: Whether to print verbose information
Returns:
The path to the build directory
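    Example (illustrative; the actual root depends on ``TORCH_EXTENSIONS_DIR``,
    the Python version and the CUDA version)::

        >>> _get_build_directory('my_ext', verbose=False)  # doctest: +SKIP
        '/home/user/.cache/torch_extensions/py310_cu121/my_ext'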
"""
root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR')
if root_extensions_directory is None:
root_extensions_directory = get_default_build_root()
cu_str = ('cpu' if torch.version.cuda is None else
f'cu{torch.version.cuda.replace(".", "")}')
python_version = f'py{sys.version_info.major}{sys.version_info.minor}{getattr(sys, "abiflags", "")}'
build_folder = f'{python_version}_{cu_str}'
root_extensions_directory = os.path.join(
root_extensions_directory, build_folder)
if verbose:
logger.info('Using %s as PyTorch extensions root...', root_extensions_directory)
build_directory = os.path.join(root_extensions_directory, name)
if not os.path.exists(build_directory):
if verbose:
logger.debug('Creating extension directory %s...', build_directory)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
return build_directory
def _get_num_workers(verbose: bool) -> int | None:
max_jobs = os.environ.get('MAX_JOBS')
if max_jobs is not None and max_jobs.isdigit():
if verbose:
logger.debug('Using envvar MAX_JOBS (%s) as the number of workers...', max_jobs)
return int(max_jobs)
if verbose:
logger.info(
'Allowing ninja to set a default number of workers... '
'(overridable by setting the environment variable MAX_JOBS=N)'
)
return None
def _get_vc_env(vc_arch: str) -> dict[str, str]:
try:
from setuptools import distutils # type: ignore[attr-defined]
# pyrefly: ignore [missing-attribute]
return distutils._msvccompiler._get_vc_env(vc_arch)
except AttributeError:
try:
from setuptools._distutils import _msvccompiler
return _msvccompiler._get_vc_env(vc_arch) # type: ignore[attr-defined]
except AttributeError:
from setuptools._distutils.compilers.C import msvc
return msvc._get_vc_env(vc_arch) # type: ignore[attr-defined]
def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None:
command = ['ninja', '-v']
num_workers = _get_num_workers(verbose)
if num_workers is not None:
command.extend(['-j', str(num_workers)])
env = os.environ.copy()
# Try to activate the vc env for the users
if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env:
from setuptools import distutils # type: ignore[attr-defined]
plat_name = distutils.util.get_platform()
plat_spec = PLAT_TO_VCVARS[plat_name]
vc_env = {k.upper(): v for k, v in _get_vc_env(plat_spec).items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
env = vc_env
try:
sys.stdout.flush()
sys.stderr.flush()
# Warning: don't pass stdout=None to subprocess.run to get output.
# subprocess.run assumes that sys.__stdout__ has not been modified and
# attempts to write to it by default. However, when we call _run_ninja_build
# from ahead-of-time cpp extensions, the following happens:
# 1) If the stdout encoding is not utf-8, setuptools detaches __stdout__.
# https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110
# (it probably shouldn't do this)
# 2) subprocess.run (on POSIX, with no stdout override) relies on
# __stdout__ not being detached:
# https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214
# To work around this, we pass in the fileno directly and hope that
# it is valid.
stdout_fileno = 1
subprocess.run(
command,
shell=IS_WINDOWS and IS_HIP_EXTENSION,
stdout=stdout_fileno if verbose else subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=build_directory,
check=True,
env=env)
except subprocess.CalledProcessError as e:
# Python 2 and 3 compatible way of getting the error object.
_, error, _ = sys.exc_info()
# error.output contains the stdout and stderr of the build attempt.
message = error_prefix
# `error` is a CalledProcessError (which has an `output`) attribute, but
# mypy thinks it's Optional[BaseException] and doesn't narrow
if hasattr(error, 'output') and error.output: # type: ignore[union-attr]
message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr]
raise RuntimeError(message) from e
def _get_exec_path(module_name, path):
if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv('PATH', '').split(';'):
torch_lib_in_path = any(
os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH)
for p in os.getenv('PATH', '').split(';')
)
if not torch_lib_in_path:
os.environ['PATH'] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}"
return os.path.join(path, f'{module_name}{EXEC_EXT}')
def _import_module_from_library(module_name, path, is_python_module):
filepath = os.path.join(path, f"{module_name}{LIB_EXT}")
if is_python_module:
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
spec = importlib.util.spec_from_file_location(module_name, filepath)
if spec is None:
raise AssertionError(f"Failed to create spec for module {module_name} at {filepath}")
module = importlib.util.module_from_spec(spec)
if not isinstance(spec.loader, importlib.abc.Loader):
raise AssertionError("spec.loader is not a valid importlib Loader")
spec.loader.exec_module(module)
return module
else:
torch.ops.load_library(filepath)
return filepath
def _write_ninja_file_to_build_library(path,
name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
with_cuda,
with_sycl,
is_standalone) -> None:
extra_cflags = [flag.strip() for flag in extra_cflags]
extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags]
extra_sycl_cflags = [flag.strip() for flag in extra_sycl_cflags]
extra_ldflags = [flag.strip() for flag in extra_ldflags]
extra_include_paths = [flag.strip() for flag in extra_include_paths]
# Turn into absolute paths so we can emit them into the ninja build
# file wherever it is.
user_includes = [os.path.abspath(file) for file in extra_include_paths]
# include_paths() gives us the location of torch/extension.h
# TODO generalize with_cuda as specific device type.
if with_cuda:
system_includes = include_paths("cuda")
else:
system_includes = include_paths("cpu")
# sysconfig.get_path('include') gives us the location of Python.h
    # Explicitly specify the 'posix_prefix' scheme on non-Windows platforms to work around an error on some macOS
    # installations where the default `get_path` points to a non-existent `/Library/Python/M.m/include` folder
python_include_path = sysconfig.get_path('include', scheme='nt' if IS_WINDOWS else 'posix_prefix')
if python_include_path is not None:
system_includes.append(python_include_path)
common_cflags = []
if not is_standalone:
common_cflags.append(f'-DTORCH_EXTENSION_NAME={name}')
common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H')
    # Windows does not understand `-isystem`; flags are quoted later via `_nt_quote_args`.
if IS_WINDOWS:
common_cflags += [f'-I{include}' for include in user_includes + system_includes]
else:
common_cflags += [f'-I{shlex.quote(include)}' for include in user_includes]
common_cflags += [f'-isystem {shlex.quote(include)}' for include in system_includes]
if IS_WINDOWS:
COMMON_HIP_FLAGS.extend(['-fms-runtime-lib=dll'])
cflags = common_cflags + ['/std:c++17'] + extra_cflags
cflags += COMMON_MSVC_FLAGS + (COMMON_HIP_FLAGS if IS_HIP_EXTENSION else [])
cflags = _nt_quote_args(cflags)
else:
cflags = common_cflags + ['-fPIC', '-std=c++17'] + extra_cflags
if with_cuda and IS_HIP_EXTENSION:
cuda_flags = ['-DWITH_HIP'] + common_cflags + extra_cflags + COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS
cuda_flags = cuda_flags + ['-std=c++17']
cuda_flags += _get_rocm_arch_flags(cuda_flags)
cuda_flags += extra_cuda_cflags
if IS_WINDOWS:
cuda_flags = _nt_quote_args(cuda_flags)
elif with_cuda:
cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags(extra_cuda_cflags)
if IS_WINDOWS:
for flag in COMMON_MSVC_FLAGS:
cuda_flags = ['-Xcompiler', flag] + cuda_flags
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags
cuda_flags = cuda_flags + ['-std=c++17']
cuda_flags = _nt_quote_args(cuda_flags)
cuda_flags += _nt_quote_args(extra_cuda_cflags)
else:
cuda_flags += ['--compiler-options', "'-fPIC'"]
cuda_flags += extra_cuda_cflags
if not any(flag.startswith('-std=') for flag in cuda_flags):
cuda_flags.append('-std=c++17')
cc_env = os.getenv("CC")
if cc_env is not None:
cuda_flags = ['-ccbin', cc_env] + cuda_flags
else:
cuda_flags = None
if with_sycl:
sycl_cflags = cflags + _COMMON_SYCL_FLAGS
sycl_cflags += extra_sycl_cflags
_append_sycl_targets_if_missing(sycl_cflags)
_append_sycl_std_if_no_std_present(sycl_cflags)
host_cflags = cflags
        # Escape quoted arguments so they pass through the SYCL compiler intact.
icpx_version = _get_icpx_version()
if int(icpx_version) < 20250200:
host_cflags = [item.replace('\\"', '\\\\"') for item in host_cflags]
host_cflags = ' '.join(host_cflags)
sycl_cflags += _wrap_sycl_host_flags(host_cflags)
sycl_dlink_post_cflags = _SYCL_DLINK_FLAGS.copy()
sycl_dlink_post_cflags += _get_sycl_device_flags(sycl_cflags)
else:
sycl_cflags = None
sycl_dlink_post_cflags = None
def object_file_path(source_file: str) -> str:
# '/path/to/file.cpp' -> 'file'
file_name = os.path.splitext(os.path.basename(source_file))[0]
if _is_cuda_file(source_file) and with_cuda:
# Use a different object filename in case a C++ and CUDA file have
# the same filename but different extension (.cpp vs. .cu).
target = f'{file_name}.cuda.o'
elif _is_sycl_file(source_file) and with_sycl:
target = f'{file_name}.sycl.o'
else:
target = f'{file_name}.o'
return target
objects = [object_file_path(src) for src in sources]
ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags
# The darwin linker needs explicit consent to ignore unresolved symbols.
if IS_MACOS:
ldflags.append('-undefined dynamic_lookup')
elif IS_WINDOWS:
ldflags = _nt_quote_args(ldflags)
ext = EXEC_EXT if is_standalone else LIB_EXT
library_target = f'{name}{ext}'
_write_ninja_file(
path=path,
cflags=cflags,
post_cflags=None,
cuda_cflags=cuda_flags,
cuda_post_cflags=None,
cuda_dlink_post_cflags=None,
sycl_cflags=sycl_cflags,
sycl_post_cflags=[],
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
sources=sources,
objects=objects,
ldflags=ldflags,
library_target=library_target,
with_cuda=with_cuda,
with_sycl=with_sycl)
def _write_ninja_file(path,
cflags,
post_cflags,
cuda_cflags,
cuda_post_cflags,
cuda_dlink_post_cflags,
sycl_cflags,
sycl_post_cflags,
sycl_dlink_post_cflags,
sources,
objects,
ldflags,
library_target,
with_cuda,
with_sycl) -> None:
r"""Write a ninja file that does the desired compiling and linking.
`path`: Where to write this file
`cflags`: list of flags to pass to $cxx. Can be None.
`post_cflags`: list of flags to append to the $cxx invocation. Can be None.
`cuda_cflags`: list of flags to pass to $nvcc. Can be None.
`cuda_post_cflags`: list of flags to append to the $nvcc invocation. Can be None.
`cuda_dlink_post_cflags`: list of flags to append to the $nvcc device code link invocation. Can be None.
`sycl_cflags`: list of flags to pass to SYCL compiler. Can be None.
`sycl_post_cflags`: list of flags to append to the SYCL compiler invocation. Can be None.
`sycl_dlink_post_cflags`: list of flags to append to the SYCL compiler device code link invocation. Can be None.
`sources`: list of paths to source files
`objects`: list of desired paths to objects, one per source.
`ldflags`: list of flags to pass to linker. Can be None.
`library_target`: Name of the output library. Can be None; in that case,
we do no linking.
    `with_cuda`: If we should be compiling with CUDA.
    `with_sycl`: If we should be compiling with SYCL.
"""
def sanitize_flags(flags):
if flags is None:
return []
else:
return [flag.strip() for flag in flags]
cflags = sanitize_flags(cflags)
post_cflags = sanitize_flags(post_cflags)
cuda_cflags = sanitize_flags(cuda_cflags)
cuda_post_cflags = sanitize_flags(cuda_post_cflags)
cuda_dlink_post_cflags = sanitize_flags(cuda_dlink_post_cflags)
sycl_cflags = sanitize_flags(sycl_cflags)
sycl_post_cflags = sanitize_flags(sycl_post_cflags)
sycl_dlink_post_cflags = sanitize_flags(sycl_dlink_post_cflags)
ldflags = sanitize_flags(ldflags)
# Sanity checks...
if len(sources) != len(objects):
raise AssertionError("sources and objects lists must be the same length")
if len(sources) == 0:
raise AssertionError("At least one source is required to build a library")
compiler = get_cxx_compiler()
# Version 1.3 is required for the `deps` directive.
config = ['ninja_required_version = 1.3']
config.append(f'cxx = {compiler}')
if with_cuda or cuda_dlink_post_cflags:
if "PYTORCH_NVCC" in os.environ:
nvcc = os.getenv("PYTORCH_NVCC") # user can set nvcc compiler with ccache using the environment variable here
else:
if IS_HIP_EXTENSION:
nvcc = _get_hipcc_path()
else:
nvcc = _join_cuda_home('bin', 'nvcc')
config.append(f'nvcc = {nvcc}')
if with_sycl or sycl_dlink_post_cflags:
sycl = 'icx' if IS_WINDOWS else 'icpx'
config.append(f'sycl = {sycl}')
if IS_HIP_EXTENSION:
post_cflags = COMMON_HIP_FLAGS + post_cflags
flags = [f'cflags = {" ".join(cflags)}']
flags.append(f'post_cflags = {" ".join(post_cflags)}')
if with_cuda:
flags.append(f'cuda_cflags = {" ".join(cuda_cflags)}')
flags.append(f'cuda_post_cflags = {" ".join(cuda_post_cflags)}')
flags.append(f'cuda_dlink_post_cflags = {" ".join(cuda_dlink_post_cflags)}')
if with_sycl:
flags.append(f'sycl_cflags = {" ".join(sycl_cflags)}')
flags.append(f'sycl_post_cflags = {" ".join(sycl_post_cflags)}')
flags.append(f'sycl_dlink_post_cflags = {" ".join(sycl_dlink_post_cflags)}')
flags.append(f'ldflags = {" ".join(ldflags)}')
# Turn into absolute paths so we can emit them into the ninja build
# file wherever it is.
sources = [os.path.abspath(file) for file in sources]
# See https://ninja-build.org/build.ninja.html for reference.
compile_rule = ['rule compile']
if IS_WINDOWS:
compiler_name = "$cxx" if IS_HIP_EXTENSION else "cl"
compile_rule.append(
f' command = {compiler_name} '
'/showIncludes $cflags -c $in /Fo$out $post_cflags' # codespell:ignore
)
if not IS_HIP_EXTENSION:
compile_rule.append(' deps = msvc')
else:
compile_rule.append(
' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags')
compile_rule.append(' depfile = $out.d')
compile_rule.append(' deps = gcc')
if with_cuda:
cuda_compile_rule = ['rule cuda_compile']
nvcc_gendeps = ''
        # --generate-dependencies-with-compile is not supported by ROCm.
        # It is also not supported by sccache, which may increase build times.
if torch.version.cuda is not None and os.getenv('TORCH_EXTENSION_SKIP_NVCC_GEN_DEPENDENCIES', '0') != '1':
cuda_compile_rule.append(' depfile = $out.d')
cuda_compile_rule.append(' deps = gcc')
# Note: non-system deps with nvcc are only supported
# on Linux so use --generate-dependencies-with-compile
# to make this work on Windows too.
nvcc_gendeps = '--generate-dependencies-with-compile --dependency-output $out.d'
cuda_compile_rule.append(
f' command = $nvcc {nvcc_gendeps} $cuda_cflags -c $in -o $out $cuda_post_cflags')
if with_sycl:
sycl_compile_rule = ['rule sycl_compile']
        # The SYCL compiler does not recognize the .sycl extension automatically,
        # so we pass '-x c++' to explicitly tell the compiler the file format.
sycl_compile_rule.append(
' command = $sycl $sycl_cflags -c -x c++ $in -o $out $sycl_post_cflags')
# Emit one build rule per source to enable incremental build.
build = []
for source_file, object_file in zip(sources, objects, strict=True):
is_cuda_source = _is_cuda_file(source_file) and with_cuda
is_sycl_source = _is_sycl_file(source_file) and with_sycl
if is_cuda_source:
rule = 'cuda_compile'
elif is_sycl_source:
rule = 'sycl_compile'
else:
rule = 'compile'
if IS_WINDOWS:
source_file = source_file.replace(':', '$:')
object_file = object_file.replace(':', '$:')
source_file = source_file.replace(" ", "$ ")
object_file = object_file.replace(" ", "$ ")
build.append(f'build {object_file}: {rule} {source_file}')
if cuda_dlink_post_cflags:
cuda_devlink_out = os.path.join(os.path.dirname(objects[0]), 'dlink.o')
cuda_devlink_rule = ['rule cuda_devlink']
cuda_devlink_rule.append(' command = $nvcc $in -o $out $cuda_dlink_post_cflags')
cuda_devlink = [f'build {cuda_devlink_out}: cuda_devlink {" ".join(objects)}']
objects += [cuda_devlink_out]
else:
cuda_devlink_rule, cuda_devlink = [], []
if sycl_dlink_post_cflags:
sycl_devlink_out = os.path.join(os.path.dirname(objects[0]), 'sycl_dlink.o')
sycl_devlink_rule = ['rule sycl_devlink']
sycl_devlink_rule.append(' command = $sycl $in -o $out $sycl_dlink_post_cflags')
sycl_devlink = [f'build {sycl_devlink_out}: sycl_devlink {" ".join(objects)}']
objects += [sycl_devlink_out]
else:
sycl_devlink_rule, sycl_devlink = [], []
if library_target is not None:
link_rule = ['rule link']
if IS_WINDOWS:
cl_paths = subprocess.check_output(['where',
'cl']).decode(*SUBPROCESS_DECODE_ARGS).split('\r\n')
if len(cl_paths) >= 1:
cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:')
else:
raise RuntimeError("MSVC is required to load C++ extensions")
link_rule.append(f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out')
else:
link_rule.append(' command = $cxx $in $ldflags -o $out')
link = [f'build {library_target}: link {" ".join(objects)}']
default = [f'default {library_target}']
else:
link_rule, link, default = [], [], []
# 'Blocks' should be separated by newlines, for visual benefit.
blocks = [config, flags, compile_rule]
if with_cuda:
blocks.append(cuda_compile_rule) # type: ignore[possibly-undefined]
if with_sycl:
blocks.append(sycl_compile_rule) # type: ignore[possibly-undefined]
blocks += [cuda_devlink_rule, sycl_devlink_rule, link_rule, build, cuda_devlink, sycl_devlink, link, default]
content = "\n\n".join("\n".join(b) for b in blocks)
    # Ninja requires a newline at the end of the .ninja file
content += "\n"
_maybe_write(path, content)
def _join_cuda_home(*paths) -> str:
"""
    Join paths with CUDA_HOME, or raise an error if CUDA_HOME is not set.
This is basically a lazy way of raising an error for missing $CUDA_HOME
only once we need to get any CUDA-specific path.
"""
if CUDA_HOME is None:
raise OSError('CUDA_HOME environment variable is not set. '
'Please set it to your CUDA install root.')
return os.path.join(CUDA_HOME, *paths)
def _is_cuda_file(path: str) -> bool:
valid_ext = ['.cu', '.cuh']
if IS_HIP_EXTENSION:
valid_ext.append('.hip')
return os.path.splitext(path)[1] in valid_ext
def _is_sycl_file(path: str) -> bool:
valid_ext = ['.sycl']
return os.path.splitext(path)[1] in valid_ext
|
BuildExtension
|
python
|
sympy__sympy
|
sympy/stats/crv_types.py
|
{
"start": 108540,
"end": 111426
}
|
class ____(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(a, b, c):
        _value_check(b > a, "Parameter b must be greater than %s. b = %s" % (a, b))
_value_check((a <= c, c <= b),
"Parameter c must be in range [%s, %s]. c = %s"%(a, b, c))
def pdf(self, x):
a, b, c = self.a, self.b, self.c
return Piecewise(
(2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)),
(2/(b - a), Eq(x, c)),
(2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)),
(S.Zero, True))
def _characteristic_function(self, t):
a, b, c = self.a, self.b, self.c
return -2 *((b-c) * exp(I*a*t) - (b-a) * exp(I*c*t) + (c-a) * exp(I*b*t)) / ((b-a)*(c-a)*(b-c)*t**2)
def _moment_generating_function(self, t):
a, b, c = self.a, self.b, self.c
return 2 * ((b - c) * exp(a * t) - (b - a) * exp(c * t) + (c - a) * exp(b * t)) / (
(b - a) * (c - a) * (b - c) * t ** 2)
def Triangular(name, a, b, c):
r"""
Create a continuous random variable with a triangular distribution.
Explanation
===========
The density of the triangular distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
\frac{2}{b-a} & \mathrm{for\ } x = c, \\
\frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
0 & \mathrm{for\ } b < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a \in \left(-\infty, \infty\right)`
b : Real number, :math:`a < b`
c : Real number, :math:`a \leq c \leq b`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Triangular, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> z = Symbol("z")
>>> X = Triangular("x", a,b,c)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|----------------- for And(a <= z, c > z)
|(-a + b)*(-a + c)
|
| 2
| ------ for c = z
< -a + b
|
| 2*b - 2*z
|---------------- for And(b >= z, c < z)
|(-a + b)*(b - c)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_distribution
.. [2] https://mathworld.wolfram.com/TriangularDistribution.html
"""
return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
|
TriangularDistribution
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/postgresql/test_types.py
|
{
"start": 187650,
"end": 187737
}
|
class ____(_DateTimeRangeTests, _RangeTypeRoundTrip):
pass
|
DateTimeRangeRoundTripTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/strings_ops/string_upper_op_test.py
|
{
"start": 838,
"end": 1908
}
|
class ____(test.TestCase):
"""Test cases for tf.strings.upper."""
def test_string_upper(self):
strings = ["Pigs on The Wing", "aNimals"]
with self.cached_session():
output = string_ops.string_upper(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [b"PIGS ON THE WING", b"ANIMALS"])
def test_string_upper_2d(self):
strings = [["pigS on THE wIng", "aniMals"], [" hello ", "\n\tWorld! \r \n"]]
with self.cached_session():
output = string_ops.string_upper(strings)
output = self.evaluate(output)
self.assertAllEqual(output, [[b"PIGS ON THE WING", b"ANIMALS"],
[b" HELLO ", b"\n\tWORLD! \r \n"]])
def test_string_upper_unicode(self):
strings = [["óósschloë"]]
with self.cached_session():
output = string_ops.string_upper(strings, encoding="utf-8")
output = self.evaluate(output)
# output: "ÓÓSSCHLOË"
self.assertAllEqual(output, [[b"\xc3\x93\xc3\x93SSCHLO\xc3\x8b"]])
if __name__ == "__main__":
test.main()
|
StringUpperOpTest
|
python
|
skorch-dev__skorch
|
skorch/callbacks/training.py
|
{
"start": 30625,
"end": 33212
}
|
class ____(Callback):
"""Sets the input dimension of the PyTorch module to the input dimension
of the training data. By default the last dimension of X (``X.shape[-1]``)
will be used.
This can be of use when the shape of X is not known beforehand,
e.g. when using a skorch model within an sklearn pipeline and
grid-searching feature transformers, or using feature selection
methods.
Basic usage:
>>> class MyModule(torch.nn.Module):
... def __init__(self, input_dim=1):
... super().__init__()
... self.layer = torch.nn.Linear(input_dim, 3)
... # ...
    >>> X1 = np.zeros((100, 5))
    >>> X2 = np.zeros((100, 3))
>>> y = np.zeros(100)
>>> net = NeuralNetClassifier(MyModule, callbacks=[InputShapeSetter()])
>>> net.fit(X1, y) # self.module_.layer.in_features == 5
>>> net.fit(X2, y) # self.module_.layer.in_features == 3
Parameters
----------
param_name : str (default='input_dim')
The parameter name is the parameter your model uses to define the
input dimension in its ``__init__`` method.
input_dim_fn : callable, None (default=None)
In case your ``X`` value is more complex and deriving the input
dimension is not as easy as ``X.shape[-1]`` you can pass a callable
        to this parameter which takes ``X`` and returns the input dimension (see the example below).
module_name : str (default='module')
Only needs change when you are using more than one module in your
skorch model (e.g., in case of GANs).
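    Example with ``input_dim_fn`` (an illustrative sketch; the dict-shaped ``X``
    and ``get_input_dim`` below are hypothetical):

    >>> def get_input_dim(X):
    ...     return X['data'].shape[-1]
    >>> net = NeuralNetClassifier(
    ...     MyModule,
    ...     callbacks=[InputShapeSetter(input_dim_fn=get_input_dim)],
    ... )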
"""
def __init__(
self,
param_name='input_dim',
input_dim_fn=None,
module_name='module',
):
self.module_name = module_name
self.param_name = param_name
self.input_dim_fn = input_dim_fn
def get_input_dim(self, X):
if self.input_dim_fn is not None:
return self.input_dim_fn(X)
if len(X.shape) < 2:
raise ValueError(
"Expected at least two-dimensional input data for X. "
"If your data is one-dimensional, please use the "
"`input_dim_fn` parameter to infer the correct "
"input shape."
)
return X.shape[-1]
def on_train_begin(self, net, X, y, **kwargs):
params = net.get_params()
input_dim = self.get_input_dim(X)
param_name = f'{self.module_name}__{self.param_name}'
if params.get(param_name, None) == input_dim:
return
kwargs = {param_name: input_dim}
net.set_params(**kwargs)
|
InputShapeSetter
|
python
|
huggingface__transformers
|
src/transformers/models/flava/configuration_flava.py
|
{
"start": 839,
"end": 5594
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
mask_token (`bool`, *optional*, defaults to `True`):
Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
vocab_size (`int`, *optional*, defaults to 8192):
Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
Image Modeling) loss for FLAVA.
Example:
```python
>>> from transformers import FlavaImageConfig, FlavaImageModel
    >>> # Initializing a FlavaImageConfig with style configuration
>>> configuration = FlavaImageConfig()
>>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
>>> model = FlavaImageModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "flava_image_model"
base_config_key = "image_config"
def __init__(
self,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: int = "gelu",
hidden_dropout_prob: float = 0.0,
attention_probs_dropout_prob: float = 0.0,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
image_size: int = 224,
patch_size: int = 16,
num_channels: int = 3,
qkv_bias: bool = True,
mask_token: bool = True,
vocab_size: int = 8192,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.mask_token = mask_token
self.vocab_size = vocab_size
|
FlavaImageConfig
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 173596,
"end": 175587
}
|
class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_negatives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
tn, tn_update_op = metrics.true_negatives(
labels=labels, predictions=predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn)
self.assertAllClose(3., tn_update_op)
self.assertAllClose(3., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
tn, tn_update_op = metrics.true_negatives(
labels=labels, predictions=predictions, weights=weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn)
self.assertAllClose(4., tn_update_op)
self.assertAllClose(4., tn)
|
TrueNegativesTest
|
python
|
ray-project__ray
|
python/ray/data/_internal/datasource/hudi_datasource.py
|
{
"start": 300,
"end": 486
}
|
class ____(Enum):
SNAPSHOT = "snapshot"
INCREMENTAL = "incremental"
@classmethod
def supported_types(cls) -> List[str]:
return [e.value for e in cls]
|
HudiQueryType
|
python
|
jazzband__django-model-utils
|
tests/models.py
|
{
"start": 9219,
"end": 9370
}
|
class ____(TrackedFK):
custom_tracker = FieldTracker(fields=['fk_id'])
custom_tracker_without_id = FieldTracker(fields=['fk'])
|
InheritedTrackedFK
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess9.py
|
{
"start": 108,
"end": 229
}
|
class ____:
def __getattr__(self, name: str) -> int: ...
def test_get_attr() -> None:
a = GetAttrTest()
|
GetAttrTest
|
python
|
ray-project__ray
|
python/ray/train/tensorflow/keras.py
|
{
"start": 5495,
"end": 7342
}
|
class ____(RayReportCallback):
"""Keras callback for Ray Train reporting and checkpointing.
.. note::
Metrics are always reported with checkpoints, even if the event isn't specified
in ``report_metrics_on``.
Example:
.. testcode:: python
############# Using it in TrainSession ###############
from ray.air.integrations.keras import ReportCheckpointCallback
def train_loop_per_worker():
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
model = build_model()
model.fit(dataset_shard, callbacks=[ReportCheckpointCallback()])
Args:
metrics: Metrics to report. If this is a list, each item describes
the metric key reported to Keras, and it's reported under the
same name. If this is a dict, each key is the name reported
and the respective value is the metric key reported to Keras.
If this is None, all Keras logs are reported.
report_metrics_on: When to report metrics. Must be one of
the Keras event hooks (less the ``on_``), e.g.
"train_start" or "predict_end". Defaults to "epoch_end".
checkpoint_on: When to save checkpoints. Must be one of the Keras event hooks
(less the ``on_``), e.g. "train_start" or "predict_end". Defaults to
"epoch_end".
"""
def _save_and_report_checkpoint(
self, metrics: Dict, checkpoint: TensorflowCheckpoint
):
"""Save checkpoint and report metrics corresonding to this checkpoint."""
ray.train.report(metrics, checkpoint=checkpoint)
def _report_metrics(self, metrics: Dict):
"""Report metrics."""
ray.train.report(metrics, checkpoint=None)
|
ReportCheckpointCallback
|
python
|
PrefectHQ__prefect
|
src/prefect/server/api/ui/flow_runs.py
|
{
"start": 753,
"end": 4172
}
|
class ____(PrefectBaseModel):
id: UUID = Field(default=..., description="The flow run id.")
state_type: schemas.states.StateType = Field(
default=..., description="The state type."
)
timestamp: DateTime = Field(
default=...,
description=(
"The start time of the run, or the expected start time "
"if it hasn't run yet."
),
)
duration: datetime.timedelta = Field(
default=..., description="The total run time of the run."
)
lateness: datetime.timedelta = Field(
default=..., description="The delay between the expected and actual start time."
)
@router.post("/history")
async def read_flow_run_history(
sort: schemas.sorting.FlowRunSort = Body(
schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC
),
limit: int = Body(1000, le=1000),
offset: int = Body(0, ge=0),
flows: schemas.filters.FlowFilter = None,
flow_runs: schemas.filters.FlowRunFilter = None,
task_runs: schemas.filters.TaskRunFilter = None,
deployments: schemas.filters.DeploymentFilter = None,
work_pools: schemas.filters.WorkPoolFilter = None,
db: PrefectDBInterface = Depends(provide_database_interface),
) -> List[SimpleFlowRun]:
columns = [
db.FlowRun.id,
db.FlowRun.state_type,
db.FlowRun.start_time,
db.FlowRun.expected_start_time,
db.FlowRun.total_run_time,
# Although it isn't returned, we need to select
# this field in order to compute `estimated_run_time`
db.FlowRun.state_timestamp,
]
async with db.session_context() as session:
result = await models.flow_runs.read_flow_runs(
columns=columns,
flow_filter=flows,
flow_run_filter=flow_runs,
task_run_filter=task_runs,
deployment_filter=deployments,
work_pool_filter=work_pools,
sort=sort,
limit=limit,
offset=offset,
session=session,
)
return [
SimpleFlowRun(
id=r.id,
state_type=r.state_type,
timestamp=r.start_time or r.expected_start_time,
duration=r.estimated_run_time,
lateness=r.estimated_start_time_delta,
)
for r in result
]
@router.post("/count-task-runs")
async def count_task_runs_by_flow_run(
flow_run_ids: list[UUID] = Body(default=..., embed=True, max_items=200),
db: PrefectDBInterface = Depends(provide_database_interface),
) -> dict[UUID, int]:
"""
Get task run counts by flow run id.
"""
async with db.session_context() as session:
query = (
sa.select(
db.TaskRun.flow_run_id,
sa.func.count(db.TaskRun.id).label("task_run_count"),
)
.where(
sa.and_(
db.TaskRun.flow_run_id.in_(flow_run_ids),
sa.not_(db.TaskRun.subflow_run.has()),
)
)
.group_by(db.TaskRun.flow_run_id)
)
results = await session.execute(query)
task_run_counts_by_flow_run = {
flow_run_id: task_run_count for flow_run_id, task_run_count in results.t
}
return {
flow_run_id: task_run_counts_by_flow_run.get(flow_run_id, 0)
for flow_run_id in flow_run_ids
}
|
SimpleFlowRun
|
python
|
huggingface__transformers
|
src/transformers/models/dac/modeling_dac.py
|
{
"start": 23368,
"end": 28867
}
|
class ____(DacPreTrainedModel):
input_modalities = "audio"
def __init__(self, config: DacConfig):
super().__init__(config)
self.config = config
self.encoder = DacEncoder(config)
self.decoder = DacDecoder(config)
self.quantizer = DacResidualVectorQuantize(config)
self.bits_per_codebook = int(math.log2(self.config.codebook_size))
if 2**self.bits_per_codebook != self.config.codebook_size:
raise ValueError("The codebook_size must be a power of 2.")
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def encode(
self,
input_values: torch.Tensor,
n_quantizers: Optional[int] = None,
return_dict: Optional[bool] = None,
):
r"""
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Input audio data to encode.
n_quantizers (int, *optional*):
Number of quantizers to use. If None, all quantizers are used. Default is None.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
quantized_representation = self.encoder(input_values)
quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss = self.quantizer(
quantized_representation, n_quantizers
)
loss = self.config.commitment_loss_weight * commitment_loss + self.config.codebook_loss_weight * codebook_loss
if not return_dict:
return (loss, quantized_representation, audio_codes, projected_latents)
return DacEncoderOutput(loss, quantized_representation, audio_codes, projected_latents)
@auto_docstring
def decode(
self,
quantized_representation: Optional[torch.Tensor] = None,
audio_codes: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
):
r"""
quantized_representation (torch.Tensor of shape `(batch_size, dimension, time_steps)`, *optional*):
Quantized continuous representation of input.
audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
The codebook indices for each codebook, representing the quantized discrete
representation of the input. This parameter should be provided if you want
to decode directly from the audio codes (it will overwrite quantized_representation).
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`DacDecoderOutput`] instead of a plain tuple.
"""
if quantized_representation is None and audio_codes is None:
raise ValueError("Either `quantized_representation` or `audio_codes` must be provided.")
return_dict = return_dict if return_dict is not None else self.config.return_dict
if audio_codes is not None:
quantized_representation = self.quantizer.from_codes(audio_codes)[0]
audio_values = self.decoder(quantized_representation).squeeze(1)
if not return_dict:
return (audio_values,)
return DacDecoderOutput(audio_values)
@auto_docstring
def forward(
self,
input_values: torch.Tensor,
n_quantizers: Optional[int] = None,
return_dict: Optional[bool] = None,
):
r"""
input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
Audio data to encode.
n_quantizers (`int`, *optional*):
Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.
Examples:
```python
>>> from datasets import load_dataset, Audio
>>> from transformers import DacModel, AutoProcessor
>>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> model = DacModel.from_pretrained("descript/dac_16khz")
>>> processor = AutoProcessor.from_pretrained("descript/dac_16khz")
>>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
>>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
>>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")
>>> encoder_outputs = model.encode(inputs["input_values"])
>>> # Get the intermediate audio codes
>>> audio_codes = encoder_outputs.audio_codes
>>> # Reconstruct the audio from its quantized representation
>>> audio_values = model.decode(encoder_outputs.quantized_representation)
>>> # or the equivalent with a forward pass
>>> audio_values = model(inputs["input_values"]).audio_values
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
length = input_values.shape[-1]
loss, quantized_representation, audio_codes, projected_latents = self.encode(
input_values, n_quantizers, return_dict=False
)
audio_values = self.decode(quantized_representation, return_dict=False)[0][..., :length]
if not return_dict:
return (loss, audio_values, quantized_representation, audio_codes, projected_latents)
return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents)
__all__ = ["DacModel", "DacPreTrainedModel"]
|
DacModel
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol6.py
|
{
"start": 602,
"end": 634
}
|
class ____:
species: str
|
Tapir
|
python
|
encode__django-rest-framework
|
tests/test_permissions.py
|
{
"start": 688,
"end": 941
}
|
class ____(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
|
RootView
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_basic.py
|
{
"start": 11507,
"end": 12099
}
|
class ____:
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, shape=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
|
Testfft2
|
python
|
pydata__xarray
|
xarray/tests/test_backends_datatree.py
|
{
"start": 917,
"end": 6409
}
|
class ____(_TestNetCDF4Data):
@contextlib.contextmanager
def open(self, path, **kwargs):
with open_datatree(path, engine=self.engine, **kwargs) as ds:
yield ds.to_dataset()
def test_child_group_with_inconsistent_dimensions(self) -> None:
with pytest.raises(
ValueError, match=r"group '/child' is not aligned with its parents"
):
super().test_child_group_with_inconsistent_dimensions()
def diff_chunks(
comparison: dict[tuple[str, Hashable], bool], tree1: DataTree, tree2: DataTree
) -> str:
mismatching_variables = [loc for loc, equals in comparison.items() if not equals]
variable_messages = [
"\n".join(
[
f"L {path}:{name}: {tree1[path].variables[name].chunksizes}",
f"R {path}:{name}: {tree2[path].variables[name].chunksizes}",
]
)
for path, name in mismatching_variables
]
return "\n".join(["Differing chunk sizes:"] + variable_messages)
def assert_chunks_equal(
actual: DataTree, expected: DataTree, enforce_dask: bool = False
) -> None:
__tracebackhide__ = True
from xarray.namedarray.pycompat import array_type
dask_array_type = array_type("dask")
comparison = {
(path, name): (
(
not enforce_dask
or isinstance(node1.variables[name].data, dask_array_type)
)
and node1.variables[name].chunksizes == node2.variables[name].chunksizes
)
for path, (node1, node2) in xr.group_subtrees(actual, expected)
for name in node1.variables.keys()
}
assert all(comparison.values()), diff_chunks(comparison, actual, expected)
@pytest.fixture(scope="module")
def unaligned_datatree_nc(tmp_path_factory):
"""Creates a test netCDF4 file with the following unaligned structure, writes it to a /tmp directory
and returns the file path of the netCDF4 file.
Group: /
│ Dimensions: (lat: 1, lon: 2)
│ Dimensions without coordinates: lat, lon
│ Data variables:
│ root_variable (lat, lon) float64 16B ...
└── Group: /Group1
│ Dimensions: (lat: 1, lon: 2)
│ Dimensions without coordinates: lat, lon
│ Data variables:
│ group_1_var (lat, lon) float64 16B ...
└── Group: /Group1/subgroup1
Dimensions: (lat: 2, lon: 2)
Dimensions without coordinates: lat, lon
Data variables:
subgroup1_var (lat, lon) float64 32B ...
"""
filepath = tmp_path_factory.mktemp("data") / "unaligned_subgroups.nc"
with nc4.Dataset(filepath, "w", format="NETCDF4") as root_group:
group_1 = root_group.createGroup("/Group1")
subgroup_1 = group_1.createGroup("/subgroup1")
root_group.createDimension("lat", 1)
root_group.createDimension("lon", 2)
root_group.createVariable("root_variable", np.float64, ("lat", "lon"))
group_1_var = group_1.createVariable("group_1_var", np.float64, ("lat", "lon"))
group_1_var[:] = np.array([[0.1, 0.2]])
group_1_var.units = "K"
group_1_var.long_name = "air_temperature"
subgroup_1.createDimension("lat", 2)
subgroup1_var = subgroup_1.createVariable(
"subgroup1_var", np.float64, ("lat", "lon")
)
subgroup1_var[:] = np.array([[0.1, 0.2]])
yield filepath
@pytest.fixture(scope="module")
def unaligned_datatree_zarr_factory(
tmp_path_factory,
) -> Generator[
Callable[[Literal[2, 3]], Path],
None,
None,
]:
"""Creates a zarr store with the following unaligned group hierarchy:
Group: /
│ Dimensions: (y: 3, x: 2)
│ Dimensions without coordinates: y, x
│ Data variables:
│ a (y) int64 24B ...
│ set0 (x) int64 16B ...
└── Group: /Group1
│ │ Dimensions: ()
│ │ Data variables:
│ │ a int64 8B ...
│ │ b int64 8B ...
│ └── /Group1/subgroup1
│ Dimensions: ()
│ Data variables:
│ a int64 8B ...
│ b int64 8B ...
└── Group: /Group2
Dimensions: (y: 2, x: 2)
Dimensions without coordinates: y, x
Data variables:
a (y) int64 16B ...
b (x) float64 16B ...
"""
def _unaligned_datatree_zarr(zarr_format: Literal[2, 3]) -> Path:
filepath = tmp_path_factory.mktemp("data") / "unaligned_simple_datatree.zarr"
root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])})
set1_data = xr.Dataset({"a": 0, "b": 1})
set2_data = xr.Dataset({"a": ("y", [2, 3]), "b": ("x", [0.1, 0.2])})
root_data.to_zarr(
filepath,
mode="w",
zarr_format=zarr_format,
)
set1_data.to_zarr(
filepath,
group="/Group1",
mode="a",
zarr_format=zarr_format,
)
set2_data.to_zarr(
filepath,
group="/Group2",
mode="a",
zarr_format=zarr_format,
)
set1_data.to_zarr(
filepath,
group="/Group1/subgroup1",
mode="a",
zarr_format=zarr_format,
)
return filepath
yield _unaligned_datatree_zarr
|
TestNetCDF4DataTree
|
python
|
huggingface__transformers
|
src/transformers/trainer_utils.py
|
{
"start": 7386,
"end": 7494
}
|
class ____(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
BEST = "best"
|
SaveStrategy
|
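Editor's note: the record above is a plain string-valued enum. As a rough illustration (using only the standard library, not transformers' ExplicitEnum; the name SaveStrategyDemo is made up), a str-backed Enum gives the same lookup-by-value and string-comparison behaviour:

from enum import Enum

class SaveStrategyDemo(str, Enum):
    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
    BEST = "best"

assert SaveStrategyDemo("steps") is SaveStrategyDemo.STEPS   # lookup by value
assert SaveStrategyDemo.EPOCH == "epoch"                     # compares equal to a plain str
print(SaveStrategyDemo.BEST.value)                           # best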
python
|
numba__numba
|
numba/tests/test_init_utils.py
|
{
"start": 131,
"end": 1557
}
|
class ____(TestCase):
def test_major_minor_patch(self):
expected = version_info(0, 1, 0,
(0, 1), (0, 1, 0),
"0.1.0", ('0', '1', '0'), None)
received = generate_version_info("0.1.0")
self.assertEqual(received, expected)
def test_unknown(self):
expected = version_info(None, None, None,
(None, None), (None, None, None),
'0+unknown', ('0+unknown',), None)
received = generate_version_info('0+unknown')
self.assertEqual(received, expected)
def test_dev(self):
expected = version_info(0, 1, None,
(0, 1), (0, 1, None),
'0.1.0dev0', ('0', '1', '0dev0'), None)
received = generate_version_info('0.1.0dev0')
self.assertEqual(received, expected)
def test_full_rev(self):
expected = version_info(0, 1, None,
(0, 1), (0, 1, None),
'0.1.0dev0+1.g0123456789abcdef',
('0', '1', '0dev0+1', 'g0123456789abcdef'),
'g0123456789abcdef')
received = generate_version_info('0.1.0dev0+1.g0123456789abcdef')
self.assertEqual(received, expected)
if __name__ == '__main__':
unittest.main()
|
TestGenerateVersionInfo
|
python
|
py-pdf__pypdf
|
pypdf/_text_extraction/_layout_mode/_text_state_params.py
|
{
"start": 224,
"end": 5306
}
|
class ____:
"""
Text state parameters and operator values for a single text value in a
TJ or Tj PDF operation.
Attributes:
txt (str): the text to be rendered.
font (Font): font object
font_size (int | float): font size
Tc (float): character spacing. Defaults to 0.0.
Tw (float): word spacing. Defaults to 0.0.
Tz (float): horizontal scaling. Defaults to 100.0.
TL (float): leading, vertical displacement between text lines. Defaults to 0.0.
Ts (float): text rise. Used for super/subscripts. Defaults to 0.0.
transform (List[float]): effective transformation matrix.
        tx (float): x coord of rendered text, i.e. self.transform[4]
        ty (float): y coord of rendered text. May differ from self.transform[5] per self.Ts.
displaced_tx (float): x coord immediately following rendered text
space_tx (float): tx for a space character
font_height (float): effective font height accounting for CTM
flip_vertical (bool): True if y axis has been inverted (i.e. if self.transform[3] < 0.)
rotated (bool): True if the text orientation is rotated with respect to the page.
"""
txt: str
font: Font
font_size: Union[int, float]
Tc: float = 0.0
Tw: float = 0.0
Tz: float = 100.0
TL: float = 0.0
Ts: float = 0.0
transform: list[float] = field(
default_factory=lambda: [1.0, 0.0, 0.0, 1.0, 0.0, 0.0]
)
tx: float = field(default=0.0, init=False)
ty: float = field(default=0.0, init=False)
displaced_tx: float = field(default=0.0, init=False)
space_tx: float = field(default=0.0, init=False)
font_height: float = field(default=0.0, init=False)
flip_vertical: bool = field(default=False, init=False)
rotated: bool = field(default=False, init=False)
def __post_init__(self) -> None:
if orient(self.transform) in (90, 270):
self.transform = mult(
[1.0, -self.transform[1], -self.transform[2], 1.0, 0.0, 0.0],
self.transform,
)
self.rotated = True
# self.transform[0] AND self.transform[3] < 0 indicates true rotation.
# If only self.transform[3] < 0, the y coords are simply inverted.
if orient(self.transform) == 180 and self.transform[0] < -1e-6:
self.transform = mult([-1.0, 0.0, 0.0, -1.0, 0.0, 0.0], self.transform)
self.rotated = True
self.displaced_tx = self.displaced_transform()[4]
self.tx = self.transform[4]
self.ty = self.render_transform()[5]
self.space_tx = round(self.word_tx(" "), 3)
if self.space_tx < 1e-6:
# if the " " char is assigned 0 width (e.g. for fine tuned spacing
# with TJ int operators a la crazyones.pdf), calculate space_tx as
# a TD_offset of -2 * font.space_width where font.space_width is
# the space_width calculated in _cmap.py.
self.space_tx = round(self.word_tx("", self.font.space_width * -2), 3)
self.font_height = self.font_size * math.sqrt(
self.transform[1] ** 2 + self.transform[3] ** 2
)
# flip_vertical handles PDFs generated by Microsoft Word's "publish" command.
self.flip_vertical = self.transform[3] < -1e-6 # inverts y axis
def font_size_matrix(self) -> list[float]:
"""Font size matrix"""
return [
self.font_size * (self.Tz / 100.0),
0.0,
0.0,
self.font_size,
0.0,
self.Ts,
]
def displaced_transform(self) -> list[float]:
"""Effective transform matrix after text has been rendered."""
return mult(self.displacement_matrix(), self.transform)
def render_transform(self) -> list[float]:
"""Effective transform matrix accounting for font size, Tz, and Ts."""
return mult(self.font_size_matrix(), self.transform)
def displacement_matrix(
self, word: Union[str, None] = None, TD_offset: float = 0.0
) -> list[float]:
"""
Text displacement matrix
Args:
word (str, optional): Defaults to None in which case self.txt displacement is
returned.
TD_offset (float, optional): translation applied by TD operator. Defaults to 0.0.
"""
word = word if word is not None else self.txt
return [1.0, 0.0, 0.0, 1.0, self.word_tx(word, TD_offset), 0.0]
def word_tx(self, word: str, TD_offset: float = 0.0) -> float:
"""Horizontal text displacement for any word according this text state"""
return (
(self.font_size * ((self.font.word_width(word) - TD_offset) / 1000.0))
+ self.Tc
+ word.count(" ") * self.Tw
) * (self.Tz / 100.0)
@staticmethod
def to_dict(inst: "TextStateParams") -> dict[str, Any]:
"""Dataclass to dict for json.dumps serialization"""
return {k: getattr(inst, k) for k in inst.__dataclass_fields__ if k != "font"}
|
TextStateParams
|
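Editor's note: a quick numeric check of the word_tx formula shown above, with made-up values (a 12 pt font, glyph widths summing to 500/1000 text-space units, one space; these numbers are illustrative, not taken from any PDF):

font_size, word_width = 12.0, 500.0    # word_width stands in for font.word_width(word)
Tc, Tw, Tz = 0.0, 0.25, 100.0
spaces = 1                              # word.count(" ")
tx = (font_size * (word_width / 1000.0) + Tc + spaces * Tw) * (Tz / 100.0)
print(tx)                               # 6.25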
python
|
pytorch__pytorch
|
torch/_dynamo/variables/dicts.py
|
{
"start": 58192,
"end": 60086
}
|
class ____(VariableTracker):
"""
Models _PyDictViewObject
This is an "abstract" class. Subclasses will override kv and the items method
"""
kv: Optional[str] = None
def __init__(self, dv_dict: ConstDictVariable, **kwargs: Any) -> None:
super().__init__(**kwargs)
assert self.kv in ("keys", "values", "items")
assert isinstance(dv_dict, ConstDictVariable)
self.dv_dict = dv_dict
@property
def view_items(self) -> Any:
assert self.kv is not None
return getattr(self.dv_dict.items, self.kv)()
@property
def view_items_vt(self) -> list[VariableTracker]:
# Returns an iterable of the unpacked items
# Implement in the subclasses
raise NotImplementedError
def unpack_var_sequence(self, tx: "InstructionTranslator") -> list[VariableTracker]:
return self.view_items_vt
def reconstruct(self, codegen: "PyCodegen") -> None:
assert self.kv is not None
codegen(self.dv_dict)
codegen.load_method(self.kv)
codegen.call_method(0)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
assert self.kv is not None
if name in self.python_type().__dict__:
return ConstantVariable.create(True)
return ConstantVariable.create(False)
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if name == "__len__":
return self.dv_dict.call_method(tx, name, args, kwargs)
elif name == "__iter__":
return ListIteratorVariable(
self.view_items_vt, mutation_type=ValueMutationNew()
)
return super().call_method(tx, name, args, kwargs)
|
DictViewVariable
|
python
|
google__jax
|
jax/experimental/jax2tf/tests/flax_models/gnn.py
|
{
"start": 1208,
"end": 1678
}
|
class ____(nn.Module):
"""A multi-layer perceptron."""
feature_sizes: Sequence[int]
dropout_rate: float = 0
deterministic: bool = True
activation: Callable[[jax.Array], jax.Array] = nn.relu
@nn.compact
def __call__(self, inputs):
x = inputs
for size in self.feature_sizes:
x = nn.Dense(features=size)(x)
x = self.activation(x)
x = nn.Dropout(
rate=self.dropout_rate, deterministic=self.deterministic)(x)
return x
|
MLP
|
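Editor's note: a rough usage sketch for a Flax module like the one above, assuming flax and jax are installed. The class is re-declared locally under the name MLP only so the example is self-contained; it mirrors the snippet rather than importing it.

from typing import Callable, Sequence
import jax
import jax.numpy as jnp
import flax.linen as nn

class MLP(nn.Module):
    feature_sizes: Sequence[int]
    dropout_rate: float = 0
    deterministic: bool = True
    activation: Callable[[jax.Array], jax.Array] = nn.relu

    @nn.compact
    def __call__(self, inputs):
        x = inputs
        for size in self.feature_sizes:
            x = nn.Dense(features=size)(x)
            x = self.activation(x)
            x = nn.Dropout(rate=self.dropout_rate,
                           deterministic=self.deterministic)(x)
        return x

model = MLP(feature_sizes=(16, 4))
params = model.init(jax.random.PRNGKey(0), jnp.ones((2, 8)))
out = model.apply(params, jnp.ones((2, 8)))
print(out.shape)  # (2, 4)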
python
|
streamlit__streamlit
|
lib/tests/streamlit/data_mocks/dask_mocks.py
|
{
"start": 1449,
"end": 2143
}
|
class ____:
"""This is dummy Series class, which imitates dask.dataframe.core.Series class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a Dask Series.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "dask.dataframe.core"
def __init__(self, data: pd.Series):
self._data: pd.Series = data
def head(self, n: int, compute: bool) -> pd.Series:
"""Returns the top n element of a mock version of Dask Series."""
return self._data.head(n)
|
Series
|
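Editor's note: the mock above relies on spoofing __module__. A minimal, illustrative detection sketch (this is not Streamlit's actual helper; FakeDaskSeries and looks_like_dask_series are names invented here) shows why that attribute is enough:

import pandas as pd

class FakeDaskSeries:
    __module__ = "dask.dataframe.core"
    def __init__(self, data: pd.Series):
        self._data = data
    def head(self, n: int, compute: bool) -> pd.Series:
        return self._data.head(n)

def looks_like_dask_series(obj) -> bool:
    # Detect by module name, so the real dask package never has to be imported.
    return type(obj).__module__.startswith("dask.dataframe")

print(looks_like_dask_series(FakeDaskSeries(pd.Series([1, 2, 3]))))  # True
print(looks_like_dask_series(pd.Series([1, 2, 3])))                  # False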
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/operators/variables_test.py
|
{
"start": 838,
"end": 1864
}
|
class ____(test.TestCase):
def test_undefined(self):
undefined_symbol = variables.Undefined('name')
undefined_symbol2 = variables.Undefined('name')
self.assertEqual(undefined_symbol.symbol_name, 'name')
self.assertEqual(undefined_symbol2.symbol_name, 'name')
self.assertNotEqual(undefined_symbol, undefined_symbol2)
def test_undefined_operations(self):
undefined_symbol = variables.Undefined('name')
self.assertIsInstance(undefined_symbol.foo, variables.Undefined)
self.assertIsInstance(undefined_symbol[0], variables.Undefined)
self.assertNotIsInstance(undefined_symbol.__class__, variables.Undefined)
def test_read(self):
self.assertEqual(variables.ld(1), 1)
o = object()
self.assertEqual(variables.ld(o), o)
self.assertIsNone(variables.ld(None))
def test_read_undefined(self):
with self.assertRaisesRegex(UnboundLocalError, 'used before assignment'):
variables.ld(variables.Undefined('a'))
if __name__ == '__main__':
test.main()
|
SpecialValuesTest
|
python
|
numpy__numpy
|
numpy/_core/code_generators/generate_umath.py
|
{
"start": 1120,
"end": 6279
}
|
class ____:
"""Type signature for a ufunc.
Attributes
----------
type : str
Character representing the nominal type.
func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
The string representing the expression to insert into the data
array, if any.
in_ : str or None, optional
The typecode(s) of the inputs.
out : str or None, optional
The typecode(s) of the outputs.
astype : dict or None, optional
If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y
instead of PyUFunc_x_x/PyUFunc_xx_x.
    cfunc_alias : str or None, optional
Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays.
NOTE: it doesn't support 'astype'
dispatch : str or None, optional
Dispatch-able source name without its extension '.dispatch.c' that
contains the definition of ufunc, dispatched at runtime depending on the
specified targets of the dispatch-able source.
NOTE: it doesn't support 'astype'
"""
def __init__(self, type, f=None, in_=None, out=None, astype=None, cfunc_alias=None,
dispatch=None):
self.type = type
self.func_data = f
if astype is None:
astype = {}
self.astype_dict = astype
if in_ is not None:
in_ = in_.replace('P', type)
self.in_ = in_
if out is not None:
out = out.replace('P', type)
self.out = out
self.cfunc_alias = cfunc_alias
self.dispatch = dispatch
def finish_signature(self, nin, nout):
if self.in_ is None:
self.in_ = self.type * nin
assert len(self.in_) == nin
if self.out is None:
self.out = self.type * nout
assert len(self.out) == nout
self.astype = self.astype_dict.get(self.type, None)
def _check_order(types1, types2):
"""
Helper to check that the loop types are ordered. The legacy type resolver
    (and potentially downstream) may use the first loop to which operands
can be cast safely.
"""
# Insert kK (int64) after all other ints (assumes long long isn't larger)
dtype_order = bints + 'kK' + times + flts + cmplxP + "O"
for t1, t2 in zip(types1, types2):
# We have no opinion on object or time ordering for now:
if t1 in "OP" or t2 in "OP":
return True
if t1 in "mM" or t2 in "mM":
return True
        t1i = dtype_order.index(t1)
        t2i = dtype_order.index(t2)
        if t1i < t2i:
            return
        if t2i < t1i:
            break
if types1 == "QQ?" and types2 == "qQ?":
# Explicitly allow this mixed case, rather than figure out what order
# is nicer or how to encode it.
return
raise TypeError(
f"Input dtypes are unsorted or duplicate: {types1} and {types2}")
def check_td_order(tds):
# A quick check for whether the signatures make sense, it happened too
# often that SIMD additions added loops that do not even make some sense.
# TODO: This should likely be a test and it would be nice if it rejected
# duplicate entries as well (but we have many as of writing this).
signatures = [t.in_ + t.out for t in tds]
for prev_i, sign in enumerate(signatures[1:]):
if sign in signatures[:prev_i + 1]:
continue # allow duplicates...
_check_order(signatures[prev_i], sign)
_floatformat_map = {
"e": 'npy_%sf',
"f": 'npy_%sf',
"d": 'npy_%s',
"g": 'npy_%sl',
"F": 'nc_%sf',
"D": 'nc_%s',
"G": 'nc_%sl'
}
def build_func_data(types, f):
func_data = [_floatformat_map.get(t, '%s') % (f,) for t in types]
return func_data
def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None,
dispatch=None):
"""
Generate a TypeDescription instance for each item in types
"""
if f is not None:
if isinstance(f, str):
func_data = build_func_data(types, f)
elif len(f) != len(types):
raise ValueError("Number of types and f do not match")
else:
func_data = f
else:
func_data = (None,) * len(types)
if isinstance(in_, str):
in_ = (in_,) * len(types)
elif in_ is None:
in_ = (None,) * len(types)
elif len(in_) != len(types):
raise ValueError("Number of types and inputs do not match")
if isinstance(out, str):
out = (out,) * len(types)
elif out is None:
out = (None,) * len(types)
elif len(out) != len(types):
raise ValueError("Number of types and outputs do not match")
tds = []
for t, fd, i, o in zip(types, func_data, in_, out):
# [(dispatch file name without extension '.dispatch.c*', list of types)]
if dispatch:
dispt = ([k for k, v in dispatch if t in v] + [None])[0]
else:
dispt = None
tds.append(TypeDescription(
t, f=fd, in_=i, out=o, astype=astype, cfunc_alias=cfunc_alias,
dispatch=dispt
))
return tds
|
TypeDescription
|
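Editor's note: a toy restatement of the ordering rule that _check_order above enforces, using a made-up dtype_order string (and dropping the QQ?/qQ? special case) so it runs without the generator's module globals:

def check_order(types1, types2, dtype_order="bBhHiIlLqQkKefdgFDGO"):
    for t1, t2 in zip(types1, types2):
        i1, i2 = dtype_order.index(t1), dtype_order.index(t2)
        if i1 < i2:
            return          # first differing position is already sorted: accept
        if i2 < i1:
            raise TypeError(f"unsorted loop signatures: {types1!r} before {types2!r}")

check_order("fff", "ddd")   # fine: 'f' sorts before 'd' in the toy dtype_order
try:
    check_order("ddd", "fff")
except TypeError as exc:
    print(exc)              # wrong order is rejected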
python
|
pytorch__pytorch
|
torch/autograd/function.py
|
{
"start": 30564,
"end": 33536
}
|
class ____(Function):
r"""
This class is here only for backward compatibility reasons.
Use :class:`Function` instead of this for any new use case.
"""
# The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the
# superclass (Function) but are instance methods here, which mypy reports as incompatible.
def _do_forward(self, *input):
self._nested_input = input
flat_input = tuple(_iter_tensors(input))
flat_output = super()._do_forward(*flat_input) # type: ignore[misc]
nested_tensors = _unflatten(flat_output, self._nested_output)
return nested_tensors
def _do_backward(self, gradients, retain_variables):
self.retain_variables = retain_variables
result = super()._do_backward(gradients, retain_variables) # type: ignore[misc]
if not retain_variables:
del self._nested_output
del self._to_save_nested
return result
def backward(self, *gradients: Any) -> Any: # type: ignore[override]
r"""
Shared backward utility.
"""
nested_gradients = _unflatten(gradients, self._nested_output)
result = self.backward_extended(*nested_gradients) # type: ignore[func-returns-value]
return tuple(_iter_None_tensors(result))
__call__ = _do_forward
def forward(self, *args: Any) -> Any: # type: ignore[override]
r"""
Shared forward utility.
"""
nested_tensors = _map_tensor_data(self._nested_input)
result = self.forward_extended(*nested_tensors) # type: ignore[func-returns-value]
del self._nested_input
self._nested_output = result
return tuple(_iter_tensors(result))
def save_for_backward(self, *args: Any) -> None:
r"""
See :meth:`Function.save_for_backward`.
"""
self.to_save = tuple(_iter_tensors(args))
self._to_save_nested = args
@property
def saved_tensors(self): # type: ignore[override]
r"""
See :meth:`Function.saved_tensors`.
"""
flat_tensors = super().saved_tensors # type: ignore[misc]
return _unflatten(flat_tensors, self._to_save_nested)
def mark_dirty(self, *args: Any, **kwargs: Any) -> None:
r"""
See :meth:`Function.mark_dirty`.
"""
self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))
def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:
r"""
See :meth:`Function.mark_non_differentiable`.
"""
self.non_differentiable = tuple(_iter_tensors((args, kwargs)))
def forward_extended(self, *input: Any) -> None:
r"""
User defined forward.
"""
raise NotImplementedError
def backward_extended(self, *grad_output: Any) -> None:
r"""
User defined backward.
"""
raise NotImplementedError
|
NestedIOFunction
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/types/base.py
|
{
"start": 40627,
"end": 40744
}
|
class ____(Schema):
globally = fields.Boolean()
metric_calculations = fields.Boolean()
|
ProgressBarsConfigSchema
|
python
|
fluentpython__example-code-2e
|
19-concurrency/primes/py36/procs.py
|
{
"start": 396,
"end": 1798
}
|
class ____(NamedTuple): # <3>
n: int
prime: bool
elapsed: float
JobQueue = queues.SimpleQueue # <4>
ResultQueue = queues.SimpleQueue # <5>
def check(n: int) -> PrimeResult: # <6>
t0 = perf_counter()
res = is_prime(n)
return PrimeResult(n, res, perf_counter() - t0)
def worker(jobs: JobQueue, results: ResultQueue) -> None: # <7>
while True:
n = jobs.get() # <8>
if n == 0:
break
results.put(check(n)) # <9>
# end::PRIMES_PROC_TOP[]
# tag::PRIMES_PROC_MAIN[]
def main() -> None:
if len(sys.argv) < 2: # <1>
workers = cpu_count()
else:
workers = int(sys.argv[1])
print(f'Checking {len(NUMBERS)} numbers with {workers} processes:')
jobs: JobQueue = SimpleQueue() # <2>
results: ResultQueue = SimpleQueue()
t0 = perf_counter()
for n in NUMBERS: # <3>
jobs.put(n)
for _ in range(workers):
proc = Process(target=worker, args=(jobs, results)) # <4>
proc.start() # <5>
jobs.put(0) # <6>
while True:
n, prime, elapsed = results.get() # <7>
label = 'P' if prime else ' '
print(f'{n:16} {label} {elapsed:9.6f}s') # <8>
if jobs.empty(): # <9>
break
elapsed = perf_counter() - t0
print(f'Total time: {elapsed:.2f}s')
if __name__ == '__main__':
main()
# end::PRIMES_PROC_MAIN[]
|
PrimeResult
|
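Editor's note: the snippet above imports is_prime and NUMBERS from a sibling module. Minimal stand-ins so check() can be exercised on its own; this trial-division is_prime is an assumption, not necessarily the book's implementation:

from time import perf_counter
from typing import NamedTuple

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True

class PrimeResult(NamedTuple):
    n: int
    prime: bool
    elapsed: float

def check(n: int) -> PrimeResult:
    t0 = perf_counter()
    return PrimeResult(n, is_prime(n), perf_counter() - t0)

print(check(7))   # PrimeResult(n=7, prime=True, elapsed=...)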
python
|
django__django
|
tests/staticfiles_tests/test_management.py
|
{
"start": 8568,
"end": 8703
}
|
class ____(TestCollection):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
|
TestCollectionPathLib
|
python
|
tiangolo__fastapi
|
docs_src/response_model/tutorial005_py310.py
|
{
"start": 78,
"end": 816
}
|
class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float = 10.5
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The Bar fighters", "price": 62, "tax": 20.2},
"baz": {
"name": "Baz",
"description": "There goes my baz",
"price": 50.2,
"tax": 10.5,
},
}
@app.get(
"/items/{item_id}/name",
response_model=Item,
response_model_include={"name", "description"},
)
async def read_item_name(item_id: str):
return items[item_id]
@app.get("/items/{item_id}/public", response_model=Item, response_model_exclude={"tax"})
async def read_item_public_data(item_id: str):
return items[item_id]
|
Item
|
python
|
run-llama__llama_index
|
llama-index-core/tests/evaluation/test_batch_runner.py
|
{
"start": 349,
"end": 3942
}
|
class ____(BaseEvaluator):
def __init__(
self,
mock_score: float = 1.0,
mock_passing: bool = True,
mock_feedback: str = "test feedback",
) -> None:
self._mock_score = mock_score
self._mock_passing = mock_passing
self._mock_feedback = mock_feedback
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference: Optional[str] = None,
**kwargs: Any,
) -> EvaluationResult:
return EvaluationResult(
query=query,
contexts=contexts,
response=response,
passing=(
str(response) == str(reference) if reference else self._mock_passing
),
score=self._mock_score,
feedback=self._mock_feedback,
)
def get_eval_results(key, eval_results):
results = eval_results[key]
correct = 0
for result in results:
if result.passing:
correct += 1
return correct / len(results)
def test_batch_runner() -> None:
# single evaluator
runner = BatchEvalRunner(
evaluators={
"evaluator1": MockEvaluator(),
"no_kwarg_evaluator": MockEvaluator(),
}
)
exp_queries = ["query1", "query2"]
exp_response_strs = ["response1", "response2"]
exp_responses = [
Response(response="response1", source_nodes=[]),
Response(response="response2", source_nodes=[]),
]
# original eval_kwargs_lists format - Dict[str, List]
exp_kwargs = {"reference": ["response1", "response1"]}
# test evaluate_response_strs()
results = runner.evaluate_response_strs(
queries=exp_queries, response_strs=exp_response_strs, **exp_kwargs
)
assert get_eval_results("evaluator1", results) == 0.5
# test evaluate_responses()
results = runner.evaluate_responses(
queries=exp_queries, responses=exp_responses, **exp_kwargs
)
assert get_eval_results("evaluator1", results) == 0.5
# multiple evaluators
runner.evaluators = {
"evaluator1": MockEvaluator(),
"evaluator2": MockEvaluator(),
"no_kwarg_evaluator": MockEvaluator(),
}
exp_queries = ["query1", "query2"]
exp_response_strs = ["response1", "response2"]
exp_responses = [
Response(response="response1", source_nodes=[]),
Response(response="response2", source_nodes=[]),
]
# updated eval_kwargs_lists format - Dict[str, Dict[str, List]]
exp_kwargs = {
"evaluator1": {"reference": ["response1", "response1"]},
"evaluator2": {"reference": ["response1", "response2"]},
}
# test evaluate_response_strs()
results = runner.evaluate_response_strs(
queries=exp_queries, response_strs=exp_response_strs, **exp_kwargs
)
assert get_eval_results("evaluator1", results) == 0.5
assert get_eval_results("evaluator2", results) == 1.0
# test evaluate_responses()
results = runner.evaluate_responses(
queries=exp_queries, responses=exp_responses, **exp_kwargs
)
assert get_eval_results("evaluator1", results) == 0.5
assert get_eval_results("evaluator2", results) == 1.0
assert get_eval_results("evaluator1", results) == 0.5
assert get_eval_results("evaluator2", results) == 1.0
|
MockEvaluator
|
python
|
etianen__django-reversion
|
reversion/management/commands/deleterevisions.py
|
{
"start": 226,
"end": 4112
}
|
class ____(BaseRevisionCommand):
help = "Deletes revisions for a given app [and model]."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--days",
default=0,
type=int,
help="Delete only revisions older than the specified number of days.",
)
parser.add_argument(
"--keep",
default=0,
type=int,
help="Keep the specified number of revisions (most recent) for each object.",
)
def handle(self, *app_labels, **options):
verbosity = options["verbosity"]
using = options["using"]
model_db = options["model_db"]
days = options["days"]
keep = options["keep"]
# Delete revisions.
using = using or router.db_for_write(Revision)
with transaction.atomic(using=using):
revision_query = models.Q()
keep_revision_ids = set()
# By default, delete nothing.
can_delete = False
# Get all revisions for the given revision manager and model.
for model in self.get_models(options):
if verbosity >= 1:
self.stdout.write("Finding stale revisions for {name}".format(
name=model._meta.verbose_name,
))
# Find all matching revision IDs.
model_query = Version.objects.using(using).get_for_model(
model,
model_db=model_db,
)
if keep:
overflow_object_ids = list(Version.objects.using(using).get_for_model(
model,
model_db=model_db,
).order_by().values_list("object_id").annotate(
count=models.Count("object_id"),
).filter(
count__gt=keep,
).values_list("object_id", flat=True).iterator())
# Only delete overflow revisions.
model_query = model_query.filter(object_id__in=overflow_object_ids)
for object_id in overflow_object_ids:
if verbosity >= 2:
self.stdout.write("- Finding stale revisions for {name} #{object_id}".format(
name=model._meta.verbose_name,
object_id=object_id,
))
# But keep the underflow revisions.
keep_revision_ids.update(Version.objects.using(using).get_for_object_reference(
model,
object_id,
model_db=model_db,
).values_list("revision_id", flat=True)[:keep].iterator())
# Add to revision query.
revision_query |= models.Q(
pk__in=model_query.order_by().values_list("revision_id", flat=True)
)
# If we have at least one model, then we can delete.
can_delete = True
if can_delete:
revisions_to_delete = Revision.objects.using(using).filter(
revision_query,
date_created__lt=timezone.now() - timedelta(days=days),
).exclude(
pk__in=keep_revision_ids
).order_by()
else:
revisions_to_delete = Revision.objects.using(using).none()
# Print out a message, if feeling verbose.
if verbosity >= 1:
self.stdout.write("Deleting {total} revisions...".format(
total=revisions_to_delete.count(),
))
revisions_to_delete.delete()
|
Command
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType47.py
|
{
"start": 188,
"end": 513
}
|
class ____(Collection[T]):
def __init__(self, value: Iterable[T]) -> None:
self.values = tuple(value)
def __contains__(self, item: object) -> bool:
return True
def __iter__(self) -> Iterator[T]:
return iter(self.values)
def __len__(self) -> int:
return len(self.values)
|
ClassA
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/strings.py
|
{
"start": 7049,
"end": 7260
}
|
class ____:
def setup(self):
self.ser = Series(Index([f"i-{i}" for i in range(10_000)], dtype=object))
def time_encode_decode(self):
self.ser.str.encode("utf-8").str.decode("utf-8")
|
Encode
|
python
|
scipy__scipy
|
scipy/interpolate/tests/test_bsplines.py
|
{
"start": 151428,
"end": 152868
}
|
class ____:
@pytest.mark.parametrize('make_spline, kwargs',
[(make_interp_spline, {}),
(make_smoothing_spline, {}),
(make_smoothing_spline, {'lam': 1.0}),
(make_lsq_spline, {'method': "norm-eq"}),
(make_lsq_spline, {'method': "qr"}),
])
@pytest.mark.parametrize('eval_shape', [(), (1,), (3,)])
@pytest.mark.parametrize('axis', [-1, 0, 1])
def test_batch(self, make_spline, kwargs, axis, eval_shape):
rng = np.random.default_rng(4329872134985134)
n = 10
shape = (2, 3, 4, n)
domain = (0, 10)
x = np.linspace(*domain, n)
y = np.moveaxis(rng.random(shape), -1, axis)
if make_spline == make_lsq_spline:
k = 3 # spline degree, if needed
t = (x[0],) * (k + 1) + (x[-1],) * (k + 1) # valid knots, if needed
kwargs = kwargs | dict(t=t, k=k)
res = make_spline(x, y, axis=axis, **kwargs)
ref = BatchSpline(x, y, axis=axis, spline=make_spline, **kwargs)
x = rng.uniform(*domain, size=eval_shape)
np.testing.assert_allclose(res(x), ref(x))
res, ref = res.antiderivative(1), ref.antiderivative(1)
np.testing.assert_allclose(res(x), ref(x))
res, ref = res.derivative(2), ref.derivative(2)
np.testing.assert_allclose(res(x), ref(x))
np.testing.assert_allclose(res.integrate(*domain), ref.integrate(*domain))
|
TestBatch
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/packaging.py
|
{
"start": 7462,
"end": 9585
}
|
class ____(PackagingCheck):
name = f"Connector version in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file must match"
description = f"Connector version in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file must match. This is to ensure that connector release is consistent."
applies_to_connector_languages = [
ConnectorLanguage.PYTHON,
ConnectorLanguage.LOW_CODE,
]
def _run(self, connector: Connector) -> CheckResult:
metadata_version = get(connector.metadata, "dockerImageTag")
if metadata_version is None:
return self.fail(
connector=connector,
message=f"dockerImageTag field is missing in the {consts.METADATA_FILE_NAME} file",
)
if not (connector.code_directory / consts.PYPROJECT_FILE_NAME).exists():
return self.fail(
connector=connector,
message=f"{consts.PYPROJECT_FILE_NAME} file is missing",
)
try:
pyproject = toml.load((connector.code_directory / consts.PYPROJECT_FILE_NAME))
except toml.TomlDecodeError:
return self.fail(
connector=connector,
message=f"{consts.PYPROJECT_FILE_NAME} is invalid toml file",
)
poetry_version = get(pyproject, "tool.poetry.version")
if poetry_version is None:
return self.fail(
connector=connector,
message=f"Version field is missing in the {consts.PYPROJECT_FILE_NAME} file",
)
if poetry_version != metadata_version:
return self.fail(
connector=connector,
message=f"Version is {metadata_version} in {consts.METADATA_FILE_NAME}, but version is {poetry_version} in {consts.PYPROJECT_FILE_NAME}. These two files have to be consistent",
)
return self.pass_(
connector=connector,
message=f"Version in {consts.METADATA_FILE_NAME} and {consts.PYPROJECT_FILE_NAME} file match",
)
|
CheckConnectorVersionMatchInPyproject
|
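Editor's note: a standalone sketch of the same version comparison, using the standard-library tomllib (Python 3.11+) and plain dicts in place of the Connector / consts helpers, which are project-specific:

import tomllib

pyproject_text = """
[tool.poetry]
name = "source-demo"
version = "1.2.3"
"""
metadata = {"dockerImageTag": "1.2.3"}

poetry_version = tomllib.loads(pyproject_text).get("tool", {}).get("poetry", {}).get("version")
if poetry_version != metadata["dockerImageTag"]:
    print("FAIL: dockerImageTag and tool.poetry.version differ")
else:
    print("PASS: versions match")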
python
|
django__django
|
tests/gis_tests/geoapp/models.py
|
{
"start": 1777,
"end": 1998
}
|
class ____(models.IntegerField):
def db_type(self, connection):
return None
def get_attname_column(self):
attname, column = super().get_attname_column()
return attname, None
|
NonConcreteField
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/connect/select/tutorial005_py310.py
|
{
"start": 222,
"end": 2176
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).join(Team).where(Team.name == "Preventers")
results = session.exec(statement)
for hero, team in results:
print("Preventer Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 23845,
"end": 24152
}
|
class ____(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren, models.CASCADE)
family_name = models.CharField(max_length=255)
|
DependentChild
|
python
|
streamlit__streamlit
|
lib/streamlit/user_info.py
|
{
"start": 14554,
"end": 20344
}
|
class ____(Mapping[str, str | bool | None]):
"""
A read-only, dict-like object for accessing information about the current\
user.
``st.user`` is dependent on the host platform running your
Streamlit app. If your host platform has not configured the object,
``st.user`` will behave as it does in a locally running app.
When authentication is configured in ``secrets.toml``, Streamlit will parse
the OpenID Connect (OIDC) identity token and copy the attributes to
``st.user``. Check your provider's documentation for their
available attributes (known as claims).
When authentication is not configured, ``st.user`` has no
attributes.
You can access values via key or attribute notation. For example, use
``st.user["email"]`` or ``st.user.email`` to
access the ``email`` attribute.
.. Important::
Identity tokens include an issuance and expiration time. Streamlit does
not implicitly check these. If you want to automatically expire a
user's authentication, check these values manually and programmatically
log out your user (``st.logout()``) when needed.
Attributes
----------
is_logged_in: bool
Whether a user is logged in. For a locally running app, this attribute
is only available when authentication (``st.login()``) is configured in
``secrets.toml``. Otherwise, it does not exist.
Examples
--------
**Example 1: Google's identity token**
If you configure a basic Google OIDC connection as shown in Example 1 of
``st.login()``, the following data is available in
``st.user``. Streamlit adds the ``is_logged_in`` attribute.
Additional attributes may be available depending on the configuration of
the user's Google account. For more information about Google's identity
tokens, see `Obtain user information from the ID token
<https://developers.google.com/identity/openid-connect/openid-connect#obtainuserinfo>`_
in Google's docs.
Your app code:
>>> import streamlit as st
>>>
>>> if st.user.is_logged_in:
>>> st.write(st.user)
Displayed data when a user is logged in:
>>> {
>>> "is_logged_in":true
>>> "iss":"https://accounts.google.com"
>>> "azp":"{client_id}.apps.googleusercontent.com"
>>> "aud":"{client_id}.apps.googleusercontent.com"
>>> "sub":"{unique_user_id}"
>>> "email":"{user}@gmail.com"
>>> "email_verified":true
>>> "at_hash":"{access_token_hash}"
>>> "nonce":"{nonce_string}"
>>> "name":"{full_name}"
>>> "picture":"https://lh3.googleusercontent.com/a/{content_path}"
>>> "given_name":"{given_name}"
>>> "family_name":"{family_name}"
>>> "iat":{issued_time}
>>> "exp":{expiration_time}
>>> }
**Example 2: Microsoft's identity token**
If you configure a basic Microsoft OIDC connection as shown in Example 2 of
``st.login()``, the following data is available in
``st.user``. For more information about Microsoft's identity
tokens, see `ID token claims reference
<https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference>`_
in Microsoft's docs.
Your app code:
>>> import streamlit as st
>>>
>>> if st.user.is_logged_in:
>>> st.write(st.user)
Displayed data when a user is logged in:
>>> {
>>> "is_logged_in":true
>>> "ver":"2.0"
>>> "iss":"https://login.microsoftonline.com/{tenant_id}/v2.0"
>>> "sub":"{application_user_id}"
>>> "aud":"{application_id}"
>>> "exp":{expiration_time}
>>> "iat":{issued_time}
>>> "nbf":{start_time}
>>> "name":"{full_name}"
>>> "preferred_username":"{username}"
>>> "oid":"{user_GUID}"
>>> "email":"{email}"
>>> "tid":"{tenant_id}"
>>> "nonce":"{nonce_string}"
>>> "aio":"{opaque_string}"
>>> }
"""
def __getitem__(self, key: str) -> str | bool | None:
try:
return _get_user_info()[key]
except KeyError:
raise KeyError(f'st.user has no key "{key}".')
def __getattr__(self, key: str) -> str | bool | None:
try:
return _get_user_info()[key]
except KeyError:
raise AttributeError(f'st.user has no attribute "{key}".')
def __setattr__(self, name: str, value: str | None) -> NoReturn:
raise StreamlitAPIException("st.user cannot be modified")
def __setitem__(self, name: str, value: str | None) -> NoReturn:
raise StreamlitAPIException("st.user cannot be modified")
def __iter__(self) -> Iterator[str]:
return iter(_get_user_info())
def __len__(self) -> int:
return len(_get_user_info())
def to_dict(self) -> UserInfo:
"""
Get user info as a dictionary.
This method primarily exists for internal use and is not needed for
most cases. ``st.user`` returns an object that inherits from
``dict`` by default.
Returns
-------
Dict[str,str]
A dictionary of the current user's information.
"""
return _get_user_info()
has_shown_experimental_user_warning = False
def maybe_show_deprecated_user_warning() -> None:
"""Show a deprecation warning for the experimental_user alias."""
global has_shown_experimental_user_warning # noqa: PLW0603
if not has_shown_experimental_user_warning:
has_shown_experimental_user_warning = True
show_deprecation_warning(
make_deprecated_name_warning(
"experimental_user",
"user",
"2025-11-06",
)
)
|
UserInfoProxy
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classes1.py
|
{
"start": 973,
"end": 996
}
|
class ____(T):
pass
|
K
|
python
|
jazzband__django-pipeline
|
pipeline/compilers/less.py
|
{
"start": 116,
"end": 650
}
|
class ____(SubProcessCompiler):
output_extension = "css"
def match_file(self, filename):
return filename.endswith(".less")
def compile_file(self, infile, outfile, outdated=False, force=False):
# Pipe to file rather than provide outfile arg due to a bug in lessc
command = (
settings.LESS_BINARY,
settings.LESS_ARGUMENTS,
infile,
)
return self.execute_command(
command, cwd=dirname(infile), stdout_captured=outfile
)
|
LessCompiler
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 225229,
"end": 225554
}
|
class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("closed", "closed_at")
closed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="closed")
closed_at = sgqlc.types.Field(DateTime, graphql_name="closedAt")
|
Closable
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-deltalake/dagster_deltalake/io_manager.py
|
{
"start": 1180,
"end": 1302
}
|
class ____(str, Enum):
error = "error"
append = "append"
overwrite = "overwrite"
ignore = "ignore"
|
WriteMode
|
python
|
mwaskom__seaborn
|
tests/test_rcmod.py
|
{
"start": 5374,
"end": 7345
}
|
class ____(RCParamFixtures):
contexts = ["paper", "notebook", "talk", "poster"]
def test_default_return(self):
current = rcmod.plotting_context()
self.assert_rc_params(current)
def test_key_usage(self):
_context_keys = set(rcmod._context_keys)
for context in self.contexts:
missing = set(rcmod.plotting_context(context)) ^ _context_keys
assert not missing
def test_bad_context(self):
with pytest.raises(ValueError):
rcmod.plotting_context("i_am_not_a_context")
def test_font_scale(self):
notebook_ref = rcmod.plotting_context("notebook")
notebook_big = rcmod.plotting_context("notebook", 2)
font_keys = [
"font.size",
"axes.labelsize", "axes.titlesize",
"xtick.labelsize", "ytick.labelsize",
"legend.fontsize", "legend.title_fontsize",
]
for k in font_keys:
assert notebook_ref[k] * 2 == notebook_big[k]
def test_rc_override(self):
key, val = "grid.linewidth", 5
rc = {key: val, "foo": "bar"}
out = rcmod.plotting_context("talk", rc=rc)
assert out[key] == val
assert "foo" not in out
def test_set_context(self):
for context in self.contexts:
context_dict = rcmod.plotting_context(context)
rcmod.set_context(context)
self.assert_rc_params(context_dict)
def test_context_context_manager(self):
rcmod.set_context("notebook")
orig_params = rcmod.plotting_context()
context_params = rcmod.plotting_context("paper")
with rcmod.plotting_context("paper"):
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
@rcmod.plotting_context("paper")
def func():
self.assert_rc_params(context_params)
func()
self.assert_rc_params(orig_params)
|
TestPlottingContext
|
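Editor's note: a quick check of the font_scale behaviour the test above exercises, assuming seaborn is installed:

import seaborn as sns

ref = sns.plotting_context("notebook")
big = sns.plotting_context("notebook", font_scale=2)
print(ref["axes.labelsize"], big["axes.labelsize"])  # the second value is twice the first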
python
|
realpython__materials
|
python-bitwise-operators/src/stegano/bitmap.py
|
{
"start": 227,
"end": 2282
}
|
class ____:
"""High-level interface to a bitmap file."""
def __init__(self, path: pathlib.Path) -> None:
self._file = path.open(mode="r+b")
self._file_bytes = mmap(self._file.fileno(), 0, access=ACCESS_WRITE)
self._header = Header.from_bytes(self._file_bytes[:50])
def __enter__(self) -> "Bitmap":
return self
def __exit__(self, *args, **kwargs) -> None:
self._file_bytes.close()
self._file.close()
def __getattr__(self, name: str) -> Any:
return getattr(self._header, name)
def __getitem__(self, offset: Union[int, slice]) -> Union[int, bytes]:
return self._file_bytes[offset]
def __setitem__(
self, offset: Union[int, slice], value: Union[int, bytes]
) -> None:
self._file_bytes[offset] = value
@property
def max_bytes(self) -> int:
"""The maximum number of bytes the bitmap can hide."""
return self.width * self.height * 3
@property
def byte_offsets(self) -> Iterator[int]:
"""Return an iterator over byte offsets (skip the padding)."""
start_index = self.pixels_offset
end_index = self.pixels_offset + self.pixel_size_bytes
scanline_bytes = self.pixel_size_bytes // self.height
for scanline in range(start_index, end_index, scanline_bytes):
yield from range(scanline, scanline + self.width * 3)
@property
def byte_slices(self) -> Iterator[slice]:
"""Generator iterator of 8-byte long slices."""
for byte_index in islice(self.byte_offsets, 0, self.max_bytes, 8):
yield slice(byte_index, byte_index + 8)
@property
def reserved_field(self) -> int:
"""Return a little-endian 32-bit unsigned integer."""
return unsigned_int(self._file_bytes, 0x06)
@reserved_field.setter
def reserved_field(self, value: int) -> None:
"""Store a little-endian 32-bit unsigned integer."""
self._file_bytes.seek(0x06)
self._file_bytes.write(pack("<I", value))
@dataclass
|
Bitmap
|
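Editor's note: a byte-level illustration of the reserved_field accessors above: a little-endian 32-bit unsigned integer at offset 0x06 of the BMP header. Pure struct round trip on a fake header, no file or mmap involved:

from struct import pack, unpack_from

header = bytearray(b"BM" + b"\x00" * 48)        # fake 50-byte header
header[0x06:0x0A] = pack("<I", 0xC0FFEE)        # what the setter writes
value, = unpack_from("<I", header, 0x06)        # what the getter reads back
print(hex(value))                               # 0xc0ffee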
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_list.py
|
{
"start": 667,
"end": 1640
}
|
class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
import sys
import textwrap
import list_tests
from test.support import cpython_only
from test.support.script_helper import assert_python_ok
import pickle
import unittest
|
RedirectImportFinder
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/drip/views.py
|
{
"start": 208,
"end": 1113
}
|
class ____(OAuth2Adapter):
"""OAuth2Adapter for Drip API v3."""
provider_id = "drip"
authorize_url = "https://www.getdrip.com/oauth/authorize"
access_token_url = "https://www.getdrip.com/oauth/token" # nosec
profile_url = "https://api.getdrip.com/v2/user"
def complete_login(self, request, app, token, **kwargs):
"""Complete login, ensuring correct OAuth header."""
headers = {"Authorization": "Bearer {0}".format(token.token)}
response = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
response.raise_for_status()
extra_data = response.json()["users"][0]
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(DripOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DripOAuth2Adapter)
|
DripOAuth2Adapter
|
python
|
encode__django-rest-framework
|
tests/test_validators.py
|
{
"start": 1430,
"end": 1520
}
|
class ____(models.Model):
code = models.IntegerField(unique=True)
|
AnotherUniquenessModel
|
python
|
celery__celery
|
celery/backends/database/models.py
|
{
"start": 362,
"end": 1611
}
|
class ____(ResultModelBase):
"""Task result/status."""
__tablename__ = 'celery_taskmeta'
__table_args__ = {'sqlite_autoincrement': True}
id = sa.Column(DialectSpecificInteger, sa.Sequence('task_id_sequence'),
primary_key=True, autoincrement=True)
task_id = sa.Column(sa.String(155), unique=True)
status = sa.Column(sa.String(50), default=states.PENDING)
result = sa.Column(PickleType, nullable=True)
date_done = sa.Column(sa.DateTime, default=datetime.now(timezone.utc),
onupdate=datetime.now(timezone.utc), nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
def __init__(self, task_id):
self.task_id = task_id
def to_dict(self):
return {
'task_id': self.task_id,
'status': self.status,
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done,
}
def __repr__(self):
return '<Task {0.task_id} state: {0.status}>'.format(self)
@classmethod
def configure(cls, schema=None, name=None):
cls.__table__.schema = schema
cls.id.default.schema = schema
cls.__table__.name = name or cls.__tablename__
|
Task
|
python
|
keras-team__keras
|
keras/src/metrics/metric_test.py
|
{
"start": 1391,
"end": 9218
}
|
class ____(testing.TestCase):
def setUp(self):
self._global_dtype_policy = dtype_policies.dtype_policy.dtype_policy()
self._floatx = backend.floatx()
return super().setUp()
def tearDown(self):
dtype_policies.dtype_policy.set_dtype_policy(self._global_dtype_policy)
backend.set_floatx(self._floatx)
return super().tearDown()
def test_end_to_end_flow(self):
metric = ExampleMetric(name="mse")
self.assertEqual(metric.name, "mse")
self.assertEqual(len(metric.variables), 2)
num_samples = 20
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
batch_size = 8
for b in range(0, num_samples // batch_size + 1):
y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
metric.update_state(y_true_batch, y_pred_batch)
self.assertAllClose(metric.total, 20)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples
)
metric.reset_state()
self.assertEqual(metric.result(), 0.0)
def test_stateless_update_state(self):
metric = ExampleMetric(name="mse")
self.assertEqual(len(metric.variables), 2)
original_variable_values = (
metric.variables[0].numpy(),
metric.variables[1].numpy(),
)
num_samples = 20
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
batch_size = 8
metric_variables = metric.variables
for b in range(0, num_samples // batch_size + 1):
y_true_batch = y_true[b * batch_size : (b + 1) * batch_size]
y_pred_batch = y_pred[b * batch_size : (b + 1) * batch_size]
metric_variables = metric.stateless_update_state(
metric_variables, y_true_batch, y_pred_batch
)
self.assertAllClose(metric.variables[0], original_variable_values[0])
self.assertAllClose(metric.variables[1], original_variable_values[1])
metric.variables[0].assign(metric_variables[0])
metric.variables[1].assign(metric_variables[1])
self.assertAllClose(metric.total, 20)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples
)
if backend.backend() == "jax":
# Check no side effects.
import jax
@jax.jit
def update(metric_variables, y_true_batch, y_pred_batch):
metric_variables = metric.stateless_update_state(
metric_variables, y_true_batch, y_pred_batch
)
update(metric_variables, y_true_batch, y_pred_batch)
def test_stateless_result(self):
metric = ExampleMetric(name="mse")
res = metric.stateless_result([ops.ones(()) * 12, ops.ones(()) * 3])
self.assertAllClose(res, 4.0)
def test_stateless_reset_state(self):
metric = ExampleMetric(name="mse")
num_samples = 20
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
metric.update_state(y_true, y_pred)
vars = metric.stateless_reset_state()
self.assertLen(vars, 2)
self.assertEqual(vars[0], 0)
self.assertEqual(vars[1], 0)
def test_variable_tracking(self):
# In list
metric = ExampleMetric(name="mse")
metric.more_vars = [backend.Variable(0.0), backend.Variable(1.0)]
self.assertEqual(len(metric.variables), 4)
# In dict
metric = ExampleMetric(name="mse")
metric.more_vars = {
"a": backend.Variable(0.0),
"b": backend.Variable(1.0),
}
self.assertEqual(len(metric.variables), 4)
# In nested structured
metric = ExampleMetric(name="mse")
metric.more_vars = {"a": [backend.Variable(0.0), backend.Variable(1.0)]}
self.assertEqual(len(metric.variables), 4)
def test_submetric_tracking(self):
# Plain attr
metric = ExampleMetric(name="mse")
metric.submetric = ExampleMetric(name="submse")
self.assertEqual(len(metric.variables), 4)
# In list
metric = ExampleMetric(name="mse")
metric.submetrics = [
ExampleMetric(name="submse1"),
ExampleMetric(name="submse2"),
]
self.assertEqual(len(metric.variables), 6)
# In dict
metric = ExampleMetric(name="mse")
metric.submetrics = {
"1": ExampleMetric(name="submse1"),
"2": ExampleMetric(name="submse2"),
}
self.assertEqual(len(metric.variables), 6)
# Two levels deep
metric = ExampleMetric(name="mse")
metric.submetric = ExampleMetric(name="submse")
metric.submetric.submetric = ExampleMetric(name="subsubmse")
self.assertEqual(len(metric.variables), 6)
def test_serialization(self):
self.run_class_serialization_test(
ExampleMetric(name="mse"),
custom_objects={"ExampleMetric": ExampleMetric},
)
def test_pickle(self):
metric = metrics_module.get("mse")
reloaded = pickle.loads(pickle.dumps(metric))
self.assertIsInstance(reloaded, metrics_module.MeanSquaredError)
def test_get_method(self):
metric = metrics_module.get("mse")
self.assertIsInstance(metric, metrics_module.MeanSquaredError)
metric = metrics_module.get("mean_squared_error")
self.assertIsInstance(metric, metrics_module.MeanSquaredError)
metric = metrics_module.get("categorical_accuracy")
self.assertIsInstance(metric, metrics_module.CategoricalAccuracy)
metric = metrics_module.get(None)
self.assertEqual(metric, None)
with self.assertRaises(ValueError):
metrics_module.get("typo")
def test_dtype_arg(self):
metric = ExampleMetric(name="mse", dtype="float16")
self.assertEqual(metric.name, "mse")
self.assertEqual(len(metric.variables), 2)
num_samples = 10
y_true = np.random.random((num_samples, 3))
y_pred = np.random.random((num_samples, 3))
metric.update_state(y_true, y_pred)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples, atol=1e-3
)
self.assertDType(result, "float16")
# Test DTypePolicy for `dtype` argument
metric = ExampleMetric(
dtype=dtype_policies.DTypePolicy("mixed_float16")
)
metric.update_state(y_true, y_pred)
metric.update_state(y_true, y_pred)
result = metric.result()
self.assertAllClose(
result, np.sum((y_true - y_pred) ** 2) / num_samples, atol=1e-3
)
self.assertDType(result, "float16")
# `dtype` setter should raise AttributeError
with self.assertRaises(AttributeError):
metric.dtype = "bfloat16"
def test_default_dtype(self):
y_true = np.random.random((10, 3))
y_pred = np.random.random((10, 3))
# Defaults to `keras.config.floatx()` not global `dtype_policy`
dtype_policies.dtype_policy.set_dtype_policy("mixed_float16")
metric = ExampleMetric()
metric.update_state(y_true, y_pred)
result = metric.result()
self.assertDType(result, "float32")
backend.set_floatx("float16")
metric = ExampleMetric()
metric.update_state(y_true, y_pred)
result = metric.result()
self.assertDType(result, backend.floatx())
|
MetricTest
|
python
|
coleifer__peewee
|
tests/libs/mock.py
|
{
"start": 27671,
"end": 31066
}
|
class ____(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
        # can't use self in case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
self.call_args = _Call((args, kwargs), two=True)
self.call_args_list.append(_Call((args, kwargs), two=True))
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
return result
ret_val = effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
|
CallableMixin
|
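Editor's note: the precedence coded in _mock_call above (side_effect first, falling through to return_value when DEFAULT is returned) matches today's stdlib unittest.mock, which makes a compact demonstration possible:

from unittest import mock

m = mock.Mock(return_value="fallback",
              side_effect=lambda x: mock.DEFAULT if x else "from side_effect")
print(m(0))   # side_effect returned a value -> "from side_effect"
print(m(1))   # side_effect returned DEFAULT -> falls through to "fallback"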
python
|
great-expectations__great_expectations
|
docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/multicolumn_map_expectation_template.py
|
{
"start": 1056,
"end": 2858
}
|
class ____(MulticolumnMapMetricProvider):
# </snippet>
# This is the id string that will be used to reference your metric.
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/multicolumn_map_expectation_template.py metric_name">
condition_metric_name = "METRIC NAME GOES HERE"
# </snippet>
# These point your metric at the provided keys to facilitate calculation
condition_domain_keys = (
"batch_id",
"table",
"column_list",
"row_condition",
"condition_parser",
"ignore_row_if",
)
condition_value_keys = ()
# This method implements the core logic for the PandasExecutionEngine
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/multicolumn_map_expectation_template.py pandas">
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_list, **kwargs):
raise NotImplementedError
# </snippet>
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column_list, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column_list, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/multicolumn_map_expectation_template.py ExpectMulticolumnValuesToMatchSomeCriteria class_def">
|
MulticolumnValuesMatchSomeCriteria
|
python
|
ethereum__web3.py
|
ens/ens.py
|
{
"start": 1481,
"end": 21641
}
|
class ____(BaseENS):
"""
Quick access to common Ethereum Name Service functions,
like getting the address for a name.
Unless otherwise specified, all addresses are assumed to be a `str` in
    `checksum format <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md>`_, # blocklint: pragma # noqa: E501
like: ``"0x314159265dD8dbb310642f98f50C066173C1259b"``
"""
# mypy types
w3: "Web3"
def __init__(
self,
provider: "BaseProvider" = None,
addr: ChecksumAddress = None,
middleware: Sequence[tuple["Middleware", str]] | None = None,
) -> None:
"""
:param provider: a single provider used to connect to Ethereum
:type provider: instance of `web3.providers.base.BaseProvider`
:param hex-string addr: the address of the ENS registry on-chain.
If not provided, ENS.py will default to the mainnet ENS
registry address.
"""
provider = provider or cast("BaseProvider", default)
self.w3 = init_web3(provider, middleware)
ens_addr = addr if addr else ENS_MAINNET_ADDR
self.ens = self.w3.eth.contract(abi=abis.ENS, address=ens_addr)
self._resolver_contract = self.w3.eth.contract(
abi=abis.PUBLIC_RESOLVER_2_EXTENDED
)
self._reverse_resolver_contract = self.w3.eth.contract(
abi=abis.REVERSE_RESOLVER
)
@classmethod
def from_web3(cls, w3: "Web3", addr: ChecksumAddress = None) -> "ENS":
"""
Generate an ENS instance from a Web3 instance
:param `web3.Web3` w3: to infer connection, middleware, and codec information
:param hex-string addr: the address of the ENS registry on-chain. If not
provided, defaults to the mainnet ENS registry address.
"""
provider = w3.manager.provider
middleware = w3.middleware_onion.middleware
ns = cls(cast("BaseProvider", provider), addr=addr, middleware=middleware)
# inherit strict bytes checking from w3 instance
ns.strict_bytes_type_checking = w3.strict_bytes_type_checking
return ns
def address(
self,
name: str,
coin_type: int | None = None,
) -> ChecksumAddress | None:
"""
Look up the Ethereum address that `name` currently points to.
:param str name: an ENS name to look up
:param int coin_type: if provided, look up the address for this coin type
:raises InvalidName: if `name` has invalid syntax
:raises ResolverNotFound: if no resolver found for `name`
:raises UnsupportedFunction: if the resolver does not support the ``addr()``
function
"""
if coin_type is None:
# don't validate `addr(bytes32)` interface id since extended resolvers
# can implement a "resolve" function as of ENSIP-10
return cast(ChecksumAddress, self._resolve(name, "addr"))
else:
r = self.resolver(name)
_validate_resolver_and_interface_id(
name, r, ENS_MULTICHAIN_ADDRESS_INTERFACE_ID, "addr(bytes32,uint256)"
)
node = raw_name_to_hash(name)
address_as_bytes = r.caller.addr(node, coin_type)
if is_none_or_zero_address(address_as_bytes):
return None
return to_checksum_address(address_as_bytes)
def setup_address(
self,
name: str,
address: Address
| ChecksumAddress
| HexAddress = cast(ChecksumAddress, default), # noqa: B008
coin_type: int | None = None,
transact: Optional["TxParams"] = None,
) -> HexBytes | None:
"""
Set up the name to point to the supplied address.
The sender of the transaction must own the name, or
its parent name.
Example: If the caller owns ``parentname.eth`` with no subdomains
and calls this method with ``sub.parentname.eth``,
then ``sub`` will be created as part of this call.
:param str name: ENS name to set up
:param str address: name will point to this address, in checksum format.
If ``None``, erase the record. If not specified, name will point
to the owner's address.
:param int coin_type: if provided, set up the address for this coin type
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.Eth.send_transaction`
:raises InvalidName: if ``name`` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
"""
if not transact:
transact = {}
transact = deepcopy(transact)
owner = self.setup_owner(name, transact=transact)
self._assert_control(owner, name)
if is_none_or_zero_address(address):
address = None
elif address is default:
address = owner
elif is_binary_address(address):
address = to_checksum_address(cast(str, address))
elif not is_checksum_address(address):
raise ENSValueError("You must supply the address in checksum format")
if self.address(name) == address:
return None
if address is None:
address = EMPTY_ADDR_HEX
transact["from"] = owner
resolver: "Contract" = self._set_resolver(name, transact=transact)
node = raw_name_to_hash(name)
if coin_type is None:
return resolver.functions.setAddr(node, address).transact(transact)
else:
return resolver.functions.setAddr(node, coin_type, address).transact(
transact
)
def name(self, address: ChecksumAddress) -> str | None:
"""
Look up the name that the address points to, using a
reverse lookup. Reverse lookup is opt-in for name owners.
:param address:
:type address: hex-string
"""
reversed_domain = address_to_reverse_domain(address)
name = self._resolve(reversed_domain, fn_name="name")
# To be absolutely certain of the name, via reverse resolution,
# the address must match in the forward resolution
return name if to_checksum_address(address) == self.address(name) else None
def setup_name(
self,
name: str,
address: ChecksumAddress | None = None,
transact: Optional["TxParams"] = None,
) -> HexBytes:
"""
Set up the address for reverse lookup, aka "caller ID".
After successful setup, the method :meth:`~ens.ENS.name` will return
`name` when supplied with `address`.
:param str name: ENS name that address will point to
:param str address: address to set up, in checksum format
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.send_transaction`
:raises AddressMismatch: if the name does not already point to the address
:raises InvalidName: if `name` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
:raises UnownedName: if no one owns `name`
"""
if not transact:
transact = {}
transact = deepcopy(transact)
if not name:
self._assert_control(address, "the reverse record")
return self._setup_reverse(None, address, transact=transact)
else:
resolved = self.address(name)
if is_none_or_zero_address(address):
address = resolved
elif resolved and address != resolved and resolved != EMPTY_ADDR_HEX:
raise AddressMismatch(
f"Could not set address {address!r} to point to name, "
f"because the name resolves to {resolved!r}. "
"To change the name for an existing address, call "
"setup_address() first."
)
if is_none_or_zero_address(address):
address = self.owner(name)
if is_none_or_zero_address(address):
raise UnownedName("claim subdomain using setup_address() first")
if is_binary_address(address):
address = to_checksum_address(address)
if not is_checksum_address(address):
raise ENSValueError("You must supply the address in checksum format")
self._assert_control(address, name)
if not resolved:
self.setup_address(name, address, transact=transact)
return self._setup_reverse(name, address, transact=transact)
def owner(self, name: str) -> ChecksumAddress:
"""
Get the owner of a name. Note that this may be different from the
deed holder in the '.eth' registrar. Learn more about the difference
between deed and name ownership in the ENS `Managing Ownership docs
<http://docs.ens.domains/en/latest/userguide.html#managing-ownership>`_
:param str name: ENS name to look up
:return: owner address
:rtype: str
"""
node = raw_name_to_hash(name)
return self.ens.caller.owner(node)
def setup_owner(
self,
name: str,
new_owner: ChecksumAddress = None,
transact: Optional["TxParams"] = None,
) -> ChecksumAddress | None:
"""
Set the owner of the supplied name to `new_owner`.
For typical scenarios, you'll never need to call this method directly,
simply call :meth:`setup_name` or :meth:`setup_address`. This method does *not*
set up the name to point to an address.
If `new_owner` is not supplied, then this will assume you
want the same owner as the parent domain.
If the caller owns ``parentname.eth`` with no subdomains
and calls this method with ``sub.parentname.eth``,
then ``sub`` will be created as part of this call.
:param str name: ENS name to set up
:param new_owner: account that will own `name`. If ``None``, set owner to
empty addr. If not specified, name will point to the parent domain
owner's address.
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.Eth.send_transaction`
:raises InvalidName: if `name` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
:returns: the new owner's address
"""
new_owner = new_owner or cast(ChecksumAddress, default)
if not transact:
transact = {}
transact = deepcopy(transact)
(super_owner, unowned, owned) = self._first_owner(name)
if new_owner is default:
new_owner = super_owner
elif not new_owner:
new_owner = ChecksumAddress(EMPTY_ADDR_HEX)
else:
new_owner = to_checksum_address(new_owner)
current_owner = self.owner(name)
if new_owner == EMPTY_ADDR_HEX and not current_owner:
return None
elif current_owner == new_owner:
return current_owner
else:
self._assert_control(super_owner, name, owned)
self._claim_ownership(
new_owner, unowned, owned, super_owner, transact=transact
)
return new_owner
def resolver(self, name: str) -> Optional["Contract"]:
"""
Get the resolver for an ENS name.
:param str name: The ENS name
"""
normal_name = normalize_name(name)
return self._get_resolver(normal_name)[0]
def reverser(self, target_address: ChecksumAddress) -> Optional["Contract"]:
reversed_domain = address_to_reverse_domain(target_address)
return self.resolver(reversed_domain)
# -- text records -- #
def get_text(self, name: str, key: str) -> str:
"""
Get the value of a text record by key from an ENS name.
:param str name: ENS name to look up
:param str key: ENS name's text record key
:return: ENS name's text record value
:rtype: str
:raises UnsupportedFunction: If the resolver does not support
the "0x59d1d43c" interface id
:raises ResolverNotFound: If no resolver is found for the provided name
"""
node = raw_name_to_hash(name)
r = self.resolver(name)
_validate_resolver_and_interface_id(name, r, ENS_TEXT_INTERFACE_ID, "text")
return r.caller.text(node, key)
def set_text(
self,
name: str,
key: str,
value: str,
transact: "TxParams" = None,
) -> HexBytes:
"""
Set the value of a text record of an ENS name.
:param str name: ENS name
:param str key: Name of the attribute to set
:param str value: Value to set the attribute to
:param dict transact: The transaction configuration, like in
:meth:`~web3.eth.Eth.send_transaction`
:return: Transaction hash
:rtype: HexBytes
:raises UnsupportedFunction: If the resolver does not support
the "0x59d1d43c" interface id
:raises ResolverNotFound: If no resolver is found for the provided name
"""
r = self.resolver(name)
_validate_resolver_and_interface_id(name, r, ENS_TEXT_INTERFACE_ID, "text")
node = raw_name_to_hash(name)
return self._set_property(
name, r.functions.setText, (node, key, value), transact
)
# -- private methods -- #
def _get_resolver(
self,
normal_name: str,
fn_name: str = "addr",
) -> tuple[Optional["Contract"], str]:
current_name = normal_name
# look for a resolver, starting at the full name and taking the parent
# each time that no resolver is found
while True:
if is_empty_name(current_name):
# if no resolver found across all iterations, current_name
# will eventually be the empty string '' which returns here
return None, current_name
resolver_addr = self.ens.caller.resolver(normal_name_to_hash(current_name))
if not is_none_or_zero_address(resolver_addr):
# if resolver found, return it
resolver = cast(
"Contract", self._type_aware_resolver(resolver_addr, fn_name)
)
return resolver, current_name
# set current_name to parent and try again
current_name = self.parent(current_name)
def _set_resolver(
self,
name: str,
resolver_addr: ChecksumAddress | None = None,
transact: Optional["TxParams"] = None,
) -> "Contract":
if not transact:
transact = {}
transact = deepcopy(transact)
if is_none_or_zero_address(resolver_addr):
resolver_addr = self.address("resolver.eth")
namehash = raw_name_to_hash(name)
if self.ens.caller.resolver(namehash) != resolver_addr:
self.ens.functions.setResolver(namehash, resolver_addr).transact(transact)
return cast("Contract", self._resolver_contract(address=resolver_addr))
def _resolve(
self, name: str, fn_name: str = "addr"
) -> ChecksumAddress | str | None:
normal_name = normalize_name(name)
resolver, current_name = self._get_resolver(normal_name, fn_name)
if not resolver:
return None
node = self.namehash(normal_name)
# handle extended resolver case
if _resolver_supports_interface(resolver, ENS_EXTENDED_RESOLVER_INTERFACE_ID):
contract_func_with_args = (fn_name, [node])
calldata = resolver.encode_abi(*contract_func_with_args)
contract_call_result = resolver.caller.resolve(
dns_encode_name(normal_name),
calldata,
)
result = self._decode_ensip10_resolve_data(
contract_call_result, resolver, fn_name
)
return to_checksum_address(result) if is_address(result) else result
elif normal_name == current_name:
lookup_function = getattr(resolver.functions, fn_name)
result = lookup_function(node).call()
if is_none_or_zero_address(result):
return None
return to_checksum_address(result) if is_address(result) else result
return None
def _assert_control(
self,
account: ChecksumAddress,
name: str,
parent_owned: str | None = None,
) -> None:
if not address_in(account, self.w3.eth.accounts):
raise UnauthorizedError(
f"in order to modify {name!r}, you must control account"
f" {account!r}, which owns {parent_owned or name!r}"
)
def _first_owner(
self, name: str
) -> tuple[ChecksumAddress | None, Sequence[str], str]:
"""
Takes a name, and returns the owner of the deepest subdomain that has an owner
:returns: (owner or None, list(unowned_subdomain_labels), first_owned_domain)
"""
owner = None
unowned = []
pieces = normalize_name(name).split(".")
while pieces and is_none_or_zero_address(owner):
name = ".".join(pieces)
owner = self.owner(name)
if is_none_or_zero_address(owner):
unowned.append(pieces.pop(0))
return (owner, unowned, name)
def _claim_ownership(
self,
owner: ChecksumAddress,
unowned: Sequence[str],
owned: str,
old_owner: ChecksumAddress | None = None,
transact: Optional["TxParams"] = None,
) -> None:
if not transact:
transact = {}
transact = deepcopy(transact)
transact["from"] = old_owner or owner
for label in reversed(unowned):
self.ens.functions.setSubnodeOwner(
raw_name_to_hash(owned),
label_to_hash(label),
owner,
).transact(transact)
owned = f"{label}.{owned}"
def _setup_reverse(
self,
name: str | None,
address: ChecksumAddress,
transact: Optional["TxParams"] = None,
) -> HexBytes:
name = normalize_name(name) if name else ""
if not transact:
transact = {}
transact = deepcopy(transact)
transact["from"] = address
return self._reverse_registrar().functions.setName(name).transact(transact)
def _reverse_registrar(self) -> "Contract":
addr = self.ens.caller.owner(normal_name_to_hash(REVERSE_REGISTRAR_DOMAIN))
return self.w3.eth.contract(address=addr, abi=abis.REVERSE_REGISTRAR)
def _set_property(
self,
name: str,
func: "ContractFunction",
args: Sequence[Any],
transact: "TxParams" = None,
) -> HexBytes:
if not transact:
transact = {}
owner = self.owner(name)
transact_from_owner = merge({"from": owner}, transact)
return func(*args).transact(transact_from_owner)
def _validate_resolver_and_interface_id(
ens_name: str,
resolver: "Contract",
ens_interface_id: HexStr,
interface_name: str,
) -> None:
if not resolver:
raise ResolverNotFound(
f"No resolver found for name `{ens_name}`. It is likely the name "
"contains an unsupported top level domain (tld)."
)
elif not _resolver_supports_interface(resolver, ens_interface_id):
raise UnsupportedFunction(
f"Resolver for name `{ens_name}` does not support the `{interface_name}` "
f"interface."
)
def _resolver_supports_interface(
resolver: "Contract",
ens_interface_id: HexStr,
) -> bool:
return any(
"supportsInterface" in repr(func) for func in resolver.all_functions()
) and resolver.caller.supportsInterface(ens_interface_id)
|
ENS
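A short usage sketch of the ENS class above, restricted to methods shown in the snippet (the RPC endpoint is a placeholder, not a real value):

from web3 import Web3
from ens import ENS

w3 = Web3(Web3.HTTPProvider("https://example-rpc.invalid"))  # placeholder endpoint
ns = ENS.from_web3(w3)

# forward resolution: ENS name -> checksummed address (None if no record)
addr = ns.address("example.eth")

# reverse resolution only returns a name whose forward record matches the address
owner_name = ns.name(addr) if addr else None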
|
python
|
numba__numba
|
numba/core/typed_passes.py
|
{
"start": 32227,
"end": 38835
}
|
class ____(FunctionPass):
"""Remove phi nodes (ir.Expr.phi) introduced by SSA.
This is needed before Lowering because the phi nodes in Numba IR do not
match the semantics of phi nodes in LLVM IR. In Numba IR, phi nodes may
expand into multiple LLVM instructions.
"""
_name = "strip_phis"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
state.func_ir = self._strip_phi_nodes(state.func_ir)
state.func_ir._definitions = build_definitions(state.func_ir.blocks)
if "flags" in state and state.flags.auto_parallel.enabled:
self._simplify_conditionally_defined_variable(state.func_ir)
state.func_ir._definitions = build_definitions(state.func_ir.blocks)
# Rerun postprocessor to update metadata
post_proc = postproc.PostProcessor(state.func_ir)
post_proc.run(emit_dels=False)
# Ensure we are not in objectmode generator
if (state.func_ir.generator_info is not None
and state.typemap is not None):
# Rebuild generator type
# TODO: move this into PostProcessor
gentype = state.return_type
state_vars = state.func_ir.generator_info.state_vars
state_types = [state.typemap[k] for k in state_vars]
state.return_type = types.Generator(
gen_func=gentype.gen_func,
yield_type=gentype.yield_type,
arg_types=gentype.arg_types,
state_types=state_types,
has_finalizer=gentype.has_finalizer,
)
return True
def _strip_phi_nodes(self, func_ir):
"""Strip Phi nodes from ``func_ir``
For each phi node, put incoming value to their respective incoming
basic-block at possibly the latest position (i.e. after the latest
assignment to the corresponding variable).
"""
exporters = defaultdict(list)
phis = set()
# Find all variables that needs to be exported
for label, block in func_ir.blocks.items():
for assign in block.find_insts(ir.Assign):
if isinstance(assign.value, ir.Expr):
if assign.value.op == 'phi':
phis.add(assign)
phi = assign.value
for ib, iv in zip(phi.incoming_blocks,
phi.incoming_values):
exporters[ib].append((assign.target, iv))
# Rewrite the blocks with the new exporting assignments
newblocks = {}
for label, block in func_ir.blocks.items():
newblk = copy(block)
newblocks[label] = newblk
# strip phis
newblk.body = [stmt for stmt in block.body if stmt not in phis]
# insert exporters
for target, rhs in exporters[label]:
# If RHS is undefined
if rhs is ir.UNDEFINED:
# Put in a NULL initializer, set the location to be in what
# will eventually materialize as the prologue.
rhs = ir.Expr.null(loc=func_ir.loc)
assign = ir.Assign(
target=target,
value=rhs,
loc=rhs.loc
)
# Insert at the earliest possible location; i.e. after the
# last assignment to rhs
assignments = [stmt for stmt in newblk.find_insts(ir.Assign)
if stmt.target == rhs]
if assignments:
last_assignment = assignments[-1]
newblk.insert_after(assign, last_assignment)
else:
newblk.prepend(assign)
func_ir.blocks = newblocks
return func_ir
def _simplify_conditionally_defined_variable(self, func_ir):
"""
Rewrite assignments like:
ver1 = null()
...
ver1 = ver
...
uses(ver1)
into:
# delete all assignments to ver1
uses(ver)
This is only needed for parfors because the SSA pass will create extra
variable assignments that the parfor code does not expect.
This pass helps avoid problems by reverting the effect of SSA.
"""
any_block = next(iter(func_ir.blocks.values()))
scope = any_block.scope
defs = func_ir._definitions
def unver_or_undef(unver, defn):
# Is the definition undefined or pointing to the unversioned name?
if isinstance(defn, ir.Var):
if defn.unversioned_name == unver:
return True
elif isinstance(defn, ir.Expr):
if defn.op == "null":
return True
return False
def legalize_all_versioned_names(var):
# Are all versioned names undefined or defined to the same
# variable chain?
if not var.versioned_names:
return False
for versioned in var.versioned_names:
vs = defs.get(versioned, ())
if not all(map(partial(unver_or_undef, k), vs)):
return False
return True
# Find unversioned variables that met the conditions
suspects = set()
for k in defs:
try:
# This may fail?
var = scope.get_exact(k)
except errors.NotDefinedError:
continue
# is the var name unversioned?
if var.unversioned_name == k:
if legalize_all_versioned_names(var):
suspects.add(var)
delete_set = set()
replace_map = {}
for var in suspects:
# rewrite Var uses to the unversioned name
for versioned in var.versioned_names:
ver_var = scope.get_exact(versioned)
# delete assignment to the versioned name
delete_set.add(ver_var)
# replace references to versioned name with the unversioned
replace_map[versioned] = var
# remove assignments to the versioned names
for _label, blk in func_ir.blocks.items():
for assign in blk.find_insts(ir.Assign):
if assign.target in delete_set:
blk.remove(assign)
# do variable replacement
replace_vars(func_ir.blocks, replace_map)
|
PreLowerStripPhis
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py
|
{
"start": 1843,
"end": 2192
}
|
class ____:
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
GoodEight
|
python
|
getsentry__sentry
|
src/sentry/plugins/sentry_webhooks/apps.py
|
{
"start": 36,
"end": 262
}
|
class ____(AppConfig):
name = "sentry.plugins.sentry_webhooks"
def ready(self) -> None:
from sentry.plugins.base import register
from .plugin import WebHooksPlugin
register(WebHooksPlugin)
|
Config
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/base.py
|
{
"start": 5728,
"end": 5908
}
|
class ____(MutationType):
"""
This case of VariableTracker.mutation_type marker indicates that Dynamo
allows mutation on the value's attributes.
"""
|
AttributeMutation
|
python
|
getsentry__sentry
|
src/sentry/api/bases/organization.py
|
{
"start": 3714,
"end": 3870
}
|
class ____(StaffPermissionMixin, OrganizationPermission):
"""Allows staff to to access organization endpoints."""
pass
|
OrganizationAndStaffPermission
|
python
|
google__pytype
|
pytype/load_pytd_test.py
|
{
"start": 37052,
"end": 39138
}
|
class ____(_LoaderTest):
def test_import_class(self):
b_ast = self._import(
a="""
class Foo:
def f(self) -> int: ...
""",
b="""
import a
f = a.Foo.f
""",
)
self.assertEqual(
pytd_utils.Print(b_ast.Lookup("b.f")),
"def b.f(self: a.Foo) -> int: ...",
)
def test_import_class_instance(self):
b_ast = self._import(
a="""
class Foo:
def f(self) -> int: ...
foo: Foo
""",
b="""
import a
f = a.foo.f
""",
)
self.assertEqual(
pytd_utils.Print(b_ast.Lookup("b.f")), "def b.f() -> int: ..."
)
def test_create_instance_after_import(self):
b_ast = self._import(
a="""
class Foo:
def f(self) -> int: ...
""",
b="""
import a
foo: a.Foo
f = foo.f
""",
)
self.assertEqual(
pytd_utils.Print(b_ast.Lookup("b.f")), "def b.f() -> int: ..."
)
def test_function(self):
ast = self._import(a="""
def f(x: int) -> int: ...
g = f
""")
self.assertEqual(
pytd_utils.Print(ast.Lookup("a.g")), "def a.g(x: int) -> int: ..."
)
def test_imported_function(self):
b_ast = self._import(
a="""
def f(x: int) -> int: ...
""",
b="""
import a
f = a.f
""",
)
self.assertEqual(
pytd_utils.Print(b_ast.Lookup("b.f")), "def b.f(x: int) -> int: ..."
)
def test_base_class(self):
a_ast = self._import(a="""
class Foo:
def f(self) -> int: ...
class Bar(Foo): ...
x: Bar
f = x.f
""")
self.assertEqual(
pytd_utils.Print(a_ast.Lookup("a.f")), "def a.f() -> int: ..."
)
def test_base_class_imported(self):
b_ast = self._import(
a="""
class Foo:
def f(self) -> int: ...
class Bar(Foo): ...
x: Bar
""",
b="""
import a
f = a.x.f
""",
)
self.assertEqual(
pytd_utils.Print(b_ast.Lookup("b.f")), "def b.f() -> int: ..."
)
|
MethodAliasTest
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/guides/quality-testing/unit-testing-assets-and-ops/op-combo.py
|
{
"start": 36,
"end": 520
}
|
class ____(dg.Config):
separator: str
@dg.op
def process_file(
primary_file: str, secondary_file: str, config: SeparatorConfig
) -> str:
return f"{primary_file}{config.separator}{secondary_file}"
# end_file
# start_test
def test_process_file() -> None:
assert (
process_file(
primary_file="abc",
secondary_file="def",
config=SeparatorConfig(separator=","),
)
== "abc,def"
)
# end_test
|
SeparatorConfig
|
python
|
plotly__plotly.py
|
tests/test_optional/test_figure_factory/test_figure_factory.py
|
{
"start": 116321,
"end": 138525
}
|
class ____(NumpyTestUtilsMixin, TestCaseNoTemplate):
def test_df_as_list(self):
df = [{"titles": "Revenue"}, "foo"]
pattern = (
"Every entry of the data argument (list, tuple, etc) must be a dictionary."
)
self.assertRaisesRegex(PlotlyError, pattern, ff.create_bullet, df)
def test_not_df_or_list(self):
df = "foo"
pattern = "You must input a pandas DataFrame, or a list of dictionaries."
self.assertRaisesRegex(PlotlyError, pattern, ff.create_bullet, df)
def test_valid_color_lists_of_2_rgb_colors(self):
df = [{"title": "Revenue"}]
range_colors = ["rgb(0, 0, 0)"]
measure_colors = ["rgb(0, 0, 0)"]
pattern = (
"Both 'range_colors' or 'measure_colors' must be a list "
"of two valid colors."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_bullet, df, range_colors=range_colors
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_bullet, df, measure_colors=measure_colors
)
def test_full_bullet(self):
data = [
{
"title": "Revenue",
"subtitle": "US$, in thousands",
"ranges": [150, 225, 300],
"measures": [220, 270],
"markers": [250],
},
{
"title": "Profit",
"subtitle": "%",
"ranges": [20, 25, 30],
"measures": [21, 23],
"markers": [26],
},
{
"title": "Order Size",
"subtitle": "US$, average",
"ranges": [350, 500, 600],
"measures": [100, 320],
"markers": [550],
},
{
"title": "New Customers",
"subtitle": "count",
"ranges": [1400, 2000, 2500],
"measures": [1000, 1650],
"markers": [2100],
},
{
"title": "Satisfaction",
"subtitle": "out of 5",
"ranges": [3.5, 4.25, 5],
"measures": [3.2, 4.7],
"markers": [4.4],
},
]
df = pd.DataFrame(data)
measure_colors = ["rgb(255, 127, 14)", "rgb(44, 160, 44)"]
range_colors = ["rgb(255, 127, 14)", "rgb(44, 160, 44)"]
fig = ff.create_bullet(
df,
orientation="v",
markers="markers",
measures="measures",
ranges="ranges",
subtitles="subtitle",
titles="title",
range_colors=range_colors,
measure_colors=measure_colors,
title="new title",
scatter_options={"marker": {"size": 30, "symbol": "hourglass"}},
)
exp_fig = {
"data": [
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x",
"y": [300],
"yaxis": "y",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(149.5, 143.5, 29.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x",
"y": [225],
"yaxis": "y",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x",
"y": [150],
"yaxis": "y",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x",
"y": [270],
"yaxis": "y",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x",
"y": [220],
"yaxis": "y",
},
{
"hoverinfo": "y",
"marker": {
"color": "rgb(0, 0, 0)",
"size": 30,
"symbol": "hourglass",
},
"name": "markers",
"type": "scatter",
"x": [0.5],
"xaxis": "x",
"y": [250],
"yaxis": "y",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x2",
"y": [30],
"yaxis": "y2",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(149.5, 143.5, 29.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x2",
"y": [25],
"yaxis": "y2",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x2",
"y": [20],
"yaxis": "y2",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x2",
"y": [23],
"yaxis": "y2",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x2",
"y": [21],
"yaxis": "y2",
},
{
"hoverinfo": "y",
"marker": {
"color": "rgb(0, 0, 0)",
"size": 30,
"symbol": "hourglass",
},
"name": "markers",
"type": "scatter",
"x": [0.5],
"xaxis": "x2",
"y": [26],
"yaxis": "y2",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x3",
"y": [600],
"yaxis": "y3",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(149.5, 143.5, 29.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x3",
"y": [500],
"yaxis": "y3",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x3",
"y": [350],
"yaxis": "y3",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x3",
"y": [320],
"yaxis": "y3",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x3",
"y": [100],
"yaxis": "y3",
},
{
"hoverinfo": "y",
"marker": {
"color": "rgb(0, 0, 0)",
"size": 30,
"symbol": "hourglass",
},
"name": "markers",
"type": "scatter",
"x": [0.5],
"xaxis": "x3",
"y": [550],
"yaxis": "y3",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x4",
"y": [2500],
"yaxis": "y4",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(149.5, 143.5, 29.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x4",
"y": [2000],
"yaxis": "y4",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x4",
"y": [1400],
"yaxis": "y4",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x4",
"y": [1650],
"yaxis": "y4",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x4",
"y": [1000],
"yaxis": "y4",
},
{
"hoverinfo": "y",
"marker": {
"color": "rgb(0, 0, 0)",
"size": 30,
"symbol": "hourglass",
},
"name": "markers",
"type": "scatter",
"x": [0.5],
"xaxis": "x4",
"y": [2100],
"yaxis": "y4",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x5",
"y": [5],
"yaxis": "y5",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(149.5, 143.5, 29.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x5",
"y": [4.25],
"yaxis": "y5",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "ranges",
"orientation": "v",
"type": "bar",
"width": 2,
"x": [0],
"xaxis": "x5",
"y": [3.5],
"yaxis": "y5",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(44.0, 160.0, 44.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x5",
"y": [4.7],
"yaxis": "y5",
},
{
"base": 0,
"hoverinfo": "y",
"marker": {"color": "rgb(255.0, 127.0, 14.0)"},
"name": "measures",
"orientation": "v",
"type": "bar",
"width": 0.4,
"x": [0.5],
"xaxis": "x5",
"y": [3.2],
"yaxis": "y5",
},
{
"hoverinfo": "y",
"marker": {
"color": "rgb(0, 0, 0)",
"size": 30,
"symbol": "hourglass",
},
"name": "markers",
"type": "scatter",
"x": [0.5],
"xaxis": "x5",
"y": [4.4],
"yaxis": "y5",
},
],
"layout": {
"annotations": [
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "<b>Revenue</b>",
"textangle": 0,
"x": 0.019999999999999997,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "<b>Profit</b>",
"textangle": 0,
"x": 0.26,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "<b>Order Size</b>",
"textangle": 0,
"x": 0.5,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "<b>New Customers</b>",
"textangle": 0,
"x": 0.74,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "<b>Satisfaction</b>",
"textangle": 0,
"x": 0.98,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
],
"barmode": "stack",
"height": 600,
"margin": {"l": 80},
"shapes": [],
"showlegend": False,
"title": "new title",
"width": 1000,
"xaxis1": {
"anchor": "y",
"domain": [0.0, 0.039999999999999994],
"range": [0, 1],
"showgrid": False,
"showticklabels": False,
"zeroline": False,
},
"xaxis2": {
"anchor": "y2",
"domain": [0.24, 0.27999999999999997],
"range": [0, 1],
"showgrid": False,
"showticklabels": False,
"zeroline": False,
},
"xaxis3": {
"anchor": "y3",
"domain": [0.48, 0.52],
"range": [0, 1],
"showgrid": False,
"showticklabels": False,
"zeroline": False,
},
"xaxis4": {
"anchor": "y4",
"domain": [0.72, 0.76],
"range": [0, 1],
"showgrid": False,
"showticklabels": False,
"zeroline": False,
},
"xaxis5": {
"anchor": "y5",
"domain": [0.96, 1.0],
"range": [0, 1],
"showgrid": False,
"showticklabels": False,
"zeroline": False,
},
"yaxis1": {
"anchor": "x",
"domain": [0.0, 1.0],
"showgrid": False,
"tickwidth": 1,
"zeroline": False,
},
"yaxis2": {
"anchor": "x2",
"domain": [0.0, 1.0],
"showgrid": False,
"tickwidth": 1,
"zeroline": False,
},
"yaxis3": {
"anchor": "x3",
"domain": [0.0, 1.0],
"showgrid": False,
"tickwidth": 1,
"zeroline": False,
},
"yaxis4": {
"anchor": "x4",
"domain": [0.0, 1.0],
"showgrid": False,
"tickwidth": 1,
"zeroline": False,
},
"yaxis5": {
"anchor": "x5",
"domain": [0.0, 1.0],
"showgrid": False,
"tickwidth": 1,
"zeroline": False,
},
},
}
for i in range(len(fig["data"])):
self.assert_fig_equal(fig["data"][i], exp_fig["data"][i])
|
TestBullet
|
python
|
streamlit__streamlit
|
lib/streamlit/util.py
|
{
"start": 2542,
"end": 3625
}
|
class ____(dict[Any, Any]):
"""
A dictionary subclass that supports attribute-style access.
This class extends the functionality of a standard dictionary to allow items
to be accessed via attribute-style dot notation in addition to the traditional
key-based access. If a dictionary item is accessed and is itself a dictionary,
it is automatically wrapped in another `AttributeDictionary`, enabling recursive
attribute-style access.
"""
def __getattr__(self, key: str) -> Any:
try:
item = self.__getitem__(key)
return AttributeDictionary(item) if isinstance(item, dict) else item
except KeyError as err:
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{key}'"
) from err
def __setattr__(self, name: str, value: Any) -> None:
self[name] = value
def in_sidebar(dg: DeltaGenerator) -> bool:
"""Check if the DeltaGenerator is in the sidebar."""
return dg._active_dg._root_container == RootContainer.SIDEBAR
|
AttributeDictionary
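A tiny illustration of the attribute-style access described in the docstring above (the sample keys and values are made up):

cfg = AttributeDictionary({"server": {"port": 8501, "headless": True}})

cfg.server.port        # 8501 -- nested dicts are re-wrapped on access
cfg["server"]["port"]  # 8501 -- plain key access still works
cfg.theme = "dark"     # attribute assignment writes an ordinary dict key
cfg["theme"]           # "dark"
# cfg.missing would raise AttributeError rather than KeyError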
|
python
|
sympy__sympy
|
sympy/polys/numberfields/modules.py
|
{
"start": 53531,
"end": 56774
}
|
class ____(ModuleElement):
r"""
Subclass for :py:class:`~.ModuleElement` instances whose module is a
:py:class:`~.PowerBasis`.
"""
@property
def T(self):
"""Access the defining polynomial of the :py:class:`~.PowerBasis`."""
return self.module.T
def numerator(self, x=None):
"""Obtain the numerator as a polynomial over :ref:`ZZ`."""
x = x or self.T.gen
return Poly(reversed(self.coeffs), x, domain=ZZ)
def poly(self, x=None):
"""Obtain the number as a polynomial over :ref:`QQ`."""
return self.numerator(x=x) // self.denom
@property
def is_rational(self):
"""Say whether this element represents a rational number."""
return self.col[1:, :].is_zero_matrix
@property
def generator(self):
"""
Return a :py:class:`~.Symbol` to be used when expressing this element
as a polynomial.
If we have an associated :py:class:`~.AlgebraicField` whose primitive
element has an alias symbol, we use that. Otherwise we use the variable
of the minimal polynomial defining the power basis to which we belong.
"""
K = self.module.number_field
return K.ext.alias if K and K.ext.is_aliased else self.T.gen
def as_expr(self, x=None):
"""Create a Basic expression from ``self``. """
return self.poly(x or self.generator).as_expr()
def norm(self, T=None):
"""Compute the norm of this number."""
T = T or self.T
x = T.gen
A = self.numerator(x=x)
return T.resultant(A) // self.denom ** self.n
def inverse(self):
f = self.poly()
f_inv = f.invert(self.T)
return self.module.element_from_poly(f_inv)
def __rfloordiv__(self, a):
return self.inverse() * a
def _negative_power(self, e, modulo=None):
return self.inverse() ** abs(e)
def to_ANP(self):
"""Convert to an equivalent :py:class:`~.ANP`. """
return ANP(list(reversed(self.QQ_col.flat())), QQ.map(self.T.rep.to_list()), QQ)
def to_alg_num(self):
"""
Try to convert to an equivalent :py:class:`~.AlgebraicNumber`.
Explanation
===========
In general, the conversion from an :py:class:`~.AlgebraicNumber` to a
:py:class:`~.PowerBasisElement` throws away information, because an
:py:class:`~.AlgebraicNumber` specifies a complex embedding, while a
:py:class:`~.PowerBasisElement` does not. However, in some cases it is
possible to convert a :py:class:`~.PowerBasisElement` back into an
:py:class:`~.AlgebraicNumber`, namely when the associated
:py:class:`~.PowerBasis` has a reference to an
:py:class:`~.AlgebraicField`.
Returns
=======
:py:class:`~.AlgebraicNumber`
Raises
======
StructureError
If the :py:class:`~.PowerBasis` to which this element belongs does
not have an associated :py:class:`~.AlgebraicField`.
"""
K = self.module.number_field
if K:
return K.to_alg_num(self.to_ANP())
raise StructureError("No associated AlgebraicField")
|
PowerBasisElement
|
python
|
ansible__ansible
|
lib/ansible/galaxy/collection/gpg.py
|
{
"start": 6168,
"end": 6391
}
|
class ____(GpgBaseError):
"""This is a generic error status message, it might be followed by error location specific data."""
location: str
code: int
more: str = ""
@dataclass(frozen=True, slots=True)
|
GpgError
|
python
|
plotly__plotly.py
|
plotly/graph_objs/parcoords/line/colorbar/title/_font.py
|
{
"start": 233,
"end": 9949
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords.line.colorbar.title"
_path_str = "parcoords.line.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.parcoords.line
.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.line.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
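The Font class above is a generated property bag; in practice it is usually reached through the graph_objects constructors rather than instantiated directly. A sketch with placeholder data values:

import plotly.graph_objects as go

fig = go.Figure(
    go.Parcoords(
        line=dict(
            color=[1, 2, 3],
            showscale=True,
            colorbar=dict(title=dict(text="score", font=dict(family="Arial", size=14))),
        ),
        dimensions=[
            dict(label="a", values=[1, 2, 3]),
            dict(label="b", values=[3, 1, 2]),
        ],
    )
)

# equivalent explicit construction of the same font object
font = go.parcoords.line.colorbar.title.Font(family="Arial", size=14)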
|
python
|
pypa__virtualenv
|
src/virtualenv/create/via_global_ref/builtin/ref.py
|
{
"start": 3519,
"end": 4093
}
|
class ____(PathRef):
"""Link a path on the file system."""
def __init__(self, src, dest, must=RefMust.NA, when=RefWhen.ANY) -> None:
super().__init__(src, must, when)
self.dest = dest
def run(self, creator, symlinks):
dest = self.dest(creator, self.src)
method = self.method(symlinks)
dest_iterable = dest if isinstance(dest, list) else (dest,)
if not dest.parent.exists():
dest.parent.mkdir(parents=True, exist_ok=True)
for dst in dest_iterable:
method(self.src, dst)
|
PathRefToDest
|
python
|
getsentry__sentry
|
src/sentry/integrations/slack/webhooks/action.py
|
{
"start": 29940,
"end": 35689
}
|
class ____(ABC):
@property
@abstractmethod
def dialog_type(self) -> str:
raise NotImplementedError
def _build_format_options(self, options: dict[str, str]) -> list[dict[str, Any]]:
return [
{
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
"value": value,
}
for text, value in options.items()
]
def build_modal_payload(
self,
title: str,
action_text: str,
options: dict[str, str],
initial_option_text: str,
initial_option_value: str,
callback_id: str,
metadata: str,
) -> View:
formatted_options = self._build_format_options(options)
return View(
type="modal",
title={"type": "plain_text", "text": f"{title} Issue"},
blocks=[
{
"type": "section",
"text": {"type": "mrkdwn", "text": action_text},
"accessory": {
"type": "static_select",
"initial_option": {
"text": {
"type": "plain_text",
"text": initial_option_text,
"emoji": True,
},
"value": initial_option_value,
},
"options": formatted_options,
"action_id": "static_select-action",
},
}
],
close={"type": "plain_text", "text": "Cancel"},
submit={"type": "plain_text", "text": title},
private_metadata=metadata,
callback_id=callback_id,
)
@abstractmethod
def get_modal_payload(self, callback_id: str, metadata: str) -> View:
raise NotImplementedError
def _update_modal(
self,
slack_client: SlackSdkClient,
external_id: str,
modal_payload: View,
slack_request: SlackActionRequest,
) -> None:
try:
slack_client.views_update(
external_id=external_id,
view=modal_payload,
)
except SlackApiError as e:
            # If the external_id is not found, Slack will send a `not_found` error
# https://api.slack.com/methods/views.update
if unpack_slack_api_error(e) == MODAL_NOT_FOUND:
logging_data = slack_request.get_logging_data()
_logger.info(
"slack.action.update-modal-not-found",
extra={
**logging_data,
"dialog": self.dialog_type,
},
)
# The modal was not found, so we need to open a new one
self._open_modal(slack_client, modal_payload, slack_request)
else:
raise
def _open_modal(
self, slack_client: SlackSdkClient, modal_payload: View, slack_request: SlackActionRequest
) -> None:
# Error handling is done in the calling function
slack_client.views_open(
trigger_id=slack_request.data["trigger_id"],
view=modal_payload,
)
def open_dialog(self, slack_request: SlackActionRequest, group: Group) -> None:
# XXX(epurkhiser): In order to update the original message we have to
# keep track of the response_url in the callback_id. Definitely hacky,
# but seems like there's no other solutions [1]:
#
# [1]: https://stackoverflow.com/questions/46629852/update-a-bot-message-after-responding-to-a-slack-dialog#comment80795670_46629852
org = group.project.organization
callback_id_dict = {
"issue": group.id,
"orig_response_url": slack_request.data["response_url"],
"is_message": _is_message(slack_request.data),
"rule": slack_request.callback_data.get("rule"),
}
if slack_request.data.get("channel"):
callback_id_dict["channel_id"] = slack_request.data["channel"]["id"]
callback_id = orjson.dumps(callback_id_dict).decode()
# only add tags to metadata
metadata_dict = callback_id_dict.copy()
metadata_dict["tags"] = list(slack_request.get_tags())
metadata = orjson.dumps(metadata_dict).decode()
# XXX(CEO): the second you make a selection (without hitting Submit) it sends a slightly different request
modal_payload = self.get_modal_payload(callback_id, metadata=metadata)
slack_client = SlackSdkClient(integration_id=slack_request.integration.id)
try:
# We need to use the action_ts as the external_id to update the modal
# We passed this in control when we sent the loading modal to beat the 3 second timeout
external_id = slack_request.get_action_ts()
if not external_id:
                # If we don't have an external_id or the option is disabled, we need to open a new modal
self._open_modal(slack_client, modal_payload, slack_request)
else:
self._update_modal(slack_client, external_id, modal_payload, slack_request)
except SlackApiError as e:
_logger.info(
"slack.action.response-error",
extra={
"organization_id": org.id,
"integration_id": slack_request.integration.id,
"exec_summary": repr(e),
},
)
|
_ModalDialog
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py
|
{
"start": 8289,
"end": 8640
}
|
class ____(StringRunMapper):
"""Map an input to the tool."""
@override
def map(self, run: Run) -> dict[str, str]:
if not run.outputs:
msg = f"Run {run.id} has no outputs to evaluate."
raise ValueError(msg)
return {"input": run.inputs["input"], "prediction": run.outputs["output"]}
|
ToolStringRunMapper
|
python
|
gevent__gevent
|
src/gevent/tests/test__socket_dns.py
|
{
"start": 24552,
"end": 26829
}
|
class ____(TestCase):
# For this test to work correctly, it needs to resolve to
# an address with a single A record; round-robin DNS and multiple A records
# may mess it up (subsequent requests---and we always make two---may return
# unequal results). We used to use gevent.org, but that now has multiple A records;
# trying www.gevent.org which is a CNAME to readthedocs.org then worked, but it became
# an alias for python-gevent.readthedocs.org, which is an alias for readthedocs.io,
# and which also has multiple addresses. So we run the resolver twice to try to get
# the different answers, if needed. Even then it's not enough, so
# we normalize the two addresses we get to a single one.
HOSTNAME = 'www.gevent.org'
def _normalize_result_gethostbyname(self, result):
if result == '104.17.33.82':
result = '104.17.32.82'
if result == '104.16.254.120':
result = '104.16.253.120'
return result
def _normalize_result_gethostbyname_ex(self, result):
result = super(TestGeventOrg, self)._normalize_result_gethostbyname_ex(result)
if result[0] == 'python-gevent.readthedocs.org':
result = ('readthedocs.io', ) + result[1:]
return result
def test_AI_CANONNAME(self):
        # Not all systems support AI_CANONNAME; notably the manylinux
# resolvers *sometimes* do not. Specifically, sometimes they
# provide the canonical name *only* on the first result.
args = (
# host
TestGeventOrg.HOSTNAME,
# port
None,
# family
socket.AF_INET,
# type
0,
# proto
0,
# flags
socket.AI_CANONNAME
)
gevent_result = gevent_socket.getaddrinfo(*args)
self.assertEqual(gevent_result[0][3], 'readthedocs.io')
real_result = socket.getaddrinfo(*args)
self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME = not all(r[3] for r in real_result)
try:
self.assertEqualResults(real_result, gevent_result, 'getaddrinfo')
finally:
del self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME
add(TestGeventOrg, TestGeventOrg.HOSTNAME)
|
TestGeventOrg
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/strategies.py
|
{
"start": 4804,
"end": 5879
}
|
class ____(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs,
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
|
_UninstrumentedColumnLoader
|
python
|
jina-ai__jina
|
tests/unit/serve/executors/test_bad_executor_constructor.py
|
{
"start": 217,
"end": 1201
}
|
class ____(Executor):
def __init__(self, metas, requests, runtime_args, dynamic_batching):
pass
@requests
def foo(self, docs, parameters, docs_matrix):
pass
def test_bad_executor_constructor():
# executor can be used as out of Flow as Python object
exec1 = GoodExecutor()
exec2 = GoodExecutor2({}, {}, {}, {})
# can be used in the Flow
with Flow().add(uses=GoodExecutor):
pass
with Flow().add(uses=GoodExecutor2):
pass
# bad executor due to mismatch on args
with pytest.raises(TypeError):
class BadExecutor1(Executor):
def __init__(self):
pass
@requests
def foo(self, **kwargs):
pass
with pytest.raises(TypeError):
class BadExecutor2(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests
def foo(self):
pass
|
GoodExecutor2
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-azure/dagster_azure/blob/resources.py
|
{
"start": 665,
"end": 1081
}
|
class ____(Config):
"""Authenticate using azure.identity.DefaultAzureCredential."""
credential_type: Literal["default_azure_credential"] = "default_azure_credential"
kwargs: dict[str, Any] = {}
"additional arguments to be passed to azure.identity.DefaultAzureCredential."
' e.g. AzureBlobStorageDefaultCredential(kwargs={"exclude_environment_credential": True})'
|
AzureBlobStorageDefaultCredential
|
python
|
numba__numba
|
numba/core/datamodel/models.py
|
{
"start": 38733,
"end": 39763
}
|
class ____(StructModel):
def __init__(self, dmm, fe_type):
ndim = fe_type.ndim
members = [('shape', types.UniTuple(types.intp, ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(NdIndexModel, self).__init__(dmm, fe_type, members)
@register_default(types.NumpyFlatType)
def handle_numpy_flat_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=False)
else:
return FlatIter(dmm, ty)
@register_default(types.NumpyNdEnumerateType)
def handle_numpy_ndenumerate_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=True)
else:
return FlatIter(dmm, ty)
@register_default(types.BoundFunction)
def handle_bound_function(dmm, ty):
# The same as the underlying type
return dmm[ty.this]
@register_default(types.NumpyNdIterType)
|
NdIndexModel
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builder_test/packages/callbacks/package.py
|
{
"start": 243,
"end": 574
}
|
class ____(Package):
"""Package used to verify that callbacks on phases work correctly, including conditions"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("2.0", md5="abcdef0123456789abcdef0123456789")
version("1.0", md5="0123456789abcdef0123456789abcdef")
|
Callbacks
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/obsolete.py
|
{
"start": 549,
"end": 1236
}
|
class ____(Node):
"""Missing node instance."""
activation_class = ObsoleteActivation
task_type = "OBSOLETE"
shape = {"width": 0, "height": 0, "svg": ""}
bpmn_element = None
def __init__(self, cancel_func=None, undo_func=None, **kwargs):
super().__init__(**kwargs)
self._undo_func = undo_func
self._cancel_func = cancel_func
def _outgoing(self):
return
yield
def create_node(self, name, flow_class):
"""
Create real node instance for missing entry
"""
obsolete = copy.copy(self)
obsolete.name = name
obsolete.flow_class = flow_class
return obsolete
|
Obsolete
|
python
|
google__pytype
|
pytype/overlays/attr_overlay.py
|
{
"start": 2252,
"end": 2395
}
|
class ____:
pass
# A unique sentinel value to signal not to write anything, not even the
# original value.
_NO_CHANGE = _NoChange()
|
_NoChange
|
python
|
jina-ai__jina
|
tests/integration/docarray_v2/test_v2.py
|
{
"start": 40111,
"end": 41126
}
|
class ____(BaseDoc):
tags: Dict[str, str] = {}
@pytest.fixture(scope='function')
def input_docs():
return DocList[ExternalDeploymentDoc]([ExternalDeploymentDoc() for _ in range(50)])
@pytest.fixture
def num_shards(request):
return request.param
def _external_deployment_args(num_shards, port=None):
from jina.parsers import set_deployment_parser
args = [
'--uses',
'MyExternalExecutor',
'--name',
'external_real',
'--port',
str(port) if port else str(random_port()),
'--host-in',
'0.0.0.0',
'--shards',
str(num_shards),
'--polling',
'all',
]
return set_deployment_parser().parse_args(args)
@pytest.fixture(scope='function')
def external_deployment_args(num_shards, port=None):
return _external_deployment_args(num_shards, port)
@pytest.fixture
def external_deployment(external_deployment_args):
return Deployment(external_deployment_args)
import uuid
|
ExternalDeploymentDoc
|
python
|
requests__requests-oauthlib
|
requests_oauthlib/oauth1_session.py
|
{
"start": 809,
"end": 971
}
|
class ____(ValueError):
def __init__(self, message, response):
super(TokenMissing, self).__init__(message)
self.response = response
|
TokenMissing
|
python
|
great-expectations__great_expectations
|
contrib/cli/great_expectations_contrib/package.py
|
{
"start": 1209,
"end": 1314
}
|
class ____(SerializableDictDot):
account_type: SocialLinkType
identifier: str
@dataclass
|
SocialLink
|
python
|
pytorch__pytorch
|
test/mobile/model_test/tensor_ops.py
|
{
"start": 3036,
"end": 5128
}
|
class ____(torch.nn.Module):
def forward(self):
return self.tensor_creation_ops()
def tensor_creation_ops(self):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
real = torch.tensor([1, 2], dtype=torch.float32)
imag = torch.tensor([3, 4], dtype=torch.float32)
inp = torch.tensor([-1.5, 0.0, 2.0])
values = torch.tensor([0.5])
quantized = torch.quantize_per_channel(
torch.tensor([[-1.0, 0.0], [1.0, 2.0]]),
torch.tensor([0.1, 0.01]),
torch.tensor([10, 0]),
0,
torch.quint8,
)
return len(
torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]),
            # torch.sparse_coo_tensor(i, v, [2, 3]), # does not work on iOS
torch.as_tensor([1, 2, 3]),
torch.as_strided(torch.randn(3, 3), (2, 2), (1, 2)),
torch.zeros(2, 3),
torch.zeros((2, 3)),
torch.zeros([2, 3], out=i),
torch.zeros(5),
torch.zeros_like(torch.empty(2, 3)),
torch.ones(2, 3),
torch.ones((2, 3)),
torch.ones([2, 3]),
torch.ones(5),
torch.ones_like(torch.empty(2, 3)),
torch.arange(5),
torch.arange(1, 4),
torch.arange(1, 2.5, 0.5),
torch.range(1, 4),
torch.range(1, 4, 0.5),
torch.linspace(3.0, 3.0, steps=1),
torch.logspace(start=2, end=2, steps=1, base=2.0),
torch.eye(3),
torch.empty(2, 3),
torch.empty_like(torch.empty(2, 3), dtype=torch.int64),
torch.empty_strided((2, 3), (1, 2)),
torch.full((2, 3), 3.141592),
torch.full_like(torch.full((2, 3), 3.141592), 2.71828),
torch.quantize_per_tensor(
torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8
),
torch.dequantize(quantized),
torch.complex(real, imag),
torch.polar(real, imag),
torch.heaviside(inp, values),
)
|
TensorCreationOpsModule
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 14203,
"end": 14905
}
|
class ____(Value):
__slots__ = ('loc', 'value',)
_fields = ('value',)
def __init__(self, value, loc=None):
self.loc = loc
self.value = value
def __eq__(self, other):
return (
self is other or (
isinstance(other, EnumValue) and
# self.loc == other.loc and
self.value == other.value
)
)
def __repr__(self):
return ('EnumValue('
'value={self.value!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.value,
self.loc
)
def __hash__(self):
return id(self)
|
EnumValue
|
python
|
Netflix__metaflow
|
metaflow/plugins/frameworks/pytorch.py
|
{
"start": 181,
"end": 1606
}
|
class ____(ParallelDecorator):
name = "pytorch_parallel"
defaults = {"master_port": None}
IS_PARALLEL = True
def task_decorate(
self, step_func, flow, graph, retry_count, max_user_code_retries, ubf_context
):
return super().task_decorate(
step_func, flow, graph, retry_count, max_user_code_retries, ubf_context
)
def setup_distributed_env(self, flow):
setup_torch_distributed(self.attributes["master_port"])
def setup_torch_distributed(master_port=None):
"""
Set up environment variables for PyTorch's distributed (DDP).
"""
# Choose port depending on run id to reduce probability of collisions, unless
# provided by the user.
try:
master_port = master_port or (51000 + abs(int(current.run_id)) % 10000)
except:
# if `int()` fails, i.e. `run_id` is not an `int`, use just a constant port. Can't use `hash()`,
# as that is not constant.
master_port = 51001
os.environ["MASTER_PORT"] = str(master_port)
os.environ["MASTER_ADDR"] = current.parallel.main_ip
os.environ["NODE_RANK"] = str(current.parallel.node_index)
os.environ["WORLD_SIZE"] = str(current.parallel.num_nodes)
os.environ["NUM_NODES"] = str(current.parallel.num_nodes)
# Specific for PyTorch Lightning
os.environ["PL_TORCH_DISTRIBUTED_BACKEND"] = "gloo" # NCCL crashes on aws batch!
|
PytorchParallelDecorator
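The decorator above only exports the environment variables that PyTorch's env:// rendezvous expects. A minimal sketch of how a training process might consume them, assuming one process per node (so NODE_RANK doubles as the global rank) and that setup_torch_distributed has already run; init_from_env is a hypothetical helper, not part of the Metaflow plugin:
import os
import torch.distributed as dist
def init_from_env() -> None:
    # MASTER_ADDR, MASTER_PORT and WORLD_SIZE are read from the environment,
    # exactly as exported by setup_torch_distributed above (env:// rendezvous).
    rank = int(os.environ.get("NODE_RANK", "0"))  # assumes one process per node
    world_size = int(os.environ["WORLD_SIZE"])
    dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)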
|
python
|
huggingface__transformers
|
src/transformers/models/edgetam/modeling_edgetam.py
|
{
"start": 2146,
"end": 3459
}
|
class ____(nn.LayerNorm):
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, *, eps=1e-6, data_format="channels_last", **kwargs):
super().__init__(normalized_shape, eps=eps, **kwargs)
if data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError(f"Unsupported data format: {data_format}")
self.data_format = data_format
def forward(self, features: torch.Tensor) -> torch.Tensor:
"""
Args:
features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
"""
if self.data_format == "channels_first":
features = features.permute(0, 2, 3, 1)
features = super().forward(features)
features = features.permute(0, 3, 1, 2)
else:
features = super().forward(features)
return features
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
|
EdgeTamLayerNorm
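A minimal, self-contained sketch of the permute-then-normalize pattern used by the forward method above for channels_first input; ChannelsFirstLayerNorm is an illustrative stand-in name, not part of the EdgeTAM model:
import torch
import torch.nn as nn
class ChannelsFirstLayerNorm(nn.LayerNorm):
    # Normalizes over the channel dimension of an NCHW tensor by moving
    # channels last, applying standard LayerNorm, and moving them back.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x.permute(0, 2, 3, 1)
        x = super().forward(x)
        return x.permute(0, 3, 1, 2)
norm = ChannelsFirstLayerNorm(64)
out = norm(torch.randn(2, 64, 8, 8))  # output keeps the (N, C, H, W) layout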
|
python
|
python-poetry__poetry
|
src/poetry/packages/locker.py
|
{
"start": 1751,
"end": 24495
}
|
class ____:
_VERSION = "2.1"
_READ_VERSION_RANGE = ">=1,<3"
_legacy_keys: ClassVar[list[str]] = [
"dependencies",
"source",
"extras",
"dev-dependencies",
]
_relevant_keys: ClassVar[list[str]] = [*_legacy_keys, "group"]
_relevant_project_keys: ClassVar[list[str]] = [
"requires-python",
"dependencies",
"optional-dependencies",
]
def __init__(self, lock: Path, pyproject_data: dict[str, Any]) -> None:
self._lock = lock
self._pyproject_data = pyproject_data
self._lock_data: dict[str, Any] | None = None
self._content_hash = self._get_content_hash()
@property
def lock(self) -> Path:
return self._lock
@property
def lock_data(self) -> dict[str, Any]:
if self._lock_data is None:
self._lock_data = self._get_lock_data()
return self._lock_data
def is_locked(self) -> bool:
"""
Checks whether the locker has been locked (lockfile found).
"""
return self._lock.exists()
def is_fresh(self) -> bool:
"""
Checks whether the lock file is still up to date with the current hash.
"""
with self.lock.open("rb") as f:
lock = tomllib.load(f)
metadata = lock.get("metadata", {})
if "content-hash" in metadata:
fresh: bool = self._content_hash == metadata["content-hash"]
if not fresh:
with self.lock.open("r", encoding="utf-8") as f:
generated_comment = f.readline()
if m := re.search("Poetry ([^ ]+)", generated_comment):
try:
version = Version.parse(m.group(1))
except InvalidVersionError:
pass
else:
if version < Version.parse("2.3.0"):
# Before Poetry 2.3.0, the content hash did not include
# dependency groups, so we need to recompute it without
# them for comparison.
old_content_hash = self._get_content_hash(
with_dependency_groups=False
)
fresh = old_content_hash == metadata["content-hash"]
return fresh
return False
def is_locked_groups_and_markers(self) -> bool:
if not self.is_locked():
return False
version = Version.parse(self.lock_data["metadata"]["lock-version"])
return version >= Version.parse("2.1")
def set_pyproject_data(self, pyproject_data: dict[str, Any]) -> None:
self._pyproject_data = pyproject_data
self._content_hash = self._get_content_hash()
def set_local_config(self, local_config: dict[str, Any]) -> None:
warnings.warn(
"Locker.set_local_config() is deprecated and will be removed in a future"
" release. Use Locker.set_pyproject_data() instead.",
DeprecationWarning,
stacklevel=2,
)
self._pyproject_data.setdefault("tool", {})["poetry"] = local_config
self._content_hash = self._get_content_hash()
def locked_repository(self) -> LockfileRepository:
"""
Searches and returns a repository of locked packages.
"""
from poetry.repositories.lockfile_repository import LockfileRepository
repository = LockfileRepository()
if not self.is_locked():
return repository
locked_packages = cast("list[dict[str, Any]]", self.lock_data["package"])
if not locked_packages:
return repository
for info in locked_packages:
repository.add_package(self._get_locked_package(info))
return repository
def locked_packages(self) -> dict[Package, TransitivePackageInfo]:
if not self.is_locked_groups_and_markers():
raise RuntimeError(
"This method should not be called if the lock file"
" is not at least version 2.1."
)
locked_packages: dict[Package, TransitivePackageInfo] = {}
locked_package_info = cast("list[dict[str, Any]]", self.lock_data["package"])
for info in locked_package_info:
package = self._get_locked_package(info, with_dependencies=False)
groups = set(info["groups"])
locked_marker = info.get("markers", "*")
if isinstance(locked_marker, str):
markers = {
canonicalize_name(group): parse_marker(locked_marker)
for group in groups
}
else:
markers = {
canonicalize_name(group): parse_marker(
locked_marker.get(group, "*")
)
for group in groups
}
locked_packages[package] = TransitivePackageInfo(
0, {canonicalize_name(g) for g in groups}, markers
)
return locked_packages
def set_lock_data(
self, root: Package, packages: dict[Package, TransitivePackageInfo]
) -> bool:
"""Store lock data and eventually persist to the lock file"""
lock = self._compute_lock_data(root, packages)
if self._should_write(lock):
self._write_lock_data(lock)
return True
return False
def _compute_lock_data(
self, root: Package, packages: dict[Package, TransitivePackageInfo]
) -> TOMLDocument:
package_specs = self._lock_packages(packages)
# Retrieving hashes
for package in package_specs:
files = array()
for f in package["files"]:
file_metadata = inline_table()
for k, v in sorted(f.items()):
file_metadata[k] = v
files.append(file_metadata)
package["files"] = files.multiline(True)
lock = document()
lock.add(comment(GENERATED_COMMENT))
lock["package"] = package_specs
if root.extras:
lock["extras"] = {
extra: sorted(dep.pretty_name for dep in deps)
for extra, deps in sorted(root.extras.items())
}
lock["metadata"] = {
"lock-version": self._VERSION,
"python-versions": root.python_versions,
"content-hash": self._content_hash,
}
return lock
def _should_write(self, lock: TOMLDocument) -> bool:
# if lock file exists: compare with existing lock data
do_write = True
if self.is_locked():
try:
lock_data = self.lock_data
except RuntimeError:
# incompatible, invalid or no lock file
pass
else:
do_write = lock != lock_data
return do_write
def _write_lock_data(self, data: TOMLDocument) -> None:
if self.lock.exists():
# The following code is roughly equivalent to
# • lockfile = TOMLFile(self.lock)
# • lockfile.read()
# • lockfile.write(data)
# However, lockfile.read() takes more than half a second even
# for a modestly sized project like Poetry itself and the only reason
# for reading the lockfile is to determine the line endings. Thus,
# we do that part for ourselves here, which only takes about 10 ms.
# get original line endings
with open(self.lock, encoding="utf-8", newline="") as f:
line = f.readline()
linesep = "\r\n" if line.endswith("\r\n") else "\n"
# enforce original line endings
content = data.as_string()
if linesep == "\n":
content = content.replace("\r\n", "\n")
elif linesep == "\r\n":
content = re.sub(r"(?<!\r)\n", "\r\n", content)
with open(self.lock, "w", encoding="utf-8", newline="") as f:
f.write(content)
else:
lockfile = TOMLFile(self.lock)
lockfile.write(data)
self._lock_data = None
def _get_content_hash(self, *, with_dependency_groups: bool = True) -> str:
"""
Returns the sha256 hash of the sorted content of the pyproject file.
"""
project_content = self._pyproject_data.get("project", {})
group_content = (
self._pyproject_data.get("dependency-groups", {})
if with_dependency_groups
else {}
)
tool_poetry_content = self._pyproject_data.get("tool", {}).get("poetry", {})
relevant_project_content = {}
for key in self._relevant_project_keys:
data = project_content.get(key)
if data is not None:
relevant_project_content[key] = data
relevant_poetry_content = {}
for key in self._relevant_keys:
data = tool_poetry_content.get(key)
if data is None and (
# Special handling for legacy keys is just for backwards compatibility,
# and thereby not required if there is relevant content in [project]
# or [dependency-groups].
key not in self._legacy_keys
or relevant_project_content
or group_content
):
continue
relevant_poetry_content[key] = data
relevant_content = {}
if relevant_project_content:
relevant_content["project"] = relevant_project_content
if group_content:
# For backwards compatibility, we must not add dependency-groups
# if it is empty.
relevant_content["dependency-groups"] = group_content
if relevant_content:
relevant_content["tool"] = {"poetry": relevant_poetry_content}
else:
# For backwards compatibility, we have to put the relevant content
# of the [tool.poetry] section at top level!
relevant_content = relevant_poetry_content
return sha256(json.dumps(relevant_content, sort_keys=True).encode()).hexdigest()
def _get_lock_data(self) -> dict[str, Any]:
if not self.lock.exists():
raise RuntimeError("No lockfile found. Unable to read locked packages")
with self.lock.open("rb") as f:
try:
lock_data = tomllib.load(f)
except tomllib.TOMLDecodeError as e:
raise RuntimeError(f"Unable to read the lock file ({e}).")
# if the lockfile doesn't contain a metadata section at all,
# it probably needs to be rebuilt completely
if "metadata" not in lock_data:
raise RuntimeError(
"The lock file does not have a metadata entry.\n"
"Regenerate the lock file with the `poetry lock` command."
)
metadata = lock_data["metadata"]
if "lock-version" not in metadata:
raise RuntimeError(
"The lock file is not compatible with the current version of Poetry.\n"
"Regenerate the lock file with the `poetry lock` command."
)
lock_version = Version.parse(metadata["lock-version"])
current_version = Version.parse(self._VERSION)
accepted_versions = parse_constraint(self._READ_VERSION_RANGE)
lock_version_allowed = accepted_versions.allows(lock_version)
if lock_version_allowed and current_version < lock_version:
logger.warning(
"The lock file might not be compatible with the current version of"
" Poetry.\nUpgrade Poetry to ensure the lock file is read properly or,"
" alternatively, regenerate the lock file with the `poetry lock`"
" command."
)
elif not lock_version_allowed:
raise RuntimeError(
"The lock file is not compatible with the current version of Poetry.\n"
"Upgrade Poetry to be able to read the lock file or, alternatively, "
"regenerate the lock file with the `poetry lock` command."
)
return lock_data
def _get_locked_package(
self, info: dict[str, Any], with_dependencies: bool = True
) -> Package:
source = info.get("source", {})
source_type = source.get("type")
url = source.get("url")
if source_type in ["directory", "file"]:
url = self.lock.parent.joinpath(url).resolve().as_posix()
name = info["name"]
package = Package(
name,
info["version"],
source_type=source_type,
source_url=url,
source_reference=source.get("reference"),
source_resolved_reference=source.get("resolved_reference"),
source_subdirectory=source.get("subdirectory"),
)
package.description = info.get("description", "")
package.optional = info["optional"]
metadata = cast("dict[str, Any]", self.lock_data["metadata"])
# Storing of package files and hashes has been through a few generations in
# the lockfile, we can read them all:
#
# - latest and preferred is that this is read per package, from
# package.files
# - oldest is that hashes were stored in metadata.hashes without filenames
# - in between those two, hashes were stored alongside filenames in
# metadata.files
package_files = info.get("files")
if package_files is not None:
package.files = package_files
elif "hashes" in metadata:
hashes = cast("dict[str, Any]", metadata["hashes"])
package.files = [{"name": h, "hash": h} for h in hashes[name]]
elif source_type in {"git", "directory", "url"}:
package.files = []
else:
files = metadata["files"][name]
if source_type == "file":
filename = Path(url).name
package.files = [item for item in files if item["file"] == filename]
else:
                # Strictly speaking, this is not correct, but we cannot always
                # determine which are the correct files because the lockfile
                # doesn't keep track of which files belong to which package.
package.files = files
package.python_versions = info["python-versions"]
if "develop" in info:
package.develop = info["develop"]
if with_dependencies:
from poetry.factory import Factory
package_extras: dict[NormalizedName, list[Dependency]] = {}
extras = info.get("extras", {})
if extras:
for name, deps in extras.items():
name = canonicalize_name(name)
package_extras[name] = []
for dep in deps:
try:
dependency = Dependency.create_from_pep_508(dep)
except InvalidRequirementError:
# handle lock files with invalid PEP 508
m = re.match(r"^(.+?)(?:\[(.+?)])?(?:\s+\((.+)\))?$", dep)
if not m:
raise
dep_name = m.group(1)
extras = m.group(2) or ""
constraint = m.group(3) or "*"
dependency = Dependency(
dep_name, constraint, extras=extras.split(",")
)
package_extras[name].append(dependency)
package.extras = package_extras
for dep_name, constraint in info.get("dependencies", {}).items():
root_dir = self.lock.parent
if package.source_type == "directory":
# root dir should be the source of the package relative to the lock
# path
assert package.source_url is not None
root_dir = Path(package.source_url)
if isinstance(constraint, list):
for c in constraint:
package.add_dependency(
Factory.create_dependency(dep_name, c, root_dir=root_dir)
)
continue
package.add_dependency(
Factory.create_dependency(dep_name, constraint, root_dir=root_dir)
)
return package
def _lock_packages(
self, packages: dict[Package, TransitivePackageInfo]
) -> list[dict[str, Any]]:
locked = []
for package in sorted(
packages,
key=lambda x: (
x.name,
x.version,
x.source_type or "",
x.source_url or "",
x.source_subdirectory or "",
x.source_reference or "",
x.source_resolved_reference or "",
),
):
spec = self._dump_package(package, packages[package])
locked.append(spec)
return locked
def _dump_package(
self, package: Package, transitive_info: TransitivePackageInfo
) -> dict[str, Any]:
dependencies: dict[str, list[Any]] = {}
for dependency in sorted(
package.requires,
key=lambda d: d.name,
):
dependencies.setdefault(dependency.pretty_name, [])
constraint = inline_table()
if dependency.is_directory():
dependency = cast("DirectoryDependency", dependency)
constraint["path"] = dependency.path.as_posix()
if dependency.develop:
constraint["develop"] = True
elif dependency.is_file():
dependency = cast("FileDependency", dependency)
constraint["path"] = dependency.path.as_posix()
elif dependency.is_url():
dependency = cast("URLDependency", dependency)
constraint["url"] = dependency.url
elif dependency.is_vcs():
dependency = cast("VCSDependency", dependency)
constraint[dependency.vcs] = dependency.source
if dependency.branch:
constraint["branch"] = dependency.branch
elif dependency.tag:
constraint["tag"] = dependency.tag
elif dependency.rev:
constraint["rev"] = dependency.rev
if dependency.directory:
constraint["subdirectory"] = dependency.directory
else:
constraint["version"] = str(dependency.pretty_constraint)
if dependency.extras:
constraint["extras"] = sorted(dependency.extras)
if dependency.is_optional():
constraint["optional"] = True
if not dependency.marker.is_any():
constraint["markers"] = str(dependency.marker)
dependencies[dependency.pretty_name].append(constraint)
# All the constraints should have the same type,
# but we want to simplify them if it's possible
for dependency_name, constraints in dependencies.items():
if all(
len(constraint) == 1 and "version" in constraint
for constraint in constraints
):
dependencies[dependency_name] = [
constraint["version"] for constraint in constraints
]
data: dict[str, Any] = {
"name": package.pretty_name,
"version": package.pretty_version,
"description": package.description or "",
"optional": package.optional,
"python-versions": package.python_versions,
"groups": sorted(transitive_info.groups, key=lambda x: (x != "main", x)),
}
if transitive_info.markers:
if len(markers := set(transitive_info.markers.values())) == 1:
if not (marker := next(iter(markers))).is_any():
data["markers"] = str(marker)
else:
data["markers"] = inline_table()
for group, marker in sorted(
transitive_info.markers.items(),
key=lambda x: (x[0] != "main", x[0]),
):
if not marker.is_any():
data["markers"][group] = str(marker)
data["files"] = sorted(package.files, key=lambda x: x["file"])
if dependencies:
data["dependencies"] = table()
for dep_name, constraints in dependencies.items():
if len(constraints) == 1:
data["dependencies"][dep_name] = constraints[0]
else:
data["dependencies"][dep_name] = array().multiline(True)
for constraint in constraints:
data["dependencies"][dep_name].append(constraint)
if package.extras:
extras = {}
for name, deps in sorted(package.extras.items()):
extras[name] = sorted(dep.to_pep_508(with_extras=False) for dep in deps)
data["extras"] = extras
if package.source_url:
url = package.source_url
if package.source_type in ["file", "directory"]:
# The lock file should only store paths relative to the root project
url = Path(
os.path.relpath(
Path(url).resolve(),
Path(self.lock.parent).resolve(),
)
).as_posix()
data["source"] = {}
if package.source_type:
data["source"]["type"] = package.source_type
data["source"]["url"] = url
if package.source_reference:
data["source"]["reference"] = package.source_reference
if package.source_resolved_reference:
data["source"]["resolved_reference"] = package.source_resolved_reference
if package.source_subdirectory:
data["source"]["subdirectory"] = package.source_subdirectory
if package.source_type in ["directory", "git"]:
data["develop"] = package.develop
return data
|
Locker
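The newline handling inside _write_lock_data is self-contained enough to illustrate on its own. A minimal sketch, assuming an existing file whose first line determines the newline convention; rewrite_preserving_newlines is a hypothetical helper name, not Poetry API:
import re
def rewrite_preserving_newlines(path: str, content: str) -> None:
    # Peek at the first line only; reading the whole file just to learn the
    # newline convention is the slow path the original comment calls out.
    with open(path, encoding="utf-8", newline="") as f:
        first_line = f.readline()
    linesep = "\r\n" if first_line.endswith("\r\n") else "\n"
    if linesep == "\n":
        content = content.replace("\r\n", "\n")
    else:
        content = re.sub(r"(?<!\r)\n", "\r\n", content)
    with open(path, "w", encoding="utf-8", newline="") as f:
        f.write(content)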
|
python
|
python-pillow__Pillow
|
Tests/test_file_libtiff_small.py
|
{
"start": 157,
"end": 1573
}
|
class ____(LibTiffTestCase):
"""The small lena image was failing on open in the libtiff
decoder because the file pointer was set to the wrong place
by a spurious seek. It wasn't failing with the byteio method.
It was fixed by forcing an lseek to the beginning of the
file just before reading in libtiff. These tests remain
to ensure that it stays fixed."""
def test_g4_hopper_file(self, tmp_path: Path) -> None:
"""Testing the open file load path"""
test_file = "Tests/images/hopper_g4.tif"
with open(test_file, "rb") as f:
with Image.open(f) as im:
assert im.size == (128, 128)
self._assert_noerr(tmp_path, im)
def test_g4_hopper_bytesio(self, tmp_path: Path) -> None:
"""Testing the bytesio loading code path"""
test_file = "Tests/images/hopper_g4.tif"
s = BytesIO()
with open(test_file, "rb") as f:
s.write(f.read())
s.seek(0)
with Image.open(s) as im:
assert im.size == (128, 128)
self._assert_noerr(tmp_path, im)
def test_g4_hopper(self, tmp_path: Path) -> None:
"""The 128x128 lena image failed for some reason."""
test_file = "Tests/images/hopper_g4.tif"
with Image.open(test_file) as im:
assert im.size == (128, 128)
self._assert_noerr(tmp_path, im)
|
TestFileLibTiffSmall
|
python
|
tensorflow__tensorflow
|
tensorflow/python/types/distribute.py
|
{
"start": 7296,
"end": 8014
}
|
class ____(DistributedValues):
"""Holds a distributed value: a map from replica id to synchronized values.
`Mirrored` values are `tf.distribute.DistributedValues` for which we know that
the value on all replicas is the same. `Mirrored` values are kept synchronized
by the distribution strategy in use, while `tf.types.experimental.PerReplica`
values are left unsynchronized. `Mirrored` values typically represent model
weights. We can safely read a `Mirrored` value in a cross-replica context by
using the value on any replica, while `PerReplica` values should not be read
or manipulated directly by the user in a cross-replica context.
"""
@tf_export("distribute.DistributedIterator", v1=[])
|
Mirrored
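A short sketch of the behaviour the docstring describes, assuming a TensorFlow install with tf.distribute.MirroredStrategy available; variables created under the strategy scope become mirrored values, so reading them in a cross-replica context is safe:
import tensorflow as tf
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    # Model weights created under the scope become mirrored values,
    # kept identical across replicas by the strategy.
    w = tf.Variable(1.0)
# Safe cross-replica read: any replica's copy holds the same value.
print(float(w.numpy()))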
|
python
|
google__jax
|
tests/pallas/pallas_test.py
|
{
"start": 2783,
"end": 3415
}
|
class ____(jtu.JaxTestCase):
INTERPRET = False
def setUp(self):
if jtu.test_device_matches(["cpu"]) and not self.INTERPRET:
self.skipTest("On CPU the test works only in interpret mode")
if (jtu.test_device_matches(["cuda"]) and
not jtu.is_cuda_compute_capability_at_least("8.0")):
self.skipTest("Only works on GPU with capability >= sm80")
if sys.platform == "win32" and not self.INTERPRET:
self.skipTest("Only works on non-Windows platforms")
super().setUp()
def pallas_call(self, *args, **kwargs):
return pl.pallas_call(*args, **kwargs, interpret=self.INTERPRET)
|
PallasBaseTest
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_ml/src/project_ml/defs/assets/model_assets.py
|
{
"start": 18139,
"end": 23519
}
|
class ____(dg.Config):
"""Configuration for model deployment."""
accuracy_threshold: float = ACCURACY_THRESHOLD
model_path: str = str(MODELS_DIR)
custom_model_name: Optional[str] = None # Allow users to specify a specific model to deploy
force_deploy: bool = False # Allow users to bypass accuracy threshold
# end_deployment_config
@dg.asset(
description="Deploy model to production if it meets quality threshold or if custom model specified",
group_name="model_pipeline",
required_resource_keys={"model_storage"},
)
def production_digit_classifier(
context,
digit_classifier: DigitCNN,
model_evaluation: dict[str, Any],
config: DeploymentConfig,
) -> Optional[DigitCNN]:
"""Deploy model to production based on configuration options."""
# Get the model store resource
model_store = context.resources.model_storage
# Check if user wants to deploy a specific custom model
if config.custom_model_name:
context.log.info(f"User requested deployment of custom model: {config.custom_model_name}")
try:
# Load the custom model
custom_model_data = model_store.load_model(config.custom_model_name)
# Handle both formats: dict with 'model' key or direct model object
if isinstance(custom_model_data, dict) and "model" in custom_model_data:
custom_model = custom_model_data["model"]
else:
custom_model = custom_model_data # Direct model object
# Save the custom model as production model
production_model_name = f"production_custom_{config.custom_model_name}"
context.log.info(f"Saving custom model as production model: {production_model_name}")
model_store.save_model(custom_model, production_model_name)
context.add_output_metadata(
{
"deployment_status": "deployed_custom",
"custom_model_name": config.custom_model_name,
"production_model_name": production_model_name,
"deployment_type": "user_override",
},
output_name="result",
)
return custom_model
except Exception as e:
context.log.error(f"Failed to load custom model {config.custom_model_name}: {e!s}")
context.add_output_metadata(
{
"deployment_status": "failed_custom",
"custom_model_name": config.custom_model_name,
"error": str(e),
"deployment_type": "user_override",
},
output_name="result",
)
return None
# Standard deployment logic based on accuracy threshold
test_accuracy = model_evaluation["test_accuracy"]
# Check if user wants to force deployment regardless of accuracy
if config.force_deploy:
context.log.info(
f"Force deployment enabled - deploying model with accuracy: {test_accuracy:.4f}"
)
accuracy_str = f"{test_accuracy:.2f}".replace(".", "p")
model_name = f"production_model_forced_{accuracy_str}"
# Save the model using the model store
context.log.info(f"Saving forced production model as {model_name}")
model_store.save_model(digit_classifier, model_name)
context.add_output_metadata(
{
"deployment_status": "deployed_forced",
"deployed_accuracy": test_accuracy,
"deployment_threshold": config.accuracy_threshold,
"model_name": model_name,
"deployment_type": "force_override",
},
output_name="result",
)
return digit_classifier
# Standard accuracy-based deployment
context.log.info(
f"Candidate model accuracy: {test_accuracy:.4f}, Threshold: {config.accuracy_threshold}"
)
if test_accuracy >= config.accuracy_threshold:
context.log.info("Model meets quality threshold - deploying to production")
# Create model name with accuracy
accuracy_str = f"{test_accuracy:.2f}".replace(".", "p")
model_name = f"production_model_{accuracy_str}"
# Save the model using the model store
context.log.info(f"Saving production model as {model_name}")
model_store.save_model(digit_classifier, model_name)
context.add_output_metadata(
{
"deployment_status": "deployed",
"deployed_accuracy": test_accuracy,
"deployment_threshold": config.accuracy_threshold,
"model_name": model_name,
"deployment_type": "standard",
},
output_name="result",
)
return digit_classifier
else:
context.log.warning(
f"Model accuracy {test_accuracy:.4f} below threshold {config.accuracy_threshold} - skipping deployment"
)
context.add_output_metadata(
{
"deployment_status": "skipped",
"candidate_accuracy": test_accuracy,
"deployment_threshold": config.accuracy_threshold,
"deployment_type": "standard",
},
output_name="result",
)
return None
|
DeploymentConfig
|
python
|
walkccc__LeetCode
|
solutions/1482. Minimum Number of Days to Make m Bouquets/1482.py
|
{
"start": 0,
"end": 924
}
|
class ____:
def minDays(self, bloomDay: list[int], m: int, k: int) -> int:
if len(bloomDay) < m * k:
return -1
def getBouquetCount(waitingDays: int) -> int:
"""
      Returns the number of bouquets (each needing k adjacent flowers) that can
      be made after waiting `waitingDays` days.
"""
bouquetCount = 0
requiredFlowers = k
for day in bloomDay:
if day > waitingDays:
          # Reset `requiredFlowers` since there weren't enough adjacent flowers.
requiredFlowers = k
else:
requiredFlowers -= 1
if requiredFlowers == 0:
# Use k adjacent flowers to make a bouquet.
bouquetCount += 1
requiredFlowers = k
return bouquetCount
l = min(bloomDay)
r = max(bloomDay)
while l < r:
mid = (l + r) // 2
if getBouquetCount(mid) >= m:
r = mid
else:
l = mid + 1
return l
|
Solution
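A quick usage check of the binary search on waiting days above, using the class name given in the record's target field and the standard example for this problem:
# With bloomDay = [1, 10, 3, 10, 2], m = 3 bouquets of k = 1 flower each,
# day 3 is the earliest day on which three single-flower bouquets exist.
print(Solution().minDays([1, 10, 3, 10, 2], m=3, k=1))  # expected: 3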
|
python
|
pytorch__pytorch
|
tools/test/test_upload_stats_lib.py
|
{
"start": 722,
"end": 8433
}
|
class ____(unittest.TestCase):
emitted_metric: dict[str, Any] = {"did_not_emit": True}
def mock_put_item(self, **kwargs: Any) -> None:
        # Utility for mocking putting items into S3. This will save the emitted
        # metric so tests can check it.
self.emitted_metric = json.loads(
gzip.decompress(kwargs["Body"]).decode("utf-8")
)
# Before each test, set the env vars to their default values
def setUp(self) -> None:
get_s3_resource.cache_clear()
global_metrics.clear()
mock.patch.dict(
"os.environ",
{
"CI": "true",
"BUILD_ENVIRONMENT": BUILD_ENV,
"TEST_CONFIG": TEST_CONFIG,
"GITHUB_REPOSITORY": REPO,
"GITHUB_WORKFLOW": WORKFLOW,
"GITHUB_JOB": JOB,
"GITHUB_RUN_ID": str(RUN_ID),
"GITHUB_RUN_NUMBER": str(RUN_NUMBER),
"GITHUB_RUN_ATTEMPT": str(RUN_ATTEMPT),
"JOB_ID": str(JOB_ID),
"JOB_NAME": str(JOB_NAME),
},
clear=True, # Don't read any preset env vars
).start()
def test_emits_default_and_given_metrics(self, mock_resource: Any) -> None:
metric = {
"some_number": 123,
"float_number": 32.34,
}
# Querying for this instead of hard coding it b/c this will change
# based on whether we run this test directly from python or from
# pytest
current_module = inspect.getmodule(inspect.currentframe()).__name__ # type: ignore[union-attr]
emit_should_include = {
"metric_name": "metric_name",
"calling_file": "test_upload_stats_lib.py",
"calling_module": current_module,
"calling_function": "test_emits_default_and_given_metrics",
"repo": REPO,
"workflow": WORKFLOW,
"build_environment": BUILD_ENV,
"job": JOB,
"test_config": TEST_CONFIG,
"run_id": RUN_ID,
"run_number": RUN_NUMBER,
"run_attempt": RUN_ATTEMPT,
"job_id": JOB_ID,
"job_name": JOB_NAME,
"info": metric,
}
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertEqual(
self.emitted_metric,
{**self.emitted_metric, **emit_should_include},
)
def test_when_global_metric_specified_then_it_emits_it(
self, mock_resource: Any
) -> None:
metric = {
"some_number": 123,
}
global_metric_name = "global_metric"
global_metric_value = "global_value"
add_global_metric(global_metric_name, global_metric_value)
emit_should_include = {
**metric,
global_metric_name: global_metric_value,
}
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertEqual(
self.emitted_metric,
{**self.emitted_metric, "info": emit_should_include},
)
def test_when_local_and_global_metric_specified_then_global_is_overridden(
self, mock_resource: Any
) -> None:
global_metric_name = "global_metric"
global_metric_value = "global_value"
local_override = "local_override"
add_global_metric(global_metric_name, global_metric_value)
metric = {
"some_number": 123,
global_metric_name: local_override,
}
emit_should_include = {
**metric,
global_metric_name: local_override,
}
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertEqual(
self.emitted_metric,
{**self.emitted_metric, "info": emit_should_include},
)
def test_when_optional_envvar_set_to_actual_value_then_emit_vars_emits_it(
self, mock_resource: Any
) -> None:
metric = {
"some_number": 123,
}
emit_should_include = {
"info": {**metric},
"pr_number": PR_NUMBER,
}
mock.patch.dict(
"os.environ",
{
"PR_NUMBER": str(PR_NUMBER),
},
).start()
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertEqual(
self.emitted_metric,
{**self.emitted_metric, **emit_should_include},
)
def test_when_optional_envvar_set_to_a_empty_str_then_emit_vars_ignores_it(
self, mock_resource: Any
) -> None:
metric = {"some_number": 123}
emit_should_include: dict[str, Any] = metric.copy()
# Github Actions defaults some env vars to an empty string
default_val = ""
mock.patch.dict(
"os.environ",
{
"PR_NUMBER": default_val,
},
).start()
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertEqual(
self.emitted_metric,
{**self.emitted_metric, "info": emit_should_include},
f"Metrics should be emitted when an option parameter is set to '{default_val}'",
)
self.assertFalse(
self.emitted_metric.get("pr_number"),
f"Metrics should not include optional item 'pr_number' when it's envvar is set to '{default_val}'",
)
def test_no_metrics_emitted_if_required_env_var_not_set(
self, mock_resource: Any
) -> None:
metric = {"some_number": 123}
mock.patch.dict(
"os.environ",
{
"CI": "true",
"BUILD_ENVIRONMENT": BUILD_ENV,
},
clear=True,
).start()
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertTrue(self.emitted_metric["did_not_emit"])
def test_no_metrics_emitted_if_required_env_var_set_to_empty_string(
self, mock_resource: Any
) -> None:
metric = {"some_number": 123}
mock.patch.dict(
"os.environ",
{
"GITHUB_JOB": "",
},
).start()
mock_resource.return_value.Object.return_value.put = self.mock_put_item
emit_metric("metric_name", metric)
self.assertTrue(self.emitted_metric["did_not_emit"])
def test_remove_nan_inf(self, _mocked_resource: Any) -> None:
checks = [
(float("inf"), '"inf"', "Infinity"),
(float("nan"), '"nan"', "NaN"),
({1: float("inf")}, '{"1": "inf"}', '{"1": Infinity}'),
([float("nan")], '["nan"]', "[NaN]"),
({1: [float("nan")]}, '{"1": ["nan"]}', '{"1": [NaN]}'),
]
for input, clean, unclean in checks:
clean_output = json.dumps(remove_nan_inf(input))
unclean_output = json.dumps(input)
self.assertEqual(
clean_output,
clean,
f"Expected {clean} when input is {unclean}, got {clean_output}",
)
self.assertEqual(
unclean_output,
unclean,
f"Expected {unclean} when input is {unclean}, got {unclean_output}",
)
if __name__ == "__main__":
unittest.main()
|
TestUploadStats
|
python
|
docker__docker-py
|
tests/integration/api_container_test.py
|
{
"start": 54856,
"end": 56612
}
|
class ____(BaseAPIIntegrationTest):
@requires_api_version('1.22')
def test_update_container(self):
old_mem_limit = 400 * 1024 * 1024
new_mem_limit = 300 * 1024 * 1024
container = self.client.create_container(
TEST_IMG, 'top', host_config=self.client.create_host_config(
mem_limit=old_mem_limit
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.update_container(container, mem_limit=new_mem_limit)
inspect_data = self.client.inspect_container(container)
assert inspect_data['HostConfig']['Memory'] == new_mem_limit
@requires_api_version('1.23')
def test_restart_policy_update(self):
old_restart_policy = {
'MaximumRetryCount': 0,
'Name': 'always'
}
new_restart_policy = {
'MaximumRetryCount': 42,
'Name': 'on-failure'
}
container = self.client.create_container(
TEST_IMG, ['sleep', '60'],
host_config=self.client.create_host_config(
restart_policy=old_restart_policy
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.update_container(container,
restart_policy=new_restart_policy)
inspect_data = self.client.inspect_container(container)
assert (
inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'] ==
new_restart_policy['MaximumRetryCount']
)
assert (
inspect_data['HostConfig']['RestartPolicy']['Name'] ==
new_restart_policy['Name']
)
|
ContainerUpdateTest
|
python
|
openai__openai-python
|
tests/api_resources/test_embeddings.py
|
{
"start": 385,
"end": 2378
}
|
class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
embedding = client.embeddings.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
)
assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
embedding = client.embeddings.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
dimensions=1,
encoding_format="float",
user="user-1234",
)
assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.embeddings.with_raw_response.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
embedding = response.parse()
assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.embeddings.with_streaming_response.create(
input="The quick brown fox jumped over the lazy dog",
model="text-embedding-3-small",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
embedding = response.parse()
assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"])
assert cast(Any, response.is_closed) is True
|
TestEmbeddings
|
python
|
neetcode-gh__leetcode
|
python/0729-my-calendar-i.py
|
{
"start": 25,
"end": 1010
}
|
class ____:
def __init__(self):
self.calendar = CalendarNode(-1, -1)
def book(self, start: int, end: int) -> bool:
def bookHelper(cur, targetStart, targetEnd):
if targetStart > cur.end:
# go to the right
if not cur.right:
# we can insert event
cur.right = CalendarNode(targetStart, targetEnd)
return True
return bookHelper(cur.right, targetStart, targetEnd)
elif targetEnd < cur.start:
                # go to the left
if not cur.left:
# we can insert event
cur.left = CalendarNode(targetStart, targetEnd)
return True
return bookHelper(cur.left, targetStart, targetEnd)
return False
return bookHelper(self.calendar, start, end-1) # "end-1" because "end" bound is exclusive (see example 1)
|
MyCalendar
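The snippet refers to a CalendarNode helper that lies outside the captured span. A minimal sketch of what it presumably provides (an assumption, not the repository's actual definition), followed by a usage check:
class CalendarNode:
    # Interval BST node holding one inclusive [start, end] booking.
    def __init__(self, start: int, end: int):
        self.start = start
        self.end = end
        self.left = None
        self.right = None
cal = MyCalendar()
print(cal.book(10, 20))  # True: first booking always fits
print(cal.book(15, 25))  # False: overlaps [10, 20)
print(cal.book(20, 30))  # True: starts exactly where the first booking ends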
|
python
|
python__mypy
|
mypy/test/teststubinfo.py
|
{
"start": 216,
"end": 1587
}
|
class ____(unittest.TestCase):
def test_is_legacy_bundled_packages(self) -> None:
assert not is_module_from_legacy_bundled_package("foobar_asdf")
assert not is_module_from_legacy_bundled_package("PIL")
assert is_module_from_legacy_bundled_package("pycurl")
assert is_module_from_legacy_bundled_package("dateparser")
def test_stub_distribution_name(self) -> None:
assert stub_distribution_name("foobar_asdf") is None
assert stub_distribution_name("pycurl") == "types-pycurl"
assert stub_distribution_name("psutil") == "types-psutil"
assert stub_distribution_name("sassutils") == "types-libsass"
assert stub_distribution_name("google.cloud.ndb") == "types-google-cloud-ndb"
assert stub_distribution_name("google.cloud.ndb.submodule") == "types-google-cloud-ndb"
assert stub_distribution_name("google.cloud.unknown") is None
assert stub_distribution_name("google.protobuf") == "types-protobuf"
assert stub_distribution_name("google.protobuf.submodule") == "types-protobuf"
assert stub_distribution_name("google") is None
def test_period_in_top_level(self) -> None:
for packages in (non_bundled_packages_flat, legacy_bundled_packages):
for top_level_module in packages:
assert "." not in top_level_module
|
TestStubInfo
|
python
|
pytorch__pytorch
|
torch/package/_mock.py
|
{
"start": 1390,
"end": 2866
}
|
class ____:
_name: str
def __new__(cls, *args, **kwargs):
# _suppress_err is set by us in the mocked module impl, so that we can
# construct instances of MockedObject to hand out to people looking up
# module attributes.
# Any other attempt to construct a MockedObject instance (say, in the
# unpickling process) should give an error.
if not kwargs.get("_suppress_err"):
raise NotImplementedError(
f"Object '{cls._name}' was mocked out during packaging "
f"but it is being used in '__new__'. If this error is "
"happening during 'load_pickle', please ensure that your "
"pickled object doesn't contain any mocked objects."
)
# Otherwise, this is just a regular object creation
# (e.g. `x = MockedObject("foo")`), so pass it through normally.
return super().__new__(cls)
def __init__(self, name: str, _suppress_err: bool):
self.__dict__["_name"] = name
def __repr__(self):
return f"MockedObject({self._name})"
def install_method(method_name):
def _not_implemented(self, *args, **kwargs):
raise NotImplementedError(
f"Object '{self._name}' was mocked out during packaging but it is being used in {method_name}"
)
setattr(MockedObject, method_name, _not_implemented)
for method_name in _magic_methods:
install_method(method_name)
|
MockedObject
|
python
|
joke2k__faker
|
tests/sphinx/test_docstring.py
|
{
"start": 223,
"end": 15006
}
|
class ____:
def test_what_is_not_method(self):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="not_a_method",
name="name",
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
assert docstring.skipped
def test_name_is_not_dotted_path_to_provider_method(self):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.sphinx.docstring.ProviderMethodDocString._parse",
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
assert docstring.skipped
def test_name_is_dotted_path_to_base_provider_method(self):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
assert not docstring.skipped
assert docstring._method == "bothify"
assert docstring._locale == DEFAULT_LOCALE
def test_name_is_dotted_path_to_standard_provider_method(self):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.barcode.Provider.upc_a",
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
assert not docstring.skipped
assert docstring._method == "upc_a"
assert docstring._locale == DEFAULT_LOCALE
def test_name_is_dotted_path_to_localized_provider_method(self):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.automotive.en_PH.Provider.protocol_license_plate",
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
assert not docstring.skipped
assert docstring._method == "protocol_license_plate"
assert docstring._locale == "en_PH"
@mock.patch("faker.sphinx.docstring.logger.warning")
def test_log_warning(self, mock_logger_warning):
path = inspect.getfile(MagicMock)
name = "faker.providers.color.Provider"
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name=name,
obj=MagicMock,
options=MagicMock(),
lines=MagicMock(),
)
docstring._log_warning("Test Warning 1")
docstring._log_warning("Test Warning 2")
assert docstring._log_prefix == f"{path}:docstring of {name}: WARNING:"
calls = mock_logger_warning.call_args_list
assert len(calls) == 2
# 1st call to logger.warning
args, kwargs = calls[0]
assert len(args) == 1
assert not kwargs
assert args[0] == f"{path}:docstring of {name}: WARNING: Test Warning 1"
# 2nd call to logger.warning
args, kwargs = calls[1]
assert len(args) == 1
assert not kwargs
assert args[0] == f"{path}:docstring of {name}: WARNING: Test Warning 2"
def test_stringify_results(self, faker):
class TestObject:
def __repr__(self):
return "abcdefg"
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=[],
)
results = [
"", # Empty string
"'", # Single quote literal (escaped)
"'", # Single quote literal (unescaped)
'"', # Double quote literal (unescaped)
'"', # Double quote literal (escaped)
"aa\taaaaa\r\n", # String containing \t, \r, \n
b"abcdef", # Bytes object
True, # Booleans
False,
None, # None types
[1, 2, 3, 4, 5], # Other non-primitives
(1, 2, 3, 4, 5),
{1: 2, 2: 3, 3: 4, 4: 5},
faker.uuid4(cast_to=None),
TestObject(),
]
output = [docstring._stringify_result(result) for result in results]
assert output == [
"''", # Ends up as '' when printed
'"\'"', # Ends up as "'" when printed
'"\'"', # Ends up as "'" when printed
"'\"'", # Ends up as '"' when printed
"'\"'", # Ends up as '"' when printed
"'aa\\taaaaa\\r\\n'", # Ends up as 'aa\\taaaaa\\r\\n' when printed
"b'abcdef'", # Ends up as b'abcdef' when printed
"True", # Ends up as True when printed
"False", # Ends up as False when printed
"None", # Ends up as None when printed
"[1, 2, 3, 4, 5]", # Ends up using object's __repr__
"(1, 2, 3, 4, 5)",
"{1: 2, 2: 3, 3: 4, 4: 5}",
"UUID('e3e70682-c209-4cac-a29f-6fbed82c07cd')",
"abcdefg",
]
@mock.patch.object(ProviderMethodDocstring, "_log_warning")
def test_parsing_empty_lines(self, mock_log_warning):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=[],
)
assert not docstring.skipped
assert len(docstring._samples) == 1
assert docstring._samples[0] == Sample(DEFAULT_SAMPLE_SIZE, DEFAULT_SEED, "")
@mock.patch.object(ProviderMethodDocstring, "_log_warning")
def test_parsing_single_line_non_sample(self, mock_log_warning):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=["lorem"],
)
assert not docstring.skipped
assert len(docstring._samples) == 1
assert docstring._samples[0] == Sample(DEFAULT_SAMPLE_SIZE, DEFAULT_SEED, "")
@mock.patch.object(ProviderMethodDocstring, "_log_warning")
def test_parsing_single_line_valid_sample(self, mock_log_warning):
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=[":sample: a=1"],
)
assert not docstring.skipped
assert docstring._samples == [Sample(5, 0, "a=1")]
@mock.patch.object(ProviderMethodDocstring, "_log_warning")
def test_parsing_multiple_lines(self, mock_log_warning):
lines = [
"lorem", # No-op, not a sample line
":sample:", # Valid, default sample count, default seed, empty kwargs, 1st in expected
":sample 10 2000:", # Invalid, size and seed must be specified as "keyword arguments"
":sample 10 seed=1000:", # Invalid, size and seed must be specified as "keyword arguments"
":sample size=10 1000:", # Invalid, size and seed must be specified as "keyword arguments"
":sample size=0:", # Invalid, sample count cannot be zero
":sample size=100:", # Valid, 100 samples, default seed, empty kwargs, 2nd in expected
":sample size=0100:", # Invalid, leading zeroes are not allowed
":sampler", # Invalid, starts with ":sample" but will not pass validation
":sample :", # No-op, must be ":sample:" verbatim
":sample seed=4761:", # Valid, default sample count, seed value of 4761
"", # but line break was detected, so sample parsing stops here
"ipsum", # No-op, not a sample line
":sample sede=123", # Invalid, seed misspelled
":sample size=4 seed=100:", # Valid, will reset to 5 samples, seed value of 100, empty kwargs, the 4th
":sample seed=103 size=104:", # Invalid, "seed" kwarg must come after "size" kwarg
":sample: a=1, b=2", # Valid, default count and seed with kwargs, the 5th
":sample size=2222: a=2, b=1", # Valid, 2222 samples, default seed, and with kwargs, the 6th
":sample 11 12:", # Invalid, seed value must be set with "seed=" prefix
":sample seed=3333: d=3", # Valid, default count, seed value of 3333, with kwargs, the 7th
":sample size=3333 seed=2222: c=1", # Valid, 3333 samples, seed value of 2222, with kwargs, the 8th
":sample size=10 seed=10:", # Valid 9th, 10 samples, seed value of 10, with kwargs
" arg1=1,", # and will continue reading the next few lines
' arg2="val2",arg3="val3",', # and will prettify (missing whitespace after comma)
" arg4=4 , arg5=5,", # and will remove excess whitespaces here
' arg6="ar g6",', # but not if whitespaces are within double quotes
" arg7=' ar g 7',", # or within single quotes
' arg8="aaa,aaa"', # and will not prettify commas within quotes
":sample size=20 seed=3456:", # Valid 10th, 20 samples, seed value of 3456, with kwargs
'arg1="val1,val1,val1",', # and this is very similar to previous sample
'arg2="val2",', # and it is ok not to have leading whitespaces in continuation lines
'arg3="val3 val3",', # and it is ok to have a trailing comma after the last kwarg
]
expected_output = [
Sample(DEFAULT_SAMPLE_SIZE, DEFAULT_SEED, ""), # 1st sample parsed
Sample(100, DEFAULT_SEED, ""), # 2nd sample parsed
Sample(DEFAULT_SAMPLE_SIZE, 4761, ""), # 3rd sample parsed
Sample(5, 100, ""), # 4th sample parsed
Sample(DEFAULT_SAMPLE_SIZE, DEFAULT_SEED, "a=1, b=2"), # 5th sample parsed
Sample(2222, DEFAULT_SEED, "a=2, b=1"), # 6th sample parsed
Sample(DEFAULT_SAMPLE_SIZE, 3333, "d=3"), # 7th sample parsed
Sample(3333, 2222, "c=1"), # 8th sample parsed
Sample( # 9th sample parsed
10,
10,
'arg1=1, arg2="val2", arg3="val3", arg4=4, arg5=5, arg6="ar g6", arg7=\' ar g 7\', arg8="aaa,aaa"',
),
Sample( # 10th sample parsed
20,
3456,
'arg1="val1,val1,val1", arg2="val2", arg3="val3 val3",',
),
]
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=lines,
)
assert not docstring.skipped
assert docstring._samples == expected_output
@mock.patch.object(ProviderMethodDocstring, "_log_warning")
def test_end_to_end_sample_generation(self, mock_warning, faker):
non_sample_lines = ["lorem", "ipsum", "dolor", "sit", "amet"]
valid_sample_lines = [
":sample 1234jdbvhjdbygdvbhxjhx", # Will fail during sample section processing, 1st log warning
":sample: invalid_arg='value'", # Will fail during sample generation, 2nd log warning
":sample size=3 seed=1000: text='???###'", # 1st sample generation
":sample: number=100**100**100", # Will fail SampleCodeValidator validation, 3rd log warning
":sample seed=3210: letters='abcde'", # 2nd sample generation
":sample size=10 seed=1: abcd='abcd'", # Will fail during sample generation, 4th log warning
":sample size=20 seed=1234: text='???###', ", # 3rd sample generation
" letters='abcde'",
]
lines = non_sample_lines + valid_sample_lines
docstring = ProviderMethodDocstring(
app=MagicMock(),
what="method",
name="faker.providers.BaseProvider.bothify",
obj=MagicMock,
options=MagicMock(),
lines=lines,
)
output = docstring.lines[len(non_sample_lines) :]
assert output[0] == ":examples:"
# 1st sample generation
faker.seed_instance(1000)
assert output[1] == ""
assert output[2] == ">>> Faker.seed(1000)"
assert output[3] == ">>> for _ in range(5):"
assert output[4] == "... fake.bothify(text='???###')"
assert output[5] == "..."
for i in range(6, 11):
assert output[i] == docstring._stringify_result(faker.bothify(text="???###"))
# 2nd sample generation
faker.seed_instance(3210)
assert output[11] == ""
assert output[12] == ">>> Faker.seed(3210)"
assert output[13] == ">>> for _ in range(5):"
assert output[14] == "... fake.bothify(letters='abcde')"
assert output[15] == "..."
for i in range(16, 21):
assert output[i] == docstring._stringify_result(faker.bothify(letters="abcde"))
# 3rd sample generation
faker.seed_instance(1234)
assert output[21] == ""
assert output[22] == ">>> Faker.seed(1234)"
assert output[23] == ">>> for _ in range(20):"
assert output[24] == "... fake.bothify(text='???###', letters='abcde')"
assert output[25] == "..."
for i in range(26, 46):
assert output[i] == docstring._stringify_result(faker.bothify(text="???###", letters="abcde"))
calls = mock_warning.call_args_list
assert len(calls) == 4
# 1st call to _log_warning
args, kwargs = calls[0]
assert len(args) == 1
assert not kwargs
assert args[0] == "The section `:sample 1234jdbvhjdbygdvbhxjhx` is malformed and will be discarded."
# 2nd call to _log_warning
args, kwargs = calls[1]
assert len(args) == 1
assert not kwargs
assert args[0] == "Sample generation failed for method `bothify` with arguments `invalid_arg='value'`."
# 3rd call to _log_warning
args, kwargs = calls[2]
assert len(args) == 1
assert not kwargs
assert args[0] == (
"Invalid code elements detected. Sample generation will be skipped for "
"method `bothify` with arguments `number=100**100**100`."
)
# 4th call to _log_warning
args, kwargs = calls[3]
assert len(args) == 1
assert not kwargs
assert args[0] == "Sample generation failed for method `bothify` with arguments `abcd='abcd'`."
|
TestProviderMethodDocstring
|
python
|
tensorflow__tensorflow
|
tensorflow/python/profiler/pprof_profiler.py
|
{
"start": 8379,
"end": 15157
}
|
class ____(object):
"""Creates profiles in pprof format."""
def __init__(self, graph, run_metadata):
"""Constructor.
Args:
graph: A `Graph` instance.
run_metadata: A list of `RunMetadata` objects.
"""
self._graph = graph
self._run_metadata = run_metadata
self._string_table = StringTable()
self._functions = Functions(self._string_table)
self._locations = Locations(self._functions)
def profile(self):
"""Generates pprof profiles.
Returns:
Dictionary mapping from device name to proto in `profile_pb2.Profile`
format.
"""
profiles = {}
data_generator_func = self._get_profile_data_generator()
for device_index, device_stats in enumerate(
self._run_metadata.step_stats.dev_stats):
# Create profile
pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))
if not pprof_proto.sample:
print(
'Not enough data to create profile for device %s. Did you pass '
'RunMetadata to session.run call?' % device_stats.device)
continue
# Add device name comment
device_count = len(self._run_metadata.step_stats.dev_stats)
device_description = (
'Device %d of %d: %s' %
(device_index + 1, device_count, device_stats.device))
device_description_str_index = self._string_table.next_index()
pprof_proto.string_table.append(device_description)
pprof_proto.comment.append(device_description_str_index)
profiles[device_stats.device] = pprof_proto
return profiles
def _get_pprof_proto(self, profile_datum_generator):
"""Returns profile data in pprof proto format.
Args:
profile_datum_generator: Generator outputting `ProfileDatum` objects.
Returns:
A proto in pprof format.
"""
pprof_profile = profile_pb2.Profile()
samples = Samples(self._string_table)
for datum in profile_datum_generator:
if not datum.traceback:
continue
stack_frame = datum.traceback[-1]
after_apply_op = False
location_ids = []
# We add locations from stack trace in bottom-up order.
for stack_frame_index in reversed(range(len(datum.traceback) - 1)):
prev_stack_frame = stack_frame
stack_frame = datum.traceback[stack_frame_index]
# Call at current frame calls function at previous frame.
prev_file_path = prev_stack_frame[0]
prev_function = prev_stack_frame[2]
prev_function_start_line = -1
curr_file_path = stack_frame[0]
curr_line_number = stack_frame[1]
# Skip all calls up to apply_op since they are the same for all ops.
if not after_apply_op:
if prev_function == 'apply_op':
after_apply_op = True
continue
location_index = self._locations.index_of(
curr_file_path, curr_line_number,
prev_function, prev_file_path, prev_function_start_line)
location_ids.append(location_index)
samples.add(datum, location_ids)
sample_type_description = 'count'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('count')
sample_type_description = 'all_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
sample_type_description = 'op_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
pprof_profile.string_table.extend(self._string_table.string_table())
pprof_profile.sample.extend(samples.get_sample_protos())
pprof_profile.function.extend(self._functions.function_protos())
pprof_profile.location.extend(self._locations.location_protos())
return pprof_profile
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_traceback = defaultdict(list)
node_to_op_type = defaultdict(str)
for op in self._graph.get_operations():
node_to_traceback[op.name] = op.traceback
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':
continue
yield ProfileDatum(
node_stats,
node_to_op_type[node_stats.node_name],
node_to_traceback[node_stats.node_name])
return profile_data_generator
def get_profiles(graph, run_metadata):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
Returns:
A dictionary mapping from device name to pprof proto for that device.
"""
return PprofProfiler(graph, run_metadata).profile()
def profile(graph, run_metadata, output_dir=None):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
output_dir: (string) Directory to output pprof profile to.
Profile files for each device will be stored in compressed
serialized proto format. If output_dir is None, profile protos
will be printed to stdout instead.
Returns:
List of output files created by this profile call.
(Note: this list will be empty if output_dir is None)
"""
profiles = get_profiles(graph, run_metadata)
output_file_template = None
if output_dir:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
time_suffix = time.strftime('%Y%m%d%H%M%S')
output_file_template = os.path.join(
output_dir, '%s_' + time_suffix + '.pb.gz')
profile_files = []
for device, pprof_proto in profiles.items():
if output_file_template is None:
print('No output directory specified, printing to stdout instead.')
print(pprof_proto)
else:
device_name = str(device).strip('/').translate(
maketrans('/:', '__'))
profile_file = output_file_template % device_name
profile_files.append(profile_file)
with gzip.open(profile_file, 'w') as output_file:
print('Writing profile to %s...' % profile_file)
output_file.write(pprof_proto.SerializeToString())
return profile_files
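# Illustrative usage sketch (not part of the original module), assuming the
# TF1 session API: collect `RunMetadata` with full tracing, then call
# `profile()` to write one compressed pprof proto per device. The output
# directory '/tmp/pprof' is a hypothetical choice.
if __name__ == '__main__':
  import tensorflow.compat.v1 as tf
  tf.disable_eager_execution()
  x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  y = tf.matmul(x, x)
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()
  with tf.Session() as sess:
    sess.run(y, options=run_options, run_metadata=run_metadata)
    # profile() returns the list of .pb.gz files it created.
    created_files = profile(sess.graph, run_metadata, output_dir='/tmp/pprof')
    print(created_files)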
|
PprofProfiler
|
python
|
crytic__slither
|
slither/detectors/erc/erc20/incorrect_erc20_interface.py
|
{
"start": 540,
"end": 4327
}
|
class ____(AbstractDetector):
"""
Incorrect ERC20 Interface
"""
ARGUMENT = "erc20-interface"
HELP = "Incorrect ERC20 interfaces"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-erc20-interface"
WIKI_TITLE = "Incorrect erc20 interface"
WIKI_DESCRIPTION = "Incorrect return values for `ERC20` functions. A contract compiled with Solidity > 0.4.22 interacting with these functions will fail to execute them, as the return value is missing."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Token{
function transfer(address to, uint value) external;
//...
}
```
`Token.transfer` does not return a boolean. Bob deploys the token. Alice creates a contract that interacts with it but assumes a correct `ERC20` interface implementation. Alice's contract is unable to interact with Bob's contract."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Set the appropriate return values and types for the defined `ERC20` functions."
)
@staticmethod
def incorrect_erc20_interface(signature: Tuple[str, List[str], List[str]]) -> bool:
(name, parameters, returnVars) = signature
if name == "transfer" and parameters == ["address", "uint256"] and returnVars != ["bool"]:
return True
if (
name == "transferFrom"
and parameters == ["address", "address", "uint256"]
and returnVars != ["bool"]
):
return True
if name == "approve" and parameters == ["address", "uint256"] and returnVars != ["bool"]:
return True
if (
name == "allowance"
and parameters == ["address", "address"]
and returnVars != ["uint256"]
):
return True
if name == "balanceOf" and parameters == ["address"] and returnVars != ["uint256"]:
return True
if name == "totalSupply" and parameters == [] and returnVars != ["uint256"]:
return True
return False
@staticmethod
def detect_incorrect_erc20_interface(contract: Contract) -> List[FunctionContract]:
"""Detect incorrect ERC20 interface
Returns:
list(str) : list of incorrect function signatures
"""
# Verify this is an ERC20 contract.
if not contract.is_possible_erc20():
return []
# If this contract implements a function from ERC721, we can assume it is an ERC721 token. These tokens
# offer functions which are similar to ERC20, but are not compatible.
if contract.is_possible_erc721():
return []
funcs = contract.functions
functions = [
f
for f in funcs
if IncorrectERC20InterfaceDetection.incorrect_erc20_interface(f.signature)
]
return functions
def _detect(self) -> List[Output]:
"""Detect incorrect erc20 interface
Returns:
dict: [contract name] = set(str) events
"""
results = []
for c in self.compilation_unit.contracts_derived:
functions = IncorrectERC20InterfaceDetection.detect_incorrect_erc20_interface(c)
if functions:
for function in functions:
info: DETECTOR_INFO = [
c,
" has incorrect ERC20 function interface:",
function,
"\n",
]
json = self.generate_result(info)
results.append(json)
return results
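# Illustrative sketch (not part of the original detector): the signature check
# is a pure static predicate over (name, parameter_types, return_types) tuples,
# so it can be exercised directly, e.g.:
#
#   IncorrectERC20InterfaceDetection.incorrect_erc20_interface(
#       ("transfer", ["address", "uint256"], []))        # True: bool return missing
#   IncorrectERC20InterfaceDetection.incorrect_erc20_interface(
#       ("transfer", ["address", "uint256"], ["bool"]))  # False: conforming signature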
|
IncorrectERC20InterfaceDetection
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/views/generic.py
|
{
"start": 186,
"end": 361
}
|
class ____(ProtectedResourceMixin, View):
"""
Generic view protecting resources by providing OAuth2 authentication out of the box
"""
pass
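# Illustrative usage sketch (not part of the original module): a subclass only
# serves requests carrying a valid OAuth2 access token. `ApiEndpoint` and the
# "secret/" route are hypothetical names.
#
#   # views.py
#   from django.http import HttpResponse
#   from oauth2_provider.views.generic import ProtectedResourceView
#
#   class ApiEndpoint(ProtectedResourceView):
#       def get(self, request, *args, **kwargs):
#           return HttpResponse("Hello, OAuth2!")
#
#   # urls.py
#   from django.urls import path
#   urlpatterns = [path("secret/", ApiEndpoint.as_view())]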
|
ProtectedResourceView
|