language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__setuptools | setuptools/command/editable_wheel.py | {
"start": 3557,
"end": 15765
} | class ____(Command):
"""Build 'editable' wheel for development.
This command is private and reserved for internal use of setuptools,
users should rely on ``setuptools.build_meta`` APIs.
"""
description = "DO NOT CALL DIRECTLY, INTERNAL ONLY: create PEP 660 editable wheel"
user_options = [
("dist-dir=", "d", "directory to put final built distributions in"),
("dist-info-dir=", "I", "path to a pre-build .dist-info directory"),
("mode=", None, cleandoc(_EditableMode.__doc__ or "")),
]
def initialize_options(self):
self.dist_dir = None
self.dist_info_dir = None
self.project_dir = None
self.mode = None
def finalize_options(self) -> None:
dist = self.distribution
self.project_dir = dist.src_root or os.curdir
self.package_dir = dist.package_dir or {}
self.dist_dir = Path(self.dist_dir or os.path.join(self.project_dir, "dist"))
def run(self) -> None:
try:
self.dist_dir.mkdir(exist_ok=True)
self._ensure_dist_info()
# Add missing dist_info files
self.reinitialize_command("bdist_wheel")
bdist_wheel = self.get_finalized_command("bdist_wheel")
bdist_wheel.write_wheelfile(self.dist_info_dir)
self._create_wheel_file(bdist_wheel)
except Exception as ex:
project = self.distribution.name or self.distribution.get_name()
py310.add_note(
ex,
f"An error occurred when building editable wheel for {project}.\n"
"See debugging tips in: "
"https://setuptools.pypa.io/en/latest/userguide/development_mode.html#debugging-tips",
)
raise
def _ensure_dist_info(self):
if self.dist_info_dir is None:
dist_info = cast(dist_info_cls, self.reinitialize_command("dist_info"))
dist_info.output_dir = self.dist_dir
dist_info.ensure_finalized()
dist_info.run()
self.dist_info_dir = dist_info.dist_info_dir
else:
assert str(self.dist_info_dir).endswith(".dist-info")
assert Path(self.dist_info_dir, "METADATA").exists()
def _install_namespaces(self, installation_dir, pth_prefix):
# XXX: Only required to support the deprecated namespace practice
dist = self.distribution
if not dist.namespace_packages:
return
src_root = Path(self.project_dir, self.package_dir.get("", ".")).resolve()
installer = _NamespaceInstaller(dist, installation_dir, pth_prefix, src_root)
installer.install_namespaces()
def _find_egg_info_dir(self) -> str | None:
parent_dir = Path(self.dist_info_dir).parent if self.dist_info_dir else Path()
candidates = map(str, parent_dir.glob("*.egg-info"))
return next(candidates, None)
def _configure_build(
self, name: str, unpacked_wheel: StrPath, build_lib: StrPath, tmp_dir: StrPath
):
"""Configure commands to behave in the following ways:
- Build commands can write to ``build_lib`` if they really want to...
(but this folder is expected to be ignored and modules are expected to live
in the project directory...)
- Binary extensions should be built in-place (editable_mode = True)
- Data/header/script files are not part of the "editable" specification
so they are written directly to the unpacked_wheel directory.
"""
# Non-editable files (data, headers, scripts) are written directly to the
# unpacked_wheel
dist = self.distribution
wheel = str(unpacked_wheel)
build_lib = str(build_lib)
data = str(Path(unpacked_wheel, f"{name}.data", "data"))
headers = str(Path(unpacked_wheel, f"{name}.data", "headers"))
scripts = str(Path(unpacked_wheel, f"{name}.data", "scripts"))
# egg-info may be generated again to create a manifest (used for package data)
egg_info = cast(
egg_info_cls, dist.reinitialize_command("egg_info", reinit_subcommands=True)
)
egg_info.egg_base = str(tmp_dir)
egg_info.ignore_egg_info_in_manifest = True
build = cast(
build_cls, dist.reinitialize_command("build", reinit_subcommands=True)
)
install = cast(
install_cls, dist.reinitialize_command("install", reinit_subcommands=True)
)
build.build_platlib = build.build_purelib = build.build_lib = build_lib
install.install_purelib = install.install_platlib = install.install_lib = wheel
install.install_scripts = build.build_scripts = scripts
install.install_headers = headers
install.install_data = data
# For portability, ensure scripts are built with #!python shebang
# pypa/setuptools#4863
build_scripts = dist.get_command_obj("build_scripts")
build_scripts.executable = 'python'
install_scripts = cast(
install_scripts_cls, dist.get_command_obj("install_scripts")
)
install_scripts.no_ep = True
build.build_temp = str(tmp_dir)
build_py = cast(build_py_cls, dist.get_command_obj("build_py"))
build_py.compile = False
build_py.existing_egg_info_dir = self._find_egg_info_dir()
self._set_editable_mode()
build.ensure_finalized()
install.ensure_finalized()
def _set_editable_mode(self):
"""Set the ``editable_mode`` flag in the build sub-commands"""
dist = self.distribution
build = dist.get_command_obj("build")
for cmd_name in build.get_sub_commands():
cmd = dist.get_command_obj(cmd_name)
if hasattr(cmd, "editable_mode"):
cmd.editable_mode = True
elif hasattr(cmd, "inplace"):
cmd.inplace = True # backward compatibility with distutils
def _collect_build_outputs(self) -> tuple[list[str], dict[str, str]]:
files: list[str] = []
mapping: dict[str, str] = {}
build = self.get_finalized_command("build")
for cmd_name in build.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
if hasattr(cmd, "get_outputs"):
files.extend(cmd.get_outputs() or [])
if hasattr(cmd, "get_output_mapping"):
mapping.update(cmd.get_output_mapping() or {})
return files, mapping
def _run_build_commands(
self,
dist_name: str,
unpacked_wheel: StrPath,
build_lib: StrPath,
tmp_dir: StrPath,
) -> tuple[list[str], dict[str, str]]:
self._configure_build(dist_name, unpacked_wheel, build_lib, tmp_dir)
self._run_build_subcommands()
files, mapping = self._collect_build_outputs()
self._run_install("headers")
self._run_install("scripts")
self._run_install("data")
return files, mapping
def _run_build_subcommands(self) -> None:
"""
Issue #3501 indicates that some plugins/customizations might rely on:
1. ``build_py`` not running
2. ``build_py`` always copying files to ``build_lib``
However both these assumptions may be false in editable_wheel.
This method implements a temporary workaround to support the ecosystem
while the implementations catch up.
"""
# TODO: Once plugins/customizations had the chance to catch up, replace
# `self._run_build_subcommands()` with `self.run_command("build")`.
# Also remove _safely_run, TestCustomBuildPy. Suggested date: Aug/2023.
build = self.get_finalized_command("build")
for name in build.get_sub_commands():
cmd = self.get_finalized_command(name)
if name == "build_py" and type(cmd) is not build_py_cls:
self._safely_run(name)
else:
self.run_command(name)
def _safely_run(self, cmd_name: str):
try:
return self.run_command(cmd_name)
except Exception:
SetuptoolsDeprecationWarning.emit(
"Customization incompatible with editable install",
f"""
{traceback.format_exc()}
If you are seeing this warning it is very likely that a setuptools
plugin or customization overrides the `{cmd_name}` command, without
taking into consideration how editable installs run build steps
starting from setuptools v64.0.0.
Plugin authors and developers relying on custom build steps are
encouraged to update their `{cmd_name}` implementation considering the
information about editable installs in
https://setuptools.pypa.io/en/latest/userguide/extension.html.
For the time being `setuptools` will silence this error and ignore
the faulty command, but this behavior will change in future versions.
""",
# TODO: define due_date
# There is a series of shortcomings with the available editable install
# methods, and they are very controversial. This is something that still
# needs work.
)
def _create_wheel_file(self, bdist_wheel):
from wheel.wheelfile import WheelFile
dist_info = self.get_finalized_command("dist_info")
dist_name = dist_info.name
tag = "-".join(bdist_wheel.get_tag())
build_tag = "0.editable" # According to PEP 427 needs to start with digit
archive_name = f"{dist_name}-{build_tag}-{tag}.whl"
wheel_path = Path(self.dist_dir, archive_name)
if wheel_path.exists():
wheel_path.unlink()
unpacked_wheel = TemporaryDirectory(suffix=archive_name)
build_lib = TemporaryDirectory(suffix=".build-lib")
build_tmp = TemporaryDirectory(suffix=".build-temp")
with unpacked_wheel as unpacked, build_lib as lib, build_tmp as tmp:
unpacked_dist_info = Path(unpacked, Path(self.dist_info_dir).name)
shutil.copytree(self.dist_info_dir, unpacked_dist_info)
self._install_namespaces(unpacked, dist_name)
files, mapping = self._run_build_commands(dist_name, unpacked, lib, tmp)
strategy = self._select_strategy(dist_name, tag, lib)
with strategy, WheelFile(wheel_path, "w") as wheel_obj:
strategy(wheel_obj, files, mapping)
wheel_obj.write_files(unpacked)
return wheel_path
def _run_install(self, category: str):
has_category = getattr(self.distribution, f"has_{category}", None)
if has_category and has_category():
_logger.info(f"Installing {category} as non editable")
self.run_command(f"install_{category}")
def _select_strategy(
self,
name: str,
tag: str,
build_lib: StrPath,
) -> EditableStrategy:
"""Decides which strategy to use to implement an editable installation."""
build_name = f"__editable__.{name}-{tag}"
project_dir = Path(self.project_dir)
mode = _EditableMode.convert(self.mode)
if mode is _EditableMode.STRICT:
auxiliary_dir = _empty_dir(Path(self.project_dir, "build", build_name))
return _LinkTree(self.distribution, name, auxiliary_dir, build_lib)
packages = _find_packages(self.distribution)
has_simple_layout = _simple_layout(packages, self.package_dir, project_dir)
is_compat_mode = mode is _EditableMode.COMPAT
if set(self.package_dir) == {""} and has_simple_layout or is_compat_mode:
# src-layout(ish) is relatively safe for a simple pth file
src_dir = self.package_dir.get("", ".")
return _StaticPth(self.distribution, name, [Path(project_dir, src_dir)])
# Use a MetaPathFinder to avoid adding accidental top-level packages/modules
return _TopLevelFinder(self.distribution, name)
| editable_wheel |
python | gevent__gevent | src/gevent/events.py | {
"start": 10698,
"end": 10910
} | class ____(BaseException):
"""
Subscribers to will-patch events can raise instances
of this class to tell gevent not to patch that particular item.
"""
@implementer(IGeventWillPatchEvent)
| DoNotPatch |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/issue_priority_greater_or_equal_handler.py | {
"start": 325,
"end": 708
} | class ____(DataConditionHandler[WorkflowEventData]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES
@staticmethod
def evaluate_value(event_data: WorkflowEventData, comparison: Any) -> bool:
group = event_data.group
return group.priority >= comparison
| IssuePriorityGreaterOrEqualConditionHandler |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py | {
"start": 225,
"end": 341
} | class ____:
def __exit__(self, *args: object) -> None: ...
async def __aexit__(self, *args) -> str: ...
| GoodOne |
python | networkx__networkx | networkx/exception.py | {
"start": 645,
"end": 1382
} | class ____(NetworkXException):
"""Raised when a null graph is provided as input to an algorithm
that cannot use it.
The null graph is sometimes considered a pointless concept [1]_,
thus the name of the exception.
Notes
-----
Null graphs and empty graphs are often used interchangeably but they
are well defined in NetworkX. An ``empty_graph`` is a graph with ``n`` nodes
and 0 edges, and a ``null_graph`` is a graph with 0 nodes and 0 edges.
References
----------
.. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
Concept?" In Graphs and Combinatorics Conference, George
Washington University. New York: Springer-Verlag, 1973.
"""
| NetworkXPointlessConcept |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 70960,
"end": 72289
} | class ____(nn.Module):
def __init__(self, config: OneFormerConfig):
"""
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://huggingface.co/papers/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`OneFormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
self.encoder = load_backbone(config)
self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels)
def forward(self, pixel_values: Tensor, output_hidden_states: bool = False) -> OneFormerPixelLevelModuleOutput:
features: list[Tensor] = self.encoder(pixel_values).feature_maps
decoder_output: OneFormerPixelDecoderOutput = self.decoder(features, output_hidden_states=output_hidden_states)
return OneFormerPixelLevelModuleOutput(
encoder_features=tuple(features),
decoder_features=decoder_output.multi_scale_features,
decoder_last_feature=decoder_output.mask_features,
)
# Modified from transformers.models.detr.modeling_detr.DetrAttention with Detr->OneFormer
| OneFormerPixelLevelModule |
python | pytorch__pytorch | torchgen/gen_aoti_c_shim.py | {
"start": 19717,
"end": 28140
} | class ____:
inductor_fallback_ops: dict[str, dict[str, list[str]]]
func_group_mapping: dict[OperatorName, NativeFunctionsGroup]
dispatch_key: DispatchKey | None
backend_indices: dict[DispatchKey, BackendIndex]
header: bool # True to generate .h and False to generate .cpp
extend_aoti_c_shim: bool
@method_with_native_function
def __call__(
self,
func: NativeFunction,
) -> str | None:
version_info = self.inductor_fallback_ops[get_fallback_op_name(func)]
result = gen_c_shim(
func,
version_info,
self.func_group_mapping,
self.dispatch_key,
self.backend_indices,
self.header,
self.extend_aoti_c_shim,
)
return result
def gen_aoti_c_shim(
native_functions: Sequence[NativeFunction],
inductor_fallback_ops: dict[str, dict[str, list[str]]],
func_group_mapping: dict[OperatorName, NativeFunctionsGroup],
dispatch_key: DispatchKey | None,
backend_indices: dict[DispatchKey, BackendIndex],
header: bool,
extend_aoti_c_shim: bool,
includes: str = "",
) -> str:
body = "\n".join(
list(
mapMaybe(
ShimGenerator(
inductor_fallback_ops,
func_group_mapping,
dispatch_key,
backend_indices,
header,
extend_aoti_c_shim,
),
native_functions,
)
)
)
device = "aten" if dispatch_key is None else dispatch_key.lower()
include_device_functions = (
"#include <ATen/Functions.h>"
if dispatch_key is None
else f"#include <ATen/{str(dispatch_key)}Functions.h>"
)
aten_warning = (
(
"\n\n// This file corresponds to the aten_shimified_ops list in torchgen/aoti/fallback_ops.py\n"
)
if dispatch_key is None
else ""
)
warning = """
// WARNING: THIS FILE IS AUTOGENERATED BY torchgen. DO NOT MODIFY BY HAND.
// See https://github.com/pytorch/pytorch/blob/7e86a7c0155295539996e0cf422883571126073e/torchgen/gen.py#L2424-L2436 for details"""
if header:
return (
warning
+ aten_warning
+ textwrap.dedent("""
#pragma once
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
#ifdef __cplusplus
extern "C" {
#endif
""")
+ body
+ textwrap.dedent("""
#ifdef __cplusplus
} // extern "C"
#endif
""")
)
else:
return (
warning
+ aten_warning
+ textwrap.dedent(f"""
#include <torch/csrc/inductor/aoti_torch/generated/{"extend/" if extend_aoti_c_shim else ""}c_shim_{device}.h>
#include <torch/csrc/inductor/aoti_torch/utils.h>
#ifndef AT_PER_OPERATOR_HEADERS
{include_device_functions}
#include <ATen/CompositeExplicitAutogradFunctions.h>
#include <ATen/CompositeExplicitAutogradNonFunctionalFunctions.h>
#include <ATen/CompositeImplicitAutogradFunctions.h>
#else
""")
+ includes
+ textwrap.dedent("""
#endif // AT_PER_OPERATOR_HEADERS
using namespace torch::aot_inductor;
""")
+ body
)
def gen_aoti_c_shim_files(
aoti_fm: FileManager,
aoti_backends: set[DispatchKey | None],
native_functions: Sequence[NativeFunction],
backend_indices: dict[DispatchKey, BackendIndex],
structured_native_functions: Sequence[NativeFunctionsGroup],
extra_cuda_headers: str,
extend_aoti_c_shim: bool,
update_aoti_c_shim: bool,
) -> None:
structured_func_group_dict = {}
for func_group in structured_native_functions:
for func in func_group.functions():
if func.structured_delegate is not None:
structured_func_group_dict[func.structured_delegate] = func_group
break
for dispatch_key in aoti_backends:
# Use aten_shimified_ops for the aten backend, inductor_fallback_ops for others
fallback_ops_dict = (
aten_shimified_ops if dispatch_key is None else inductor_fallback_ops
)
fallbacks = {}
for func in native_functions:
op_name = get_fallback_op_name(func)
if op_name in fallback_ops_dict:
fallbacks[op_name] = func
fallback_native_functions = tuple(
value for _, value in sorted(fallbacks.items())
)
# Use "aten" as the device name when dispatch_key is Generic
device_name = "aten" if dispatch_key is None else dispatch_key.lower()
# header files were checked in for ABI-compatibility checking
header_file_name = f"c_shim_{device_name}.h"
new_header = gen_aoti_c_shim(
fallback_native_functions,
fallback_ops_dict,
structured_func_group_dict,
dispatch_key,
backend_indices,
header=True,
extend_aoti_c_shim=extend_aoti_c_shim,
includes="",
)
if update_aoti_c_shim:
aoti_fm.write(
header_file_name,
lambda: new_header,
)
else:
try:
with open(
os.path.join(aoti_fm.install_dir, header_file_name)
) as old_file:
old_header = old_file.read()
if old_header != new_header:
diff = "\n".join(
difflib.unified_diff(
old_header.splitlines(),
new_header.splitlines(),
fromfile="expected",
tofile="actual",
lineterm="",
)
)
raise RuntimeError(f"""
The generated AOTInductor C shim header files have unexpectedly changed. This
indicates an AOTInductor fallback operator ABI backward compatibility breakage!!!
Only in a limited number of situations, this is allowed:
1. You added a fallback op to the inductor_fallback_ops list in torchgen/aoti/fallback_ops.py.
If that's the case, run `python torchgen/gen.py --update-aoti-c-shim` to add a new entry to
existing C shim header files.
2. You added a new default argument to an existing fallback op. This is clearly a BC breaking
change in the AOTInductor land. You need to annotate the new default argument in
torchgen/aoti/fallback_ops.py, and then run `python torchgen/gen.py --update-aoti-c-shim` to
update the C shim header files by creating different versions of the fallback op. See
https://github.com/pytorch/pytorch/pull/154848 as an example.
{diff}
""")
except FileNotFoundError:
print(
f"{os.path.join(aoti_fm.install_dir, header_file_name)} not found"
)
# cpp files are always generated on-the-fly
def headers_for_aoti() -> str:
headers = []
for func in fallback_native_functions:
header = get_header_for_aoti(
func,
structured_func_group_dict,
dispatch_key,
backend_indices,
extend_aoti_c_shim=extend_aoti_c_shim,
)
if header is not None:
headers.append(header)
return "\n".join(sorted(set(headers)))
extra_headers = (
extra_cuda_headers
if dispatch_key is not None and is_cuda_dispatch_key(dispatch_key)
else ""
)
aoti_fm.write(
f"c_shim_{device_name}.cpp",
lambda: gen_aoti_c_shim(
fallback_native_functions,
fallback_ops_dict,
structured_func_group_dict,
dispatch_key,
backend_indices,
header=False,
extend_aoti_c_shim=extend_aoti_c_shim,
includes=headers_for_aoti() + "\n" + extra_headers,
),
)
| ShimGenerator |
python | jazzband__django-redis | django_redis/util.py | {
"start": 0,
"end": 266
} | class ____(str):
"""
A stub string class that we can use to check if a key was created already.
"""
def original_key(self) -> str:
return self.rsplit(":", 1)[1]
def default_reverse_key(key: str) -> str:
return key.split(":", 2)[2]
| CacheKey |
python | langchain-ai__langchain | libs/langchain/langchain_classic/callbacks/streaming_aiter.py | {
"start": 349,
"end": 2667
} | class ____(AsyncCallbackHandler):
"""Callback handler that returns an async iterator."""
queue: asyncio.Queue[str]
done: asyncio.Event
@property
def always_verbose(self) -> bool:
"""Always verbose."""
return True
def __init__(self) -> None:
"""Instantiate AsyncIteratorCallbackHandler."""
self.queue = asyncio.Queue()
self.done = asyncio.Event()
@override
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
**kwargs: Any,
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
@override
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if token is not None and token != "":
self.queue.put_nowait(token)
@override
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.done.set()
@override
async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
self.done.set()
# TODO: implement the other methods
async def aiter(self) -> AsyncIterator[str]:
"""Asynchronous iterator that yields tokens."""
while not self.queue.empty() or not self.done.is_set():
# Wait for the next token in the queue,
# but stop waiting if the done event is set
done, other = await asyncio.wait(
[
# NOTE: If you add other tasks here, update the code below,
# which assumes each set has exactly one task each
asyncio.ensure_future(self.queue.get()),
asyncio.ensure_future(self.done.wait()),
],
return_when=asyncio.FIRST_COMPLETED,
)
# Cancel the other task
if other:
other.pop().cancel()
# Extract the value of the first completed task
token_or_done = cast("str | Literal[True]", done.pop().result())
# If the extracted value is the boolean True, the done event was set
if token_or_done is True:
break
# Otherwise, the extracted value is a token, which we yield
yield token_or_done
| AsyncIteratorCallbackHandler |
python | PyCQA__pylint | doc/data/messages/d/duplicate-code/good/fruit.py | {
"start": 0,
"end": 504
} | class ____:
def __init__(self):
self.remaining_bites = 3
def take_bite(self):
if self.remaining_bites > 0:
print(f"You take a bite of the {self.__class__.__name__.lower()}.")
self.remaining_bites -= 1
else:
print(f"The {self.__class__.__name__.lower()} is already eaten up!")
def eaten_by_animal(self, animal):
self.remaining_bites = 0
print(f"The {self.__class__.__name__.lower()} has been eaten by an animal.")
| Fruit |
python | walkccc__LeetCode | solutions/233. Number of Digit One/233.py | {
"start": 0,
"end": 361
} | class ____:
def countDigitOne(self, n: int) -> int:
ans = 0
pow10 = 1
while pow10 <= n:
divisor = pow10 * 10
quotient = n // divisor
remainder = n % divisor
if quotient > 0:
ans += quotient * pow10
if remainder >= pow10:
ans += min(remainder - pow10 + 1, pow10)
pow10 *= 10
return ans
| Solution |
python | doocs__leetcode | lcof2/剑指 Offer II 102. 加减的目标值/Solution2.py | {
"start": 0,
"end": 541
} | class ____:
def findTargetSumWays(self, nums: List[int], target: int) -> int:
s = sum(nums)
if s - target < 0 or (s - target) % 2 != 0:
return 0
target = (s - target) // 2 + 1
n = len(nums) + 1
dp = [[0] * target for _ in range(n)]
dp[0][0] = 1
for i in range(1, n):
for j in range(target):
dp[i][j] = dp[i - 1][j]
if nums[i - 1] <= j:
dp[i][j] += dp[i - 1][j - nums[i - 1]]
return dp[-1][-1]
| Solution |
python | django__django | django/template/defaulttags.py | {
"start": 10370,
"end": 11203
} | class ____(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<%s>" % self.__class__.__name__
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
yield from nodelist
@property
def nodelist(self):
return NodeList(self)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ""
| IfNode |
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 25331,
"end": 25750
} | class ____(nn.Module):
def __init__(self, dim, mult=4):
super().__init__()
inner_dim = int(dim * mult)
self.norm = nn.LayerNorm(dim)
self.fc1 = nn.Linear(dim, inner_dim, bias=False)
self.activation = nn.GELU()
self.fc2 = nn.Linear(inner_dim, dim, bias=False)
def forward(self, x):
return self.fc2(self.activation(self.fc1(self.norm(x))))
| EvollaFeedForward |
python | keras-team__keras | integration_tests/torch_workflow_test.py | {
"start": 126,
"end": 310
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = layers.Dense(1)
def forward(self, x):
x = self.fc1(x)
return x
| Net |
python | dask__dask | dask/tests/test_task_spec.py | {
"start": 12047,
"end": 12321
} | class ____(namedtuple("NewArgsNamedTuple", "ab, c")):
"""Namedtuple with a custom constructor."""
def __new__(cls, a, b, c):
return super().__new__(cls, f"{a}-{b}", c)
def __getnewargs__(self):
return *self.ab.split("-"), self.c
| NewArgsNamedTuple |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/common.py | {
"start": 808,
"end": 1494
} | class ____:
def __init__(self, credentials, **kwargs):
self.config = credentials
self.customer_ids = ["1"]
def get_type(self, type):
return MockSearchRequest()
def get_service(self, service):
if service == "GoogleAdsFieldService":
return MockGoogleAdsFieldService()
return MockGoogleAdsService()
@staticmethod
def load_from_dict(config, version=None):
return MockGoogleAdsClient(config)
def send_request(self, query, customer_id, login_customer_id="none"):
yield from ()
def get_accessible_accounts(self):
yield from ["fake_customer_id", "fake_customer_id_2"]
| MockGoogleAdsClient |
python | wandb__wandb | wandb/vendor/pygments/lexers/scripting.py | {
"start": 56252,
"end": 64859
} | class ____(RegexLexer):
"""
Easytrieve Plus is a programming language for extracting, filtering and
converting sequential data. Furthermore it can layout data for reports.
It is mainly used on mainframe platforms and can access several of the
mainframe's native file formats. It is somewhat comparable to awk.
.. versionadded:: 2.1
"""
name = 'Easytrieve'
aliases = ['easytrieve']
filenames = ['*.ezt', '*.mac']
mimetypes = ['text/x-easytrieve']
flags = 0
# Note: We cannot use r'\b' at the start and end of keywords because
# Easytrieve Plus delimiter characters are:
#
# * space ( )
# * apostrophe (')
# * period (.)
# * comma (,)
# * paranthesis ( and )
# * colon (:)
#
# Additionally words end once a '*' appears, indicatins a comment.
_DELIMITERS = r' \'.,():\n'
_DELIMITERS_OR_COMENT = _DELIMITERS + '*'
_DELIMITER_PATTERN = '[' + _DELIMITERS + ']'
_DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')'
_NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMENT + ']'
_OPERATORS_PATTERN = u'[.+\\-/=\\[\\](){}<>;,&%¬]'
_KEYWORDS = [
'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR',
'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU',
'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR',
'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D',
'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI',
'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE',
'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF',
'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12',
'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21',
'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30',
'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7',
'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST',
'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT',
'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT',
'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY',
'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE',
'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES',
'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE',
'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT',
'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1',
'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER',
'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT',
'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT',
'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT',
'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE',
'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT',
'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM',
'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT',
'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME',
'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC',
'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE',
'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST'
]
tokens = {
'root': [
(r'\*.*\n', Comment.Single),
(r'\n+', Whitespace),
# Macro argument
(r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable,
'after_macro_argument'),
# Macro call
(r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable),
(r'(FILE|MACRO|REPORT)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'after_declaration'),
(r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')',
bygroups(Keyword.Declaration, Operator)),
(words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE),
bygroups(Keyword.Reserved, Operator)),
(_OPERATORS_PATTERN, Operator),
# Procedure declaration
(r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)',
bygroups(Name.Function, Whitespace, Operator, Whitespace,
Keyword.Declaration, Whitespace)),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String),
(r'\s+', Whitespace),
# Everything else just belongs to a name
(_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
],
'after_declaration': [
(_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
default('#pop'),
],
'after_macro_argument': [
(r'\*.*\n', Comment.Single, '#pop'),
(r'\s+', Whitespace, '#pop'),
(_OPERATORS_PATTERN, Operator, '#pop'),
(r"'(''|[^'])*'", String, '#pop'),
# Everything else just belongs to a name
(_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
],
}
_COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
_MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO')
def analyse_text(text):
"""
Perform a structural analysis for basic Easytrieve constructs.
"""
result = 0.0
lines = text.split('\n')
hasEndProc = False
hasHeaderComment = False
hasFile = False
hasJob = False
hasProc = False
hasParm = False
hasReport = False
def isCommentLine(line):
return EasytrieveLexer._COMMENT_LINE_REGEX.match(lines[0]) is not None
def isEmptyLine(line):
return not bool(line.strip())
# Remove possible empty lines and header comments.
while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
if not isEmptyLine(lines[0]):
hasHeaderComment = True
del lines[0]
if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
# Looks like an Easytrieve macro.
result = 0.4
if hasHeaderComment:
result += 0.4
else:
# Scan the source for lines starting with indicators.
for line in lines:
words = line.split()
if (len(words) >= 2):
firstWord = words[0]
if not hasReport:
if not hasJob:
if not hasFile:
if not hasParm:
if firstWord == 'PARM':
hasParm = True
if firstWord == 'FILE':
hasFile = True
if firstWord == 'JOB':
hasJob = True
elif firstWord == 'PROC':
hasProc = True
elif firstWord == 'END-PROC':
hasEndProc = True
elif firstWord == 'REPORT':
hasReport = True
# Weight the findings.
if hasJob and (hasProc == hasEndProc):
if hasHeaderComment:
result += 0.1
if hasParm:
if hasProc:
# Found PARM, JOB and PROC/END-PROC:
# pretty sure this is Easytrieve.
result += 0.8
else:
# Found PARAM and JOB: probably this is Easytrieve
result += 0.5
else:
# Found JOB and possibly other keywords: might be Easytrieve
result += 0.11
if hasParm:
# Note: PARAM is not a proper English word, so this is
# regarded a much better indicator for Easytrieve than
# the other words.
result += 0.2
if hasFile:
result += 0.01
if hasReport:
result += 0.01
assert 0.0 <= result <= 1.0
return result
| EasytrieveLexer |
python | numba__numba | numba/core/rewrites/ir_print.py | {
"start": 2047,
"end": 2969
} | class ____(Rewrite):
"""
Detect and store constant arguments to print() nodes.
"""
def match(self, func_ir, block, typemap, calltypes):
self.consts = consts = {}
self.block = block
for inst in block.find_insts(ir.Print):
if inst.consts:
# Already rewritten
continue
for idx, var in enumerate(inst.args):
try:
const = func_ir.infer_constant(var)
except errors.ConstantInferenceError:
continue
consts.setdefault(inst, {})[idx] = const
return len(consts) > 0
def apply(self):
"""
Store detected constant arguments on their nodes.
"""
for inst in self.block.body:
if inst in self.consts:
inst.consts = self.consts[inst]
return self.block
| DetectConstPrintArguments |
python | fluentpython__example-code-2e | 24-class-metaprog/bulkfood/bulkfood_v6.py | {
"start": 1812,
"end": 2156
} | class ____:
description = model.NonBlank()
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# end::LINEITEM_V6[]
| LineItem |
python | langchain-ai__langchain | libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py | {
"start": 614,
"end": 1198
} | class ____(VectorStoreIntegrationTests):
@pytest.fixture
def vectorstore(self) -> VectorStore:
embeddings = self.get_embeddings()
return WithoutGetByIdsVectorStore(embedding=embeddings)
@property
def has_get_by_ids(self) -> bool:
return False
def test_get_by_ids_fails(self, vectorstore: VectorStore) -> None:
with pytest.raises(
NotImplementedError,
match="WithoutGetByIdsVectorStore does not yet support get_by_ids",
):
vectorstore.get_by_ids(["id1", "id2"])
| TestWithoutGetByIdVectorStore |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_options.py | {
"start": 978,
"end": 6217
} | class ____(object):
"""Options for constructing a Checkpoint.
Used as the `options` argument to either `tf.train.Checkpoint.save()` or
`tf.train.Checkpoint.restore()` methods to adjust how variables are
saved/restored.
Example: Run IO ops on "localhost" while saving a checkpoint:
```
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.save("/tmp/ckpt", options=options)
```
"""
# Define object attributes in __slots__ for improved memory and performance.
__slots__ = (
"experimental_io_device",
"experimental_enable_async_checkpoint",
"experimental_write_callbacks",
"enable_async",
"experimental_sharding_callback",
"experimental_skip_slot_variables",
)
@deprecated_args(
None, "Use enable_async instead", "experimental_enable_async_checkpoint"
)
def __init__(
self,
experimental_io_device=None,
experimental_enable_async_checkpoint=False,
experimental_write_callbacks=None,
enable_async=False,
experimental_skip_slot_variables=False,
experimental_sharding_callback=None
):
"""Creates an object that stores options for a Checkpoint.
Args:
experimental_io_device: string. Applies in a distributed setting.
Tensorflow device to use to access the filesystem. If `None` (default)
then for each variable the filesystem is accessed from the CPU:0 device
of the host where that variable is assigned. If specified, the
filesystem is instead accessed from that device for all variables. This
is for example useful if you want to save to a local directory, such as
"/tmp" when running in a distributed setting. In that case pass a device
for the host where the "/tmp" directory is accessible.
experimental_enable_async_checkpoint: bool Type. Deprecated, please use
the enable_async option.
experimental_write_callbacks: List[Callable]. A list of callback functions
that will be executed after each saving event finishes (i.e. after
`save()` or `write()`). For async checkpoint, the callbacks will be
executed only after the async thread finishes saving. The return values
of the callback(s) will be ignored. The callback(s) can optionally take
the `save_path` (the result of `save()` or `write()`) as an argument.
The callbacks will be executed in the same order of this list after the
checkpoint has been written.
enable_async: bool Type. Indicates whether async checkpointing is enabled.
Default is False, i.e., no async checkpoint. Async checkpoint moves the
checkpoint file writing off the main thread, so that the model can
continue to train while the checkpoint file writing runs in the
background. Async checkpoint reduces TPU device idle cycles and speeds
up model training process, while memory consumption may increase.
experimental_skip_slot_variables: bool Type. If true, ignores slot
variables during restore. Context: TPU Embedding layers for Serving do
not properly restore slot variables. This option is a way to omit
restoring slot variables which are not required for Serving usecase
anyways.(b/315912101)
experimental_sharding_callback: `tf.train.experimental.ShardingCallback`.
A pre-made or custom callback that determines how checkpoints are
sharded on disk. Pre-made callback options are
`tf.train.experimental.ShardByDevicePolicy` and
`tf.train.experimental.MaxShardSizePolicy`. You may also write a custom
callback, see `tf.train.experimental.ShardingCallback`.
"""
self.experimental_io_device = experimental_io_device
self.enable_async = experimental_enable_async_checkpoint or enable_async
self.experimental_enable_async_checkpoint = self.enable_async
# Ensure that each callback only has either 0 or 1 parameter
if experimental_write_callbacks is not None:
for callback in experimental_write_callbacks:
assert len(inspect.signature(callback).parameters) <= 1
self.experimental_write_callbacks = experimental_write_callbacks
if experimental_sharding_callback is not None:
if not isinstance(
experimental_sharding_callback, sharding_util.ShardingCallback):
raise ValueError("The experimental_sharding_callback checkpoint option"
"must be of type ShardingCallback. The option provided"
f"was of type {type(experimental_sharding_callback)}.")
self.experimental_sharding_callback = experimental_sharding_callback
self.experimental_skip_slot_variables = experimental_skip_slot_variables
def __copy__(self):
# Only `experimental_write_callbacks` needs special treatment to Ensure that
# the list is deep-copied, but the callbacks are not deep-copied.
result = copy.copy(super()) # First invoke the non-overridden copy method.
result.experimental_write_callbacks = copy.copy(
self.experimental_write_callbacks
)
return result
| CheckpointOptions |
python | google__jax | tests/traceback_test.py | {
"start": 1095,
"end": 4920
} | class ____(absltest.TestCase):
def testNoTracebacksIfDisabled(self):
with tracebacks(enabled=False):
self.assertEqual(None, Traceback.get_traceback())
buffer = jnp.array(7, np.int32)
self.assertEqual(None, buffer.traceback)
e = jax.jit(lambda x: x + 1).lower(1).compile().runtime_executable()
self.assertEqual(None, e.traceback)
def assertIsTracebackContaining(self, tb, function):
self.assertIsInstance(tb, Traceback)
self.assertIn(function, str(tb))
self.assertTrue(any(f.function_name == function for f in tb.frames))
def testTracebacks(self):
with tracebacks(enabled=True):
fn = "TracebackTest.testTracebacks"
tb = Traceback.get_traceback()
self.assertIsTracebackContaining(tb, fn)
buffer = jnp.array(7, np.int32)
self.assertIsTracebackContaining(buffer.traceback, fn)
e = jax.jit(lambda x: x + 1).lower(1).compile().runtime_executable()
self.assertIsTracebackContaining(e.traceback, fn)
def testNestedFunction(self):
def AFunction():
def AnotherFunction():
return Traceback.get_traceback()
return AnotherFunction()
with tracebacks(enabled=True):
tb = AFunction()
self.assertIsInstance(tb, Traceback)
frames = tb.frames
fn = "TracebackTest.testNestedFunction.<locals>.AFunction"
i = next(i for (i, f) in enumerate(frames) if f.function_name == fn)
self.assertEqual(
frames[i - 1].function_name,
"TracebackTest.testNestedFunction.<locals>.AFunction.<locals>.AnotherFunction",
)
self.assertEqual(
frames[i + 1].function_name, "TracebackTest.testNestedFunction"
)
def testPythonTracebackHasCorrectLineNumbers(self):
def B():
return Traceback.get_traceback()
def A():
return B()
tb = A().as_python_traceback()
for frame, lineno in traceback.walk_tb(tb):
if frame.f_code.co_name == "A":
line = A.__code__.co_firstlineno
self.assertBetween(lineno, line, line + 2)
elif frame.f_code.co_name == "B":
line = B.__code__.co_firstlineno
self.assertBetween(lineno, line, line + 2)
def testAccessingLocalsDoesNotCrash(self):
# https://github.com/google/jax/issues/16027
tb = Traceback.get_traceback()
python_tb = tb.as_python_traceback()
for frame, _ in traceback.walk_tb(python_tb):
_ = frame.f_locals # should not crash
def testTracebackFromFrames(self):
def FooFn(x):
return x + 1
def BarFn(y):
y = y + 1
y = y + 2
return y * 2
frame_foo = Frame(
__file__,
FooFn.__code__.co_name,
FooFn.__code__.co_firstlineno,
FooFn.__code__.co_firstlineno + 1,
)
frame_bar = Frame(
__file__,
BarFn.__code__.co_name,
BarFn.__code__.co_firstlineno,
BarFn.__code__.co_firstlineno + 2,
)
frames = [frame_foo, frame_bar]
tb = Traceback.traceback_from_frames(frames)
with self.subTest("WalkDoesNotError"):
for frame, _ in traceback.walk_tb(tb):
_ = frame.f_locals # should not crash
with self.subTest("TracebackCorrectness"):
tb_string = traceback.format_tb(tb)
# The traceback should have the format:
# File <this file>, line N in BarFn
# y = y + 2
# File <this file>, line N in FooFn
# return x + 1
self.assertLen(tb_string, len(frames))
bar_frame = tb_string[0].split("\n")
self.assertEndsWith(bar_frame[0], "BarFn")
self.assertEqual(bar_frame[1].strip(), "y = y + 2")
foo_frame = tb_string[1].split("\n")
self.assertEndsWith(foo_frame[0], "FooFn")
self.assertEqual(foo_frame[1].strip(), "return x + 1")
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| TracebackTest |
python | doocs__leetcode | solution/3400-3499/3488.Closest Equal Element Queries/Solution.py | {
"start": 0,
"end": 635
} | class ____:
def solveQueries(self, nums: List[int], queries: List[int]) -> List[int]:
n = len(nums)
m = n << 1
d = [m] * m
left = {}
for i in range(m):
x = nums[i % n]
if x in left:
d[i] = min(d[i], i - left[x])
left[x] = i
right = {}
for i in range(m - 1, -1, -1):
x = nums[i % n]
if x in right:
d[i] = min(d[i], right[x] - i)
right[x] = i
for i in range(n):
d[i] = min(d[i], d[i + n])
return [-1 if d[i] >= n else d[i] for i in queries]
| Solution |
python | django-extensions__django-extensions | django_extensions/management/commands/runscript.py | {
"start": 376,
"end": 622
} | class ____:
NONE = "none"
EACH = "each"
ROOT = "root"
def check_is_directory(value):
if value is None or not os.path.isdir(value):
raise ArgumentTypeError("%s is not a directory!" % value)
return value
| DirPolicyChoices |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_area01.py | {
"start": 315,
"end": 1400
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_area01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "area"})
chart.axis_ids = [43407616, 43433984]
data = [
[1, 2, 3, 4, 5],
[8, 7, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/integrations/gitlab/integration.py | {
"start": 13489,
"end": 14261
} | class ____:
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
if "completed_installation_guide" in request.GET:
return pipeline.next_step()
return render_to_response(
template="sentry/integrations/gitlab-config.html",
context={
"next_url": f'{absolute_uri("/extensions/gitlab/setup/")}?completed_installation_guide',
"setup_values": [
{"label": "Name", "value": "Sentry"},
{"label": "Redirect URI", "value": absolute_uri("/extensions/gitlab/setup/")},
{"label": "Scopes", "value": "api"},
],
},
request=request,
)
| InstallationGuideView |
python | PrefectHQ__prefect | src/prefect/server/services/task_run_recorder.py | {
"start": 7966,
"end": 10011
} | class ____(RunInEphemeralServers, Service):
"""Constructs task runs and states from client-emitted events"""
consumer_task: asyncio.Task[None] | None = None
metrics_task: asyncio.Task[None] | None = None
@classmethod
def service_settings(cls) -> ServicesBaseSetting:
return get_current_settings().server.services.task_run_recorder
def __init__(self):
super().__init__()
self._started_event: Optional[asyncio.Event] = None
@property
def started_event(self) -> asyncio.Event:
if self._started_event is None:
self._started_event = asyncio.Event()
return self._started_event
@started_event.setter
def started_event(self, value: asyncio.Event) -> None:
self._started_event = value
async def start(self) -> NoReturn:
assert self.consumer_task is None, "TaskRunRecorder already started"
self.consumer: Consumer = create_consumer(
"events",
group="task-run-recorder",
name=generate_unique_consumer_name("task-run-recorder"),
)
async with consumer() as handler:
self.consumer_task = asyncio.create_task(self.consumer.run(handler))
self.metrics_task = asyncio.create_task(log_metrics_periodically())
logger.debug("TaskRunRecorder started")
self.started_event.set()
try:
await self.consumer_task
except asyncio.CancelledError:
pass
async def stop(self) -> None:
assert self.consumer_task is not None, "Logger not started"
self.consumer_task.cancel()
if self.metrics_task:
self.metrics_task.cancel()
try:
await self.consumer_task
if self.metrics_task:
await self.metrics_task
except asyncio.CancelledError:
pass
finally:
self.consumer_task = None
self.metrics_task = None
logger.debug("TaskRunRecorder stopped")
| TaskRunRecorder |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_densifier.py | {
"start": 189,
"end": 669
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(Densifier, make_sparse=True)
self.assertIsInstance(transformation, np.ndarray)
self.assertEqual(transformation.shape, original.shape)
self.assertIsInstance(transformation, np.ndarray)
def test_preprocessing_dtype(self):
super(DensifierComponentTest, self)._test_preprocessing_dtype(Densifier)
| DensifierComponentTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-astra/destination_astra/astra_client.py | {
"start": 144,
"end": 5991
} | class ____:
def __init__(
self,
astra_endpoint: str,
astra_application_token: str,
keyspace_name: str,
embedding_dim: int,
similarity_function: str,
):
self.astra_endpoint = astra_endpoint
self.astra_application_token = astra_application_token
self.keyspace_name = keyspace_name
self.embedding_dim = embedding_dim
self.similarity_function = similarity_function
self.request_base_url = f"{self.astra_endpoint}/api/json/v1/{self.keyspace_name}"
self.request_header = {
"x-cassandra-token": self.astra_application_token,
"Content-Type": "application/json",
"User-Agent": "airbyte",
}
def _run_query(self, request_url: str, query: Dict):
try:
response = requests.request("POST", request_url, headers=self.request_header, data=json.dumps(query))
if response.status_code == 200:
response_dict = json.loads(response.text)
if "errors" in response_dict:
raise Exception(f"Astra DB request error - {response_dict['errors']}")
else:
return response_dict
else:
raise urllib3.exceptions.HTTPError(f"Astra DB not available. Status code: {response.status_code}, {response.text}")
except Exception:
raise
def find_collections(self, include_detail: bool = True):
query = {"findCollections": {"options": {"explain": include_detail}}}
result = self._run_query(self.request_base_url, query)
return result["status"]["collections"]
def find_collection(self, collection_name: str):
collections = self.find_collections(False)
return collection_name in collections
def create_collection(self, collection_name: str, embedding_dim: Optional[int] = None, similarity_function: Optional[str] = None):
query = {
"createCollection": {
"name": collection_name,
"options": {
"vector": {
"dimension": embedding_dim if embedding_dim is not None else self.embedding_dim,
"metric": similarity_function if similarity_function is not None else self.similarity_function,
}
},
}
}
result = self._run_query(self.request_base_url, query)
return True if result["status"]["ok"] == 1 else False
def delete_collection(self, collection_name: str):
query = {"deleteCollection": {"name": collection_name}}
result = self._run_query(self.request_base_url, query)
return True if result["status"]["ok"] == 1 else False
def _build_collection_query(self, collection_name: str):
return f"{self.request_base_url}/{collection_name}"
def find_documents(
self,
collection_name: str,
filter: Optional[Dict] = None,
vector: Optional[List[float]] = None,
limit: Optional[int] = None,
include_vector: Optional[bool] = None,
include_similarity: Optional[bool] = None,
) -> List[Dict]:
find_query = {}
if filter is not None:
find_query["filter"] = filter
if vector is not None:
find_query["sort"] = {"$vector": vector}
if include_vector is not None and include_vector == False:
find_query["projection"] = {"$vector": 0}
else:
find_query["projection"] = {"*": 1}
if limit is not None:
find_query["options"] = {"limit": limit}
if include_similarity is not None:
if "options" in find_query:
find_query["options"]["includeSimilarity"] = int(include_similarity)
else:
find_query["options"] = {"includeSimilarity": int(include_similarity)}
query = {"find": find_query}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["data"]["documents"]
def insert_document(self, collection_name: str, document: Dict) -> str:
query = {"insertOne": {"document": document}}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]["insertedIds"][0]
def insert_documents(self, collection_name: str, documents: List[Dict]) -> List[str]:
query = {"insertMany": {"documents": documents}}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]["insertedIds"]
def update_document(self, collection_name: str, filter: Dict, update: Dict, upsert: bool = True) -> Dict:
query = {"findOneAndUpdate": {"filter": filter, "update": update, "options": {"returnDocument": "after", "upsert": upsert}}}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]
def update_documents(self, collection_name: str, filter: Dict, update: Dict):
query = {
"updateMany": {
"filter": filter,
"update": update,
}
}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]
def count_documents(self, collection_name: str):
query = {"countDocuments": {}}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]["count"]
def delete_documents(self, collection_name: str, filter: Dict) -> int:
query = {"deleteMany": {"filter": filter}}
result = self._run_query(self._build_collection_query(collection_name), query)
return result["status"]["deletedCount"]
| AstraClient |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-opensearch/llama_index/vector_stores/alibabacloud_opensearch/base.py | {
"start": 5487,
"end": 14566
} | class ____(BasePydanticVectorStore):
"""
The AlibabaCloud OpenSearch Vector Store.
In this vector store we store the text, its embedding and its metadata
in a OpenSearch table.
In order to use this you need to have a instance and configure a table.
See the following documentation for details:
https://help.aliyun.com/zh/open-search/vector-search-edition/product-overview
Args:
config (AlibabaCloudOpenSearchConfig): The instance configuration
Examples:
`pip install llama-index-vector-stores-alibabacloud_opensearch`
```python
from llama_index.vector_stores.alibabacloud_opensearch import (
AlibabaCloudOpenSearchConfig,
AlibabaCloudOpenSearchStore,
)
# Config
config = AlibabaCloudOpenSearchConfig(
endpoint="xxx",
instance_id="ha-cn-******",
username="****",
password="****",
table_name="your_table_name",
)
vector_store = AlibabaCloudOpenSearchStore(config)
```
"""
stores_text: bool = True
flat_metadata: bool = True
_client: Any = PrivateAttr()
_config: AlibabaCloudOpenSearchConfig = PrivateAttr()
def __init__(self, config: AlibabaCloudOpenSearchConfig) -> None:
"""Initialize params."""
super().__init__()
self._config = config
self._client = client.Client(
models.Config(
endpoint=config.endpoint,
instance_id=config.instance_id,
access_user_name=config.username,
access_pass_word=config.password,
)
)
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AlibabaCloudOpenSearchStore"
@property
def client(self) -> Any:
"""Get client."""
return self._client
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to vector store.
Args:
nodes (List[BaseNode]): list of nodes with embeddings
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, **add_kwargs)
)
async def async_add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Asynchronously add nodes with embedding to vector store.
Args:
nodes (List[BaseNode]): list of nodes with embeddings
"""
for i in range(0, len(nodes), DEFAULT_BATCH_SIZE):
docs = []
for node in nodes[i:DEFAULT_BATCH_SIZE]:
doc = {
self._config.id_field: node.node_id,
self._config.embedding_field: node.embedding,
}
if self._config.text_field:
doc[self._config.text_field] = node.get_text()
meta_fields = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
if self._config.field_mapping:
for key, value in meta_fields.items():
doc[self._config.field_mapping.get(key, key)] = value
else:
doc.update(meta_fields)
docs.append(doc)
try:
await self._async_send_data("add", docs)
except Exception as e:
logging.error(f"Add to {self._config.instance_id} failed: {e}")
raise RuntimeError(f"Fail to add docs, error:{e}")
return [node.node_id for node in nodes]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Asynchronously delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
filter = f"{DEFAULT_DOC_ID_KEY}='{ref_doc_id}'"
request = models.FetchRequest(table_name=self._config.table_name, filter=filter)
response = self._client.fetch(request)
json_response = json.loads(response.body)
err_msg = json_response.get("errorMsg", None)
if err_msg:
raise RuntimeError(f"Failed to query doc by {filter}: {err_msg}")
docs = []
for doc in json_response["result"]:
docs.append({"id": doc["id"]})
await self._async_send_data("delete", docs)
async def _async_send_data(self, cmd: str, fields_list: List[dict]) -> None:
"""
Asynchronously send data.
Args:
cmd (str): data operator, add: upsert the doc, delete: delete the doc
fields_list (list[dict]): doc fields list
"""
docs = []
for fields in fields_list:
docs.append({"cmd": cmd, "fields": fields})
request = models.PushDocumentsRequest({}, docs)
await self._client.push_documents_async(
self._config.data_source_name, self._config.id_field, request
)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query vector store."""
return asyncio.get_event_loop().run_until_complete(self.aquery(query, **kwargs))
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""
Asynchronously query vector store.
"""
if query.mode != VectorStoreQueryMode.DEFAULT:
raise ValueError(
f"Alibaba Cloud OpenSearch does not support {query.mode} yet."
)
request = self._gen_query_request(query)
response = await self._client.query_async(request)
json_response = json.loads(response.body)
logging.debug(f"query result: {json_response}")
err_msg = json_response.get("errorMsg", None)
if err_msg:
raise RuntimeError(
f"query doc from Alibaba Cloud OpenSearch instance:{self._config.instance_id} failed:"
f"{err_msg}"
)
ids = []
nodes = []
similarities = []
for doc in json_response["result"]:
try:
node = metadata_dict_to_node(
{
"_node_content": doc["fields"].get(
self._config.field_mapping.get(
"_node_content", "_node_content"
),
None,
),
"_node_type": doc["fields"].get(
self._config.field_mapping.get("_node_type", "_node_type"),
None,
),
}
)
except Exception:
text = doc["fields"][self._config.text_field]
metadata = {
self._config.inverse_field_mapping.get(key, key): doc["fields"].get(
key
)
for key in self._config.output_fields
}
node = TextNode(id_=doc["id"], text=text, metadata=metadata)
ids.append(doc["id"])
nodes.append(node)
similarities.append(doc["score"])
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def _gen_query_request(self, query: VectorStoreQuery) -> models.QueryRequest:
"""
Generate the OpenSearch query request.
Args:
query (VectorStoreQuery): The vector store query
Return:
OpenSearch query request
"""
filter = _to_ha3_engine_filter(query.filters)
request = models.QueryRequest(
table_name=self._config.table_name,
namespace=self._config.namespace,
vector=query.query_embedding,
top_k=query.similarity_top_k,
filter=filter,
include_vector=True,
output_fields=self._config.output_fields,
)
if self._config.search_config:
request.order = self._config.search_config.get("order", "ASC")
score_threshold: float = self._config.search_config.get(
"score_threshold", None
)
if score_threshold is not None:
request.score_threshold = score_threshold
search_params = self._config.search_config.get("search_params", None)
if search_params is not None:
request.search_params = json.dumps(search_params)
return request
| AlibabaCloudOpenSearchStore |
python | numba__numba | numba/core/typing/enumdecl.py | {
"start": 1467,
"end": 1503
} | class ____(EnumCompare):
pass
| EnumNe |
python | aimacode__aima-python | reinforcement_learning.py | {
"start": 7554,
"end": 11380
} | class ____:
"""
[Figure 21.8]
An exploratory Q-learning agent. It avoids having to learn the transition
model because the Q-value of a state can be related directly to those of
its neighbors.
import sys
from mdp import sequential_decision_environment
north = (0, 1)
south = (0,-1)
west = (-1, 0)
east = (1, 0)
policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north,
(3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,}
q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, alpha=lambda n: 60./(59+n))
for i in range(200):
run_single_trial(q_agent,sequential_decision_environment)
q_agent.Q[((0, 1), (0, 1))] >= -0.5
True
q_agent.Q[((1, 0), (0, -1))] <= 0.5
True
"""
def __init__(self, mdp, Ne, Rplus, alpha=None):
self.gamma = mdp.gamma
self.terminals = mdp.terminals
self.all_act = mdp.actlist
self.Ne = Ne # iteration limit in exploration function
self.Rplus = Rplus # large value to assign before iteration limit
self.Q = defaultdict(float)
self.Nsa = defaultdict(float)
self.s = None
self.a = None
self.r = None
if alpha:
self.alpha = alpha
else:
self.alpha = lambda n: 1. / (1 + n) # udacity video
def f(self, u, n):
"""Exploration function. Returns fixed Rplus until
agent has visited state, action a Ne number of times.
Same as ADP agent in book."""
if n < self.Ne:
return self.Rplus
else:
return u
def actions_in_state(self, state):
"""Return actions possible in given state.
Useful for max and argmax."""
if state in self.terminals:
return [None]
else:
return self.all_act
def __call__(self, percept):
s1, r1 = self.update_state(percept)
Q, Nsa, s, a, r = self.Q, self.Nsa, self.s, self.a, self.r
alpha, gamma, terminals = self.alpha, self.gamma, self.terminals,
actions_in_state = self.actions_in_state
if s in terminals:
Q[s, None] = r1
if s is not None:
Nsa[s, a] += 1
Q[s, a] += alpha(Nsa[s, a]) * (r + gamma * max(Q[s1, a1]
for a1 in actions_in_state(s1)) - Q[s, a])
if s in terminals:
self.s = self.a = self.r = None
else:
self.s, self.r = s1, r1
self.a = max(actions_in_state(s1), key=lambda a1: self.f(Q[s1, a1], Nsa[s1, a1]))
return self.a
def update_state(self, percept):
"""To be overridden in most cases. The default case
assumes the percept to be of type (state, reward)."""
return percept
def run_single_trial(agent_program, mdp):
"""Execute trial for given agent_program
and mdp. mdp should be an instance of subclass
of mdp.MDP """
def take_single_action(mdp, s, a):
"""
Select outcome of taking action a
in state s. Weighted Sampling.
"""
x = random.uniform(0, 1)
cumulative_probability = 0.0
for probability_state in mdp.T(s, a):
probability, state = probability_state
cumulative_probability += probability
if x < cumulative_probability:
break
return state
current_state = mdp.init
while True:
current_reward = mdp.R(current_state)
percept = (current_state, current_reward)
next_action = agent_program(percept)
if next_action is None:
break
current_state = take_single_action(mdp, current_state, next_action)
| QLearningAgent |
python | ray-project__ray | release/ray_release/test.py | {
"start": 4240,
"end": 25820
} | class ____(dict):
"""A class represents a test to run on buildkite"""
KEY_GITHUB_ISSUE_NUMBER = "github_issue_number"
KEY_BISECT_BUILD_NUMBER = "bisect_build_number"
KEY_BISECT_BLAMED_COMMIT = "bisect_blamed_commit"
# a test is high impact if it catches regressions frequently
KEY_IS_HIGH_IMPACT = "is_high_impact"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_results = None
@classmethod
def from_bazel_event(cls, event: dict, team: str):
name = event["id"]["testResult"]["label"]
system = platform.system().lower()
return cls(
{
"name": f"{system}:{name}",
"team": team,
}
)
@classmethod
def gen_from_name(cls, name: str):
tests = [
test
for test in Test.gen_from_s3(cls._get_s3_name(name))
if test["name"] == name
]
return tests[0] if tests else None
@classmethod
def gen_from_s3(cls, prefix: str):
"""
Obtain all tests whose names start with the given prefix from s3
"""
bucket = get_read_state_machine_aws_bucket()
s3_client = boto3.client("s3")
pages = s3_client.get_paginator("list_objects_v2").paginate(
Bucket=bucket,
Prefix=f"{AWS_TEST_KEY}/{prefix}",
)
files = chain.from_iterable([page.get("Contents", []) for page in pages])
return [
Test(
json.loads(
s3_client.get_object(Bucket=bucket, Key=file["Key"])
.get("Body")
.read()
.decode("utf-8")
)
)
for file in files
]
@classmethod
def gen_microcheck_step_ids(cls, prefix: str, bazel_workspace_dir: str) -> Set[str]:
"""
This function is used to get the buildkite step ids of the microcheck tests
with the given test prefix. This is used to determine the buildkite steps in
the microcheck pipeline.
"""
step_ids = set()
test_targets = cls.gen_microcheck_tests(prefix, bazel_workspace_dir)
for test_target in test_targets:
test = cls.gen_from_name(f"{prefix}{test_target}")
if not test:
continue
recent_results = test.get_test_results()
if not recent_results:
continue
test_step_ids = {
result.rayci_step_id
for result in recent_results
if result.commit == recent_results[0].commit and result.rayci_step_id
}
if test_step_ids and not step_ids.intersection(test_step_ids):
step_ids.add(sorted(test_step_ids)[0])
return step_ids
@classmethod
def gen_microcheck_tests(
cls, prefix: str, bazel_workspace_dir: str, team: Optional[str] = None
) -> Set[str]:
"""
Obtain all microcheck tests with the given prefix
"""
high_impact_tests = Test._gen_high_impact_tests(prefix, team)
changed_tests = Test._get_changed_tests(bazel_workspace_dir)
human_specified_tests = Test._get_human_specified_tests(bazel_workspace_dir)
return high_impact_tests.union(changed_tests, human_specified_tests)
@classmethod
def _gen_high_impact_tests(
cls, prefix: str, team: Optional[str] = None
) -> Set[str]:
"""
Obtain all high impact tests with the given prefix
"""
high_impact_tests = [
test for test in cls.gen_from_s3(prefix) if test.is_high_impact()
]
if team:
high_impact_tests = [
test for test in high_impact_tests if test.get_oncall() == team
]
return {test.get_target() for test in high_impact_tests}
@classmethod
def _get_human_specified_tests(cls, bazel_workspace_dir: str) -> Set[str]:
"""
Get all test targets that are specified by humans
"""
base = os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH")
head = os.environ.get("BUILDKITE_COMMIT")
if not base or not head:
# if not in a PR, return an empty set
return set()
tests = set()
messages = subprocess.check_output(
["git", "rev-list", "--format=%b", f"origin/{base}...{head}"],
cwd=bazel_workspace_dir,
)
for message in messages.decode().splitlines():
if not message.startswith(MICROCHECK_COMMAND):
continue
tests = tests.union(message[len(MICROCHECK_COMMAND) :].strip().split(" "))
return tests
@classmethod
def _get_changed_tests(cls, bazel_workspace_dir: str) -> Set[str]:
"""
Get all changed tests in the current PR
"""
return set(
chain.from_iterable(
[
cls._get_test_targets_per_file(file, bazel_workspace_dir)
for file in cls._get_changed_files(bazel_workspace_dir)
]
)
)
@classmethod
def _get_changed_files(cls, bazel_workspace_dir: str) -> Set[str]:
"""
Get all changed files in the current PR
"""
base = os.environ.get("BUILDKITE_PULL_REQUEST_BASE_BRANCH")
head = os.environ.get("BUILDKITE_COMMIT")
if not base or not head:
# if not in a PR, return an empty set
return set()
changes = subprocess.check_output(
["git", "diff", "--name-only", f"origin/{base}...{head}"],
cwd=bazel_workspace_dir,
)
return {
file.strip() for file in changes.decode().splitlines() if file is not None
}
@classmethod
def _get_test_targets_per_file(
cls, file: str, bazel_workspace_dir: str
) -> Set[str]:
"""
Get the test target from a file path
"""
try:
package = (
subprocess.check_output(
["bazel", "query", file], cwd=bazel_workspace_dir
)
.decode()
.strip()
)
if not package:
return set()
targets = subprocess.check_output(
["bazel", "query", f"tests(attr('srcs', {package}, //...))"],
cwd=bazel_workspace_dir,
)
targets = {
target.strip()
for target in targets.decode().splitlines()
if target is not None
}
return targets
except subprocess.CalledProcessError:
return set()
def is_jailed_with_open_issue(self, ray_github: "Repository") -> bool:
"""
Returns whether this test is jailed with open issue.
"""
# is jailed
state = self.get_state()
if state != TestState.JAILED:
return False
# has open issue
issue_number = self.get(self.KEY_GITHUB_ISSUE_NUMBER)
if issue_number is None:
return False
issue = ray_github.get_issue(issue_number)
return issue.state == "open"
def is_stable(self) -> bool:
"""
Returns whether this test is stable.
"""
return self.get("stable", True)
def is_gce(self) -> bool:
"""
Returns whether this test is running on GCE.
"""
return self.get("env") == "gce"
def is_kuberay(self) -> bool:
"""
Returns whether this test is running on KubeRay.
"""
return self.get("env") == "kuberay"
def is_azure(self) -> bool:
"""
Returns whether this test is running on Azure.
"""
return self.get("env") == "azure"
def is_high_impact(self) -> bool:
# a test is high impact if it catches regressions frequently, this field is
# populated by the determine_microcheck_tests.py script
return self.get(self.KEY_IS_HIGH_IMPACT, None) == "true"
def get_test_type(self) -> TestType:
test_name = self.get_name()
if test_name.startswith(MACOS_TEST_PREFIX):
return TestType.MACOS_TEST
if test_name.startswith(LINUX_TEST_PREFIX):
return TestType.LINUX_TEST
if test_name.startswith(WINDOWS_TEST_PREFIX):
return TestType.WINDOWS_TEST
return TestType.RELEASE_TEST
def get_bisect_daily_rate_limit(self) -> int:
test_type = self.get_test_type()
if test_type == TestType.MACOS_TEST:
return MACOS_BISECT_DAILY_RATE_LIMIT
if test_type == TestType.LINUX_TEST:
return LINUX_BISECT_DAILY_RATE_LIMIT
if test_type == TestType.WINDOWS_TEST:
return WINDOWS_BISECT_DAILY_RATE_LIMIT
return BISECT_DAILY_RATE_LIMIT
def get_byod_type(self) -> str:
"""
Returns the type of the BYOD cluster.
"""
return self["cluster"]["byod"].get("type", "cpu")
def get_tag_suffix(self) -> str:
"""
Returns the tag suffix for the BYOD image.
"""
byod_type = self.get_byod_type()
if byod_type.startswith("llm-"):
return byod_type[len("llm-") :]
return byod_type
def get_byod_post_build_script(self) -> Optional[str]:
"""
Returns the post-build script for the BYOD cluster.
"""
return self["cluster"]["byod"].get("post_build_script", None)
def get_byod_python_depset(self) -> Optional[str]:
"""
Returns the lock file path.
"""
return self["cluster"]["byod"].get("python_depset", None)
def get_byod_runtime_env(self) -> Dict[str, str]:
"""
Returns the runtime environment variables for the BYOD cluster.
"""
default = {
"RAY_BACKEND_LOG_JSON": "1",
# Logs the full stack trace from Ray Data in case of exception,
# which is useful for debugging failures.
"RAY_DATA_LOG_INTERNAL_STACK_TRACE_TO_STDOUT": "1",
# To make ray data compatible across multiple pyarrow versions.
"RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE": "1",
}
default.update(
_convert_env_list_to_dict(self["cluster"]["byod"].get("runtime_env", []))
)
return default
def get_byod_pips(self) -> List[str]:
"""
Returns the list of pips for the BYOD cluster.
"""
return self["cluster"]["byod"].get("pip", [])
def get_ray_version(self) -> Optional[str]:
"""
Returns the Ray version to use from DockerHub if specified in cluster config.
If set, this will use released Ray images like anyscale/ray:2.50.0-py39-cpu
instead of building custom BYOD images.
"""
return self["cluster"].get("ray_version", None)
def get_name(self) -> str:
"""
Returns the name of the test.
"""
return self["name"]
def get_target(self) -> str:
test_type = self.get_test_type()
test_name = self.get_name()
if test_type == TestType.MACOS_TEST:
return test_name[len(MACOS_TEST_PREFIX) :]
if test_type == TestType.LINUX_TEST:
return test_name[len(LINUX_TEST_PREFIX) :]
if test_type == TestType.WINDOWS_TEST:
return test_name[len(WINDOWS_TEST_PREFIX) :]
return test_name
@classmethod
def _get_s3_name(cls, test_name: str) -> str:
"""
Returns the name of the test for s3. Since '/' is not allowed in s3 key,
replace it with '_'.
"""
return test_name.replace("/", "_")
def get_oncall(self) -> str:
"""
Returns the oncall for the test.
"""
return self["team"]
def update_from_s3(self, force_branch_bucket: bool = True) -> None:
"""
Update test object with data fields that exist only on s3
"""
try:
data = (
boto3.client("s3")
.get_object(
Bucket=get_read_state_machine_aws_bucket(),
Key=f"{AWS_TEST_KEY}/{self._get_s3_name(self.get_name())}.json",
)
.get("Body")
.read()
.decode("utf-8")
)
except ClientError as e:
logger.warning(f"Failed to update data for {self.get_name()} from s3: {e}")
return
for key, value in json.loads(data).items():
if key not in self:
self[key] = value
def get_state(self) -> TestState:
"""
Returns the state of the test.
"""
return TestState(self.get("state", TestState.PASSING.value))
def set_state(self, state: TestState) -> None:
"""
Sets the state of the test.
"""
self["state"] = state.value
def get_python_version(self) -> str:
"""
Returns the python version to use for this test. If not specified, use
the default python version.
"""
return self.get("python", ".".join(str(v) for v in DEFAULT_PYTHON_VERSION))
def get_byod_base_image_tag(self, build_id: Optional[str] = None) -> str:
"""
Returns the byod image tag to use for this test.
"""
byod_image_tag = os.environ.get("RAY_IMAGE_TAG")
if byod_image_tag:
# Use the image tag specified in the environment variable.
# TODO(can): this is a temporary backdoor that should be removed
# once civ2 is fully rolled out.
return byod_image_tag
build_id = build_id or os.environ.get("RAYCI_BUILD_ID", "")
if not build_id:
raise ValueError("RAYCI_BUILD_ID is not set")
python_version = "py" + self.get_python_version().replace(".", "")
return f"{build_id}-{python_version}-{self.get_tag_suffix()}"
def get_byod_image_tag(self, build_id: Optional[str] = None) -> str:
"""
Returns the byod custom image tag to use for this test.
"""
if not self.require_custom_byod_image():
return self.get_byod_base_image_tag(build_id)
custom_info = {
"post_build_script": self.get_byod_post_build_script(),
"python_depset": self.get_byod_python_depset(),
}
tag = f"{self.get_byod_base_image_tag(build_id)}-{dict_hash(custom_info)}"
ray_version = self.get_ray_version()
if ray_version:
tag = f"{tag}-{ray_version}"
return tag
def use_byod_ml_image(self) -> bool:
"""Returns whether to use the ML image for this test."""
return self.get_byod_type() == "gpu"
def use_byod_llm_image(self) -> bool:
return self.get_byod_type().startswith("llm-")
def get_byod_repo(self) -> str:
"""
Returns the byod repo to use for this test.
"""
if self.use_byod_ml_image():
return DATAPLANE_ECR_ML_REPO
if self.use_byod_llm_image():
return DATAPLANE_ECR_LLM_REPO
return DATAPLANE_ECR_REPO
def get_byod_ecr(self) -> str:
"""
Returns the anyscale byod ecr to use for this test.
"""
if self.is_gce() or self.is_kuberay():
return get_global_config()["byod_gcp_cr"]
if self.is_azure():
return get_global_config()["byod_azure_cr"]
byod_ecr = get_global_config()["byod_aws_cr"]
if byod_ecr:
return byod_ecr
return get_global_config()["byod_ecr"]
def get_ray_image(self) -> str:
"""
Returns the ray docker image to use for this test.
"""
config = get_global_config()
repo = self.get_byod_repo()
if repo == DATAPLANE_ECR_REPO:
repo_name = config["byod_ray_cr_repo"]
elif repo == DATAPLANE_ECR_LLM_REPO:
repo_name = config["byod_ray_llm_cr_repo"]
elif repo == DATAPLANE_ECR_ML_REPO:
repo_name = config["byod_ray_ml_cr_repo"]
else:
raise ValueError(f"Unknown repo {repo}")
ecr = config["byod_ray_ecr"]
tag = self.get_byod_base_image_tag()
return f"{ecr}/{repo_name}:{tag}"
def get_anyscale_base_byod_image(self, build_id: Optional[str] = None) -> str:
"""
Returns the anyscale byod image to use for this test.
"""
ray_version = self.get_ray_version()
if ray_version:
python_version = "py" + self.get_python_version().replace(".", "")
tag_suffix = self.get_tag_suffix()
if tag_suffix == "gpu":
tag_suffix = "cu121"
return f"{ANYSCALE_RAY_IMAGE_PREFIX}:{ray_version}-{python_version}-{tag_suffix}"
return (
f"{self.get_byod_ecr()}/"
f"{self.get_byod_repo()}:{self.get_byod_base_image_tag(build_id)}"
)
def require_custom_byod_image(self) -> bool:
"""
Returns whether this test requires a custom byod image.
"""
return (
self.get_byod_post_build_script() is not None
or self.get_byod_python_depset() is not None
)
def get_anyscale_byod_image(self, build_id: Optional[str] = None) -> str:
"""
Returns the anyscale byod image to use for this test.
If ray_version is specified in cluster config, returns anyscale/ray image.
"""
ray_version = self.get_ray_version()
if not ray_version or self.require_custom_byod_image():
# Use custom BYOD image
return (
f"{self.get_byod_ecr()}/"
f"{self.get_byod_repo()}:{self.get_byod_image_tag(build_id)}"
)
python_version = "py" + self.get_python_version().replace(".", "")
tag_suffix = (
"cu121" if self.get_tag_suffix() == "gpu" else self.get_tag_suffix()
)
tag = f"{ray_version}-{python_version}-{tag_suffix}"
return f"{ANYSCALE_RAY_IMAGE_PREFIX}:{tag}"
def get_test_results(
self,
limit: int = 10,
refresh: bool = False,
aws_bucket: str = None,
use_async: bool = False,
) -> List[TestResult]:
"""
Get test result from test object, or s3
:param limit: limit of test results to return
:param refresh: whether to refresh the test results from s3
"""
if self.test_results is not None and not refresh:
return self.test_results
bucket = aws_bucket or get_read_state_machine_aws_bucket()
s3_client = boto3.client("s3")
pages = s3_client.get_paginator("list_objects_v2").paginate(
Bucket=bucket,
Prefix=f"{AWS_TEST_RESULT_KEY}/{self._get_s3_name(self.get_name())}-",
)
files = sorted(
chain.from_iterable([page.get("Contents", []) for page in pages]),
key=lambda file: int(file["LastModified"].timestamp()),
reverse=True,
)[:limit]
if use_async:
self.test_results = _asyncio_thread_pool.submit(
lambda: asyncio.run(
self._gen_test_results(bucket, [file["Key"] for file in files])
)
).result()
else:
self.test_results = [
TestResult.from_dict(
json.loads(
s3_client.get_object(
Bucket=bucket,
Key=file["Key"],
)
.get("Body")
.read()
.decode("utf-8")
)
)
for file in files
]
return self.test_results
async def _gen_test_results(
self,
bucket: str,
keys: List[str],
) -> Awaitable[List[TestResult]]:
session = aioboto3.Session()
async with session.client("s3") as s3_client:
return await asyncio.gather(
*[self._gen_test_result(s3_client, bucket, key) for key in keys]
)
async def _gen_test_result(
self,
s3_client: aioboto3.Session.client,
bucket: str,
key: str,
) -> Awaitable[TestResult]:
object = await s3_client.get_object(Bucket=bucket, Key=key)
object_body = await object["Body"].read()
return TestResult.from_dict(json.loads(object_body.decode("utf-8")))
def persist_result_to_s3(self, result: Result) -> bool:
"""
Persist result object to s3
"""
self.persist_test_result_to_s3(TestResult.from_result(result))
def persist_test_result_to_s3(self, test_result: TestResult) -> bool:
"""
Persist test result object to s3
"""
s3_put_rayci_test_data(
Bucket=get_write_state_machine_aws_bucket(),
Key=f"{AWS_TEST_RESULT_KEY}/"
f"{self._get_s3_name(self.get_name())}-{int(time.time() * 1000)}.json",
Body=json.dumps(test_result.__dict__),
)
def persist_to_s3(self) -> bool:
"""
Persist test object to s3
"""
s3_put_rayci_test_data(
Bucket=get_write_state_machine_aws_bucket(),
Key=f"{AWS_TEST_KEY}/{self._get_s3_name(self.get_name())}.json",
Body=json.dumps(self),
)
| Test |
python | pytorch__pytorch | benchmarks/gpt_fast/benchmark.py | {
"start": 370,
"end": 10670
} | class ____(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, dtype):
super().__init__()
self.layers = nn.ModuleList(
[
nn.Linear(input_dim, hidden_dim, dtype=dtype),
nn.LayerNorm(hidden_dim, dtype=dtype),
nn.Linear(hidden_dim, output_dim, dtype=dtype),
nn.LayerNorm(output_dim, dtype=dtype),
]
)
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
@register_experiment(name="mlp_layer_norm_gelu")
def run_mlp_layer_norm_gelu(device: str = "cuda"):
dtype_flops_utilization_map = {
torch.bfloat16: "0.8",
}
input_shapes = [1024, 4096, 8192, 16384]
intermediate_size = 14336
results = []
for dtype, expected_flops_utilization in dtype_flops_utilization_map.items():
flops_utilization = 0
for D in input_shapes:
mod = SimpleMLP(
input_dim=D, hidden_dim=intermediate_size, output_dim=D, dtype=dtype
).to(device)
x = torch.randn(D, device=device, dtype=torch.bfloat16)
with FlopCounterMode(display=False) as mode:
mod(x)
flops = mode.get_total_flops()
compiled_mod = torch.compile(mod, dynamic=False)
for _ in range(WARMUP_ITER):
compiled_mod(x)
us_per_iter = benchmarker.benchmark(compiled_mod, (x,), {}) * 1000
flops_utilization += us_per_iter * flops / 1e9 / A100_40G_BF16_TFLOPS
flops_utilization = flops_utilization / len(input_shapes)
dtype_str = str(dtype).replace("torch.", "")
results.append(
Experiment(
"mlp_layer_norm_gelu",
"flops_utilization",
expected_flops_utilization,
f"{flops_utilization:.02f}",
dtype_str,
device,
get_arch_name(),
)
)
return results
@register_experiment(name="layer_norm")
def run_layer_norm(device: str = "cuda"):
dtype_memory_bandwidth_map = {
torch.bfloat16: "950",
}
input_shapes = [1024, 4096, 8192, 16384]
BS = 4096
results = []
for dtype, expected_memory_bandwidth in dtype_memory_bandwidth_map.items():
memory_bandwidth = 0
for D in input_shapes:
mod = nn.LayerNorm(D).to(device)
x = torch.randn(BS, D, device=device, dtype=dtype)
compiled_mod = torch.compile(mod, dynamic=False)
for _ in range(WARMUP_ITER):
compiled_mod(x)
us_per_iter = benchmarker.benchmark(compiled_mod, (x,), {}) * 1000
memory_bandwidth += (1e6 / us_per_iter) * 2 * BS * D * dtype.itemsize / 1e9
memory_bandwidth = memory_bandwidth / len(input_shapes)
dtype_str = str(dtype).replace("torch.", "")
results.append(
Experiment(
"layer_norm",
"memory_bandwidth(GB/s)",
expected_memory_bandwidth,
f"{memory_bandwidth:.02f}",
dtype_str,
device,
get_arch_name(),
)
)
return results
@register_experiment(name="gather_gemv")
@torch._inductor.config.patch(coordinate_descent_tuning=True)
def run_gather_gemv(device: str = "cuda"):
E = 8
dtype_memory_bandwidth_map = {
torch.int8: "990",
torch.bfloat16: "1060",
}
input_shapes = [1024, 4096, 8192, 16384]
results = []
for dtype, expected_memory_bandwidth in dtype_memory_bandwidth_map.items():
memory_bandwidth = 0
for D in input_shapes:
def gather_gemv(W, score_idxs, x):
return W[score_idxs].to(x.dtype) @ x
W = torch.randn(E, D, D, device=device).to(dtype=dtype)
x = torch.randn(D, device=device, dtype=torch.bfloat16)
score_idxs = torch.tensor([3, 5], device=device)
compiled_fn = torch.compile(gather_gemv, dynamic=False)
for _ in range(WARMUP_ITER):
compiled_fn(W, score_idxs, x)
us_per_iter = (
benchmarker.benchmark(
compiled_fn,
(
W,
score_idxs,
x,
),
{},
)
* 1000
)
memory_bandwidth += (1e6 / us_per_iter) * 2 * D * D * dtype.itemsize / 1e9
memory_bandwidth = memory_bandwidth / len(input_shapes)
dtype_str = str(dtype).replace("torch.", "")
results.append(
Experiment(
"gather_gemv",
"memory_bandwidth(GB/s)",
expected_memory_bandwidth,
f"{memory_bandwidth:.02f}",
dtype_str,
device,
get_arch_name(),
)
)
return results
@register_experiment(name="gemv")
@torch._inductor.config.patch(coordinate_descent_tuning=True)
def run_gemv(device: str = "cuda"):
dtype_memory_bandwidth_map = {
torch.int8: "870",
torch.bfloat16: "990",
}
input_shapes = [1024, 4096, 8192, 16384]
results = []
for dtype, expected_memory_bandwidth in dtype_memory_bandwidth_map.items():
memory_bandwidth = 0
for D in input_shapes:
def gemv(W, x):
return W.to(x.dtype) @ x
W = torch.randn(D, D, device=device).to(dtype=dtype)
x = torch.randn(D, device=device, dtype=torch.bfloat16)
compiled_fn = torch.compile(gemv, dynamic=False)
for _ in range(WARMUP_ITER):
compiled_fn(W, x)
us_per_iter = (
benchmarker.benchmark(
compiled_fn,
(
W,
x,
),
{},
)
* 1000
)
memory_bandwidth += (1e6 / us_per_iter) * D * D * dtype.itemsize / 1e9
memory_bandwidth = memory_bandwidth / len(input_shapes)
dtype_str = str(dtype).replace("torch.", "")
results.append(
Experiment(
"gemv",
"memory_bandwidth(GB/s)",
expected_memory_bandwidth,
f"{memory_bandwidth:.02f}",
dtype_str,
device,
get_arch_name(),
)
)
return results
def output_csv(output_file, headers, row):
if os.path.exists(output_file):
with open(output_file) as fd:
lines = list(csv.reader(fd)) or [[]]
if headers and len(headers) > len(lines[0]):
# if prior results failed the header might not be filled in yet
lines[0] = headers
else:
headers = lines[0]
else:
lines = [headers]
if output_file != DEFAULT_OUTPUT_FILE:
os.makedirs(os.path.dirname(output_file), exist_ok=True)
lines.append([(f"{x:.6f}" if isinstance(x, float) else x) for x in row])
with open(output_file, "w") as fd:
writer = csv.writer(fd, lineterminator="\n")
for line in lines:
writer.writerow(list(line) + ["0"] * (len(headers) - len(line)))
def output_json(output_file, headers, row):
"""
Write the result into JSON format, so that it can be uploaded to the benchmark database
to be displayed on OSS dashboard. The JSON format is defined at
https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
"""
mapping_headers = {headers[i]: v for i, v in enumerate(row)}
record = {
"benchmark": {
"name": "PyTorch gpt-fast benchmark",
"mode": "inference",
"dtype": mapping_headers["dtype"],
"extra_info": {
"device": mapping_headers["device"],
"arch": mapping_headers["arch"],
},
},
"model": {
"name": mapping_headers["name"],
"type": "OSS model" if mapping_headers["is_model"] else "micro-benchmark",
"origins": ["pytorch"],
},
"metric": {
"name": mapping_headers["metric"],
"benchmark_values": [mapping_headers["actual"]],
"target_value": mapping_headers["target"],
},
}
with open(f"{os.path.splitext(output_file)[0]}.json", "a") as f:
print(json.dumps(record), file=f)
DEFAULT_OUTPUT_FILE = "gpt_fast_benchmark.csv"
def main(output_file=DEFAULT_OUTPUT_FILE, only_model=None):
results = []
if not only_model:
experiments = all_experiments.values()
else:
if only_model not in all_experiments:
print(
f"Unknown model: {only_model}, all available models: {all_experiments.keys()}"
)
# only run the specified model
experiments = [all_experiments[only_model]]
for func in experiments:
try:
device = "cuda" if torch.cuda.is_available() else "cpu"
except AssertionError:
# This happens when torch is compiled with CUDA turning off completely
device = "cpu"
torch.compiler.cudagraph_mark_step_begin()
lst = func(device)
for x in lst:
results.append(dataclasses.astuple(x))
headers = [field.name for field in dataclasses.fields(Experiment)]
for row in results:
output_csv(output_file, headers, row)
# Also write the output in JSON format so that it can be ingested into the OSS benchmark database
output_json(output_file, headers, row)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run experiments.")
parser.add_argument(
"--output",
default=DEFAULT_OUTPUT_FILE,
help="Set the output CSV file to save the benchmark results",
)
parser.add_argument(
"--only",
help="Specify a model or micro-benchmark name to run exclusively",
)
args = parser.parse_args()
main(output_file=args.output, only_model=args.only)
| SimpleMLP |
python | ijl__orjson | test/test_dataclass.py | {
"start": 1082,
"end": 1175
} | class ____:
c: int
b: int
a: int
d: Optional[dict]
@dataclass
| UnsortedDataclass |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 11804,
"end": 11898
} | class ____(dict):
def getheaders(self, name):
return list(self.values())
| MockHeaders |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 7622,
"end": 7853
} | class ____(BuiltinFilter):
name = "Blur"
# fmt: off
filterargs = (5, 5), 16, 0, (
1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1,
)
# fmt: on
| BLUR |
python | pytorch__pytorch | torch/ao/pruning/_experimental/data_sparsifier/benchmarks/dlrm_utils.py | {
"start": 284,
"end": 5463
} | class ____(DLRM_Net):
"""The SparseDLRM model is a wrapper around the DLRM_Net model that tries
to use torch.sparse tensors for the features obtained after the ```interact_features()```
call. The idea is to do a simple torch.mm() with the weight matrix of the first linear
layer of the top layer.
"""
def forward(self, dense_x, lS_o, lS_i):
# pyrefly: ignore [missing-attribute]
x = self.apply_mlp(dense_x, self.bot_l) # dense features
# pyrefly: ignore [missing-attribute]
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l) # apply embedding bag
# pyrefly: ignore [missing-attribute]
z = self.interact_features(x, ly)
z = z.to_sparse_coo()
# pyrefly: ignore [missing-attribute]
z = torch.mm(z, self.top_l[0].weight.T).add(self.top_l[0].bias)
# pyrefly: ignore [missing-attribute]
for layer in self.top_l[1:]:
z = layer(z)
return z
def get_valid_name(name):
"""Replaces '.' with '_' as names with '.' are invalid in data sparsifier"""
return name.replace(".", "_")
def get_dlrm_model(sparse_dlrm=False):
"""Obtain dlrm model. The configs specified are based on the script in
bench/dlrm_s_criteo_kaggle.sh. The same config is used to train the model
for benchmarking on data sparsifier.
"""
dlrm_model_config = {
"m_spa": 16,
"ln_emb": np.array(
[
1460,
583,
10131227,
2202608,
305,
24,
12517,
633,
3,
93145,
5683,
8351593,
3194,
27,
14992,
5461306,
10,
5652,
2173,
4,
7046547,
18,
15,
286181,
105,
142572,
],
dtype=np.int32,
),
"ln_bot": np.array([13, 512, 256, 64, 16]),
"ln_top": np.array([367, 512, 256, 1]),
"arch_interaction_op": "dot",
"arch_interaction_itself": False,
"sigmoid_bot": -1,
"sigmoid_top": 2,
"sync_dense_params": True,
"loss_threshold": 0.0,
"ndevices": 1,
"qr_flag": False,
"qr_operation": "mult",
"qr_collisions": 4,
"qr_threshold": 200,
"md_flag": False,
"md_threshold": 200,
"weighted_pooling": None,
"loss_function": "bce",
}
if sparse_dlrm:
dlrm_model = SparseDLRM(**dlrm_model_config)
else:
dlrm_model = DLRM_Net(**dlrm_model_config)
return dlrm_model
def dlrm_wrap(X, lS_o, lS_i, device, ndevices=1):
"""Rewritten simpler version of ```dlrm_wrap()``` found in dlrm_s_pytorch.py.
This function simply moves the input tensors into the device and without the forward pass
"""
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return X.to(device), lS_o, lS_i
def make_test_data_loader(raw_data_file_path, processed_data_file):
"""Function to create dataset and dataloaders for the test dataset.
Rewritten simpler version of ```make_criteo_and_loaders()``` from the dlrm_data_pytorch.py
that makes the test dataset and dataloaders only for the ***kaggle criteo dataset***
"""
test_data = CriteoDataset(
"kaggle",
-1,
0.0,
"total",
"test",
raw_data_file_path,
processed_data_file,
False,
False,
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=16384,
shuffle=False,
num_workers=7,
collate_fn=collate_wrapper_criteo_offset,
pin_memory=False,
drop_last=False,
)
return test_loader
def fetch_model(model_path, device, sparse_dlrm=False):
"""This function unzips the zipped model checkpoint (if zipped) and returns a
model object
Args:
model_path (str)
path pointing to the zipped/raw model checkpoint file that was dumped in evaluate disk savings
device (torch.device)
device to which model needs to be loaded to
"""
if zipfile.is_zipfile(model_path):
with zipfile.ZipFile(model_path, "r", zipfile.ZIP_DEFLATED) as zip_ref:
zip_ref.extractall(os.path.dirname(model_path))
unzip_path = model_path.replace(".zip", ".ckpt")
else:
unzip_path = model_path
model = get_dlrm_model(sparse_dlrm=sparse_dlrm)
model.load_state_dict(torch.load(unzip_path, map_location=device))
model = model.to(device)
model.eval()
# If there was a zip file, clean up the unzipped files
if zipfile.is_zipfile(model_path):
os.remove(unzip_path)
return model
| SparseDLRM |
python | huggingface__transformers | src/transformers/models/llama4/modeling_llama4.py | {
"start": 6901,
"end": 13078
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
# Ignore copy
def __init__(self, config: Llama4TextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Llama4TextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
# Ignore copy
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.to(x.device) @ position_ids_expanded).transpose(1, 2)
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # Convert to complex representation
freqs_cis = freqs_cis * self.attention_scaling
return freqs_cis
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
xq_out = torch.view_as_real(xq_ * freqs_cis[:, :, None, :]).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis[:, :, None, :]).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
# Adapted from transformers.models.llama.modeling_llama.eager_attention_forward -> llama4 doesn't cast attn weights to fp32
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Adapted from transformers.models.llama.modeling_llama.eager_attention_forward -> llama4 doesn't cast attn weights to fp32
def vision_eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * module.head_dim**-0.5
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Llama4TextRotaryEmbedding |
python | getsentry__sentry | src/sentry/auth/store.py | {
"start": 206,
"end": 670
} | class ____(PipelineSessionStore):
redis_namespace = "auth"
@property
def session_key(self) -> str:
return "auth_key"
flow = redis_property("flow")
referrer = redis_property("referrer")
def mark_session(self) -> None:
super().mark_session()
self.request.session.modified = True
def is_valid(self) -> bool:
return super().is_valid() and self.flow in (FLOW_LOGIN, FLOW_SETUP_PROVIDER)
| AuthHelperSessionStore |
python | scikit-learn__scikit-learn | sklearn/utils/_testing.py | {
"start": 11963,
"end": 39220
} | class ____:
"""
Parameters
----------
data
mmap_mode : str, default='r'
"""
def __init__(self, data, mmap_mode="r"):
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
data_read_only, self.temp_folder = create_memmap_backed_data(
self.data, mmap_mode=self.mmap_mode, return_folder=True
)
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
def create_memmap_backed_data(data, mmap_mode="r", return_folder=False):
"""
Parameters
----------
data
mmap_mode : str, default='r'
return_folder : bool, default=False
"""
temp_folder = tempfile.mkdtemp(prefix="sklearn_testing_")
atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
filename = op.join(temp_folder, "data.pkl")
joblib.dump(data, filename)
memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
result = (
memmap_backed_data if not return_folder else (memmap_backed_data, temp_folder)
)
return result
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [
key
for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
]
if varargs:
varargs = [
param.name
for param in params.values()
if param.kind == param.VAR_POSITIONAL
]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func):
"""Get function full name.
Parameters
----------
func : callable
The function object.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
qualname = func.__qualname__
if qualname != func.__name__:
parts.append(qualname[: qualname.find(".")])
parts.append(func.__name__)
return ".".join(parts)
def check_docstring_parameters(func, doc=None, ignore=None):
"""Helper to check docstring.
Parameters
----------
func : callable
The function object to test.
doc : str, default=None
Docstring if it is passed manually to the test.
ignore : list, default=None
Parameters to ignore.
Returns
-------
incorrect : list
A list of string describing the incorrect results.
"""
from numpydoc import docscrape
incorrect = []
ignore = [] if ignore is None else ignore
func_name = _get_func_name(func)
if not func_name.startswith("sklearn.") or func_name.startswith(
"sklearn.externals"
):
return incorrect
# Don't check docstring for property-functions
if inspect.isdatadescriptor(func):
return incorrect
# Don't check docstring for setup / teardown pytest functions
if func_name.split(".")[-1] in ("setup_module", "teardown_module"):
return incorrect
# Dont check estimator_checks module
if func_name.split(".")[2] == "estimator_checks":
return incorrect
# Get the arguments from the function signature
param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
# drop self
if len(param_signature) > 0 and param_signature[0] == "self":
param_signature.remove("self")
# Analyze function's docstring
if doc is None:
records = []
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
try:
doc = docscrape.FunctionDoc(func)
except UserWarning as exp:
if "potentially wrong underline length" in str(exp):
# Catch warning raised as of numpydoc 1.2 when
# the underline length for a section of a docstring
# is not consistent.
message = str(exp).split("\n")[:3]
incorrect += [f"In function: {func_name}"] + message
return incorrect
records.append(str(exp))
except Exception as exp:
incorrect += [func_name + " parsing error: " + str(exp)]
return incorrect
if len(records):
raise RuntimeError("Error for %s:\n%s" % (func_name, records[0]))
param_docs = []
for name, type_definition, param_doc in doc["Parameters"]:
# Type hints are empty only if parameter name ended with :
if not type_definition.strip():
if ":" in name and name[: name.index(":")][-1:].strip():
incorrect += [
func_name
+ " There was no space between the param name and colon (%r)" % name
]
elif name.rstrip().endswith(":"):
incorrect += [
func_name
+ " Parameter %r has an empty type spec. Remove the colon"
% (name.lstrip())
]
# Create a list of parameters to compare with the parameters gotten
# from the func signature
if "*" not in name:
param_docs.append(name.split(":")[0].strip("` "))
# If one of the docstring's parameters had an error then return that
# incorrect message
if len(incorrect) > 0:
return incorrect
# Remove the parameters that should be ignored from list
param_docs = list(filter(lambda x: x not in ignore, param_docs))
# The following is derived from pytest, Copyright (c) 2004-2017 Holger
# Krekel and others, Licensed under MIT License. See
# https://github.com/pytest-dev/pytest
message = []
for i in range(min(len(param_docs), len(param_signature))):
if param_signature[i] != param_docs[i]:
message += [
"There's a parameter name mismatch in function"
" docstring w.r.t. function signature, at index %s"
" diff: %r != %r" % (i, param_signature[i], param_docs[i])
]
break
if len(param_signature) > len(param_docs):
message += [
"Parameters in function docstring have less items w.r.t."
" function signature, first missing item: %s"
% param_signature[len(param_docs)]
]
elif len(param_signature) < len(param_docs):
message += [
"Parameters in function docstring have more items w.r.t."
" function signature, first extra item: %s"
% param_docs[len(param_signature)]
]
# If there wasn't any difference in the parameters themselves between
# docstring and signature including having the same length then return
# empty list
if len(message) == 0:
return []
import difflib
import pprint
param_docs_formatted = pprint.pformat(param_docs).splitlines()
param_signature_formatted = pprint.pformat(param_signature).splitlines()
message += ["Full diff:"]
message.extend(
line.strip()
for line in difflib.ndiff(param_signature_formatted, param_docs_formatted)
)
incorrect.extend(message)
# Prepend function name
incorrect = ["In function: " + func_name] + incorrect
return incorrect
def _check_item_included(item_name, args):
"""Helper to check if item should be included in checking."""
if args.include is not True and item_name not in args.include:
return False
if args.exclude is not None and item_name in args.exclude:
return False
return True
def _diff_key(line):
"""Key for grouping output from `context_diff`."""
if line.startswith(" "):
return " "
elif line.startswith("- "):
return "- "
elif line.startswith("+ "):
return "+ "
elif line.startswith("! "):
return "! "
return None
def _get_diff_msg(docstrings_grouped):
"""Get message showing the difference between type/desc docstrings of all objects.
`docstrings_grouped` keys should be the type/desc docstrings and values are a list
of objects with that docstring. Objects with the same type/desc docstring are
thus grouped together.
"""
msg_diff = ""
ref_str = ""
ref_group = []
for docstring, group in docstrings_grouped.items():
if not ref_str and not ref_group:
ref_str += docstring
ref_group.extend(group)
diff = list(
context_diff(
ref_str.split(),
docstring.split(),
fromfile=str(ref_group),
tofile=str(group),
n=8,
)
)
# Add header
msg_diff += "".join((diff[:3]))
# Group consecutive 'diff' words to shorten error message
for start, group in groupby(diff[3:], key=_diff_key):
if start is None:
msg_diff += "\n" + "\n".join(group)
else:
msg_diff += "\n" + start + " ".join(word[2:] for word in group)
# Add new lines at end of diff, to separate comparisons
msg_diff += "\n\n"
return msg_diff
def _check_consistency_items(
items_docs,
type_or_desc,
section,
n_objects,
descr_regex_pattern="",
ignore_types=tuple(),
):
"""Helper to check docstring consistency of all `items_docs`.
If item is not present in all objects, checking is skipped and warning raised.
If `regex` provided, match descriptions to all descriptions.
Parameters
----------
items_doc : dict of dict of str
Dictionary where the key is the string type or description, value is
a dictionary where the key is "type description" or "description"
and the value is a list of object names with the same string type or
description.
type_or_desc : {"type description", "description"}
Whether to check type description or description between objects.
section : {"Parameters", "Attributes", "Returns"}
Name of the section type.
n_objects : int
Total number of objects.
descr_regex_pattern : str, default=""
Regex pattern to match for description of all objects.
Ignored when `type_or_desc="type description".
ignore_types : tuple of str, default=()
Tuple of parameter/attribute/return names for which type description
matching is ignored. Ignored when `type_or_desc="description".
"""
skipped = []
for item_name, docstrings_grouped in items_docs.items():
# If item not found in all objects, skip
if sum([len(objs) for objs in docstrings_grouped.values()]) < n_objects:
skipped.append(item_name)
# If regex provided, match to all descriptions
elif type_or_desc == "description" and descr_regex_pattern:
not_matched = []
for docstring, group in docstrings_grouped.items():
if not re.search(descr_regex_pattern, docstring):
not_matched.extend(group)
if not_matched:
msg = textwrap.fill(
f"The description of {section[:-1]} '{item_name}' in {not_matched}"
f" does not match 'descr_regex_pattern': {descr_regex_pattern} "
)
raise AssertionError(msg)
# Skip type checking for items in `ignore_types`
elif type_or_desc == "type specification" and item_name in ignore_types:
continue
# Otherwise, if more than one key, docstrings not consistent between objects
elif len(docstrings_grouped.keys()) > 1:
msg_diff = _get_diff_msg(docstrings_grouped)
obj_groups = " and ".join(
str(group) for group in docstrings_grouped.values()
)
msg = textwrap.fill(
f"The {type_or_desc} of {section[:-1]} '{item_name}' is inconsistent "
f"between {obj_groups}:"
)
msg += msg_diff
raise AssertionError(msg)
if skipped:
warnings.warn(
f"Checking was skipped for {section}: {skipped} as they were "
"not found in all objects."
)
def assert_docstring_consistency(
objects,
include_params=False,
exclude_params=None,
include_attrs=False,
exclude_attrs=None,
include_returns=False,
exclude_returns=None,
descr_regex_pattern=None,
ignore_types=tuple(),
):
r"""Check consistency between docstring parameters/attributes/returns of objects.
Checks if parameters/attributes/returns have the same type specification and
description (ignoring whitespace) across `objects`. Intended to be used for
related classes/functions/data descriptors.
Entries that do not appear across all `objects` are ignored.
Parameters
----------
objects : list of {classes, functions, data descriptors}
Objects to check.
Objects may be classes, functions or data descriptors with docstrings that
can be parsed by numpydoc.
include_params : list of str or bool, default=False
List of parameters to be included. If True, all parameters are included,
if False, checking is skipped for parameters.
Can only be set if `exclude_params` is None.
exclude_params : list of str or None, default=None
List of parameters to be excluded. If None, no parameters are excluded.
Can only be set if `include_params` is True.
include_attrs : list of str or bool, default=False
List of attributes to be included. If True, all attributes are included,
if False, checking is skipped for attributes.
Can only be set if `exclude_attrs` is None.
exclude_attrs : list of str or None, default=None
List of attributes to be excluded. If None, no attributes are excluded.
Can only be set if `include_attrs` is True.
include_returns : list of str or bool, default=False
List of returns to be included. If True, all returns are included,
if False, checking is skipped for returns.
Can only be set if `exclude_returns` is None.
exclude_returns : list of str or None, default=None
List of returns to be excluded. If None, no returns are excluded.
Can only be set if `include_returns` is True.
descr_regex_pattern : str, default=None
Regular expression to match to all descriptions of included
parameters/attributes/returns. If None, will revert to default behavior
of comparing descriptions between objects.
ignore_types : tuple of str, default=tuple()
Tuple of parameter/attribute/return names to exclude from type description
matching between objects.
Examples
--------
>>> from sklearn.metrics import (accuracy_score, classification_report,
... mean_absolute_error, mean_squared_error, median_absolute_error)
>>> from sklearn.utils._testing import assert_docstring_consistency
... # doctest: +SKIP
>>> assert_docstring_consistency([mean_absolute_error, mean_squared_error],
... include_params=['y_true', 'y_pred', 'sample_weight']) # doctest: +SKIP
>>> assert_docstring_consistency([median_absolute_error, mean_squared_error],
... include_params=True) # doctest: +SKIP
>>> assert_docstring_consistency([accuracy_score, classification_report],
... include_params=["y_true"],
... descr_regex_pattern=r"Ground truth \(correct\) (labels|target values)")
... # doctest: +SKIP
"""
from numpydoc.docscrape import NumpyDocString
Args = namedtuple("args", ["include", "exclude", "arg_name"])
def _create_args(include, exclude, arg_name, section_name):
if exclude and include is not True:
raise TypeError(
f"The 'exclude_{arg_name}' argument can be set only when the "
f"'include_{arg_name}' argument is True."
)
if include is False:
return {}
return {section_name: Args(include, exclude, arg_name)}
section_args = {
**_create_args(include_params, exclude_params, "params", "Parameters"),
**_create_args(include_attrs, exclude_attrs, "attrs", "Attributes"),
**_create_args(include_returns, exclude_returns, "returns", "Returns"),
}
objects_doc = dict()
for obj in objects:
if (
inspect.isdatadescriptor(obj)
or inspect.isfunction(obj)
or inspect.isclass(obj)
):
objects_doc[obj.__name__] = NumpyDocString(inspect.getdoc(obj))
else:
raise TypeError(
"All 'objects' must be one of: function, class or descriptor, "
f"got a: {type(obj)}."
)
n_objects = len(objects)
for section, args in section_args.items():
type_items = defaultdict(lambda: defaultdict(list))
desc_items = defaultdict(lambda: defaultdict(list))
for obj_name, obj_doc in objects_doc.items():
for item_name, type_def, desc in obj_doc[section]:
if _check_item_included(item_name, args):
# Normalize white space
type_def = " ".join(type_def.strip().split())
desc = " ".join(chain.from_iterable(line.split() for line in desc))
# Use string type/desc as key, to group consistent objs together
type_items[item_name][type_def].append(obj_name)
desc_items[item_name][desc].append(obj_name)
_check_consistency_items(
type_items,
"type specification",
section,
n_objects,
ignore_types=ignore_types,
)
_check_consistency_items(
desc_items,
"description",
section,
n_objects,
descr_regex_pattern=descr_regex_pattern,
)
def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and the stdtout +
stderr should not match the pattern `pattern`.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
pattern : str
Pattern that the stdout + stderr should not match. By default, unless
stdout + stderr are both empty, an error will be raised.
timeout : int, default=60
Time in seconds before timeout.
"""
fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
os.close(fd)
try:
with open(source_file, "wb") as f:
f.write(source_code.encode("utf-8"))
cmd = [sys.executable, source_file]
cwd = op.normpath(op.join(op.dirname(sklearn_path), ".."))
env = os.environ.copy()
try:
env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
except KeyError:
env["PYTHONPATH"] = cwd
kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env}
# If coverage is running, pass the config file to the subprocess
coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
if coverage_rc:
kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc
kwargs["timeout"] = timeout
try:
try:
out = check_output(cmd, **kwargs)
except CalledProcessError as e:
raise RuntimeError(
"script errored with output:\n%s" % e.output.decode("utf-8")
)
out = out.decode("utf-8")
if re.search(pattern, out):
if pattern == ".+":
expectation = "Expected no output"
else:
expectation = f"The output was not supposed to match {pattern!r}"
message = f"{expectation}, got the following output instead: {out!r}"
raise AssertionError(message)
except TimeoutExpired as e:
raise RuntimeError(
"script timeout, output so far:\n%s" % e.output.decode("utf-8")
)
finally:
os.unlink(source_file)
def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"pandas", "series", "index", "slice", "sparse_csr", "sparse_csc", \
"sparse_csr_array", "sparse_csc_array", "pyarrow", "polars", \
"polars_series"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas/polars container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
minversion : str, default=None
Minimum version for package to install.
categorical_feature_names : list of str, default=None
List of column names to cast to categorical dtype.
Returns
-------
converted_container
"""
if constructor_name == "list":
if dtype is None:
return list(container)
else:
return np.asarray(container, dtype=dtype).tolist()
elif constructor_name == "tuple":
if dtype is None:
return tuple(container)
else:
return tuple(np.asarray(container, dtype=dtype).tolist())
elif constructor_name == "array":
return np.asarray(container, dtype=dtype)
elif constructor_name in ("pandas", "dataframe"):
pd = pytest.importorskip("pandas", minversion=minversion)
result = pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result[col_name] = result[col_name].astype("category")
return result
elif constructor_name == "pyarrow":
pa = pytest.importorskip("pyarrow", minversion=minversion)
array = np.asarray(container)
array = array[:, None] if array.ndim == 1 else array
if columns_name is None:
columns_name = [f"col{i}" for i in range(array.shape[1])]
data = {name: array[:, i] for i, name in enumerate(columns_name)}
result = pa.Table.from_pydict(data)
if categorical_feature_names is not None:
for col_idx, col_name in enumerate(result.column_names):
if col_name in categorical_feature_names:
result = result.set_column(
col_idx, col_name, result.column(col_name).dictionary_encode()
)
return result
elif constructor_name == "polars":
pl = pytest.importorskip("polars", minversion=minversion)
result = pl.DataFrame(container, schema=columns_name, orient="row")
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result = result.with_columns(pl.col(col_name).cast(pl.Categorical))
return result
elif constructor_name == "series":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Series(container, dtype=dtype)
elif constructor_name == "pyarrow_array":
pa = pytest.importorskip("pyarrow", minversion=minversion)
return pa.array(container)
elif constructor_name == "polars_series":
pl = pytest.importorskip("polars", minversion=minversion)
return pl.Series(values=container)
elif constructor_name == "index":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Index(container, dtype=dtype)
elif constructor_name == "slice":
return slice(container[0], container[1])
elif "sparse" in constructor_name:
if not sp.sparse.issparse(container):
# For scipy >= 1.13, sparse array constructed from 1d array may be
# 1d or raise an exception. To avoid this, we make sure that the
# input container is 2d. For more details, see
# https://github.com/scipy/scipy/pull/18530#issuecomment-1878005149
container = np.atleast_2d(container)
if constructor_name in ("sparse", "sparse_csr"):
# sparse and sparse_csr are equivalent for legacy reasons
return sp.sparse.csr_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csr_array":
return sp.sparse.csr_array(container, dtype=dtype)
elif constructor_name == "sparse_csc":
return sp.sparse.csc_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csc_array":
return sp.sparse.csc_array(container, dtype=dtype)
def raises(expected_exc_type, match=None, may_pass=False, err_msg=None):
"""Context manager to ensure exceptions are raised within a code block.
This is similar to and inspired from pytest.raises, but supports a few
other cases.
This is only intended to be used in estimator_checks.py where we don't
want to use pytest. In the rest of the code base, just use pytest.raises
instead.
Parameters
----------
excepted_exc_type : Exception or list of Exception
The exception that should be raised by the block. If a list, the block
should raise one of the exceptions.
match : str or list of str, default=None
A regex that the exception message should match. If a list, one of
the entries must match. If None, match isn't enforced.
may_pass : bool, default=False
If True, the block is allowed to not raise an exception. Useful in
cases where some estimators may support a feature but others must
fail with an appropriate error message. By default, the context
manager will raise an exception if the block does not raise an
exception.
err_msg : str, default=None
If the context manager fails (e.g. the block fails to raise the
proper exception, or fails to match), then an AssertionError is
raised with this message. By default, an AssertionError is raised
with a default error message (depends on the kind of failure). Use
this to indicate how users should fix their estimators to pass the
checks.
Attributes
----------
raised_and_matched : bool
True if an exception was raised and a match was found, False otherwise.
"""
return _Raises(expected_exc_type, match, may_pass, err_msg)
| TempMemmap |
python | kamyu104__LeetCode-Solutions | Python/lfu-cache.py | {
"start": 2766,
"end": 4615
} | class ____(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.__capa = capacity
self.__size = 0
self.__min_freq = float("inf")
self.__freq_to_nodes = collections.defaultdict(LinkedList)
self.__key_to_node = {}
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.__key_to_node:
return -1
value = self.__key_to_node[key].val
self.__update(key, value)
return value
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.__capa <= 0:
return
if key not in self.__key_to_node and self.__size == self.__capa:
del self.__key_to_node[self.__freq_to_nodes[self.__min_freq].head.key]
self.__freq_to_nodes[self.__min_freq].delete(self.__freq_to_nodes[self.__min_freq].head)
if not self.__freq_to_nodes[self.__min_freq].head:
del self.__freq_to_nodes[self.__min_freq]
self.__size -= 1
self.__update(key, value)
def __update(self, key, value):
freq = 0
if key in self.__key_to_node:
old_node = self.__key_to_node[key]
freq = old_node.freq
self.__freq_to_nodes[freq].delete(old_node)
if not self.__freq_to_nodes[freq].head:
del self.__freq_to_nodes[freq]
if self.__min_freq == freq:
self.__min_freq += 1
self.__size -= 1
freq += 1
self.__min_freq = min(self.__min_freq, freq)
self.__key_to_node[key] = ListNode(key, value, freq)
self.__freq_to_nodes[freq].append(self.__key_to_node[key])
self.__size += 1
| LFUCache2 |
python | PrefectHQ__prefect | src/prefect/deployments/runner.py | {
"start": 3237,
"end": 3358
} | class ____(RuntimeError):
"""
Raised when an error occurs while applying a deployment.
"""
| DeploymentApplyError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/unsorted_segment_join_op_test.py | {
"start": 1080,
"end": 1376
} | class ____(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype('U'),
np.array(actual).astype('U'))
@test_util.run_all_in_graph_and_eager_modes
| UnicodeTestCase |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 16013,
"end": 16145
} | class ____(_TestDSTIVBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
| TestDSTIVInt |
python | huggingface__transformers | tests/utils/test_tokenization_utils.py | {
"start": 10007,
"end": 12019
} | class ____(unittest.TestCase):
def test_trie(self):
trie = Trie()
trie.add("Hello 友達")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
trie.add("Hello")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
def test_trie_split(self):
trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
def test_trie_single(self):
trie = Trie()
trie.add("A")
self.assertEqual(trie.split("ABC"), ["A", "BC"])
self.assertEqual(trie.split("BCA"), ["BC", "A"])
def test_trie_final(self):
trie = Trie()
trie.add("TOKEN]")
trie.add("[SPECIAL_TOKEN]")
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
def test_trie_subtokens(self):
trie = Trie()
trie.add("A")
trie.add("P")
trie.add("[SPECIAL_TOKEN]")
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
def test_trie_suffix_tokens(self):
trie = Trie()
trie.add("AB")
trie.add("B")
trie.add("C")
self.assertEqual(trie.split("ABC"), ["AB", "C"])
def test_trie_skip(self):
trie = Trie()
trie.add("ABC")
trie.add("B")
trie.add("CD")
self.assertEqual(trie.split("ABCD"), ["ABC", "D"])
def test_cut_text_hardening(self):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
trie = Trie()
parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
self.assertEqual(parts, ["AB", "C"])
| TrieTest |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 26659,
"end": 30113
} | class ____(ConvBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = ConvBertEmbeddings(config)
if config.embedding_size != config.hidden_size:
self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
self.encoder = ConvBertEncoder(config)
self.config = config
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
hidden_states = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states)
hidden_states = self.encoder(
hidden_states,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return hidden_states
| ConvBertModel |
python | coleifer__peewee | tests/regressions.py | {
"start": 35008,
"end": 35113
} | class ____(TestModel):
page = ForeignKeyField(Page, backref='items')
content = TextField()
| PageItem |
python | great-expectations__great_expectations | great_expectations/core/batch.py | {
"start": 2753,
"end": 9274
} | class ____(SerializableDictDot):
"""Precisely identifies a set of data from a data source.
More concretely, a BatchDefinition includes all the information required to precisely
identify a set of data from the external data source that should be
translated into a Batch. One or more BatchDefinitions should always be
*returned* from the Datasource, as a result of processing the Batch Request.
---Documentation---
- https://docs.greatexpectations.io/docs/terms/batch/#batches-and-batch-requests-design-motivation
Args:
datasource_name: name of the Datasource used to connect to the data
data_connector_name: name of the DataConnector used to connect to the data
data_asset_name: name of the DataAsset used to connect to the data
batch_identifiers: key-value pairs that the DataConnector
will use to obtain a specific set of data
batch_spec_passthrough: a dictionary of additional parameters that
the ExecutionEngine will use to obtain a specific set of data
Returns:
BatchDefinition
"""
def __init__( # noqa: PLR0913 # FIXME CoP
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
batch_identifiers: IDDict,
batch_spec_passthrough: dict | None = None,
batching_regex: re.Pattern | None = None,
) -> None:
self._validate_batch_definition(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
batch_identifiers=batch_identifiers,
)
assert type(batch_identifiers) == IDDict # noqa: E721 # legacy code
self._datasource_name = datasource_name
self._data_connector_name = data_connector_name
self._data_asset_name = data_asset_name
self._batch_identifiers = batch_identifiers
self._batch_spec_passthrough = batch_spec_passthrough
self._batching_regex = batching_regex
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this BatchDefinition.
Returns:
A JSON-serializable dict representation of this BatchDefinition.
"""
fields_dict: dict = {
"datasource_name": self._datasource_name,
"data_connector_name": self._data_connector_name,
"data_asset_name": self._data_asset_name,
"batch_identifiers": self._batch_identifiers,
}
if self._batch_spec_passthrough:
fields_dict["batch_spec_passthrough"] = self._batch_spec_passthrough
if self._batching_regex:
fields_dict["batching_regex"] = self._batching_regex
return convert_to_json_serializable(data=fields_dict)
@override
def __repr__(self) -> str:
doc_fields_dict: dict = {
"datasource_name": self._datasource_name,
"data_connector_name": self._data_connector_name,
"data_asset_name": self._data_asset_name,
"batch_identifiers": self._batch_identifiers,
}
return str(doc_fields_dict)
@staticmethod
def _validate_batch_definition(
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
batch_identifiers: IDDict,
) -> None:
if datasource_name is None:
raise ValueError("A valid datasource must be specified.") # noqa: TRY003 # FIXME CoP
if datasource_name and not isinstance(datasource_name, str):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type of an datasource name must be a string (Python "str"). The type given is
"{type(datasource_name)!s}", which is illegal.
""" # noqa: E501 # FIXME CoP
)
if data_connector_name is None:
raise ValueError("A valid data_connector must be specified.") # noqa: TRY003 # FIXME CoP
if data_connector_name and not isinstance(data_connector_name, str):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type of a data_connector name must be a string (Python "str"). The type given is
"{type(data_connector_name)!s}", which is illegal.
""" # noqa: E501 # FIXME CoP
)
if data_asset_name is None:
raise ValueError("A valid data_asset_name must be specified.") # noqa: TRY003 # FIXME CoP
if data_asset_name and not isinstance(data_asset_name, str):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type of a data_asset name must be a string (Python "str"). The type given is
"{type(data_asset_name)!s}", which is illegal.
""" # noqa: E501 # FIXME CoP
)
if batch_identifiers and not isinstance(batch_identifiers, IDDict):
raise TypeError( # noqa: TRY003 # FIXME CoP
f"""The type of batch_identifiers must be an IDDict object. The type given is \
"{type(batch_identifiers)!s}", which is illegal.
"""
)
@property
def datasource_name(self) -> str:
return self._datasource_name
@property
def data_connector_name(self) -> str:
return self._data_connector_name
@property
def data_asset_name(self) -> str:
return self._data_asset_name
@property
def batch_identifiers(self) -> IDDict:
return self._batch_identifiers
@property
def batch_spec_passthrough(self) -> dict | None:
return self._batch_spec_passthrough
@batch_spec_passthrough.setter
def batch_spec_passthrough(self, batch_spec_passthrough: dict | None) -> None:
self._batch_spec_passthrough = batch_spec_passthrough
@property
def id(self) -> IDDictID:
return IDDict(self.to_json_dict()).to_id()
@property
def batching_regex(self) -> re.Pattern | None:
return self._batching_regex
@override
def __eq__(self, other):
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return self.id == other.id
@override
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
@override
def __hash__(self) -> int:
"""Overrides the default implementation"""
_result_hash: int = hash(self.id)
return _result_hash
| LegacyBatchDefinition |
python | catalyst-team__catalyst | catalyst/contrib/losses/recsys.py | {
"start": 11901,
"end": 16665
} | class ____(PairwiseLoss):
"""Roc-star loss function.
Smooth approximation for ROC-AUC. It has been proposed in
`Roc-star\: An objective function for ROC-AUC that actually works`_.
.. _Roc-star\: An objective function for ROC-AUC that actually works:
https://github.com/iridiumblue/roc-star
Adapted from:
https://github.com/iridiumblue/roc-star/issues/2
Args:
delta: Param from the article. Default: ``1.0``.
sample_size: Number of examples to take for ROC AUC approximation. Default: ``100``.
sample_size_gamma: Number of examples to take for Gamma parameter approximation.
Default: ``1000``.
update_gamma_each: Number of steps after which to recompute gamma value.
Default: ``50``.
Example:
.. code-block:: python
import torch
from catalyst.contrib.losses import recsys
outputs = torch.randn(5, 1, requires_grad=True)
targets = torch.randn(5, 1, requires_grad=True)
output = recsys.RocStarLoss()(outputs, targets)
output.backward()
"""
def __init__(
self,
delta: float = 1.0,
sample_size: int = 100,
sample_size_gamma: int = 1000,
update_gamma_each: int = 50,
):
super().__init__()
self.delta = delta
self.sample_size = sample_size
self.sample_size_gamma = sample_size_gamma
self.update_gamma_each = update_gamma_each
self.steps = 0
self.gamma = None
size = max(sample_size, sample_size_gamma)
# Randomly init labels
self.outputs_history = torch.rand((size + 2, 1))
self.targets_history = torch.cat(
(torch.randint(2, (size, 1)), torch.LongTensor([[0], [1]]))
)
def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Forward propagation method for the roc-star loss.
Args:
outputs: Tensor of model predictions in [0, 1] range. Shape ``(B x 1)``.
targets: Tensor of true labels in {0, 1}. Shape ``(B x 1)``.
Returns:
computed loss
"""
self._assert_equal_size(outputs, targets)
if torch.sum(targets) == 0 or torch.sum(targets) == targets.shape[0]:
return torch.sum(outputs) * 1e-8
if self.steps % self.update_gamma_each == 0:
self._update_gamma()
self.steps += 1
positive = outputs[targets > 0]
negative = outputs[targets < 1]
# Take last `sample_size` elements from history
outputs_history = self.outputs_history[-self.sample_size :]
targets_history = self.targets_history[-self.sample_size :]
positive_history = outputs_history[targets_history > 0]
negative_history = outputs_history[targets_history < 1]
if positive.size(0) > 0:
diff = negative_history.view(1, -1) + self.gamma - positive.view(-1, 1)
loss_positive = nn.functional.relu(diff ** 2).mean()
else:
loss_positive = 0
if negative.size(0) > 0:
diff = negative.view(1, -1) + self.gamma - positive_history.view(-1, 1)
loss_negative = nn.functional.relu(diff ** 2).mean()
else:
loss_negative = 0
loss = loss_negative + loss_positive
# Update FIFO queue
batch_size = outputs.size(0)
self.outputs_history = torch.cat(
(self.outputs_history[batch_size:], outputs.clone().detach())
)
self.targets_history = torch.cat(
(self.targets_history[batch_size:], targets.clone().detach())
)
return loss
def _update_gamma(self):
# Take last `sample_size_gamma` elements from history
outputs = self.outputs_history[-self.sample_size_gamma :]
targets = self.targets_history[-self.sample_size_gamma :]
positive = outputs[targets > 0]
negative = outputs[targets < 1]
# Create matrix of size sample_size_gamma x sample_size_gamma
diff = positive.view(-1, 1) - negative.view(1, -1)
AUC = (diff > 0).type(torch.float).mean()
num_wrong_ordered = (1 - AUC) * diff.flatten().size(0)
# Adjunct gamma, so that among correct ordered samples `delta * num_wrong_ordered`
# were considered ordered incorrectly with gamma added
correct_ordered = diff[diff > 0].flatten().sort().values
idx = min(int(num_wrong_ordered * self.delta), len(correct_ordered) - 1)
if idx >= 0:
self.gamma = correct_ordered[idx]
__all__ = [
"AdaptiveHingeLoss",
"BPRLoss",
"HingeLoss",
"LogisticLoss",
"RocStarLoss",
"WARPLoss",
]
| RocStarLoss |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 12013,
"end": 12318
} | class ____(MarkdownHeader):
"""An H3 Markdown header."""
LEVEL = 3
DEFAULT_CSS = """
MarkdownH3 {
color: $markdown-h3-color;
background: $markdown-h3-background;
text-style: $markdown-h3-text-style;
margin: 1 0;
width: auto;
}
"""
| MarkdownH3 |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/parallel.py | {
"start": 2751,
"end": 7529
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
"""
Builds an indexer consumer based on the multi process transform Arroyo step.
Multi processing happens in batches, the parallel step batches messages, then
it dispatches them to a process. This is meant to avoid lock contention that
would happen by transferring one message at a time.
The parallel transform function is then applied to all messages one by one.
The indexer must resolve batches of messages. It cannot resolve messages in
isolation otherwise the amount of roundtrip to cache would be enormous.
So the pipeline works this way:
- the indexer batches messages like today.
- each batch is a message for the parallel transform step.
- the parallel transform step may or may not re-batch messages batcehs
together. The load tests show it is still useful.
- messages are exploded back into individual ones after the parallel
transform step.
"""
def __init__(
self,
max_msg_batch_size: int,
max_msg_batch_time: float,
max_parallel_batch_size: int,
max_parallel_batch_time: float,
processes: int,
input_block_size: int | None,
output_block_size: int | None,
ingest_profile: str,
indexer_db: str,
):
from sentry.sentry_metrics.configuration import (
IndexerStorage,
UseCaseKey,
get_ingest_config,
initialize_main_process_state,
)
from sentry.sentry_metrics.consumers.indexer.slicing_router import get_slicing_router
use_case = UseCaseKey(ingest_profile)
db_backend = IndexerStorage(indexer_db)
ingest_config = get_ingest_config(use_case, db_backend)
initialize_main_process_state(ingest_config)
slicing_router = get_slicing_router(ingest_config)
self.config = ingest_config
# This is the size of the initial message batching the indexer does
self.__max_msg_batch_size = max_msg_batch_size
self.__max_msg_batch_time = max_msg_batch_time
# This is the size of the batches sent to the parallel processes.
# These are batches of batches.
self.__max_parallel_batch_size = max_parallel_batch_size
self.__max_parallel_batch_time = max_parallel_batch_time
self.__input_block_size = input_block_size
self.__output_block_size = output_block_size
self.__slicing_router = slicing_router
self.__pool = MultiprocessingPool(
num_processes=processes,
# It is absolutely crucial that we pass a function reference here
# where the function lives in a module that does not depend on
# Django settings. `sentry.sentry_metrics.configuration` fulfills
# that requirement, but if you were to create a wrapper function in
# this module, and pass that function here, it would attempt to
# pull in a bunch of modules that try to read django settings at
# import time
initializer=functools.partial(initialize_subprocess_state, self.config),
)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
producer = get_metrics_producer_strategy(
config=self.config,
commit=commit,
slicing_router=self.__slicing_router,
)
parallel_strategy = run_task_with_multiprocessing(
function=MessageProcessor(self.config).process_messages,
next_step=Unbatcher(next_step=producer),
pool=self.__pool,
max_batch_size=self.__max_parallel_batch_size,
# This is in seconds
max_batch_time=self.__max_parallel_batch_time / 1000,
input_block_size=self.__input_block_size,
output_block_size=self.__output_block_size,
)
strategy = BatchMessages(
parallel_strategy, self.__max_msg_batch_time, self.__max_msg_batch_size
)
return strategy
def shutdown(self) -> None:
self.__pool.close()
def get_metrics_producer_strategy(
config: MetricsIngestConfiguration,
commit: Commit,
slicing_router: SlicingRouter | None,
) -> Any:
if config.is_output_sliced:
if slicing_router is None:
raise ValueError("Slicing router is required for sliced output")
return RoutingProducerStep(
commit_function=commit,
message_router=slicing_router,
)
else:
return SimpleProduceStep(
commit_function=commit,
output_topic=config.output_topic,
)
| MetricsConsumerStrategyFactory |
python | pytorch__pytorch | functorch/dim/__init__.py | {
"start": 30471,
"end": 32974
} | class ____(_Tensor):
_level: int
_name: str
_size: int
_range: Optional[torch.Tensor]
_batchtensor: Optional[torch.Tensor]
def __init__(self, name: str, s: int = -1) -> None:
global _n_dims_created
self._name = name
self._size = s
self._level = _n_dims_created
_n_dims_created += 1
self._range = None
self._batchtensor = None
@property
def ndim(self) -> int:
return 1
@classmethod
def check_exact(cls, obj: Any) -> bool:
return type(obj) is cls
@property
def size(self) -> int:
if self._size == -1:
raise ValueError(f"dimension {self._name} is unbound")
return self._size
@size.setter
def size(self, v: int) -> None:
if self._size == -1:
self._size = v
elif self._size != v:
raise DimensionBindError(
f"Dim '{repr(self)}' previously bound to a dimension of size {self._size} "
f"cannot bind to a dimension of size {v}"
)
@property
def is_bound(self) -> bool:
"""Return True if this dimension is bound to a size."""
return self._size != -1
def _get_range(self) -> torch.Tensor:
"""
Get a tensor representing the range [0, size) for this dimension.
Returns:
A 1D tensor with values [0, 1, 2, ..., size-1]
"""
if self._range is None:
self._range = torch.arange(self.size)
return self._range
def _get_batchtensor(self) -> torch.Tensor:
"""
Get a batched tensor representation of this dimension.
Returns:
A batched tensor created from the range tensor
"""
if self._batchtensor is None:
self._batchtensor = torch._C._functorch._add_batch_dim(
self._get_range(), 0, self._level
)
return self._batchtensor
def __repr__(self) -> str:
"""String representation of a Dim object."""
return self._name
# note that Dim comes before tensor because we want the Dim API for things like size to take precedence.
# Tensor defines format, but we want to print Dims with special formatting
__format__ = object.__format__
# Somewhat confusingly, an FCD tensor is also called Tensor. This confusion
# is somewhat intentional, as FCD tensors are intended to be substitutable
# with regular Tensor (just with some positional dims hidden).
| Dim |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F811_18.py | {
"start": 306,
"end": 420
} | class ____(unittest.TestCase):
def test_send_defaults(self):
transport.Transport()
| TestTransportMethodArgs |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 30248,
"end": 30479
} | class ____(TemplateView):
template_name = "account/account_inactive." + app_settings.TEMPLATE_EXTENSION
account_inactive = AccountInactiveView.as_view()
@method_decorator(login_not_required, name="dispatch")
| AccountInactiveView |
python | getsentry__sentry | src/sentry/workflow_engine/utils/dictpath.py | {
"start": 1124,
"end": 1957
} | class ____[T]:
def __init__(self, exc: ValueError) -> None:
self._exc = exc
def failed(self) -> bool:
return True
def get(self, fallback: T | None = None) -> T:
if fallback is not None:
return fallback
raise self._exc
def get_or_none(self) -> T | None:
return None
def is_type[V](self, t: type[V]) -> Result[V]:
return cast(Result[V], self)
# return _FailedResultImpl[V](self._exc)
def list_of[V](self, t: type[V]) -> Result[list[V]]:
return _FailedResultImpl[list[V]](self._exc)
def _dictpath_error(path: list[str], msg: str) -> ValueError:
return ValueError(f"{'.'.join(path)}: {msg}")
def _failure[T](path: list[str], msg: str) -> Result[T]:
return _FailedResultImpl[T](_dictpath_error(path, msg))
| _FailedResultImpl |
python | wandb__wandb | wandb/integration/keras/callbacks/metrics_logger.py | {
"start": 304,
"end": 4919
} | class ____(callbacks.Callback):
"""Logger that sends system metrics to W&B.
`WandbMetricsLogger` automatically logs the `logs` dictionary that callback methods
take as argument to wandb.
This callback automatically logs the following to a W&B run page:
* system (CPU/GPU/TPU) metrics,
* train and validation metrics defined in `model.compile`,
* learning rate (both for a fixed value or a learning rate scheduler)
Notes:
If you resume training by passing `initial_epoch` to `model.fit` and you are using a
learning rate scheduler, make sure to pass `initial_global_step` to
`WandbMetricsLogger`. The `initial_global_step` is `step_size * initial_step`, where
`step_size` is number of training steps per epoch. `step_size` can be calculated as
the product of the cardinality of the training dataset and the batch size.
Args:
log_freq: ("epoch", "batch", or int) if "epoch", logs metrics
at the end of each epoch. If "batch", logs metrics at the end
of each batch. If an integer, logs metrics at the end of that
many batches. Defaults to "epoch".
initial_global_step: (int) Use this argument to correctly log the
learning rate when you resume training from some `initial_epoch`,
and a learning rate scheduler is used. This can be computed as
`step_size * initial_step`. Defaults to 0.
"""
def __init__(
self,
log_freq: Union[LogStrategy, int] = "epoch",
initial_global_step: int = 0,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
if wandb.run is None:
raise wandb.Error(
"You must call `wandb.init()` before WandbMetricsLogger()"
)
with telemetry.context(run=wandb.run) as tel:
tel.feature.keras_metrics_logger = True
if log_freq == "batch":
log_freq = 1
self.logging_batch_wise = isinstance(log_freq, int)
self.log_freq: Any = log_freq if self.logging_batch_wise else None
self.global_batch = 0
self.global_step = initial_global_step
if self.logging_batch_wise:
# define custom x-axis for batch logging.
wandb.define_metric("batch/batch_step")
# set all batch metrics to be logged against batch_step.
wandb.define_metric("batch/*", step_metric="batch/batch_step")
else:
# define custom x-axis for epoch-wise logging.
wandb.define_metric("epoch/epoch")
# set all epoch-wise metrics to be logged against epoch.
wandb.define_metric("epoch/*", step_metric="epoch/epoch")
def _get_lr(self) -> Union[float, None]:
if isinstance(
self.model.optimizer.learning_rate,
(tf.Variable, tf.Tensor),
) or (
hasattr(self.model.optimizer.learning_rate, "shape")
and self.model.optimizer.learning_rate.shape == ()
):
return float(self.model.optimizer.learning_rate.numpy().item())
try:
return float(
self.model.optimizer.learning_rate(step=self.global_step).numpy().item()
)
except Exception as e:
wandb.termerror(f"Unable to log learning rate: {e}", repeat=False)
return None
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, Any]] = None) -> None:
"""Called at the end of an epoch."""
logs = dict() if logs is None else {f"epoch/{k}": v for k, v in logs.items()}
logs["epoch/epoch"] = epoch
lr = self._get_lr()
if lr is not None:
logs["epoch/learning_rate"] = lr
wandb.log(logs)
def on_batch_end(self, batch: int, logs: Optional[Dict[str, Any]] = None) -> None:
self.global_step += 1
"""An alias for `on_train_batch_end` for backwards compatibility."""
if self.logging_batch_wise and batch % self.log_freq == 0:
logs = {f"batch/{k}": v for k, v in logs.items()} if logs else {}
logs["batch/batch_step"] = self.global_batch
lr = self._get_lr()
if lr is not None:
logs["batch/learning_rate"] = lr
wandb.log(logs)
self.global_batch += self.log_freq
def on_train_batch_end(
self, batch: int, logs: Optional[Dict[str, Any]] = None
) -> None:
"""Called at the end of a training batch in `fit` methods."""
self.on_batch_end(batch, logs if logs else {})
| WandbMetricsLogger |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/match.py | {
"start": 576,
"end": 1937
} | class ____:
x: int
y: int
def location(point):
match point:
case Point(x=0, y =0 ) : # fmt: skip
print("Origin is the point's location.")
case Point(x=0, y=y):
print(f"Y={y} and the point is on the y-axis.")
case Point(x=x, y=0):
print(f"X={x} and the point is on the x-axis.")
case Point():
print("The point is located somewhere else on the plane.")
case _:
print("Not a point")
match points:
case []:
print("No points in the list.")
case [
Point(0, 0)
]: # fmt: skip
print("The origin is the only point in the list.")
case [Point(x, y)]:
print(f"A single point {x}, {y} is in the list.")
case [Point(0, y1), Point(0, y2)]:
print(f"Two points on the Y axis at {y1}, {y2} are in the list.")
case _:
print("Something else is found in the list.")
match test_variable:
case (
'warning',
code,
40
): # fmt: skip
print("A warning has been received.")
case ('error', code, _):
print(f"An error {code} occurred.")
match point:
case Point(x, y) if x == y: # fmt: skip
print(f"The point is located on the diagonal Y=X at {x}.")
case Point(x, y):
print(f"Point is not on the diagonal.")
| Point |
python | dask__distributed | distributed/dashboard/components/shared.py | {
"start": 4922,
"end": 11068
} | class ____(DashboardComponent):
"""Time plots of the current resource usage on the cluster
This is two plots, one for CPU and Memory and another for Network I/O
"""
def __init__(self, server, doc=None, **kwargs):
if doc is not None:
self.doc = weakref.ref(doc)
try:
self.key = doc.session_context.request.arguments.get("key", None)
except AttributeError:
self.key = None
if isinstance(self.key, list):
self.key = self.key[0]
if isinstance(self.key, bytes):
self.key = self.key.decode()
self.task_names = ["All", self.key] if self.key else ["All"]
else:
self.key = None
self.task_names = ["All"]
self.server = server
self.start = None
self.stop = None
self.ts = {"count": [], "time": []}
self.state = profile.create()
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
self.profile_plot, self.source = profile.plot_figure(data, **kwargs)
changing = [False] # avoid repeated changes from within callback
@without_property_validation
@log_errors
def cb(attr, old, new):
if changing[0] or len(new) == 0:
return
data = profile.plot_data(self.states[new[0]], profile_interval)
del self.states[:]
self.states.extend(data.pop("states"))
changing[0] = True # don't recursively trigger callback
update(self.source, data)
self.source.selected.indices = old
changing[0] = False
self.source.selected.on_change("indices", cb)
self.ts_source = ColumnDataSource({"time": [], "count": []})
self.ts_plot = figure(
title="Activity over time",
height=150,
x_axis_type="datetime",
active_drag="xbox_select",
tools="xpan,xwheel_zoom,xbox_select,reset",
sizing_mode="stretch_width",
toolbar_location="above",
)
self.ts_plot.line("time", "count", source=self.ts_source)
self.ts_plot.circle(
"time",
"count",
source=self.ts_source,
color=None,
selection_color="orange",
radius=1,
)
self.ts_plot.yaxis.visible = False
self.ts_plot.grid.visible = False
@log_errors
def ts_change(attr, old, new):
selected = self.ts_source.selected.indices
if selected:
start = self.ts_source.data["time"][min(selected)] / 1000
stop = self.ts_source.data["time"][max(selected)] / 1000
self.start, self.stop = min(start, stop), max(start, stop)
else:
self.start = self.stop = None
self.trigger_update(update_metadata=False)
self.ts_source.selected.on_change("indices", ts_change)
self.reset_button = Button(label="Reset", button_type="success")
self.reset_button.on_click(lambda: self.update(self.state))
self.update_button = Button(label="Update", button_type="success")
self.update_button.on_click(self.trigger_update)
self.select = Select(value=self.task_names[-1], options=self.task_names)
def select_cb(attr, old, new):
if new == "All":
new = None
self.key = new
self.trigger_update(update_metadata=False)
self.select.on_change("value", select_cb)
self.root = column(
row(
self.select,
self.reset_button,
self.update_button,
sizing_mode="scale_width",
height=250,
),
self.profile_plot,
self.ts_plot,
**kwargs,
)
self.subtitle = Title(text=" ", text_font_style="italic")
self.profile_plot.add_layout(self.subtitle, "above")
if not dask.config.get("distributed.worker.profile.enabled"):
self.subtitle.text = "Profiling is disabled."
self.select.disabled = True
self.reset_button.disabled = True
self.update_button.disabled = True
elif sys.version_info.minor == 11:
self.subtitle.text = "Profiling is disabled due to a known deadlock in CPython 3.11 that can be triggered by the profiler. See https://github.com/dask/distributed/issues/8616 for more information."
self.select.disabled = True
self.reset_button.disabled = True
self.update_button.disabled = True
@without_property_validation
@log_errors
def update(self, state, metadata=None):
self.state = state
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
if metadata is not None and metadata["counts"]:
self.task_names = ["All"] + sorted(metadata["keys"])
self.select.options = self.task_names
if self.key and self.key in metadata["keys"]:
ts = metadata["keys"][self.key]
else:
ts = metadata["counts"]
times, counts = zip(*ts)
self.ts = {"count": counts, "time": [t * 1000 for t in times]}
self.ts_source.data.update(self.ts)
@without_property_validation
def trigger_update(self, update_metadata=True):
@log_errors
async def cb():
prof = await self.server.get_profile(
key=self.key, start=self.start, stop=self.stop
)
if update_metadata:
metadata = await self.server.get_profile_metadata()
else:
metadata = None
if isinstance(prof, gen.Future):
prof, metadata = await asyncio.gather(prof, metadata)
self.doc().add_next_tick_callback(lambda: self.update(prof, metadata))
self.server.loop.add_callback(cb)
| ProfileTimePlot |
python | walkccc__LeetCode | solutions/170. Two Sum III - Data structure design/170.py | {
"start": 0,
"end": 393
} | class ____:
def __init__(self):
self.count = collections.Counter()
def add(self, number: int) -> None:
self.count[number] += 1
def find(self, value: int) -> bool:
for key, freq in self.count.items():
remain = value - key
if key == remain and freq > 1:
return True
if key != remain and remain in self.count:
return True
return False
| TwoSum |
python | numba__numba | numba/core/typed_passes.py | {
"start": 8402,
"end": 9636
} | class ____(FunctionPass):
_name = "nopython_rewrites"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
"""
Perform any intermediate representation rewrites after type
inference.
"""
# a bunch of these passes are either making assumptions or rely on some
# very picky and slightly bizarre state particularly in relation to
# ir.Del presence. To accommodate, ir.Dels are added ahead of running
# this pass and stripped at the end.
# Ensure we have an IR and type information.
assert state.func_ir
assert isinstance(getattr(state, 'typemap', None), dict)
assert isinstance(getattr(state, 'calltypes', None), dict)
msg = ('Internal error in post-inference rewriting '
'pass encountered during compilation of '
'function "%s"' % (state.func_id.func_name,))
pp = postproc.PostProcessor(state.func_ir)
pp.run(True)
with fallback_context(state, msg):
rewrites.rewrite_registry.apply('after-inference', state)
pp.remove_dels()
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| NopythonRewrites |
python | huggingface__transformers | src/transformers/models/seed_oss/modeling_seed_oss.py | {
"start": 12681,
"end": 15700
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: SeedOssConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[SeedOssConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| SeedOssRotaryEmbedding |
python | pandas-dev__pandas | pandas/tests/indexing/test_iloc.py | {
"start": 1606,
"end": 45367
} | class ____:
"""Tests Independent Of Base Class"""
@pytest.mark.parametrize(
"key",
[
slice(None),
slice(3),
range(3),
[0, 1, 2],
Index(range(3)),
np.asarray([0, 1, 2]),
],
)
def test_iloc_setitem_fullcol_categorical(self, indexer_li, key):
frame = DataFrame({0: range(3)}, dtype=object)
cat = Categorical(["alpha", "beta", "gamma"])
assert frame._mgr.blocks[0]._can_hold_element(cat)
df = frame.copy()
orig_vals = df.values
indexer_li(df)[key, 0] = cat
expected = DataFrame({0: cat}).astype(object)
assert np.shares_memory(df[0].values, orig_vals)
tm.assert_frame_equal(df, expected)
# check we dont have a view on cat (may be undesired GH#39986)
df.iloc[0, 0] = "gamma"
assert cat[0] != "gamma"
# pre-2.0 with mixed dataframe ("split" path) we always overwrote the
# column. as of 2.0 we correctly write "into" the column, so
# we retain the object dtype.
frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
df = frame.copy()
indexer_li(df)[key, 0] = cat
expected = DataFrame({0: Series(cat.astype(object), dtype=object), 1: range(3)})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_ea_inplace(
self, frame_or_series, index_or_series_or_array, has_ref
):
# GH#38952 Case with not setting a full column
# IntegerArray without NAs
arr = array([1, 2, 3, 4])
obj = frame_or_series(arr.to_numpy("i8"))
if has_ref:
view = obj[:] # noqa: F841
if frame_or_series is Series:
values = obj.values
else:
values = obj._mgr.blocks[0].values
if frame_or_series is Series:
obj.iloc[:2] = index_or_series_or_array(arr[2:])
else:
obj.iloc[:2, 0] = index_or_series_or_array(arr[2:])
expected = frame_or_series(np.array([3, 4, 3, 4], dtype="i8"))
tm.assert_equal(obj, expected)
# Check that we are actually in-place
if not has_ref:
if frame_or_series is Series:
assert obj.values is not values
assert np.shares_memory(obj.values, values)
else:
assert np.shares_memory(obj[0].values, values)
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesn't matter for _is_scalar_access
index = Index([1, 2, 1])
ser = Series(range(3), index=index)
assert ser.iloc._is_scalar_access((1,))
df = ser.to_frame()
assert df.iloc._is_scalar_access((1, 0))
@pytest.mark.skipif(
pa_version_under16p0, reason="https://github.com/apache/arrow/issues/40642"
)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.default_rng(2).random((20, 5)), columns=list("ABCDE"))
# lists of positions should raise IndexError!
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, 30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, -30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[100]]
s = df["A"]
with pytest.raises(IndexError, match=msg):
s.iloc[[100]]
with pytest.raises(IndexError, match=msg):
s.iloc[[-100]]
# still raise on a single indexer
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[30]
with pytest.raises(IndexError, match=msg):
df.iloc[-30]
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with pytest.raises(IndexError, match=msg):
s.iloc[30]
with pytest.raises(IndexError, match=msg):
s.iloc[-30]
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
dfl = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB")
)
tm.assert_frame_equal(
dfl.iloc[:, 2:3],
DataFrame(index=dfl.index, columns=Index([], dtype=dfl.columns.dtype)),
)
tm.assert_frame_equal(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
tm.assert_frame_equal(dfl.iloc[4:6], dfl.iloc[[4]])
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[[4, 5, 6]]
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[:, 4]
@pytest.mark.parametrize("index,columns", [(np.arange(20), list("ABCDE"))])
@pytest.mark.parametrize(
"index_vals,column_vals",
[
([slice(None), ["A", "D"]]),
(["1", "2"], slice(None)),
([datetime(2019, 1, 1)], slice(None)),
],
)
def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
# GH 25753
df = DataFrame(
np.random.default_rng(2).standard_normal((len(index), len(columns))),
index=index,
columns=columns,
)
msg = ".iloc requires numeric indexers, got"
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
def test_iloc_getitem_invalid_scalar(self, frame_or_series):
# GH 21982
obj = DataFrame(np.arange(100).reshape(10, 10))
obj = tm.get_obj(obj, frame_or_series)
with pytest.raises(TypeError, match="Cannot index by location index"):
obj.iloc["a"]
def test_iloc_array_not_mutating_negative_indices(self):
# GH 21867
array_with_neg_numbers = np.array([1, 2, -1])
array_copy = array_with_neg_numbers.copy()
df = DataFrame(
{"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]},
index=[1, 2, 3],
)
df.iloc[array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
df.iloc[:, array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({"A": [2, 3, 5], "B": [7, 11, 13]})
s = df["A"]
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
assert result == expected
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = Series(["a"], index=["A"])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# GH 6766
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
assert isna(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=["A", "B", "A", "B"], name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}])
tm.assert_frame_equal(df.iloc[[0]], expected)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
tm.assert_frame_equal(df.iloc[[0, 1]], expected)
expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2])
result = df.iloc[[0, 2], [1, 2]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_bool(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
result = df.iloc[[True, True, False]]
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2]
)
result = df.iloc[lambda x: x.index % 2 == 0]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_iloc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}"
with pytest.raises(IndexError, match=msg):
s.iloc[index]
def test_iloc_getitem_slice(self):
df = DataFrame(
[
{"A": 1, "B": 2, "C": 3},
{"A": 100, "B": 200, "C": 300},
{"A": 1000, "B": 2000, "C": 3000},
]
)
expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
result = df.iloc[:2]
tm.assert_frame_equal(result, expected)
expected = DataFrame([{"A": 100, "B": 200}], index=[1])
result = df.iloc[1:2, 0:2]
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}]
)
result = df.iloc[:, lambda df: [0, 2]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=["A", "A", "B", "B"],
)
df2 = DataFrame(
np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2),
columns=["A", "C"],
)
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem(sel, has_ref):
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=np.arange(0, 8, 2),
columns=np.arange(0, 12, 3),
)
if has_ref:
view = df[:] # noqa: F841
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_axis_argument(self, has_ref):
# GH45032
df = DataFrame([[6, "c", 10], [7, "d", 11], [8, "e", 12]])
df[1] = df[1].astype(object)
if has_ref:
view = df[:]
expected = DataFrame([[6, "c", 10], [7, "d", 11], [5, 5, 5]])
expected[1] = expected[1].astype(object)
df.iloc(axis=0)[2] = 5
tm.assert_frame_equal(df, expected)
df = DataFrame([[6, "c", 10], [7, "d", 11], [8, "e", 12]])
df[1] = df[1].astype(object)
if has_ref:
view = df[:] # noqa: F841
expected = DataFrame([[6, "c", 5], [7, "d", 5], [8, "e", 5]])
expected[1] = expected[1].astype(object)
df.iloc(axis=1)[2] = 5
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_list(self, has_ref):
# setitem with an iloc list
df = DataFrame(
np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]
)
if has_ref:
view = df[:] # noqa: F841
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_pandas_object(self):
# GH 17193
s_orig = Series([0, 1, 2, 3])
expected = Series([0, -1, -2, 3])
s = s_orig.copy()
s.iloc[Series([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.iloc[Index([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ["B", "B"]
del df["A"]
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(self):
# Same as the "assign back to self" check in test_iloc_setitem_dups
# but on a DataFrame with multiple blocks
df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"])
# setting float values that can be held by existing integer arrays
# is inplace
df.iloc[:, 0] = df.iloc[:, 0].astype("f8")
assert len(df._mgr.blocks) == 1
# if the assigned values cannot be held by existing integer arrays,
# we raise
with pytest.raises(TypeError, match="Invalid value"):
df.iloc[:, 0] = df.iloc[:, 0] + 0.5
expected = df.copy()
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# TODO: GH#27620 this test used to compare iloc against ix; check if this
# is redundant with another test comparing iloc against loc
def test_iloc_getitem_frame(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
index=range(0, 20, 2),
columns=range(0, 8, 2),
)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
exp = df.loc[4, 4]
assert result == exp
# slice
result = df.iloc[4:8]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
expected = df.loc[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
expected = df.loc[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
expected = df.loc[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
expected = df.loc[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
expected = df.loc[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=range(1, 5), dtype=object)
result = df.iloc[s.index]
expected = df.loc[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
index=list("abcdefghij"),
columns=list("ABCD"),
)
result = df.iloc[1, 1]
exp = df.loc["b", "B"]
assert result == exp
result = df.iloc[:, 2:3]
expected = df.loc[:, ["C"]]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc["j", "D"]
assert result == exp
# out-of-bounds exception
msg = "index 5 is out of bounds for axis 0 with size 4|index out of bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[10, 5]
# trying to use a label
msg = (
r"Location based indexing can only have \[integer, integer "
r"slice \(START point is INCLUDED, END point is EXCLUDED\), "
r"listlike of integers, boolean array\] types"
)
with pytest.raises(ValueError, match=msg):
df.iloc["j", "D"]
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.default_rng(2).standard_normal((6, 4))
index = date_range("20130101", periods=6)
columns = list("ABCD")
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list("aaaa")
result = df.iloc[3:5, 0:2]
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list("aa"))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.default_rng(2).standard_normal((6, 4))
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._mgr.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_series(self, has_ref):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
index=list("abcdefghij"),
columns=list("ABCD"),
)
if has_ref:
view = df[:] # noqa: F841
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.default_rng(2).standard_normal(10), index=range(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
assert result == 1
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_list_of_lists(self, has_ref):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(
{"A": np.arange(5, dtype="int64"), "B": np.arange(5, 10, dtype="int64")}
)
if has_ref:
view = df[:]
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame({"A": [0, 1, 10, 12, 4], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
df = DataFrame(
{"A": ["a", "b", "c", "d", "e"], "B": np.arange(5, 10, dtype="int64")}
)
if has_ref:
view = df[:] # noqa: F841
df.iloc[2:4] = [["x", 11], ["y", 13]]
expected = DataFrame({"A": ["a", "b", "x", "y", "e"], "B": [5, 6, 11, 13, 9]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("has_ref", [True, False])
@pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])])
@pytest.mark.parametrize("value", [["Z"], np.array(["Z"])])
def test_iloc_setitem_with_scalar_index(self, has_ref, indexer, value):
# GH #19474
# assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated
# elementwisely, not using "setter('A', ['Z'])".
# Set object type to avoid upcast when setting "Z"
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]).astype({"A": object})
if has_ref:
view = df[:] # noqa: F841
df.iloc[0, indexer] = value
result = df.iloc[0, 0]
assert is_scalar(result) and result == "Z"
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_iloc_mask(self):
# GH 60994, iloc with a mask (of a series) should return accordingly
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
mask = df.a % 2 == 0
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
df.iloc[mask]
mask.index = range(len(mask))
msg = "Unalignable boolean Series provided as indexer"
with pytest.raises(IndexingError, match=msg):
df.iloc[mask]
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2**locs
reps = [bin(num) for num in nums]
df = DataFrame({"locs": locs, "nums": nums}, reps)
expected = {
(None, ""): "0b1100",
(None, ".loc"): "0b1100",
(None, ".iloc"): "0b1100",
("index", ""): "0b11",
("index", ".loc"): "0b11",
(
"index",
".iloc",
): "iLocation based boolean indexing cannot use an indexable as a mask",
("locs", ""): "Unalignable boolean Series provided as indexer",
("locs", ".loc"): "Unalignable boolean Series provided as indexer",
("locs", ".iloc"): "Unalignable boolean Series provided as indexer",
}
# UserWarnings from reindex of a boolean mask
for idx in [None, "index", "locs"]:
mask = (df.nums > 2).values
if idx:
mask_index = getattr(df, idx)[::-1]
mask = Series(mask, list(mask_index))
for method in ["", ".loc", ".iloc"]:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
answer = str(bin(accessor[mask]["nums"].sum()))
except (ValueError, IndexingError) as err:
answer = str(err)
key = (
idx,
method,
)
expected_result = expected.get(key)
# Fix the assertion to check for substring match
if (
idx is None or (idx == "index" and method != ".iloc")
) and "0b" in expected_result:
# For successful numeric results, exact match is needed
assert expected_result == answer, (
f"[{key}] does not match [{answer}]"
)
else:
# For error messages, substring match is sufficient
assert expected_result in answer, f"[{key}] not found in [{answer}]"
def test_iloc_with_numpy_bool_array(self):
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
result = df.iloc[np.array([True, False, True, False, True], dtype=bool)]
expected = DataFrame({"a": [0, 2, 4]}, index=["A", "C", "E"])
tm.assert_frame_equal(result, expected)
def test_iloc_series_mask_with_index_mismatch_raises(self):
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
mask = df.a % 2 == 0
msg = "Unalignable boolean Series provided as indexer"
with pytest.raises(IndexingError, match=msg):
df.iloc[Series([True] * len(mask), dtype=bool)]
def test_iloc_series_mask_all_true(self):
df = DataFrame(list(range(5)), columns=["a"])
mask = Series([True] * len(df), dtype=bool)
result = df.iloc[mask]
tm.assert_frame_equal(result, df)
def test_iloc_series_mask_alternate_true(self):
df = DataFrame(list(range(5)), columns=["a"])
mask = Series([True, False, True, False, True], dtype=bool)
result = df.iloc[mask]
expected = DataFrame({"a": [0, 2, 4]}, index=[0, 2, 4])
tm.assert_frame_equal(result, expected)
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({"A": [0.1] * 3000, "B": [1] * 3000})
idx = np.arange(30) * 99
expected = df.iloc[idx]
df3 = concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000})
df2 = concat([df2, 2 * df2, 3 * df2])
with pytest.raises(KeyError, match="not in index"):
df2.loc[idx]
def test_iloc_empty_list_indexer_is_ok(self):
df = DataFrame(
np.ones((5, 2)),
index=Index([f"i-{i}" for i in range(5)], name="a"),
columns=Index([f"i-{i}" for i in range(2)], name="a"),
)
# vertical empty
tm.assert_frame_equal(
df.iloc[:, []],
df.iloc[:, :0],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[], :],
df.iloc[:0, :],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
def test_identity_slice_returns_new_object(self):
# GH13873
original_df = DataFrame({"a": [1, 2, 3]})
sliced_df = original_df.iloc[:]
assert sliced_df is not original_df
# should be a shallow copy
assert np.shares_memory(original_df["a"], sliced_df["a"])
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
# depending on CoW
original_df.loc[:, "a"] = [4, 4, 4]
assert (sliced_df["a"] == [1, 2, 3]).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
assert sliced_series is not original_series
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
# shallow copy not updated (CoW)
assert all(sliced_series[:3] == [1, 2, 3])
def test_indexing_zerodim_np_array(self):
# GH24919
df = DataFrame([[1, 2], [3, 4]])
result = df.iloc[np.array(0)]
s = Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
# GH24919
s = Series([1, 2])
result = s.iloc[np.array(0)]
assert result == 1
def test_iloc_setitem_categorical_updates_inplace(self):
# Mixed dtype ensures we go through take_split_path in setitem_with_indexer
cat = Categorical(["A", "B", "C"])
df = DataFrame({1: cat, 2: [1, 2, 3]}, copy=False)
assert tm.shares_memory(df[1], cat)
# With the enforcement of GH#45333 in 2.0, this modifies original
# values inplace
df.iloc[:, 0] = cat[::-1]
assert tm.shares_memory(df[1], cat)
expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"])
tm.assert_categorical_equal(cat, expected)
def test_iloc_with_boolean_operation(self):
# GH 20627
result = DataFrame([[0, 1], [2, 3], [4, 5], [6, np.nan]])
result.iloc[result.index <= 2] *= 2
expected = DataFrame([[0, 2], [4, 6], [8, 10], [6, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[result.index > 2] *= 2
expected = DataFrame([[0, 2], [4, 6], [8, 10], [12, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[[True, True, False, False]] *= 2
expected = DataFrame([[0, 4], [8, 12], [8, 10], [12, np.nan]])
tm.assert_frame_equal(result, expected)
result.iloc[[False, False, True, True]] /= 2
expected = DataFrame([[0, 4.0], [8, 12.0], [4, 5.0], [6, np.nan]])
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self):
# GH#29521
df = DataFrame({"x": Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = Categorical(["a"], categories=["a", "b", "c", "d", "e"])
expected = Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
def test_iloc_getitem_categorical_values(self):
# GH#14580
# test iloc() on Series with Categorical data
ser = Series([1, 2, 3]).astype("category")
# get slice
result = ser.iloc[0:2]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get list of indexes
result = ser.iloc[[0, 1]]
expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
# get boolean array
result = ser.iloc[[True, False, False]]
expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [None, NaT, np.nan])
def test_iloc_setitem_td64_values_cast_na(self, value):
# GH#18586
series = Series([0, 1, 2], dtype="timedelta64[ns]")
series.iloc[0] = value
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize("not_na", [Interval(0, 1), "a", 1.0])
def test_setitem_mix_of_nan_and_interval(self, not_na, nulls_fixture):
# GH#27937
dtype = CategoricalDtype(categories=[not_na])
ser = Series(
[nulls_fixture, nulls_fixture, nulls_fixture, nulls_fixture], dtype=dtype
)
ser.iloc[:3] = [nulls_fixture, not_na, nulls_fixture]
exp = Series([nulls_fixture, not_na, nulls_fixture, nulls_fixture], dtype=dtype)
tm.assert_series_equal(ser, exp)
def test_iloc_setitem_empty_frame_raises_with_3d_ndarray(self):
idx = Index([])
obj = DataFrame(
np.random.default_rng(2).standard_normal((len(idx), len(idx))),
index=idx,
columns=idx,
)
nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2))
msg = f"Cannot set values with ndim > {obj.ndim}"
with pytest.raises(ValueError, match=msg):
obj.iloc[nd3] = 0
def test_iloc_getitem_read_only_values(self, indexer_li):
# GH#10043 this is fundamentally a test for iloc, but test loc while
# we're here
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
tm.assert_frame_equal(
indexer_li(rw_df)[[1, 2, 3]], indexer_li(ro_df)[[1, 2, 3]]
)
tm.assert_frame_equal(indexer_li(rw_df)[[1]], indexer_li(ro_df)[[1]])
tm.assert_series_equal(indexer_li(rw_df)[1], indexer_li(ro_df)[1])
tm.assert_frame_equal(indexer_li(rw_df)[1:3], indexer_li(ro_df)[1:3])
def test_iloc_getitem_readonly_key(self):
# GH#17192 iloc with read-only array raising TypeError
df = DataFrame({"data": np.ones(100, dtype="float64")})
indices = np.array([1, 3, 6])
indices.flags.writeable = False
result = df.iloc[indices]
expected = df.loc[[1, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df["data"].iloc[indices]
expected = df["data"].loc[[1, 3, 6]]
tm.assert_series_equal(result, expected)
def test_iloc_assign_series_to_df_cell(self):
# GH 37593
df = DataFrame(columns=["a"], index=[0])
df.iloc[0, 0] = Series([1, 2, 3])
expected = DataFrame({"a": [Series([1, 2, 3])]}, columns=["a"], index=[0])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("klass", [list, np.array])
def test_iloc_setitem_bool_indexer(self, klass):
# GH#36741
df = DataFrame({"flag": ["x", "y", "z"], "value": [1, 3, 4]})
indexer = klass([True, False, False])
df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2
expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("has_ref", [True, False])
@pytest.mark.parametrize("indexer", [[1], slice(1, 2)])
def test_iloc_setitem_pure_position_based(self, indexer, has_ref):
# GH#22046
df1 = DataFrame({"a2": [11, 12, 13], "b2": [14, 15, 16]})
df2 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
if has_ref:
view = df2[:] # noqa: F841
df2.iloc[:, indexer] = df1.iloc[:, [0]]
expected = DataFrame({"a": [1, 2, 3], "b": [11, 12, 13], "c": [7, 8, 9]})
tm.assert_frame_equal(df2, expected)
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_setitem_dictionary_value(self, has_ref):
# GH#37728
df = DataFrame({"x": [1, 2], "y": [2, 2]})
if has_ref:
view = df[:]
rhs = {"x": 9, "y": 99}
df.iloc[1] = rhs
expected = DataFrame({"x": [1, 9], "y": [2, 99]})
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2], "y": [2.0, 2.0]})
if has_ref:
view = df[:] # noqa: F841
df.iloc[1] = rhs
expected = DataFrame({"x": [1, 9], "y": [2.0, 99.0]})
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_float_duplicates(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=[0.1, 0.2, 0.2],
columns=list("abc"),
)
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 3)),
index=[1, 0.2, 0.2, 1],
columns=list("abc"),
)
expect = df.iloc[1:-1]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[1:-1, 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
tm.assert_frame_equal(df.loc[0.2], expect)
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
def test_iloc_setitem_custom_object(self):
# iloc with an object
class TO:
def __init__(self, value) -> None:
self.value = value
def __str__(self) -> str:
return f"[{self.value}]"
__repr__ = __str__
def __eq__(self, other) -> bool:
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = TO(1)
df.iloc[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = TO(1)
df.iloc[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_getitem_with_duplicates(self):
df = DataFrame(
np.random.default_rng(2).random((3, 3)),
columns=list("ABC"),
index=list("aab"),
)
result = df.iloc[0]
assert isinstance(result, Series)
tm.assert_almost_equal(result.values, df.values[0])
result = df.T.iloc[:, 0]
assert isinstance(result, Series)
tm.assert_almost_equal(result.values, df.values[0])
def test_iloc_getitem_with_duplicates2(self):
# GH#2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
tm.assert_frame_equal(result, expected)
def test_iloc_interval(self):
# GH#17130
df = DataFrame({Interval(1, 2): [1, 2]})
result = df.iloc[0]
expected = Series({Interval(1, 2): 1}, name=0)
tm.assert_series_equal(result, expected)
result = df.iloc[:, 0]
expected = Series([1, 2], name=Interval(1, 2))
tm.assert_series_equal(result, expected)
result = df.copy()
result.iloc[:, 0] += 1
expected = DataFrame({Interval(1, 2): [2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("indexing_func", [list, np.array])
@pytest.mark.parametrize("rhs_func", [list, np.array])
def test_loc_setitem_boolean_list(self, rhs_func, indexing_func):
# GH#20438 testing specifically list key, not arraylike
ser = Series([0, 1, 2])
ser.iloc[indexing_func([True, False, True])] = rhs_func([5, 10])
expected = Series([5, 1, 10])
tm.assert_series_equal(ser, expected)
df = DataFrame({"a": [0, 1, 2]})
df.iloc[indexing_func([True, False, True])] = rhs_func([[5], [10]])
expected = DataFrame({"a": [5, 1, 10]})
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_slice_negative_step_ea_block(self):
# GH#44551
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
res = df.iloc[:, ::-1]
tm.assert_frame_equal(res, df)
df["B"] = "foo"
res = df.iloc[:, ::-1]
expected = DataFrame({"B": df["B"], "A": df["A"]})
tm.assert_frame_equal(res, expected)
def test_iloc_setitem_2d_ndarray_into_ea_block(self):
# GH#44703
df = DataFrame({"status": ["a", "b", "c"]}, dtype="category")
df.iloc[np.array([0, 1]), np.array([0])] = np.array([["a"], ["a"]])
expected = DataFrame({"status": ["a", "a", "c"]}, dtype=df["status"].dtype)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_int_single_ea_block_view(self):
# GH#45241
# TODO: make an extension interface test for this?
arr = interval_range(1, 10.0)._values
df = DataFrame(arr)
# ser should be a *view* on the DataFrame data
ser = df.iloc[2]
# if we have a view, then changing arr[2] should also change ser[0]
assert arr[2] != arr[-1] # otherwise the rest isn't meaningful
arr[2] = arr[-1]
assert ser[0] == arr[-1]
def test_iloc_setitem_multicolumn_to_datetime(self, using_infer_string):
# GH#20511
df = DataFrame({"A": ["2022-01-01", "2022-01-02"], "B": ["2021", "2022"]})
if using_infer_string:
with pytest.raises(TypeError, match="Invalid value"):
df.iloc[:, [0]] = DataFrame({"A": to_datetime(["2021", "2022"])})
else:
df.iloc[:, [0]] = DataFrame({"A": to_datetime(["2021", "2022"])})
expected = DataFrame(
{
"A": [
Timestamp("2021-01-01 00:00:00"),
Timestamp("2022-01-01 00:00:00"),
],
"B": ["2021", "2022"],
}
)
tm.assert_frame_equal(df, expected, check_dtype=False)
| TestiLocBaseIndependent |
python | django__django | tests/admin_checks/tests.py | {
"start": 1706,
"end": 35360
} | class ____(SimpleTestCase):
databases = "__all__"
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ["error!"]
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
@override_settings(INSTALLED_APPS=["django.contrib.admin"])
def test_apps_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.contenttypes' must be in "
"INSTALLED_APPS in order to use the admin application.",
id="admin.E401",
),
checks.Error(
"'django.contrib.auth' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E405",
),
checks.Error(
"'django.contrib.messages' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E406",
),
]
self.assertEqual(errors, expected)
@override_settings(TEMPLATES=[])
def test_no_template_engines(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"A 'django.template.backends.django.DjangoTemplates' "
"instance must be configured in TEMPLATES in order to use "
"the admin application.",
id="admin.E403",
)
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [],
},
}
],
)
def test_context_processor_dependencies(self):
expected = [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id="admin.E404",
),
checks.Warning(
"'django.template.context_processors.request' must be enabled "
"in DjangoTemplates (TEMPLATES) in order to use the admin "
"navigation sidebar.",
id="admin.W411",
),
]
self.assertEqual(admin.checks.check_dependencies(), expected)
# The first error doesn't happen if
# 'django.contrib.auth.backends.ModelBackend' isn't in
# AUTHENTICATION_BACKENDS.
with self.settings(AUTHENTICATION_BACKENDS=[]):
self.assertEqual(admin.checks.check_dependencies(), expected[1:])
@override_settings(
AUTHENTICATION_BACKENDS=["admin_checks.tests.ModelBackendSubclass"],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
def test_context_processor_dependencies_model_backend_subclass(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.dummy.TemplateStrings",
"DIRS": [],
"APP_DIRS": True,
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
],
)
def test_several_templates_backends(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[])
def test_middleware_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E408",
),
checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E409",
),
checks.Error(
"'django.contrib.sessions.middleware.SessionMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
hint=(
"Insert "
"'django.contrib.sessions.middleware.SessionMiddleware' "
"before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
),
id="admin.E410",
),
]
self.assertEqual(errors, expected)
@override_settings(
MIDDLEWARE=[
"admin_checks.tests.AuthenticationMiddlewareSubclass",
"admin_checks.tests.MessageMiddlewareSubclass",
"admin_checks.tests.SessionMiddlewareSubclass",
]
)
def test_middleware_subclasses(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(
MIDDLEWARE=[
"django.contrib.does.not.Exist",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
]
)
def test_admin_check_ignores_import_error_in_middleware(self):
self.assertEqual(admin.checks.check_dependencies(), [])
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ["error!"]
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
def test_allows_checks_relying_on_other_modeladmins(self):
class MyBookAdmin(admin.ModelAdmin):
def check(self, **kwargs):
errors = super().check(**kwargs)
if not self.admin_site.is_registered(Author):
errors.append("AuthorAdmin missing!")
return errors
class MyAuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, MyBookAdmin)
admin.site.register(Author, MyAuthorAdmin)
try:
self.assertEqual(admin.site.check(None), [])
finally:
admin.site.unregister(Book)
admin.site.unregister(Author)
def test_field_name_not_in_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ["original_release"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not contained in 'list_display'.",
obj=SongAdmin,
id="admin.E122",
)
]
self.assertEqual(errors, expected)
def test_list_editable_not_a_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
list_editable = "test"
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'list_editable' must be a list or tuple.",
obj=SongAdmin,
id="admin.E120",
)
],
)
def test_list_editable_missing_field(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ("test",)
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'list_editable[0]' refers to 'test', which is "
"not a field of 'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E121",
)
],
)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin.",
obj=SongAdmin,
id="admin.E125",
)
]
self.assertEqual(errors, expected)
def test_pk_not_editable(self):
# PKs cannot be edited in the list.
class SongAdmin(admin.ModelAdmin):
list_display = ["title", "id"]
list_editable = ["id"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'id', which is not editable "
"through the admin.",
obj=SongAdmin,
id="admin.E125",
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
The fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
The first fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {"fields": "title"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
The second fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ("title",)}),
("foo", {"fields": "author"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = "foo"
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFields1,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ("name", "name")
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint="Remove duplicates of 'name'.",
obj=ExcludedFields2,
id="admin.E015",
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = "foo"
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFieldsInline,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ["album"]
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'.",
obj=SongInline,
id="admin.E201",
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
A model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
obj=BookInline,
id="admin.E301",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"""
A GenericInlineModelAdmin errors if the ct_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E302",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"""
A GenericInlineModelAdmin errors if the ct_fk_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E303",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_field points to a
field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'name' and object ID field 'object_id'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_fk_field points to
a field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'content_type' and object ID field 'name'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
class RawIdNonexistentAdmin(admin.ModelAdmin):
raw_id_fields = ("nonexistent",)
errors = RawIdNonexistentAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'raw_id_fields[0]' refers to 'nonexistent', "
"which is not a field of 'admin_checks.Album'.",
obj=RawIdNonexistentAdmin,
id="admin.E002",
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when
exclude is given) make sure fk_name is honored or things blow up when
there is more than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey "
"to 'admin_checks.Album'. You must specify a 'fk_name' "
"attribute.",
obj=TwoAlbumFKAndAnEInline,
id="admin.E202",
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inlines_property(self):
class CitiesInline(admin.TabularInline):
model = City
class StateAdmin(admin.ModelAdmin):
@property
def inlines(self):
return [CitiesInline]
errors = StateAdmin(State, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_method(self):
@admin.display
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
@admin.display
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("dynamic_method",)
def __getattr__(self, item):
if item == "dynamic_method":
@admin.display
def method(obj):
pass
return method
raise AttributeError
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[1]' refers to 'nonexistent', which is "
"not a callable, an attribute of 'SongAdmin', or an attribute of "
"'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ["i_dont_exist"] # Missing attribute
errors = CityInline(State, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[0]' refers to 'i_dont_exist', which is "
"not a callable, an attribute of 'CityInline', or an attribute of "
"'admin_checks.City'.",
obj=CityInline,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_readonly_fields_not_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = "test"
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'readonly_fields' must be a list or tuple.",
obj=SongAdmin,
id="admin.E034",
)
],
)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
@admin.display
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M
field that specifies the 'through' option is included in the 'fields'
or the 'fieldsets' ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ["authors"]
errors = BookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model.",
obj=BookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
("Header 1", {"fields": ("name",)}),
("Header 2", {"fields": ("authors",)}),
)
errors = FieldsetBookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1][\"fields\"]' cannot include the "
"ManyToManyField 'authors', because that field manually specifies a "
"relationship model.",
obj=FieldsetBookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ("price", ("name", "subtitle"))
errors = NestedFieldsAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (("Main", {"fields": ("price", ("name", "subtitle"))}),)
errors = NestedFieldsetAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["title", "extra_data"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with
r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = "__all__"
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["extra_data", "title"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ["state", ["state"]]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint="Remove duplicates of 'state'.",
obj=MyModelAdmin,
id="admin.E006",
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ["title", "album", ("title", "album")]}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint="Remove duplicates of 'title', 'album'.",
obj=MyModelAdmin,
id="admin.E012",
)
]
self.assertEqual(errors, expected)
def test_check_multiple_duplicates_across_fieldsets(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
("Header 1", {"fields": ["title", "album"]}),
("Header 2", {"fields": ["album", "name"]}),
("Header 3", {"fields": ["name", "other", "title"]}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[1][1]'.",
hint="Remove duplicates of 'album'.",
obj=MyModelAdmin,
id="admin.E012",
),
checks.Error(
"There are duplicate field(s) in 'fieldsets[2][1]'.",
hint="Remove duplicates of 'title', 'name'.",
obj=MyModelAdmin,
id="admin.E012",
),
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ["authorsbooks__featured"]
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter(Book, AdminSite()).check()
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
def test_related_field_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "original_release", "album__title"]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_related_field_list_display_wrong_field(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "original_release", "album__hello"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_display[2]' refers to 'album__hello', which is not "
"a callable or attribute of 'SongAdmin', or an attribute, method, or "
"field on 'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E108",
)
]
self.assertEqual(errors, expected)
| SystemChecksTestCase |
python | gevent__gevent | src/greentest/3.14/test_socketserver.py | {
"start": 11304,
"end": 11856
} | class ____(socketserver.TCPServer):
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
with open(os_helper.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
| BaseErrorTestServer |
python | pypa__warehouse | tests/unit/metrics/test_event_handlers.py | {
"start": 2349,
"end": 3930
} | class ____:
def test_without_view_duration(self, pyramid_request, metrics):
before_render_start = datetime.datetime.now(datetime.UTC)
pyramid_request.timings = {}
pyramid_request.matched_route = None
with freezegun.freeze_time(before_render_start):
on_before_render({"request": pyramid_request})
assert metrics.timing.calls == []
assert pyramid_request.timings == {
"before_render_start": before_render_start.timestamp() * 1000
}
@pytest.mark.parametrize(
("matched_route", "route_tag"),
[(None, "route:null"), (pretend.stub(name="foo"), "route:foo")],
)
def test_with_view_duration(
self, pyramid_request, metrics, matched_route, route_tag
):
view_code_start = datetime.datetime.now(datetime.UTC)
before_render_start = view_code_start + datetime.timedelta(seconds=1.5)
pyramid_request.timings = {
"view_code_start": view_code_start.timestamp() * 1000
}
pyramid_request.matched_route = matched_route
with freezegun.freeze_time(before_render_start):
on_before_render({"request": pyramid_request})
assert metrics.timing.calls == [
pretend.call("pyramid.request.duration.view", 1500, tags=[route_tag])
]
assert pyramid_request.timings == {
"view_code_start": view_code_start.timestamp() * 1000,
"view_duration": 1500.0,
"before_render_start": before_render_start.timestamp() * 1000,
}
| TestOnBeforeRender |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 72885,
"end": 83503
} | class ____(CodeGen, Generic[CSEVariableType]):
newvar_prefix: str = ""
suffix: str = ""
overrides: Optional[Callable[[], OpsHandler[Any]]] = None
def __init__(
self, args: Optional[KernelArgs] = None, increase_kernel_count: bool = True
) -> None:
super().__init__()
if increase_kernel_count:
# pyrefly: ignore [bad-assignment]
metrics.generated_kernel_count += 1
self.args = args or KernelArgs()
self.loads = IndentedBuffer()
self.compute = IndentedBuffer()
self.stores = IndentedBuffer()
self.atomic_add_found = False
self.num_load = 0
self.num_store = 0
self.num_reduction = 0
self.cse: CSE[CSEVariableType, Any] = CSE(self.newvar_prefix, self.suffix)
self.must_keep_buffers: OrderedSet[str] = OrderedSet()
self.store_buffer_names: OrderedSet[str] = OrderedSet()
self._load_mask: Optional[str] = None
self._load_other: Union[None, int, float] = None
# OrderedSet in set_current_node
self.current_node: Optional[SchedulerNode] = None
self.node_to_bounds: Optional[dict[torch.fx.Node, ValueRanges[Any]]] = None
self.removed_buffers: OrderedSet[str] = OrderedSet()
self.inplaced_to_remove: OrderedSet[str] = OrderedSet()
# key: the buffer to write
# value: the buffer to read and whose memory can be reused for
# the buffer specified by key
self.inplace_update_buffers: dict[str, str] = {}
# Set minimum number of elements processed per thread.
self.min_elem_per_thread = 1
self.kernel_name: Optional[str] = None
@contextlib.contextmanager
def set_current_node(self, node: SchedulerNode) -> Iterator[None]:
prior = self.current_node
self.current_node = node
self.node_to_bounds = node._body.bounds().get_bounds()
try:
yield
finally:
self.current_node = prior
@contextlib.contextmanager
def swap_buffers(
self,
lb: IndentedBuffer,
cb: Optional[IndentedBuffer] = None,
sb: Optional[IndentedBuffer] = None,
) -> Iterator[None]:
if cb is None:
cb = lb
if disallow_stores := sb is None:
sb = IndentedBuffer()
loads = self.loads
compute = self.compute
stores = self.stores
cse = self.cse
self.loads = lb
self.compute = cb
self.stores = sb
self.cse = cse.scoped_copy()
try:
yield
finally:
self.loads = loads
self.compute = compute
self.stores = stores
self.cse = cse
# pyrefly: ignore [unbound-name]
if disallow_stores:
assert not sb, "unexpected store inside swap_buffers"
def load(self, name: str, index: sympy.Expr) -> CSEVariable:
raise NotImplementedError
def indirect_load(self, name: str, index: sympy.Expr) -> CSEVariable:
"""A load the depends on an index we have read"""
prior = self.loads
try:
# put the load in the compute section as it might have deps
self.loads = self.compute
return self.load(name, index)
finally:
self.loads = prior
def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable) -> None:
raise NotImplementedError
def store(
self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None
) -> None:
raise NotImplementedError
def device_assert_async(self, cond: CSEVariable, msg: str) -> None:
raise NotImplementedError(
f"{type(self).__name__}: device_assert_async should be handled by CSEProxy"
)
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: Union[CSEVariable, tuple[CSEVariable, ...]],
) -> Union[CSEVariable, tuple[CSEVariable, ...]]:
raise NotImplementedError
def partial_accumulate(
self,
name: str,
reduction_type: ReductionType,
value: CSEVariable,
extra_meta: dict[str, Any],
) -> None:
raise NotImplementedError
def scan(
self,
dtypes: tuple[torch.dtype, ...],
combine_fn: Callable[
[tuple[CSEVariable, ...], tuple[CSEVariable, ...]], tuple[CSEVariable, ...]
],
values: tuple[CSEVariable, ...],
) -> tuple[CSEVariable, ...]:
raise NotImplementedError
def sort(
self,
dtypes: tuple[torch.dtype, ...],
values: tuple[CSEVariable, ...],
stable: bool,
descending: bool,
) -> tuple[CSEVariable, ...]:
raise NotImplementedError
def var_ranges(self) -> dict[sympy.Symbol, sympy.Expr]:
raise NotImplementedError
def bucketize(
self,
values: CSEVariable,
boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr],
boundary_indices: CSEVariable,
indexing_dtype: torch.dtype,
right: bool,
sorter: Optional[tuple[str, sympy.Expr]] = None,
sorter_indices: Optional[CSEVariable] = None,
) -> CSEVariable:
"""
See [Note: Inductor bucketize op]
"""
raise NotImplementedError
@property
def assert_function(self) -> str:
raise NotImplementedError
def indirect_assert(
self,
var: Union[CSEVariable, str],
lower: Optional[str],
upper: Optional[str],
mask: Optional[Union[CSEVariable, str]] = None,
) -> str:
if isinstance(var, CSEVariable):
var = str(var)
assert isinstance(var, str), type(var)
assert lower is None or isinstance(lower, str)
assert upper is None or isinstance(upper, str)
if lower and upper:
# The conditions need to be in parens because of Python's operator precedence.
# It'd be less error-prone to use and/or/not, which is supported by triton
cond = f"({lower} <= {var}) & ({var} < {upper})"
cond_print = f"{lower} <= {var} < {upper}"
elif lower:
cond = f"{lower} <= {var}"
cond_print = cond
else:
assert upper
cond = f"{var} < {upper}"
cond_print = cond
if mask:
cond = f"({cond}) | ~({mask})"
return f'{self.assert_function}({cond}, "index out of bounds: {cond_print}")'
def check_bounds(
self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool
) -> None:
raise NotImplementedError
def index_to_str(self, index: sympy.Expr) -> str:
raise NotImplementedError
def __enter__(self) -> Self:
super().__enter__()
assert self.overrides
self.exit_stack.enter_context(
V.set_ops_handler(CSEProxy(self, self.overrides()))
)
self.exit_stack.enter_context(V.set_kernel_handler(self))
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.remove_kernel_local_buffers()
super().__exit__(exc_type, exc_val, exc_tb)
def remove_kernel_local_buffers(self) -> None:
"""
Any buffers that are both created and have a last use in the
same kernel can be removed.
Note that V.graph.scheduler can be None when codegening triton template
kernels.
"""
scheduler = V.graph.scheduler
if not scheduler:
return
fused_node_names = OrderedSet(
scheduler.name_to_buf[buf].defining_op_name()
for buf in self.store_buffer_names
if buf in scheduler.name_to_buf
)
names_to_remove: OrderedSet[str] = OrderedSet()
for name in self.store_buffer_names:
if (
name not in self.must_keep_buffers
and name not in self.args.input_buffers
and scheduler.can_buffer_be_removed_through_fusion(
name, fused_node_names
)
):
self.num_store -= 1
names_to_remove.add(name)
for name in names_to_remove:
if name in self.args.inplace_buffers:
buf = self.args.inplace_buffers[name]
if isinstance(buf, RemovedArg):
continue
remove = all(n in names_to_remove for n in buf.other_names)
if remove:
self.remove_inplace_buffer(name)
self.inplaced_to_remove.add(name)
else:
self.remove_buffer(name)
def remove_buffer(self, name: str) -> None:
# Assign a special value instead of deleting the entry
# because we still rely on output_buffers's length to
# generate unique arg name.
log.debug("remove_buffer(%r)", name)
self.args.output_buffers[name] = REMOVED
self.removed_buffers.add(name)
def remove_inplace_buffer(self, name: str) -> None:
log.debug("removing_inplace_buffer(%r)", name)
self.args.inplace_buffers[name] = REMOVED
self.removed_buffers.add(name)
def rename_indexing(
self, index: Union[list[sympy.Expr], tuple[sympy.Expr, ...], sympy.Expr]
) -> sympy.Expr:
# adds the necessary kernel args for index expressions
# and renames variables in index expressions to kernel arg names
if isinstance(index, (list, tuple)):
return [self.rename_indexing(x) for x in index]
index = V.graph.sizevars.simplify(index)
sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name)
replacements = {
x: self.args.size(x)
for x in sorted_symbols
if symbol_is_type(
x,
(
SymT.UNBACKED_INT,
SymT.SIZE,
SymT.PRECOMPUTED_SIZE,
SymT.UNBACKED_FLOAT,
),
)
}
return sympy_subs(index, replacements)
def create_cse_var(self, *args: Any, **kwargs: Any) -> CSEVariable:
return CSEVariable(*args, **kwargs)
def arg_name(self, node: IRNode) -> Optional[str]:
"""
Returns arg name of a given input or output node.
"""
if node is None:
return None
return self.args.arg_name(node.get_name())
@dataclasses.dataclass
| Kernel |
python | pytest-dev__pytest | src/_pytest/stepwise.py | {
"start": 2922,
"end": 7689
} | class ____:
def __init__(self, config: Config) -> None:
self.config = config
self.session: Session | None = None
self.report_status: list[str] = []
assert config.cache is not None
self.cache: Cache = config.cache
self.skip: bool = config.getoption("stepwise_skip")
self.reset: bool = config.getoption("stepwise_reset")
self.cached_info = self._load_cached_info()
def _load_cached_info(self) -> StepwiseCacheInfo:
cached_dict: dict[str, Any] | None = self.cache.get(STEPWISE_CACHE_DIR, None)
if cached_dict:
try:
return StepwiseCacheInfo(
cached_dict["last_failed"],
cached_dict["last_test_count"],
cached_dict["last_cache_date_str"],
)
except (KeyError, TypeError) as e:
error = f"{type(e).__name__}: {e}"
self.report_status.append(f"error reading cache, discarding ({error})")
# Cache not found or error during load, return a new cache.
return StepwiseCacheInfo.empty()
def pytest_sessionstart(self, session: Session) -> None:
self.session = session
def pytest_collection_modifyitems(
self, config: Config, items: list[nodes.Item]
) -> None:
last_test_count = self.cached_info.last_test_count
self.cached_info.last_test_count = len(items)
if self.reset:
self.report_status.append("resetting state, not skipping.")
self.cached_info.last_failed = None
return
if not self.cached_info.last_failed:
self.report_status.append("no previously failed tests, not skipping.")
return
if last_test_count is not None and last_test_count != len(items):
self.report_status.append(
f"test count changed, not skipping (now {len(items)} tests, previously {last_test_count})."
)
self.cached_info.last_failed = None
return
# Check all item nodes until we find a match on last failed.
failed_index = None
for index, item in enumerate(items):
if item.nodeid == self.cached_info.last_failed:
failed_index = index
break
# If the previously failed test was not found among the test items,
# do not skip any tests.
if failed_index is None:
self.report_status.append("previously failed test not found, not skipping.")
else:
cache_age = datetime.now() - self.cached_info.last_cache_date
# Round up to avoid showing microseconds.
cache_age = timedelta(seconds=int(cache_age.total_seconds()))
self.report_status.append(
f"skipping {failed_index} already passed items (cache from {cache_age} ago,"
f" use --sw-reset to discard)."
)
deselected = items[:failed_index]
del items[:failed_index]
config.hook.pytest_deselected(items=deselected)
def pytest_runtest_logreport(self, report: TestReport) -> None:
if report.failed:
if self.skip:
# Remove test from the failed ones (if it exists) and unset the skip option
# to make sure the following tests will not be skipped.
if report.nodeid == self.cached_info.last_failed:
self.cached_info.last_failed = None
self.skip = False
else:
# Mark test as the last failing and interrupt the test session.
self.cached_info.last_failed = report.nodeid
assert self.session is not None
self.session.shouldstop = (
"Test failed, continuing from this test next run."
)
else:
# If the test was actually run and did pass.
if report.when == "call":
# Remove test from the failed ones, if exists.
if report.nodeid == self.cached_info.last_failed:
self.cached_info.last_failed = None
def pytest_report_collectionfinish(self) -> list[str] | None:
if self.config.get_verbosity() >= 0 and self.report_status:
return [f"stepwise: {x}" for x in self.report_status]
return None
def pytest_sessionfinish(self) -> None:
if hasattr(self.config, "workerinput"):
# Do not update cache if this process is a xdist worker to prevent
# race conditions (#10641).
return
self.cached_info.update_date_to_now()
self.cache.set(STEPWISE_CACHE_DIR, dataclasses.asdict(self.cached_info))
| StepwisePlugin |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 61403,
"end": 84918
} | class ____(ClvpPreTrainedModel, GenerationMixin):
config: ClvpConfig
def __init__(self, config: ClvpConfig):
super().__init__(config)
if not isinstance(config.text_config, ClvpEncoderConfig):
raise TypeError(
"config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.speech_config, ClvpEncoderConfig):
raise TypeError(
"config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.speech_config)}."
)
if not isinstance(config.decoder_config, ClvpDecoderConfig):
raise TypeError(
"config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
f" {type(config.decoder_config)}."
)
self.conditioning_encoder = ClvpConditioningEncoder(config)
self.speech_decoder_model = ClvpForCausalLM(config.decoder_config)
self.text_encoder_model = ClvpEncoder(config.text_config)
self.speech_encoder_model = ClvpEncoder(config.speech_config)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
# taken from the original repo,
# link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/api.py#L117
def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
"""
This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
last few tokens of each sequence.
Args:
speech_ids (`torch.LongTensor`):
This refers to the output of the decoder model.
"""
decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
speech_ids = speech_ids[:, 1:]
stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])
for i, each_seq_stop_token_index in enumerate(stop_token_indices):
# This means that no stop tokens were found so the sentence was still being generated, in that case we don't need
# to apply any padding so just skip to the next sequence of tokens.
if each_seq_stop_token_index.sum() == 0:
continue
stm = each_seq_stop_token_index.argmax()
speech_ids[i, stm:] = decoder_fixing_codes[0]
if stm - 3 < speech_ids.shape[1]:
speech_ids[i, -3:] = torch.tensor(
[decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long
)
return speech_ids
def get_text_features(
self,
input_ids: Optional[torch.LongTensor] = None,
text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
r"""
This method can be used to extract text_embeds from a text. The text embeddings obtained by applying the
projection layer to the pooled output of the CLVP text encoder model.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
[What are input IDs?](../glossary#input-ids)
text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for the text encoder model passed in place of `input_ids`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Returns:
`torch.FloatTensor` of shape `(batch_size, output_dim)`:
The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
Model.
Examples:
```python
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text
>>> text = "This is an example text."
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # Generate processor output and text embeds
>>> processor_output = processor(text=text, return_tensors="pt")
>>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
```
"""
outputs = self.text_encoder_model(
input_ids=input_ids,
inputs_embeds=text_encoder_inputs_embeds,
attention_mask=attention_mask,
)
return outputs[0]
def get_speech_features(
self,
speech_ids: Optional[torch.LongTensor] = None,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
**kwargs,
) -> torch.FloatTensor:
r"""
This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
model on speech_ids. If speech_ids is not present but both input_ids and input_features are given then the
decoder model will be used to first generate the speech_ids and then applying the speech model.
Args:
speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
then input_ids and input_features will be automatically ignored.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
and input_features will be used.
conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
generation_config (`GenerationConfig`, *optional*):
generation config to control the generation of speech_ids if they are not provided.
Returns:
`torch.FloatTensor` of shape `(batch_size, output_dim)`:
The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
Model.
Examples:
```python
>>> import datasets
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
>>> text = "This is an example text."
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
>>> audio = ds.sort("id")["audio"][0]
>>> audio_sample, sr = audio["array"], audio["sampling_rate"]
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # Generate processor output and model output
>>> processor_output = processor(raw_speech=audio_sample, sampling_rate=sr, text=text, return_tensors="pt")
>>> speech_embeds = model.get_speech_features(
... input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
... )
```
"""
if speech_ids is None:
if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None:
raise ValueError(
"Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided."
)
if generation_config is None:
generation_config = self.generation_config
generation_config.update(**kwargs)
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
inputs_embeds=conditioning_encoder_inputs_embeds,
attention_mask=attention_mask,
)
speech_ids = self.speech_decoder_model.generate(
conditioning_embeds=conditioning_embeds,
generation_config=generation_config,
)
speech_ids = self.fix_speech_decoder_output(speech_ids[0])
outputs = self.speech_encoder_model(
input_ids=speech_ids,
attention_mask=attention_mask,
)
return outputs[0]
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = False,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, ClvpOutput]:
r"""
conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
inputs_embeds for the text encoder model passed in place of `input_ids`.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> import datasets
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
>>> text = "This is an example text."
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
>>> audio = ds.sort("id")["audio"][0]
>>> audio_sample, sr = audio["array"], audio["sampling_rate"]
>>> # Define processor and model
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
>>> # processor outputs and model outputs
>>> processor_output = processor(raw_speech=audio_sample, sampling_rate=sr, text=text, return_tensors="pt")
>>> outputs = model(
... input_ids=processor_output["input_ids"],
... input_features=processor_output["input_features"],
... return_dict=True,
... )
```
"""
# Use CLVP model's config for some fields (if specified) instead of those of speech & text components.
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
inputs_embeds=conditioning_encoder_inputs_embeds,
attention_mask=attention_mask,
)
decoder_outputs = self.speech_decoder_model(
inputs_embeds=conditioning_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
speech_ids = decoder_outputs[0]
# since we will get the embeds of shape `(batch_size, seq_len, embedding_dim)` during the forward pass
# we must convert it to tokens, to make it compaitable with speech_transformer
if speech_ids.ndim == 3:
speech_ids = speech_ids.argmax(2)
speech_ids = self.fix_speech_decoder_output(speech_ids)
speech_outputs = self.speech_encoder_model(
input_ids=speech_ids,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_encoder_model(
input_ids=input_ids,
inputs_embeds=text_encoder_inputs_embeds,
attention_mask=attention_mask,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
speech_embeds = speech_outputs[0]
text_embeds = text_outputs[0]
# normalized features
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
logits_per_speech = logits_per_text.t()
loss = None
if return_loss:
loss = clvp_loss(logits_per_text)
if not return_dict:
output = (
logits_per_speech,
logits_per_text,
text_embeds,
speech_embeds,
text_outputs[2],
speech_outputs[2],
)
if output_hidden_states:
output += (
decoder_outputs[-1],
text_outputs[-1],
speech_outputs[-1],
)
return ((loss,) + output) if loss is not None else output
return ClvpOutput(
loss=loss,
logits_per_speech=logits_per_speech,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
speech_embeds=speech_embeds,
text_model_output=text_outputs[2],
speech_model_output=speech_outputs[2],
decoder_hidden_states=decoder_outputs.hidden_states,
text_encoder_hidden_states=text_outputs.hidden_states,
speech_encoder_hidden_states=speech_outputs.hidden_states,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
generation_config: Optional[GenerationConfig] = None,
pad_to_max_mel_tokens: Optional[int] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
):
"""
Generate method for `ClvpModelForConditionalGeneration`, this method calls the `generate` method of
`ClvpForCausalLM` and then uses those generated `speech_ids` to process `text_embeds` and `speech_embeds` using
`ClvpEncoder`.
Args:
input_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Input text Tokens. Processed from the [`ClvpTokenizer`].
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
pad_to_max_mel_tokens (`int`, *optional*):
Pads generated speech_ids to the specified value. This is to implement the same logic from the official
repo, link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
and to make sure the logits are same.
This does not affect generation quality so please don't consider using it since it is less efficient.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of decoder model, text encoder and speech encoder models.
Returns:
`ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
`config.return_dict_in_generate=True`) or a tuple.
"""
# If the input sequences are larger than (self.config.decoder_config.max_text_tokens - 3) then raise error,
# because we need to add 3 tokens ( 1 bos tokens and 2 eos tokens) to the input_ids in ClvpConditioningEncoder to
# properly sample
sequence_length = input_ids.shape[-1]
if sequence_length > (self.config.decoder_config.max_text_tokens - 3):
raise ValueError(
f"Maximum sequence length reached! Found input_ids of length {sequence_length}."
f"Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}"
)
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
# pad input_ids as specified in the original repo
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L380
input_ids, attention_mask = _pad_extra_bos_eos_tokens(
input_ids,
attention_mask,
add_bos_token=False,
bos_token_id=self.config.text_config.bos_token_id,
eos_token_id=self.config.text_config.eos_token_id,
)
conditioning_embeds = self.conditioning_encoder(
input_features=input_features,
input_ids=input_ids,
attention_mask=attention_mask,
)
decoder_outputs = self.speech_decoder_model.generate(
conditioning_embeds=conditioning_embeds,
generation_config=generation_config,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
if isinstance(decoder_outputs, ModelOutput):
speech_ids = decoder_outputs.sequences
# pad to pad_to_max_mel_tokens if given, to replicate the original repo logic
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
if pad_to_max_mel_tokens is not None:
padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1]
speech_ids = torch.nn.functional.pad(
speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id
)
speech_ids = self.fix_speech_decoder_output(speech_ids)
speech_outputs = self.speech_encoder_model(
input_ids=speech_ids,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
text_outputs = self.text_encoder_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_hidden_states=output_hidden_states,
return_dict=generation_config.return_dict_in_generate,
)
speech_embeds = speech_outputs[0]
text_embeds = text_outputs[0]
# normalized features
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
logits_per_speech = logits_per_text.t()
if not generation_config.return_dict_in_generate:
output = (
speech_ids,
logits_per_speech,
logits_per_text,
text_embeds,
speech_embeds,
text_outputs[2],
speech_outputs[2],
)
if output_hidden_states:
output += (
decoder_outputs[-1],
text_outputs[-1],
speech_outputs[-1],
)
return output
return ClvpOutput(
speech_ids=speech_ids,
logits_per_speech=logits_per_speech,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
speech_embeds=speech_embeds,
text_model_output=text_outputs[2],
speech_model_output=speech_outputs[2],
decoder_hidden_states=decoder_outputs.hidden_states,
text_encoder_hidden_states=text_outputs.hidden_states,
speech_encoder_hidden_states=speech_outputs.hidden_states,
)
__all__ = [
"ClvpModelForConditionalGeneration",
"ClvpForCausalLM",
"ClvpModel",
"ClvpPreTrainedModel",
"ClvpEncoder",
"ClvpDecoder",
]
| ClvpModelForConditionalGeneration |
python | doocs__leetcode | solution/1600-1699/1678.Goal Parser Interpretation/Solution2.py | {
"start": 0,
"end": 296
} | class ____:
def interpret(self, command: str) -> str:
ans = []
for i, c in enumerate(command):
if c == 'G':
ans.append(c)
elif c == '(':
ans.append('o' if command[i + 1] == ')' else 'al')
return ''.join(ans)
| Solution |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py | {
"start": 2623,
"end": 2795
} | class ____(BaseModel):
"""Request body for queue operations."""
queue_name: Annotated[str, Field(description="Name of the queue to add or remove.")]
| QueueUpdateRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol19.py | {
"start": 625,
"end": 710
} | class ____:
x: int
c1: ProtoC = ConcreteC1(0)
c2: ProtoC = ConcreteC2(0)
| ConcreteC2 |
python | huggingface__transformers | src/transformers/models/bloom/modeling_bloom.py | {
"start": 5933,
"end": 6323
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
ctx.save_for_backward(input)
return bloom_gelu_forward(input)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
input = ctx.saved_tensors
tmp = bloom_gelu_back(grad_output, input)
return tmp
| GeLUFunction |
python | pytorch__pytorch | torch/nn/modules/dropout.py | {
"start": 5974,
"end": 7678
} | class ____(_DropoutNd):
r"""Randomly zero out entire channels.
A channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]`.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv3d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout3d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
- Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
Examples::
>>> m = nn.Dropout3d(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
https://arxiv.org/abs/1411.4280
"""
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.dropout3d(input, self.p, self.training, self.inplace)
| Dropout3d |
python | getsentry__sentry | src/sentry/incidents/endpoints/team_alerts_triggered.py | {
"start": 3082,
"end": 4832
} | class ____(AlertRuleSerializer):
def __init__(self, start, end):
super().__init__()
self.start = start
self.end = end
def get_attrs(self, item_list, user, **kwargs):
result = super().get_attrs(item_list, user, **kwargs)
qs = (
AlertRule.objects.filter(
(
Q(incident__incidentactivity__type=IncidentActivityType.CREATED.value)
| Q(
incident__incidentactivity__type=IncidentActivityType.STATUS_CHANGE.value,
incident__incidentactivity__value__in=[
str(IncidentStatus.OPEN.value),
str(IncidentStatus.CRITICAL.value),
str(IncidentStatus.WARNING.value),
],
)
),
incident__date_added__gte=self.start,
incident__date_added__lt=self.end,
id__in=[item.id for item in item_list],
)
.values("id")
.annotate()
.annotate(count=Count("id"))
)
alert_rule_counts = {row["id"]: row["count"] for row in qs}
weeks = (self.end - self.start).days // 7
for alert_rule in item_list:
alert_rule_attrs = result.setdefault(alert_rule, {})
alert_rule_attrs["weekly_avg"] = alert_rule_counts.get(alert_rule.id, 0) / weeks
return result
def serialize(self, obj, attrs, user, **kwargs):
result = super().serialize(obj, attrs, user)
result["weeklyAvg"] = attrs["weekly_avg"]
result["totalThisWeek"] = obj.count
return result
@region_silo_endpoint
| TriggeredAlertRuleSerializer |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 12570,
"end": 13463
} | class ____(GroupbyAggregationBase):
"""Logical groupby aggregation class
This class lowers itself to concrete implementations for decomposable
or holistic aggregations.
"""
@functools.cached_property
def _meta(self):
return self._lower()._meta
@functools.cached_property
def _is_decomposable(self):
return not any(s[1] in ("median", np.median) for s in self.spec)
def _lower(self):
cls = (
DecomposableGroupbyAggregation
if self._is_decomposable
else HolisticGroupbyAggregation
)
return cls(
self.frame,
self.arg,
self.observed,
self.dropna,
self.split_every,
self.split_out,
self.sort,
self.shuffle_method,
self._slice,
*self.by,
)
| GroupbyAggregation |
python | scipy__scipy | scipy/optimize/tests/test__basinhopping.py | {
"start": 14152,
"end": 17628
} | class ____:
def setup_method(self):
self.T = 2.
self.met = Metropolis(self.T)
self.res_new = OptimizeResult(success=True, fun=0.)
self.res_old = OptimizeResult(success=True, fun=1.)
def test_boolean_return(self):
# the return must be a bool, else an error will be raised in
# basinhopping
ret = self.met(res_new=self.res_new, res_old=self.res_old)
assert isinstance(ret, bool)
def test_lower_f_accepted(self):
assert_(self.met(res_new=self.res_new, res_old=self.res_old))
def test_accept(self):
# test that steps are randomly accepted for f_new > f_old
one_accept = False
one_reject = False
for i in range(1000):
if one_accept and one_reject:
break
res_new = OptimizeResult(success=True, fun=1.)
res_old = OptimizeResult(success=True, fun=0.5)
ret = self.met(res_new=res_new, res_old=res_old)
if ret:
one_accept = True
else:
one_reject = True
assert_(one_accept)
assert_(one_reject)
def test_GH7495(self):
# an overflow in exp was producing a RuntimeWarning
# create own object here in case someone changes self.T
met = Metropolis(2)
res_new = OptimizeResult(success=True, fun=0.)
res_old = OptimizeResult(success=True, fun=2000)
with np.errstate(over='raise'):
met.accept_reject(res_new=res_new, res_old=res_old)
def test_gh7799(self):
# gh-7799 reported a problem in which local search was successful but
# basinhopping returned an invalid solution. Show that this is fixed.
def func(x):
return (x**2-8)**2+(x+2)**2
x0 = -4
limit = 50 # Constrain to func value >= 50
con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
res = basinhopping(
func,
x0,
30,
seed=np.random.RandomState(1234),
minimizer_kwargs={'constraints': con}
)
assert res.success
assert_allclose(res.fun, limit, rtol=1e-6)
def test_accept_gh7799(self):
# Metropolis should not accept the result of an unsuccessful new local
# search if the old local search was successful
met = Metropolis(0) # monotonic basin hopping
res_new = OptimizeResult(success=True, fun=0.)
res_old = OptimizeResult(success=True, fun=1.)
# if new local search was successful and energy is lower, accept
assert met(res_new=res_new, res_old=res_old)
# if new res is unsuccessful, don't accept - even if energy is lower
res_new.success = False
assert not met(res_new=res_new, res_old=res_old)
# ...unless the old res was unsuccessful, too. In that case, why not?
res_old.success = False
assert met(res_new=res_new, res_old=res_old)
def test_reject_all_gh7799(self):
# Test the behavior when there is no feasible solution
def fun(x):
return x@x
def constraint(x):
return x + 1
kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
assert not res.success
@pytest.mark.thread_unsafe(reason="shared state")
| Test_Metropolis |
python | pennersr__django-allauth | allauth/socialaccount/providers/twitter/provider.py | {
"start": 1106,
"end": 1794
} | class ____(OAuthProvider):
id = "twitter"
name = "X"
account_class = TwitterAccount
oauth_adapter_class = TwitterOAuthAdapter
def get_auth_url(self, request, action):
if action == AuthAction.REAUTHENTICATE:
url = "https://api.x.com/oauth/authorize"
else:
url = "https://api.x.com/oauth/authenticate"
return url
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
username=data.get("screen_name"),
name=data.get("name"),
email=data.get("email"),
)
provider_classes = [TwitterProvider]
| TwitterProvider |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 9834,
"end": 10019
} | class ____(OAuth2Error):
"""
The authenticated client is not authorized to use this authorization
grant type.
"""
error = 'unauthorized_client'
| UnauthorizedClientError |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 23100,
"end": 23465
} | class ____(_FunctionCaller):
"""Specialization of _Node to PartitionedCall-like operations."""
def __init__(self, node, function, enclosing_graph):
super(_PartitionedCall, self).__init__(
node,
function,
enclosing_graph,
first_function_input=0,
type_attribute="Tin",
function_attributes=["f"])
| _PartitionedCall |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/fixed_length_record_dataset_test.py | {
"start": 2544,
"end": 8240
} | class ____(FixedLengthRecordDatasetTestBase,
parameterized.TestCase):
def _test(self, compression_type=None):
test_filenames = self._createFiles(compression_type=compression_type)
def dataset_fn(filenames, num_epochs, batch_size=None):
repeat_dataset = readers.FixedLengthRecordDataset(
filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
compression_type=compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
# Basic test: read from file 0.
self.assertDatasetProduces(
dataset_fn([test_filenames[0]], 1),
expected_output=[
self._record(0, i) for i in range(self._num_records)
])
# Basic test: read from file 1.
self.assertDatasetProduces(
dataset_fn([test_filenames[1]], 1),
expected_output=[
self._record(1, i) for i in range(self._num_records)
])
# Basic test: read from both files.
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(
dataset_fn(test_filenames, 1), expected_output=expected_output)
# Test repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10))
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test batched and repeated iteration through both files.
get_next = self.getNext(dataset_fn(test_filenames, 10, self._num_records))
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testNoCompression(self):
self._test()
@combinations.generate(test_base.default_test_combinations())
def testGzipCompression(self):
self._test(compression_type="GZIP")
@combinations.generate(test_base.default_test_combinations())
def testZlibCompression(self):
self._test(compression_type="ZLIB")
@combinations.generate(test_base.default_test_combinations())
def testBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testParallelRead(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10,
num_parallel_reads=4)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output,
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testWrongSize(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes + 1, # Incorrect record length.
self._header_bytes,
self._footer_bytes,
buffer_size=10)
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r"Excluding the header \(5 bytes\) and footer \(2 bytes\), input "
r"file \".*fixed_length_record.0.txt\" has body length 21 bytes, "
r"which is not an exact multiple of the record length \(4 bytes\).")
)
@combinations.generate(test_base.default_test_combinations())
def testPathlib(self):
test_filenames = self._createFiles()
test_filenames = [pathlib.Path(f) for f in test_filenames]
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10,
num_parallel_reads=4)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output,
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testName(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
name="fixed_length_record_dataset")
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
| FixedLengthRecordDatasetTest |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 4097,
"end": 9791
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.auth = JWTClientSecretAuthentication()
self.org = self.create_organization(owner=self.user)
self.sentry_app = self.create_sentry_app(name="foo", organization=self.org)
self.installation = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.api_app = self.sentry_app.application
def _create_jwt(self, client_id: str, exp: datetime | None = None) -> str:
"""Helper to create a JWT token"""
if exp is None:
exp = datetime.now() + timedelta(hours=1)
payload = {
"iss": client_id, # Issuer
"sub": client_id, # Subject
"iat": int(datetime.now(UTC).timestamp()), # Issued at
"exp": int(exp.timestamp()), # Expiration
"jti": str(uuid.uuid4()), # JWT ID (unique identifier)
}
return jwt.encode(payload, self.api_app.client_secret, algorithm="HS256")
def test_authenticate(self) -> None:
token = self._create_jwt(self.api_app.client_id)
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
user, _ = self.auth.authenticate(request)
assert user.id == self.sentry_app.proxy_user.id
def test_missing_installation(self) -> None:
token = self._create_jwt(self.api_app.client_id)
fake_uuid = uuid.uuid4()
path = f"/api/0/sentry-app-installations/{fake_uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
with pytest.raises(AuthenticationFailed, match="Installation not found"):
self.auth.authenticate(request)
def test_invalid_client_id(self) -> None:
token = self._create_jwt("wrong-client-id")
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
with pytest.raises(AuthenticationFailed, match="JWT is not valid for this application"):
self.auth.authenticate(request)
def test_expired_token(self) -> None:
expired_time = datetime.now() - timedelta(hours=1)
token = self._create_jwt(self.api_app.client_id, exp=expired_time)
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
with pytest.raises(AuthenticationFailed, match="Could not validate JWT"):
self.auth.authenticate(request)
def test_missing_authorization_header(self) -> None:
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
with pytest.raises(AuthenticationFailed, match="Header is in invalid form"):
self.auth.authenticate(request)
def test_invalid_bearer_format(self) -> None:
token = self._create_jwt(self.api_app.client_id)
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Token {token}" # Wrong scheme
with pytest.raises(AuthenticationFailed, match="Bearer not present in token"):
self.auth.authenticate(request)
def test_malformed_jwt(self) -> None:
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = "Bearer invalid.jwt.token"
with pytest.raises(AuthenticationFailed, match="Could not validate JWT"):
self.auth.authenticate(request)
def test_no_request_data(self) -> None:
token = self._create_jwt(self.api_app.client_id)
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request(path=path) # No data
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
with pytest.raises(AuthenticationFailed, match="Invalid request"):
self.auth.authenticate(request)
def test_jwt_cannot_be_used_twice(self) -> None:
# Test that the same JWT token cannot be used twice
token = self._create_jwt(self.api_app.client_id)
path = f"/api/0/sentry-app-installations/{self.installation.uuid}/authorizations/"
request = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
# First request should succeed
user, _ = self.auth.authenticate(request)
assert user.id == self.sentry_app.proxy_user.id
# Second request with the same token should fail due to cache
request2 = _drf_request({"grant_type": GrantTypes.CLIENT_SECRET_JWT}, path=path)
request2.META["HTTP_AUTHORIZATION"] = f"Bearer {token}"
with pytest.raises(AuthenticationFailed, match="JWT has already been used"):
self.auth.authenticate(request2)
| TestJWTClientSecretAuthentication |
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 2442,
"end": 2629
} | class ____(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = hardware.linux.LinuxHardware
collector_class = hardware.linux.LinuxHardwareCollector
| TestLinuxFactsPlatform |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 11021,
"end": 12359
} | class ____(graphene.InputObjectType):
selector = graphene.NonNull(
GrapheneJobOrPipelineSelector,
description="""Defines the job / pipeline and solid subset that should be executed.
All subsequent executions in the same run group (for example, a single-step
re-execution) are scoped to the original run's selector and solid
subset.""",
)
runConfigData = graphene.InputField(GrapheneRunConfigData)
mode = graphene.InputField(graphene.String)
executionMetadata = graphene.InputField(
GrapheneExecutionMetadata,
description="""Defines run tags and parent / root relationships.\n\nNote: To
'restart from failure', provide a `parentRunId` and pass the
'dagster/is_resume_retry' tag. Dagster's automatic step key selection will
override any stepKeys provided.""",
)
stepKeys = graphene.InputField(
graphene.List(graphene.NonNull(graphene.String)),
description="""Defines step keys to execute within the execution plan defined
by the pipeline `selector`. To execute the entire execution plan, you can omit
this parameter, provide an empty array, or provide every step name.""",
)
preset = graphene.InputField(graphene.String)
class Meta:
name = "ExecutionParams"
| GrapheneExecutionParams |
python | sympy__sympy | sympy/tensor/tensor.py | {
"start": 67687,
"end": 83091
} | class ____(Expr, ABC):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 12.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
@call_highest_priority('__radd__')
def __add__(self, other):
return TensAdd(self, other).doit(deep=False)
@call_highest_priority('__add__')
def __radd__(self, other):
return TensAdd(other, self).doit(deep=False)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return TensAdd(self, -other).doit(deep=False)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return TensAdd(other, -self).doit(deep=False)
@call_highest_priority('__rmul__')
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
Explanation
===========
If the two tensors have an index in common, one contravariant
and the other covariant, in their product the indices are summed
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
return TensMul(self, other).doit(deep=False)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return TensMul(other, self).doit(deep=False)
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
other = _sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other).doit(deep=False)
def __rtruediv__(self, other):
raise ValueError('cannot divide by a tensor')
def __pow__(self, other):
deprecate_data()
with ignore_warnings(SymPyDeprecationWarning):
if self.data is None:
raise ValueError("No power without ndarray data.")
from .array import tensorproduct, tensorcontraction
free = self.free
marray = self.data
mdim = marray.rank()
for metric in free:
marray = tensorcontraction(
tensorproduct(
marray,
metric[0].tensor_index_type.data,
marray),
(0, mdim), (mdim+1, mdim+2)
)
return marray ** (other * S.Half)
def __rpow__(self, other):
raise NotImplementedError
@property
@abstractmethod
def nocoeff(self):
raise NotImplementedError("abstract method")
@property
@abstractmethod
def coeff(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_indices(self):
raise NotImplementedError("abstract method")
@abstractmethod
def get_free_indices(self) -> list[TensorIndex]:
raise NotImplementedError("abstract method")
@abstractmethod
def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr:
raise NotImplementedError("abstract method")
def fun_eval(self, *index_tuples):
deprecate_fun_eval()
return self.substitute_indices(*index_tuples)
def get_matrix(self):
"""
DEPRECATED: do not use.
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
"""
from sympy.matrices.dense import Matrix
deprecate_data()
with ignore_warnings(SymPyDeprecationWarning):
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = [] * rows
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
@staticmethod
def _get_indices_permutation(indices1, indices2):
return [indices1.index(i) for i in indices2]
def _get_free_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_free_indices_set())
return indset
def _get_dummy_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_dummy_indices_set())
return indset
def _get_indices_set(self):
indset = set()
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_indices_set())
return indset
@property
def _iterate_dummy_indices(self):
dummy_set = self._get_dummy_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in dummy_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@property
def _iterate_free_indices(self):
free_set = self._get_free_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in free_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@property
def _iterate_indices(self):
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
yield from recursor(arg, pos+(p,))
return recursor(self, ())
@staticmethod
def _contract_and_permute_with_metric(metric, array, pos, dim):
# TODO: add possibility of metric after (spinors)
from .array import tensorcontraction, tensorproduct, permutedims
array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos))
permu = list(range(dim))
permu.insert(pos, permu.pop(0))
return permutedims(array, permu)
@staticmethod
def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict):
from .array import permutedims
index_types1 = [i.tensor_index_type for i in free_ind1]
# Check if variance of indices needs to be fixed:
pos2up = []
pos2down = []
free2remaining = free_ind2[:]
for pos1, index1 in enumerate(free_ind1):
if index1 in free2remaining:
pos2 = free2remaining.index(index1)
free2remaining[pos2] = None
continue
if -index1 in free2remaining:
pos2 = free2remaining.index(-index1)
free2remaining[pos2] = None
free_ind2[pos2] = index1
if index1.is_up:
pos2up.append(pos2)
else:
pos2down.append(pos2)
else:
index2 = free2remaining[pos1]
if index2 is None:
raise ValueError(f"incompatible indices: {free_ind1} and {free_ind2}")
free2remaining[pos1] = None
free_ind2[pos1] = index1
if index1.is_up ^ index2.is_up:
if index1.is_up:
pos2up.append(pos1)
else:
pos2down.append(pos1)
if len(set(free_ind1) & set(free_ind2)) < len(free_ind1):
raise ValueError(f"incompatible indices: {free_ind1} and {free_ind2}")
# Raise indices:
for pos in pos2up:
index_type_pos = index_types1[pos]
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric)
array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1))
# Lower indices:
for pos in pos2down:
index_type_pos = index_types1[pos]
if index_type_pos not in replacement_dict:
raise ValueError("No metric provided to lower index")
metric = replacement_dict[index_type_pos]
array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1))
if free_ind1:
permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1)
array = permutedims(array, permutation)
if hasattr(array, "rank") and array.rank() == 0:
array = array[()]
return free_ind2, array
def replace_with_arrays(self, replacement_dict, indices=None):
"""
Replace the tensorial expressions with arrays. The final array will
correspond to the N-dimensional array with indices arranged according
to ``indices``.
Parameters
==========
replacement_dict
dictionary containing the replacement rules for tensors.
indices
the index order with respect to which the array is read. The
original index order will be used if no value is passed.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> from sympy.tensor.tensor import TensorHead
>>> from sympy import symbols, diag
>>> L = TensorIndexType("L")
>>> i, j = tensor_indices("i j", L)
>>> A = TensorHead("A", [L])
>>> A(i).replace_with_arrays({A(i): [1, 2]}, [i])
[1, 2]
Since 'indices' is optional, we can also call replace_with_arrays by
this way if no specific index order is needed:
>>> A(i).replace_with_arrays({A(i): [1, 2]})
[1, 2]
>>> expr = A(i)*A(j)
>>> expr.replace_with_arrays({A(i): [1, 2]})
[[1, 2], [2, 4]]
For contractions, specify the metric of the ``TensorIndexType``, which
in this case is ``L``, in its covariant form:
>>> expr = A(i)*A(-i)
>>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)})
-3
Symmetrization of an array:
>>> H = TensorHead("H", [L, L])
>>> a, b, c, d = symbols("a b c d")
>>> expr = H(i, j)/2 + H(j, i)/2
>>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]})
[[a, b/2 + c/2], [b/2 + c/2, d]]
Anti-symmetrization of an array:
>>> expr = H(i, j)/2 - H(j, i)/2
>>> repl = {H(i, j): [[a, b], [c, d]]}
>>> expr.replace_with_arrays(repl)
[[0, b/2 - c/2], [-b/2 + c/2, 0]]
The same expression can be read as the transpose by inverting ``i`` and
``j``:
>>> expr.replace_with_arrays(repl, [j, i])
[[0, -b/2 + c/2], [b/2 - c/2, 0]]
"""
from .array import Array
indices = indices or []
remap = {k.args[0] if k.is_up else -k.args[0]: k for k in self.get_free_indices()}
for i, index in enumerate(indices):
if isinstance(index, (Symbol, Mul)):
if index in remap:
indices[i] = remap[index]
else:
indices[i] = -remap[-index]
replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()}
# Check dimensions of replaced arrays:
for tensor, array in replacement_dict.items():
if isinstance(tensor, TensorIndexType):
expected_shape = [tensor.dim for i in range(2)]
else:
expected_shape = [index_type.dim for index_type in tensor.index_types]
if len(expected_shape) != array.rank() or (not all(dim1 == dim2 if
dim1.is_number else True for dim1, dim2 in zip(expected_shape,
array.shape))):
raise ValueError(f"shapes for tensor {tensor} expected to be {expected_shape}, "\
"replacement array shape is {array.shape}")
ret_indices, array = self._extract_data(replacement_dict)
last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict)
return array
def _check_add_Sum(self, expr, index_symbols):
from sympy.concrete.summations import Sum
indices = self.get_indices()
dum = self.dum
sum_indices = [ (index_symbols[i], 0,
indices[i].tensor_index_type.dim-1) for i, j in dum]
if sum_indices:
expr = Sum(expr, *sum_indices)
return expr
def _expand_partial_derivative(self):
# simply delegate the _expand_partial_derivative() to
# its arguments to expand a possibly found PartialDerivative
return self.func(*[
a._expand_partial_derivative()
if isinstance(a, TensExpr) else a
for a in self.args])
def _matches_simple(self, expr, repl_dict=None, old=False):
"""
Matches assuming there are no wild objects in self.
"""
if repl_dict is None:
repl_dict = {}
else:
repl_dict = repl_dict.copy()
if not isinstance(expr, TensExpr):
if len(self.get_free_indices()) > 0:
#self has indices, but expr does not.
return None
elif set(self.get_free_indices()) != set(expr.get_free_indices()):
#If there are no wilds and the free indices are not the same, they cannot match.
return None
if canon_bp(self - expr) == S.Zero:
return repl_dict
else:
return None
| TensExpr |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataprep.py | {
"start": 1833,
"end": 2352
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
def test_execute(self, hook_mock):
op = DataprepGetJobsForJobGroupOperator(
dataprep_conn_id=DATAPREP_CONN_ID, job_group_id=JOB_ID, task_id=TASK_ID
)
op.execute(context={})
hook_mock.assert_called_once_with(dataprep_conn_id=DATAPREP_CONN_ID)
hook_mock.return_value.get_jobs_for_job_group.assert_called_once_with(job_id=JOB_ID)
| TestDataprepGetJobsForJobGroupOperator |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 484454,
"end": 485729
} | class ____(DataSource):
"""
InlineData schema wrapper.
Parameters
----------
values : str, dict, Sequence[str], Sequence[bool], Sequence[dict], Sequence[float], :class:`InlineDataset`
The full data set, included inline. This can be an array of objects or primitive
values, an object, or a string. Arrays of primitive values are ingested as objects
with a ``data`` property. Strings are parsed according to the specified format type.
format : dict, :class:`DataFormat`, :class:`CsvDataFormat`, :class:`DsvDataFormat`, :class:`JsonDataFormat`, :class:`TopoDataFormat`
An object that specifies the format for parsing the data.
name : str
Provide a placeholder name and bind data at runtime.
"""
_schema = {"$ref": "#/definitions/InlineData"}
def __init__(
self,
values: Optional[
str
| SchemaBase
| Sequence[Map]
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Map
] = Undefined,
format: Optional[SchemaBase | Map] = Undefined,
name: Optional[str] = Undefined,
**kwds,
):
super().__init__(values=values, format=format, name=name, **kwds)
| InlineData |
python | wandb__wandb | wandb/sdk/lib/fsm.py | {
"start": 3045,
"end": 5044
} | class ____(Generic[T_FsmInputs, T_FsmContext]):
_state_dict: Dict[Type[FsmState], FsmState]
_table: FsmTableWithContext[T_FsmInputs, T_FsmContext]
_state: FsmState[T_FsmInputs, T_FsmContext]
_states: Sequence[FsmState]
def __init__(
self,
states: Sequence[FsmState],
table: FsmTableWithContext[T_FsmInputs, T_FsmContext],
) -> None:
self._states = states
self._table = table
self._state_dict = {type(s): s for s in states}
self._state = self._state_dict[type(states[0])]
def _transition(
self,
inputs: T_FsmInputs,
new_state: Type[FsmState[T_FsmInputs, T_FsmContext]],
action: Optional[Callable[[T_FsmInputs], None]],
) -> None:
if action:
action(inputs)
context = None
if isinstance(self._state, FsmStateExit):
context = self._state.on_exit(inputs)
prev_state = type(self._state)
if prev_state == new_state:
if isinstance(self._state, FsmStateStay):
self._state.on_stay(inputs)
else:
self._state = self._state_dict[new_state]
if context and isinstance(self._state, FsmStateEnterWithContext):
self._state.on_enter(inputs, context=context)
elif isinstance(self._state, FsmStateEnter):
self._state.on_enter(inputs)
def _check_transitions(self, inputs: T_FsmInputs) -> None:
for entry in self._table[type(self._state)]:
if entry.condition(inputs):
self._transition(inputs, entry.target_state, entry.action)
return
def input(self, inputs: T_FsmInputs) -> None:
if isinstance(self._state, FsmStateCheck):
self._state.on_check(inputs)
self._check_transitions(inputs)
if isinstance(self._state, FsmStateOutput):
self._state.on_state(inputs)
Fsm: TypeAlias = FsmWithContext[T_FsmInputs, None]
| FsmWithContext |
python | google__pytype | pytype/rewrite/abstract/functions.py | {
"start": 2145,
"end": 3126
} | class ____(Generic[_FrameT]):
"""Arguments to one function call."""
posargs: tuple[_Var, ...] = ()
kwargs: Mapping[str, _Var] = datatypes.EMPTY_MAP
starargs: _Var | None = None
starstarargs: _Var | None = None
frame: _FrameT | None = None
def get_concrete_starargs(self) -> tuple[Any, ...]:
"""Returns a concrete tuple from starargs or raises ValueError."""
if self.starargs is None:
raise ValueError('No starargs to convert')
starargs = self.starargs.get_atomic_value(internal.FunctionArgTuple) # pytype: disable=attribute-error
return _unpack_splats(starargs.constant)
def get_concrete_starstarargs(self) -> Mapping[str, Any]:
"""Returns a concrete dict from starstarargs or raises ValueError."""
if self.starstarargs is None:
raise ValueError('No starstarargs to convert')
starstarargs = self.starstarargs.get_atomic_value(internal.FunctionArgDict) # pytype: disable=attribute-error
return starstarargs.constant
| Args |
python | getsentry__sentry | tests/sentry/testutils/pytest/mocking/test_mocking.py | {
"start": 2521,
"end": 4587
} | class ____(TestCase):
def test_no_args_no_kwargs_matching(self) -> None:
describe_dogs = MagicMock()
# Call the function more than once to show it's not just the total number of calls being
# counted, and call it with something else second, to show it's not just looking at the most
# recent call
describe_dogs()
describe_dogs("maisey")
assert count_matching_calls(describe_dogs) == 1
def test_arg_matching(self) -> None:
describe_dogs = MagicMock()
describe_dogs("maisey")
describe_dogs("charlie")
describe_dogs("maisey")
describe_dogs("maisey", "charlie")
assert count_matching_calls(describe_dogs, "maisey") == 2
assert count_matching_calls(describe_dogs, "charlie") == 1
assert count_matching_calls(describe_dogs, "maisey", "charlie") == 1
def test_kwarg_matching(self) -> None:
describe_dogs = MagicMock()
describe_dogs(number_1_dog="maisey")
describe_dogs(number_1_dog="charlie")
describe_dogs(number_1_dog="maisey")
describe_dogs(numer_1_dog="maisey", co_number_1_dog="charlie")
assert count_matching_calls(describe_dogs, number_1_dog="maisey") == 2
assert count_matching_calls(describe_dogs, number_1_dog="charlie") == 1
assert (
count_matching_calls(describe_dogs, numer_1_dog="maisey", co_number_1_dog="charlie")
== 1
)
def test_mixed_matching(self) -> None:
describe_dogs = MagicMock()
describe_dogs("maisey", is_number_1_dog=True)
describe_dogs("charlie", is_number_1_dog=True)
describe_dogs("maisey", is_number_1_dog=True)
describe_dogs("maisey", "charlie", co_number_1_dogs=True)
assert count_matching_calls(describe_dogs, "maisey", is_number_1_dog=True) == 2
assert count_matching_calls(describe_dogs, "charlie", is_number_1_dog=True) == 1
assert count_matching_calls(describe_dogs, "maisey", "charlie", co_number_1_dogs=True) == 1
| MockCallCountingTest |
python | realpython__materials | wordcount/tests/realpython/models.py | {
"start": 572,
"end": 718
} | class ____(Enum):
PASSED = "passed"
FAILED = "failed"
SKIPPED = "skipped"
TIMED_OUT = "timed_out"
@dataclass(frozen=True)
| TestStatus |
python | getsentry__sentry | tests/sentry/tasks/test_activity.py | {
"start": 411,
"end": 852
} | class ____(PluginTestCase):
plugin = BasicPreprocessorPlugin
@mock.patch("sentry.tasks.activity.send_activity_notifications")
def test_simple(self, mock_func: mock.MagicMock) -> None:
group = self.create_group()
Activity.objects.create_group_activity(
group, ActivityType.ASSIGNED, user=self.user, data={"assignee": None}
)
assert mock_func.delay.call_count == 1
| ActivityNotificationsTest |
python | miyuchina__mistletoe | test/test_contrib/test_github_wiki.py | {
"start": 209,
"end": 1175
} | class ____(TestCase):
def setUp(self):
token._root_node = Document([])
self.renderer = GithubWikiRenderer()
self.renderer.__enter__()
self.addCleanup(self.renderer.__exit__, None, None, None)
def test_parse(self):
MockRawText = mock.Mock()
RawText = span_token._token_types.pop()
span_token._token_types.append(MockRawText)
try:
tokens = tokenize_inner('text with [[wiki | target]]')
token = tokens[1]
self.assertIsInstance(token, GithubWiki)
self.assertEqual(token.target, 'target')
MockRawText.assert_has_calls([mock.call('text with '), mock.call('wiki')])
finally:
span_token._token_types[-1] = RawText
def test_render(self):
token = next(iter(tokenize_inner('[[wiki|target]]')))
output = '<a href="target">wiki</a>'
self.assertEqual(self.renderer.render(token), output)
| TestGithubWiki |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_build_storage.py | {
"start": 340,
"end": 8490
} | class ____(TestCase):
def setUp(self):
self.test_media_dir = tempfile.mkdtemp()
self.storage = BuildMediaFileSystemStorage(location=self.test_media_dir)
def tearDown(self):
shutil.rmtree(self.test_media_dir, ignore_errors=True)
def assertFileTree(self, source, tree):
"""
Recursively check that ``source`` from storage has the same file tree as ``tree``.
:param source: source path in storage
:param tree: a list of strings representing files
or tuples (string, list) representing directories.
"""
dirs_tree = [e for e in tree if not isinstance(e, str)]
dirs, files = self.storage.listdir(source)
expected_dirs = [e[0] for e in dirs_tree]
expected_files = [e for e in tree if isinstance(e, str)]
self.assertCountEqual(dirs, expected_dirs)
self.assertCountEqual(files, expected_files)
for folder, files in dirs_tree:
self.assertFileTree(self.storage.join(source, folder), files)
def test_copy_directory(self):
self.assertFalse(self.storage.exists("files/test.html"))
with override_settings(DOCROOT=files_dir):
self.storage.copy_directory(files_dir, "files")
self.assertTrue(self.storage.exists("files/test.html"))
self.assertTrue(self.storage.exists("files/conf.py"))
self.assertTrue(self.storage.exists("files/api.fjson"))
self.assertTrue(self.storage.exists("files/api/index.html"))
self.assertFalse(self.storage.exists("files/test-symlink.html"))
self.assertFalse(self.storage.exists("files/dir-symlink"))
def test_sync_directory(self):
tmp_files_dir = os.path.join(tempfile.mkdtemp(), "files")
shutil.copytree(files_dir, tmp_files_dir, symlinks=True)
storage_dir = "files"
tree = [
("api", ["index.html"]),
"404.html",
"api.fjson",
"conf.py",
"index.html",
"test.html",
]
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
tree = [
("api", ["index.html"]),
"404.html",
"conf.py",
"index.html",
"test.html",
]
os.remove(os.path.join(tmp_files_dir, "api.fjson"))
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
tree = [
"404.html",
"conf.py",
"index.html",
"test.html",
]
shutil.rmtree(os.path.join(tmp_files_dir, "api"))
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
def test_sync_directory_source_symlink(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_symlink_dir = Path(tempfile.mkdtemp()) / "files"
tmp_symlink_dir.symlink_to(tmp_dir)
with override_settings(DOCROOT=tmp_dir):
with pytest.raises(SuspiciousFileOperation, match="symbolic link"):
self.storage.rclone_sync_directory(tmp_symlink_dir, "files")
def test_copy_directory_source_symlink(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_symlink_dir = Path(tempfile.mkdtemp()) / "files"
tmp_symlink_dir.symlink_to(tmp_dir)
with override_settings(DOCROOT=tmp_dir):
with pytest.raises(SuspiciousFileOperation, match="symbolic link"):
self.storage.copy_directory(tmp_symlink_dir, "files")
def test_sync_directory_source_outside_docroot(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_docroot = Path(tempfile.mkdtemp()) / "docroot"
tmp_docroot.mkdir()
with override_settings(DOCROOT=tmp_docroot):
with pytest.raises(SuspiciousFileOperation, match="outside the docroot"):
self.storage.rclone_sync_directory(tmp_dir, "files")
def test_copy_directory_source_outside_docroot(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_docroot = Path(tempfile.mkdtemp()) / "docroot"
tmp_docroot.mkdir()
with override_settings(DOCROOT=tmp_docroot):
with pytest.raises(SuspiciousFileOperation, match="outside the docroot"):
self.storage.copy_directory(tmp_dir, "files")
def test_delete_directory(self):
with override_settings(DOCROOT=files_dir):
self.storage.copy_directory(files_dir, "files")
dirs, files = self.storage.listdir("files")
self.assertEqual(dirs, ["api"])
self.assertCountEqual(
files, ["404.html", "api.fjson", "conf.py", "index.html", "test.html"]
)
self.storage.delete_directory("files/")
_, files = self.storage.listdir("files")
self.assertEqual(files, [])
# We don't check "dirs" here - in filesystem backed storages
# the empty directories are not deleted
# Cloud storage generally doesn't consider empty directories to exist
dirs, files = self.storage.listdir("files/api")
self.assertEqual(dirs, [])
self.assertEqual(files, [])
def test_walk(self):
with override_settings(DOCROOT=files_dir):
self.storage.copy_directory(files_dir, "files")
output = list(self.storage.walk("files"))
self.assertEqual(len(output), 2)
top, dirs, files = output[0]
self.assertEqual(top, "files")
self.assertCountEqual(dirs, ["api"])
self.assertCountEqual(
files, ["404.html", "api.fjson", "conf.py", "index.html", "test.html"]
)
top, dirs, files = output[1]
self.assertEqual(top, "files/api")
self.assertCountEqual(dirs, [])
self.assertCountEqual(files, ["index.html"])
def test_rclone_sync(self):
tmp_files_dir = Path(tempfile.mkdtemp()) / "files"
shutil.copytree(files_dir, tmp_files_dir, symlinks=True)
storage_dir = "files"
tree = [
("api", ["index.html"]),
"404.html",
"api.fjson",
"conf.py",
"index.html",
"test.html",
]
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
tree = [
("api", ["index.html"]),
"404.html",
"conf.py",
"index.html",
"test.html",
]
(tmp_files_dir / "api.fjson").unlink()
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
tree = [
"404.html",
"conf.py",
"index.html",
"test.html",
]
shutil.rmtree(tmp_files_dir / "api")
with override_settings(DOCROOT=tmp_files_dir):
self.storage.rclone_sync_directory(tmp_files_dir, storage_dir)
self.assertFileTree(storage_dir, tree)
def test_rclone_sync_source_symlink(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_symlink_dir = Path(tempfile.mkdtemp()) / "files"
tmp_symlink_dir.symlink_to(tmp_dir)
with override_settings(DOCROOT=tmp_dir):
with pytest.raises(SuspiciousFileOperation, match="symbolic link"):
self.storage.rclone_sync_directory(tmp_symlink_dir, "files")
def test_rclone_sync_source_outside_docroot(self):
tmp_dir = Path(tempfile.mkdtemp())
tmp_docroot = Path(tempfile.mkdtemp()) / "docroot"
tmp_docroot.mkdir()
with override_settings(DOCROOT=tmp_docroot):
with pytest.raises(SuspiciousFileOperation, match="outside the docroot"):
self.storage.rclone_sync_directory(tmp_dir, "files")
| TestBuildMediaStorage |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_connector/data_connector.py | {
"start": 435,
"end": 7678
} | class ____(ABC):
"""The abstract base class for all Data Connectors.
Data Connectors produce identifying information, called Batch Specs, that Execution Engines
can use to get individual batches of data. They add flexibility in how to obtain data
such as with time-based partitioning, downsampling, or other techniques appropriate
for the Datasource.
For example, a DataConnector could produce a SQL query that logically represents "rows in
the Events table with a timestamp on February 7, 2012," which an SqlAlchemy Datasource
could use to materialize a SqlAlchemy Dataset corresponding to that Batch of data and
ready for validation.
A Batch is a sample from a data asset, sliced according to a particular rule. For example,
an hourly slide of the Events table or “most recent Users records.” It is the primary
unit of validation in the Great Expectations Data Context. Batches include metadata that
identifies how they were constructed--the same Batch Spec assembled by the data connector.
While not every Datasource will enable re-fetching a specific batch of data, GX can store
snapshots of batches or store metadata from an external data version control system.
Args:
datasource_name: The name of the Datasource associated with this DataConnector instance
data_asset_name: The name of the DataAsset using this DataConnector instance
"""
# needed to select the asset level kwargs needed to build the DataConnector
asset_level_option_keys: ClassVar[tuple[str, ...]] = ()
asset_options_type: ClassVar[Type] = dict
def __init__(
self,
datasource_name: str,
data_asset_name: str,
) -> None:
self._datasource_name: str = datasource_name
self._data_asset_name: str = data_asset_name
@property
def data_asset_name(self) -> str:
return self._data_asset_name
@property
def datasource_name(self) -> str:
return self._datasource_name
@abstractmethod
def get_batch_definition_list(self, batch_request: BatchRequest) -> List[LegacyBatchDefinition]:
"""
This interface method, implemented by subclasses, examines "BatchRequest" and converts it to one or more
"BatchDefinition" objects, each of which can be later converted to ExecutionEngine-specific "BatchSpec" object
for loading "Batch" of data.
Args:
batch_request: (BatchRequest) input "BatchRequest" object
Returns:
List[BatchDefinition] -- list of "BatchDefinition" objects, each corresponding to "Batch" of data downstream
""" # noqa: E501 # FIXME CoP
pass
def build_batch_spec(self, batch_definition: LegacyBatchDefinition) -> BatchSpec:
"""
Builds batch_spec from batch_definition by generating batch_spec params and adding any pass_through params
Args:
batch_definition (LegacyBatchDefinition): required batch_definition parameter for retrieval
Returns:
BatchSpec object built from BatchDefinition
""" # noqa: E501 # FIXME CoP
batch_spec_params: dict = self._generate_batch_spec_parameters_from_batch_definition(
batch_definition=batch_definition
)
batch_spec = BatchSpec(**batch_spec_params)
return batch_spec
def test_connection(self) -> bool:
"""Test the connection to data, accessible to the present "DataConnector" object.
Raises:
bool: True of connection test succeeds; False, otherwise.
"""
return self.get_unmatched_data_reference_count() < self.get_data_reference_count()
@abstractmethod
def get_data_references(self) -> List[Any]:
"""
This interface method lists objects in the underlying data store used to create a list of data_references (type depends on cloud storage environment, SQL DBMS, etc.).
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def get_data_reference_count(self) -> int:
"""
This interface method returns number of all (e.g., cached) data references (useful for diagnostics).
Returns:
int -- number of data references identified
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def get_matched_data_references(self) -> List[Any]:
"""
This interface method returns (e.g., cached) data references that were successfully matched based on "BatchRequest" options.
Returns:
List[Any] -- unmatched data references (type depends on cloud storage environment, SQL DBMS, etc.)
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def get_matched_data_reference_count(self) -> int:
"""
This interface method returns number of all (e.g., cached) matched data references (useful for diagnostics).
Returns:
int -- number of data references identified
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def get_unmatched_data_references(self) -> List[Any]:
"""
This interface method returns (e.g., cached) data references that could not be matched based on "BatchRequest" options.
Returns:
List[Any] -- unmatched data references (type depends on cloud storage environment, SQL DBMS, etc.)
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def get_unmatched_data_reference_count(self) -> int:
"""
This interface method returns number of all (e.g., cached) unmatched data references (useful for diagnostics).
Returns:
int -- number of data references identified
""" # noqa: E501 # FIXME CoP
pass
@abstractmethod
def _generate_batch_spec_parameters_from_batch_definition(
self, batch_definition: LegacyBatchDefinition
) -> dict:
"""
This interface method, implemented by subclasses, examines "BatchDefinition" and converts it to
ExecutionEngine-specific "BatchSpec" object for loading "Batch" of data. Implementers will typically define
their own interfaces that their subclasses must implement in order to provide storage-specific specifics.
Args:
batch_definition: (BatchDefinition) input "BatchRequest" object
Returns:
dict -- dictionary of "BatchSpec" properties
""" # noqa: E501 # FIXME CoP
pass
@staticmethod
def _batch_definition_matches_batch_request(
batch_definition: LegacyBatchDefinition, batch_request: BatchRequest
) -> bool:
if not (
batch_request.datasource_name == batch_definition.datasource_name
and batch_request.data_asset_name == batch_definition.data_asset_name
):
return False
if batch_request.options:
for key, value in batch_request.options.items():
if value is not None and not (
(key in batch_definition.batch_identifiers)
and (batch_definition.batch_identifiers[key] == batch_request.options[key])
):
return False
return True
| DataConnector |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_union.py | {
"start": 18359,
"end": 29125
} | class ____(int):
pass
@pytest.mark.parametrize('reverse', [False, True])
@pytest.mark.parametrize(
'core_schema_left,core_schema_right,input_value,expected_value',
[
(core_schema.int_schema(), core_schema.bool_schema(), True, True),
(core_schema.int_schema(), core_schema.bool_schema(), 1, 1),
(core_schema.str_schema(), core_schema.int_schema(), 1, 1),
(core_schema.str_schema(), core_schema.int_schema(), '1', '1'),
(core_schema.int_schema(), core_schema.bool_schema(), IntSubclass(1), 1),
(
core_schema.decimal_schema(),
core_schema.int_schema(),
Decimal('1'),
Decimal('1'),
),
(core_schema.decimal_schema(), core_schema.int_schema(), 1, 1),
(
core_schema.decimal_schema(),
core_schema.float_schema(),
Decimal('1.'),
Decimal('1.'),
),
(
core_schema.decimal_schema(),
core_schema.str_schema(),
Decimal('_1'),
Decimal('_1'),
),
(
core_schema.decimal_schema(),
core_schema.str_schema(),
'_1',
'_1',
),
(
core_schema.uuid_schema(),
core_schema.str_schema(),
EXAMPLE_UUID,
EXAMPLE_UUID,
),
(
core_schema.uuid_schema(),
core_schema.str_schema(),
str(EXAMPLE_UUID),
str(EXAMPLE_UUID),
),
],
)
def test_union_serializer_picks_exact_type_over_subclass(
core_schema_left, core_schema_right, input_value, expected_value, reverse
):
s = SchemaSerializer(
core_schema.union_schema(
[core_schema_right, core_schema_left] if reverse else [core_schema_left, core_schema_right]
)
)
assert s.to_python(input_value) == expected_value
@pytest.mark.parametrize('reverse', [False, True])
@pytest.mark.parametrize(
'core_schema_left,core_schema_right,input_value,expected_value',
[
(core_schema.int_schema(), core_schema.bool_schema(), True, True),
(core_schema.int_schema(), core_schema.bool_schema(), 1, 1),
(core_schema.str_schema(), core_schema.int_schema(), 1, 1),
(core_schema.str_schema(), core_schema.int_schema(), '1', '1'),
(core_schema.int_schema(), core_schema.bool_schema(), IntSubclass(1), 1),
(
core_schema.decimal_schema(),
core_schema.int_schema(),
Decimal('1'),
'1',
),
(core_schema.decimal_schema(), core_schema.int_schema(), 1, 1),
(
core_schema.decimal_schema(),
core_schema.float_schema(),
Decimal('1.'),
'1',
),
(
core_schema.decimal_schema(),
core_schema.str_schema(),
Decimal('_1'),
'1',
),
(
core_schema.decimal_schema(),
core_schema.str_schema(),
'_1',
'_1',
),
],
)
def test_union_serializer_picks_exact_type_over_subclass_json(
core_schema_left, core_schema_right, input_value, expected_value, reverse
):
s = SchemaSerializer(
core_schema.union_schema(
[core_schema_right, core_schema_left] if reverse else [core_schema_left, core_schema_right]
)
)
assert s.to_python(input_value, mode='json') == expected_value
assert s.to_json(input_value) == json.dumps(expected_value).encode()
def test_tagged_union() -> None:
@dataclasses.dataclass
class ModelA:
field: int
tag: Literal['a'] = 'a'
@dataclasses.dataclass
class ModelB:
field: int
tag: Literal['b'] = 'b'
s = SchemaSerializer(
core_schema.tagged_union_schema(
choices={
'a': core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema(
'ModelA',
[
core_schema.dataclass_field(name='field', schema=core_schema.int_schema()),
core_schema.dataclass_field(name='tag', schema=core_schema.literal_schema(['a'])),
],
),
['field', 'tag'],
),
'b': core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB',
[
core_schema.dataclass_field(name='field', schema=core_schema.int_schema()),
core_schema.dataclass_field(name='tag', schema=core_schema.literal_schema(['b'])),
],
),
['field', 'tag'],
),
},
discriminator='tag',
)
)
assert 'TaggedUnionSerializer' in repr(s)
model_a = ModelA(field=1)
model_b = ModelB(field=1)
assert s.to_python(model_a) == {'field': 1, 'tag': 'a'}
assert s.to_python(model_b) == {'field': 1, 'tag': 'b'}
def test_union_float_int() -> None:
s = SchemaSerializer(core_schema.union_schema([core_schema.float_schema(), core_schema.int_schema()]))
assert s.to_python(1) == 1
assert json.loads(s.to_json(1)) == 1
s = SchemaSerializer(core_schema.union_schema([core_schema.int_schema(), core_schema.float_schema()]))
assert s.to_python(1) == 1
assert json.loads(s.to_json(1)) == 1
def test_custom_serializer() -> None:
s = SchemaSerializer(
core_schema.union_schema(
[
core_schema.dict_schema(
keys_schema=core_schema.any_schema(),
values_schema=core_schema.any_schema(),
serialization=core_schema.plain_serializer_function_ser_schema(lambda x: x['id']),
),
core_schema.list_schema(
items_schema=core_schema.dict_schema(
keys_schema=core_schema.any_schema(),
values_schema=core_schema.any_schema(),
serialization=core_schema.plain_serializer_function_ser_schema(lambda x: x['id']),
)
),
]
)
)
print(s)
assert s.to_python([{'id': 1}, {'id': 2}]) == [1, 2]
assert s.to_python({'id': 1}) == 1
def test_tagged_union_with_aliases() -> None:
@dataclasses.dataclass
class ModelA:
field: int
tag: Literal['a'] = 'a'
@dataclasses.dataclass
class ModelB:
field: int
tag: Literal['b'] = 'b'
s = SchemaSerializer(
core_schema.tagged_union_schema(
choices={
'a': core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema(
'ModelA',
[
core_schema.dataclass_field(name='field', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='tag',
schema=core_schema.literal_schema(['a']),
validation_alias='TAG',
serialization_alias='TAG',
),
],
),
['field', 'tag'],
),
'b': core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB',
[
core_schema.dataclass_field(name='field', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='tag',
schema=core_schema.literal_schema(['b']),
validation_alias='TAG',
serialization_alias='TAG',
),
],
),
['field', 'tag'],
),
},
discriminator=[['tag'], ['TAG']],
)
)
assert 'TaggedUnionSerializer' in repr(s)
model_a = ModelA(field=1)
model_b = ModelB(field=1)
assert s.to_python(model_a, by_alias=True) == {'field': 1, 'TAG': 'a'}
assert s.to_python(model_b, by_alias=True) == {'field': 1, 'TAG': 'b'}
def test_union_model_wrap_serializer():
def wrap_serializer(value, handler):
return handler(value)
class Data:
pass
class ModelA:
a: Data
class ModelB:
a: Data
model_serializer = SchemaSerializer(
core_schema.union_schema(
[
core_schema.model_schema(
ModelA,
core_schema.model_fields_schema(
{
'a': core_schema.model_field(
core_schema.model_schema(
Data,
core_schema.model_fields_schema({}),
)
),
},
),
serialization=core_schema.wrap_serializer_function_ser_schema(wrap_serializer),
),
core_schema.model_schema(
ModelB,
core_schema.model_fields_schema(
{
'a': core_schema.model_field(
core_schema.model_schema(
Data,
core_schema.model_fields_schema({}),
)
),
},
),
serialization=core_schema.wrap_serializer_function_ser_schema(wrap_serializer),
),
],
)
)
input_value = ModelA()
input_value.a = Data()
assert model_serializer.to_python(input_value) == {'a': {}}
assert model_serializer.to_python(input_value, mode='json') == {'a': {}}
assert model_serializer.to_json(input_value) == b'{"a":{}}'
# add some additional attribute, should be ignored & not break serialization
input_value.a._a = 'foo'
assert model_serializer.to_python(input_value) == {'a': {}}
assert model_serializer.to_python(input_value, mode='json') == {'a': {}}
assert model_serializer.to_json(input_value) == b'{"a":{}}'
| IntSubclass |
python | huggingface__transformers | tests/models/jamba/test_modeling_jamba.py | {
"start": 1599,
"end": 3711
} | class ____(ConfigTester):
def _create_attn_config(self, attn_layer_offset: int, attn_layer_period: int):
_input_dict = self.inputs_dict.copy()
_input_dict["attn_layer_offset"] = attn_layer_offset
_input_dict["attn_layer_period"] = attn_layer_period
return self.config_class(**_input_dict)
def _create_expert_config(self, expert_layer_offset: int, expert_layer_period: int):
_input_dict = self.inputs_dict.copy()
_input_dict["expert_layer_offset"] = expert_layer_offset
_input_dict["expert_layer_period"] = expert_layer_period
return self.config_class(**_input_dict)
def test_attn_offsets(self):
self._create_attn_config(attn_layer_offset=0, attn_layer_period=4)
self._create_attn_config(attn_layer_offset=1, attn_layer_period=4)
self._create_attn_config(attn_layer_offset=2, attn_layer_period=4)
self._create_attn_config(attn_layer_offset=3, attn_layer_period=4)
with self.parent.assertRaises(ValueError):
self._create_attn_config(attn_layer_offset=4, attn_layer_period=4)
with self.parent.assertRaises(ValueError):
self._create_attn_config(attn_layer_offset=5, attn_layer_period=4)
def test_expert_offsets(self):
self._create_expert_config(expert_layer_offset=0, expert_layer_period=4)
self._create_expert_config(expert_layer_offset=1, expert_layer_period=4)
self._create_expert_config(expert_layer_offset=2, expert_layer_period=4)
self._create_expert_config(expert_layer_offset=3, expert_layer_period=4)
with self.parent.assertRaises(ValueError):
self._create_expert_config(expert_layer_offset=4, expert_layer_period=4)
with self.parent.assertRaises(ValueError):
self._create_expert_config(expert_layer_offset=5, expert_layer_period=4)
def test_jamba_offset_properties(self):
self.test_attn_offsets()
self.test_expert_offsets()
def run_common_tests(self):
self.test_jamba_offset_properties()
return super().run_common_tests()
| JambaConfigTester |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_meta.py | {
"start": 2777,
"end": 4035
} | class ____(nn.Module):
def __init__(self, device):
super().__init__()
self.lin1 = MyLinear(2, 2, bias=False, device=device)
self.lin1 = wrap(self.lin1)
self.lin2 = MyLinear(2, 2, bias=False, device=device)
self.l3 = MyModel(device=device)
self.l3 = wrap(self.l3)
def forward(self, x):
return self.l3(self.lin2(self.lin1(x)))
def _init_with_reset_params(module: nn.Module):
"""
to_empty + reset_parameters() init function example for modules
initialized with device="meta"
"""
has_meta_states = any(
t.is_meta
for t in itertools.chain(
module.parameters(recurse=False), module.buffers(recurse=False)
)
)
if has_meta_states:
device = torch.device(device_type, torch.accelerator.current_device_index())
module.to_empty(device=device, recurse=False)
module.reset_parameters()
def _init_with_torchdistX(module: nn.Module):
"""
torchdistX-based deferred module initialization function example
using ``materialize_module``.
"""
assert _TORCHDISTX_AVAIL
def check_fn(k):
return not isinstance(k, FSDP)
deferred_init.materialize_module(module, check_fn=check_fn)
| NestedModel |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/vector_store/base.py | {
"start": 1093,
"end": 18173
} | class ____(BaseIndex[IndexDict]):
"""
Vector Store Index.
Args:
use_async (bool): Whether to use asynchronous calls. Defaults to False.
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
store_nodes_override (bool): set to True to always store Node objects in index
store and document store even if vector store keeps text. Defaults to False
"""
index_struct_cls = IndexDict
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
# vector store index params
use_async: bool = False,
store_nodes_override: bool = False,
embed_model: Optional[EmbedType] = None,
insert_batch_size: int = 2048,
# parent class params
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexDict] = None,
storage_context: Optional[StorageContext] = None,
callback_manager: Optional[CallbackManager] = None,
transformations: Optional[List[TransformComponent]] = None,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._use_async = use_async
self._store_nodes_override = store_nodes_override
self._embed_model = resolve_embed_model(
embed_model or Settings.embed_model, callback_manager=callback_manager
)
self._insert_batch_size = insert_batch_size
super().__init__(
nodes=nodes,
index_struct=index_struct,
storage_context=storage_context,
show_progress=show_progress,
objects=objects,
callback_manager=callback_manager,
transformations=transformations,
**kwargs,
)
@classmethod
def from_vector_store(
cls,
vector_store: BasePydanticVectorStore,
embed_model: Optional[EmbedType] = None,
**kwargs: Any,
) -> "VectorStoreIndex":
if not vector_store.stores_text:
raise ValueError(
"Cannot initialize from a vector store that does not store text."
)
kwargs.pop("storage_context", None)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
return cls(
nodes=[],
embed_model=embed_model,
storage_context=storage_context,
**kwargs,
)
@property
def vector_store(self) -> BasePydanticVectorStore:
return self._vector_store
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.vector_store.retrievers import (
VectorIndexRetriever,
)
return VectorIndexRetriever(
self,
node_ids=list(self.index_struct.nodes_dict.values()),
callback_manager=self._callback_manager,
object_map=self._object_map,
**kwargs,
)
def _get_node_with_embedding(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> List[BaseNode]:
"""
Get tuples of id, node, and embedding.
Allows us to store these nodes in a vector store.
Embeddings are called in batches.
"""
id_to_embed_map = embed_nodes(
nodes, self._embed_model, show_progress=show_progress
)
results = []
for node in nodes:
embedding = id_to_embed_map[node.node_id]
result = node.model_copy()
result.embedding = embedding
results.append(result)
return results
async def _aget_node_with_embedding(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
) -> List[BaseNode]:
"""
Asynchronously get tuples of id, node, and embedding.
Allows us to store these nodes in a vector store.
Embeddings are called in batches.
"""
id_to_embed_map = await async_embed_nodes(
nodes=nodes,
embed_model=self._embed_model,
show_progress=show_progress,
)
results = []
for node in nodes:
embedding = id_to_embed_map[node.node_id]
result = node.model_copy()
result.embedding = embedding
results.append(result)
return results
async def _async_add_nodes_to_index(
self,
index_struct: IndexDict,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**insert_kwargs: Any,
) -> None:
"""Asynchronously add nodes to index."""
if not nodes:
return
for nodes_batch in iter_batch(nodes, self._insert_batch_size):
nodes_batch = await self._aget_node_with_embedding(
nodes_batch, show_progress
)
new_ids = await self._vector_store.async_add(nodes_batch, **insert_kwargs)
# if the vector store doesn't store text, we need to add the nodes to the
# index struct and document store
if not self._vector_store.stores_text or self._store_nodes_override:
for node, new_id in zip(nodes_batch, new_ids):
# NOTE: remove embedding from node to avoid duplication
node_without_embedding = node.model_copy()
node_without_embedding.embedding = None
index_struct.add_node(node_without_embedding, text_id=new_id)
await self._docstore.async_add_documents(
[node_without_embedding], allow_update=True
)
else:
# NOTE: if the vector store keeps text,
# we only need to add image and index nodes
for node, new_id in zip(nodes_batch, new_ids):
if isinstance(node, (ImageNode, IndexNode)):
# NOTE: remove embedding from node to avoid duplication
node_without_embedding = node.model_copy()
node_without_embedding.embedding = None
index_struct.add_node(node_without_embedding, text_id=new_id)
await self._docstore.async_add_documents(
[node_without_embedding], allow_update=True
)
def _add_nodes_to_index(
self,
index_struct: IndexDict,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**insert_kwargs: Any,
) -> None:
"""Add document to index."""
if not nodes:
return
for nodes_batch in iter_batch(nodes, self._insert_batch_size):
nodes_batch = self._get_node_with_embedding(nodes_batch, show_progress)
new_ids = self._vector_store.add(nodes_batch, **insert_kwargs)
if not self._vector_store.stores_text or self._store_nodes_override:
# NOTE: if the vector store doesn't store text,
# we need to add the nodes to the index struct and document store
for node, new_id in zip(nodes_batch, new_ids):
# NOTE: remove embedding from node to avoid duplication
node_without_embedding = node.model_copy()
node_without_embedding.embedding = None
index_struct.add_node(node_without_embedding, text_id=new_id)
self._docstore.add_documents(
[node_without_embedding], allow_update=True
)
else:
# NOTE: if the vector store keeps text,
# we only need to add image and index nodes
for node, new_id in zip(nodes_batch, new_ids):
if isinstance(node, (ImageNode, IndexNode)):
# NOTE: remove embedding from node to avoid duplication
node_without_embedding = node.model_copy()
node_without_embedding.embedding = None
index_struct.add_node(node_without_embedding, text_id=new_id)
self._docstore.add_documents(
[node_without_embedding], allow_update=True
)
def _build_index_from_nodes(
self,
nodes: Sequence[BaseNode],
**insert_kwargs: Any,
) -> IndexDict:
"""Build index from nodes."""
index_struct = self.index_struct_cls()
if self._use_async:
tasks = [
self._async_add_nodes_to_index(
index_struct,
nodes,
show_progress=self._show_progress,
**insert_kwargs,
)
]
run_async_tasks(tasks)
else:
self._add_nodes_to_index(
index_struct,
nodes,
show_progress=self._show_progress,
**insert_kwargs,
)
return index_struct
def build_index_from_nodes(
self,
nodes: Sequence[BaseNode],
**insert_kwargs: Any,
) -> IndexDict:
"""
Build the index from nodes.
NOTE: Overrides BaseIndex.build_index_from_nodes.
VectorStoreIndex only stores nodes in document store
if vector store does not store text
"""
# Filter out the nodes that don't have content
content_nodes = [
node
for node in nodes
if node.get_content(metadata_mode=MetadataMode.EMBED) != ""
]
# Report if some nodes are missing content
if len(content_nodes) != len(nodes):
print("Some nodes are missing content, skipping them...")
return self._build_index_from_nodes(content_nodes, **insert_kwargs)
    def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
        """Insert a document.

        Thin wrapper that delegates to ``_add_nodes_to_index`` against the
        live ``self._index_struct``.
        """
        self._add_nodes_to_index(self._index_struct, nodes, **insert_kwargs)
def _validate_serializable(self, nodes: Sequence[BaseNode]) -> None:
"""Validate that the nodes are serializable."""
for node in nodes:
if isinstance(node, IndexNode):
try:
node.dict()
except ValueError:
self._object_map[node.index_id] = node.obj
node.obj = None
    async def ainsert_nodes(
        self, nodes: Sequence[BaseNode], **insert_kwargs: Any
    ) -> None:
        """
        Insert nodes (async).

        NOTE: overrides BaseIndex.ainsert_nodes.
            VectorStoreIndex only stores nodes in document store
            if vector store does not store text
        """
        # Detach any non-serializable IndexNode payloads into _object_map
        # before persisting.
        self._validate_serializable(nodes)
        with self._callback_manager.as_trace("insert_nodes"):
            await self._async_add_nodes_to_index(
                self._index_struct, nodes, **insert_kwargs
            )
        # Persist the updated index struct to the index store.
        self._storage_context.index_store.add_index_struct(self._index_struct)
    def insert_nodes(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
        """
        Insert nodes.

        NOTE: overrides BaseIndex.insert_nodes.
            VectorStoreIndex only stores nodes in document store
            if vector store does not store text
        """
        # Detach any non-serializable IndexNode payloads into _object_map
        # before persisting.
        self._validate_serializable(nodes)
        with self._callback_manager.as_trace("insert_nodes"):
            self._insert(nodes, **insert_kwargs)
        # Persist the updated index struct to the index store.
        self._storage_context.index_store.add_index_struct(self._index_struct)
    def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
        # Intentionally a no-op: node deletion for this index goes through
        # ``delete_nodes`` / ``adelete_nodes``, which talk to the vector store
        # directly. Presumably this override satisfies an abstract hook on the
        # base index class — confirm against BaseIndex.
        pass
async def adelete_nodes(
self,
node_ids: List[str],
delete_from_docstore: bool = False,
**delete_kwargs: Any,
) -> None:
"""
Delete a list of nodes from the index.
Args:
node_ids (List[str]): A list of node_ids from the nodes to delete
"""
# delete nodes from vector store
await self._vector_store.adelete_nodes(node_ids, **delete_kwargs)
# delete from docstore only if needed
if (
not self._vector_store.stores_text or self._store_nodes_override
) and delete_from_docstore:
for node_id in node_ids:
self._index_struct.delete(node_id)
await self._docstore.adelete_document(node_id, raise_error=False)
self._storage_context.index_store.add_index_struct(self._index_struct)
def delete_nodes(
self,
node_ids: List[str],
delete_from_docstore: bool = False,
**delete_kwargs: Any,
) -> None:
"""
Delete a list of nodes from the index.
Args:
node_ids (List[str]): A list of node_ids from the nodes to delete
"""
# delete nodes from vector store
self._vector_store.delete_nodes(node_ids, **delete_kwargs)
# delete from docstore only if needed
if (
not self._vector_store.stores_text or self._store_nodes_override
) and delete_from_docstore:
for node_id in node_ids:
self._index_struct.delete(node_id)
self._docstore.delete_document(node_id, raise_error=False)
self._storage_context.index_store.add_index_struct(self._index_struct)
def _delete_from_index_struct(self, ref_doc_id: str) -> None:
# delete from index_struct only if needed
if not self._vector_store.stores_text or self._store_nodes_override:
ref_doc_info = self._docstore.get_ref_doc_info(ref_doc_id)
if ref_doc_info is not None:
for node_id in ref_doc_info.node_ids:
self._index_struct.delete(node_id)
self._vector_store.delete(node_id)
def _delete_from_docstore(self, ref_doc_id: str) -> None:
# delete from docstore only if needed
if not self._vector_store.stores_text or self._store_nodes_override:
self._docstore.delete_ref_doc(ref_doc_id, raise_error=False)
    def delete_ref_doc(
        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
    ) -> None:
        """Delete a document and its nodes by using ref_doc_id.

        Args:
            ref_doc_id (str): The ref_doc_id of the document to delete.
            delete_from_docstore (bool): Whether to also remove the document
                from the docstore.
            **delete_kwargs: Extra keyword arguments forwarded to the vector
                store's ``delete`` call.
        """
        # Delete the vectors keyed by ref_doc_id first.
        self._vector_store.delete(ref_doc_id, **delete_kwargs)
        # Then drop the per-node bookkeeping (no-op when nodes are not
        # tracked locally).
        self._delete_from_index_struct(ref_doc_id)
        if delete_from_docstore:
            self._delete_from_docstore(ref_doc_id)
        # Persist the updated index struct.
        self._storage_context.index_store.add_index_struct(self._index_struct)
async def _adelete_from_index_struct(self, ref_doc_id: str) -> None:
"""Delete from index_struct only if needed."""
if not self._vector_store.stores_text or self._store_nodes_override:
ref_doc_info = await self._docstore.aget_ref_doc_info(ref_doc_id)
if ref_doc_info is not None:
for node_id in ref_doc_info.node_ids:
self._index_struct.delete(node_id)
self._vector_store.delete(node_id)
async def _adelete_from_docstore(self, ref_doc_id: str) -> None:
"""Delete from docstore only if needed."""
if not self._vector_store.stores_text or self._store_nodes_override:
await self._docstore.adelete_ref_doc(ref_doc_id, raise_error=False)
    async def adelete_ref_doc(
        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
    ) -> None:
        """Async: delete a document and its nodes by using ref_doc_id.

        Args:
            ref_doc_id (str): The ref_doc_id of the document to delete.
            delete_from_docstore (bool): Whether to also remove the document
                from the docstore.
            **delete_kwargs: Extra keyword arguments forwarded to the vector
                store's ``adelete`` call.
        """
        # Vector-store delete and index-struct cleanup run concurrently.
        tasks = [
            self._vector_store.adelete(ref_doc_id, **delete_kwargs),
            self._adelete_from_index_struct(ref_doc_id),
        ]
        if delete_from_docstore:
            tasks.append(self._adelete_from_docstore(ref_doc_id))
        await asyncio.gather(*tasks)
        # Persist the updated index struct.
        self._storage_context.index_store.add_index_struct(self._index_struct)
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
if not self._vector_store.stores_text or self._store_nodes_override:
node_doc_ids = list(self.index_struct.nodes_dict.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
else:
raise NotImplementedError(
"Vector store integrations that store text in the vector store are "
"not supported by ref_doc_info yet."
)
# Backwards-compatible alias for code written against the legacy
# "GPT"-prefixed index name.
GPTVectorStoreIndex = VectorStoreIndex
| VectorStoreIndex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.