language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tornadoweb__tornado | tornado/httputil.py | {
"start": 4941,
"end": 13859
} | class ____(StrMutableMapping):
"""A dictionary that maintains ``Http-Header-Case`` for all keys.
Supports multiple values per key via a pair of new methods,
`add()` and `get_list()`. The regular dictionary interface
returns a single value per key, with multiple values joined by a
comma.
>>> h = HTTPHeaders({"content-type": "text/html"})
>>> list(h.keys())
['Content-Type']
>>> h["Content-Type"]
'text/html'
>>> h.add("Set-Cookie", "A=B")
>>> h.add("Set-Cookie", "C=D")
>>> h["set-cookie"]
'A=B,C=D'
>>> h.get_list("set-cookie")
['A=B', 'C=D']
>>> for (k,v) in sorted(h.get_all()):
... print('%s: %s' % (k,v))
...
Content-Type: text/html
Set-Cookie: A=B
Set-Cookie: C=D
"""
@typing.overload
def __init__(self, __arg: Mapping[str, List[str]]) -> None:
pass
@typing.overload # noqa: F811
def __init__(self, __arg: Mapping[str, str]) -> None:
pass
@typing.overload # noqa: F811
def __init__(self, *args: Tuple[str, str]) -> None:
pass
@typing.overload # noqa: F811
def __init__(self, **kwargs: str) -> None:
pass
def __init__(self, *args: typing.Any, **kwargs: str) -> None: # noqa: F811
self._dict = {} # type: typing.Dict[str, str]
self._as_list = {} # type: typing.Dict[str, typing.List[str]]
self._last_key = None # type: Optional[str]
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name: str, value: str, *, _chars_are_bytes: bool = True) -> None:
"""Adds a new value for the given key."""
if not _ABNF.field_name.fullmatch(name):
raise HTTPInputError("Invalid header name %r" % name)
if _chars_are_bytes:
if not _ABNF.field_value.fullmatch(to_unicode(value)):
# TODO: the fact we still support bytes here (contrary to type annotations)
# and still test for it should probably be changed.
raise HTTPInputError("Invalid header value %r" % value)
else:
if _FORBIDDEN_HEADER_CHARS_RE.search(value):
raise HTTPInputError("Invalid header value %r" % value)
norm_name = _normalize_header(name)
self._last_key = norm_name
if norm_name in self:
self._dict[norm_name] = (
native_str(self[norm_name]) + "," + native_str(value)
)
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name: str) -> List[str]:
"""Returns all values for the given header as a list."""
norm_name = _normalize_header(name)
return self._as_list.get(norm_name, [])
def get_all(self) -> Iterable[Tuple[str, str]]:
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, values in self._as_list.items():
for value in values:
yield (name, value)
def parse_line(self, line: str, *, _chars_are_bytes: bool = True) -> None:
r"""Updates the dictionary with a single header line.
>>> h = HTTPHeaders()
>>> h.parse_line("Content-Type: text/html")
>>> h.get('content-type')
'text/html'
>>> h.parse_line("Content-Length: 42\r\n")
>>> h.get('content-type')
'text/html'
.. versionchanged:: 6.5
Now supports lines with or without the trailing CRLF, making it possible
to pass lines from AsyncHTTPClient's header_callback directly to this method.
.. deprecated:: 6.5
In Tornado 7.0, certain deprecated features of HTTP will become errors.
Specifically, line folding and the use of LF (with CR) as a line separator
will be removed.
"""
if m := re.search(r"\r?\n$", line):
# RFC 9112 section 2.2: a recipient MAY recognize a single LF as a line
# terminator and ignore any preceding CR.
# TODO(7.0): Remove this support for LF-only line endings.
line = line[: m.start()]
if not line:
# Empty line, or the final CRLF of a header block.
return
if line[0] in HTTP_WHITESPACE:
# continuation of a multi-line header
# TODO(7.0): Remove support for line folding.
if self._last_key is None:
raise HTTPInputError("first header line cannot start with whitespace")
new_part = " " + line.strip(HTTP_WHITESPACE)
if _chars_are_bytes:
if not _ABNF.field_value.fullmatch(new_part[1:]):
raise HTTPInputError("Invalid header continuation %r" % new_part)
else:
if _FORBIDDEN_HEADER_CHARS_RE.search(new_part):
raise HTTPInputError("Invalid header value %r" % new_part)
self._as_list[self._last_key][-1] += new_part
self._dict[self._last_key] += new_part
else:
try:
name, value = line.split(":", 1)
except ValueError:
raise HTTPInputError("no colon in header line")
self.add(
name, value.strip(HTTP_WHITESPACE), _chars_are_bytes=_chars_are_bytes
)
@classmethod
def parse(cls, headers: str, *, _chars_are_bytes: bool = True) -> "HTTPHeaders":
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
.. versionchanged:: 5.1
Raises `HTTPInputError` on malformed headers instead of a
mix of `KeyError`, and `ValueError`.
"""
# _chars_are_bytes is a hack. This method is used in two places, HTTP headers (in which
# non-ascii characters are to be interpreted as latin-1) and multipart/form-data (in which
# they are to be interpreted as utf-8). For historical reasons, this method handled this by
# expecting both callers to decode the headers to strings before parsing them. This wasn't a
# problem until we started doing stricter validation of the characters allowed in HTTP
# headers (using ABNF rules defined in terms of byte values), which inadvertently started
# disallowing non-latin1 characters in multipart/form-data filenames.
#
# This method should have accepted bytes and a desired encoding, but this change is being
# introduced in a patch release that shouldn't change the API. Instead, the _chars_are_bytes
# flag decides whether to use HTTP-style ABNF validation (treating the string as bytes
# smuggled through the latin1 encoding) or to accept any non-control unicode characters
# as required by multipart/form-data. This method will change to accept bytes in a future
# release.
h = cls()
start = 0
while True:
lf = headers.find("\n", start)
if lf == -1:
h.parse_line(headers[start:], _chars_are_bytes=_chars_are_bytes)
break
line = headers[start : lf + 1]
start = lf + 1
h.parse_line(line, _chars_are_bytes=_chars_are_bytes)
return h
# MutableMapping abstract method implementations.
def __setitem__(self, name: str, value: str) -> None:
norm_name = _normalize_header(name)
self._dict[norm_name] = value
self._as_list[norm_name] = [value]
def __getitem__(self, name: str) -> str:
return self._dict[_normalize_header(name)]
def __delitem__(self, name: str) -> None:
norm_name = _normalize_header(name)
del self._dict[norm_name]
del self._as_list[norm_name]
def __len__(self) -> int:
return len(self._dict)
def __iter__(self) -> Iterator[typing.Any]:
return iter(self._dict)
def copy(self) -> "HTTPHeaders":
# defined in dict but not in MutableMapping.
return HTTPHeaders(self)
# Use our overridden copy method for the copy.copy module.
# This makes shallow copies one level deeper, but preserves
# the appearance that HTTPHeaders is a single container.
__copy__ = copy
def __str__(self) -> str:
lines = []
for name, value in self.get_all():
lines.append(f"{name}: {value}\n")
return "".join(lines)
__unicode__ = __str__
| HTTPHeaders |
python | kamyu104__LeetCode-Solutions | Python/third-maximum-number.py | {
"start": 29,
"end": 669
} | class ____(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
count = 0
top = [float("-inf")] * 3
for num in nums:
if num > top[0]:
top[0], top[1], top[2] = num, top[0], top[1]
count += 1
elif num != top[0] and num > top[1]:
top[1], top[2] = num, top[1]
count += 1
elif num != top[0] and num != top[1] and num >= top[2]:
top[2] = num
count += 1
if count < 3:
return top[0]
return top[2]
| Solution |
python | python-rapidjson__python-rapidjson | tests/test_dict_subclass.py | {
"start": 590,
"end": 884
} | class ____(Decoder):
def start_object(self):
return []
def test_objects_as_key_value_pairs():
kvp = ObjectsAsKeyValuePairsDecoder()
result = kvp('{"a": 1, "b": {"b1": 1, "b2": 2}}')
assert result == [('a', 1), ('b', [('b1', 1), ('b2', 2)])]
| ObjectsAsKeyValuePairsDecoder |
python | spack__spack | lib/spack/spack/bootstrap/_common.py | {
"start": 668,
"end": 9037
} | class ____(TypedDict, total=False):
spec: spack.spec.Spec
command: spack.util.executable.Executable
def _python_import(module: str) -> bool:
try:
importlib.import_module(module)
except ImportError:
return False
return True
def _try_import_from_store(
module: str, query_spec: Union[str, "spack.spec.Spec"], query_info: Optional[QueryInfo] = None
) -> bool:
"""Return True if the module can be imported from an already
installed spec, False otherwise.
Args:
module: Python module to be imported
query_spec: spec that may provide the module
query_info (dict or None): if a dict is passed it is populated with the
command found and the concrete spec providing it
"""
# If it is a string assume it's one of the root specs by this module
if isinstance(query_spec, str):
# We have to run as part of this python interpreter
query_spec += " ^" + spec_for_current_python()
installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
for candidate_spec in installed_specs:
# previously bootstrapped specs may not have a python-venv dependency.
if candidate_spec.dependencies("python-venv"):
python, *_ = candidate_spec.dependencies("python-venv")
else:
python, *_ = candidate_spec.dependencies("python")
# if python is installed, ask it for the layout
if python.installed:
module_paths = [
os.path.join(candidate_spec.prefix, python.package.purelib),
os.path.join(candidate_spec.prefix, python.package.platlib),
]
# otherwise search for the site-packages directory
# (clingo from binaries with truncated python-venv runtime)
else:
module_paths = glob.glob(
os.path.join(candidate_spec.prefix, "lib", "python*", "site-packages")
)
path_before = list(sys.path)
# NOTE: try module_paths first and last, last allows an existing version in path
# to be picked up and used, possibly depending on something in the store, first
# allows the bootstrap version to work when an incompatible version is in
# sys.path
orders = [module_paths + sys.path, sys.path + module_paths]
for path in orders:
sys.path = path
try:
_fix_ext_suffix(candidate_spec)
if _python_import(module):
msg = (
f"[BOOTSTRAP MODULE {module}] The installed spec "
f'"{query_spec}/{candidate_spec.dag_hash()}" '
f'provides the "{module}" Python module'
)
tty.debug(msg)
if query_info is not None:
query_info["spec"] = candidate_spec
return True
except Exception as exc: # pylint: disable=broad-except
msg = (
"unexpected error while trying to import module "
f'"{module}" from spec "{candidate_spec}" [error="{str(exc)}"]'
)
warnings.warn(msg)
else:
msg = "Spec {0} did not provide module {1}"
warnings.warn(msg.format(candidate_spec, module))
sys.path = path_before
return False
def _fix_ext_suffix(candidate_spec: "spack.spec.Spec"):
"""Fix the external suffixes of Python extensions on the fly for
platforms that may need it
Args:
candidate_spec (Spec): installed spec with a Python module
to be checked.
"""
# Here we map target families to the patterns expected
# by pristine CPython. Only architectures with known issues
# are included. Known issues:
#
# [RHEL + ppc64le]: https://github.com/spack/spack/issues/25734
#
_suffix_to_be_checked = {
"ppc64le": {
"glob": "*.cpython-*-powerpc64le-linux-gnu.so",
"re": r".cpython-[\w]*-powerpc64le-linux-gnu.so",
"fmt": r"{module}.cpython-{major}{minor}m-powerpc64le-linux-gnu.so",
}
}
# If the current architecture is not problematic return
generic_target = spack.vendor.archspec.cpu.host().family
if str(generic_target) not in _suffix_to_be_checked:
return
# If there's no EXT_SUFFIX (Python < 3.5) or the suffix matches
# the expectations, return since the package is surely good
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
if ext_suffix is None:
return
expected = _suffix_to_be_checked[str(generic_target)]
if fnmatch.fnmatch(ext_suffix, expected["glob"]):
return
# If we are here it means the current interpreter expects different names
# than pristine CPython. So:
# 1. Find what we have installed
# 2. Create symbolic links for the other names, it they're not there already
# Check if standard names are installed and if we have to create
# link for this interpreter
standard_extensions = fs.find(candidate_spec.prefix, expected["glob"])
link_names = [re.sub(expected["re"], ext_suffix, s) for s in standard_extensions]
for file_name, link_name in zip(standard_extensions, link_names):
if os.path.exists(link_name):
continue
os.symlink(file_name, link_name)
# Check if this interpreter installed something and we have to create
# links for a standard CPython interpreter
non_standard_extensions = fs.find(candidate_spec.prefix, "*" + ext_suffix)
for abs_path in non_standard_extensions:
directory, filename = os.path.split(abs_path)
module = filename.split(".")[0]
link_name = os.path.join(
directory,
expected["fmt"].format(
module=module, major=sys.version_info[0], minor=sys.version_info[1]
),
)
if os.path.exists(link_name):
continue
os.symlink(abs_path, link_name)
def _executables_in_store(
executables: Sequence[str],
query_spec: Union["spack.spec.Spec", str],
query_info: Optional[QueryInfo] = None,
) -> bool:
"""Return True if at least one of the executables can be retrieved from
a spec in store, False otherwise.
The different executables must provide the same functionality and are
"alternate" to each other, i.e. the function will exit True on the first
executable found.
Args:
executables: list of executables to be searched
query_spec: spec that may provide the executable
query_info (dict or None): if a dict is passed it is populated with the
command found and the concrete spec providing it
"""
executables_str = ", ".join(executables)
msg = "[BOOTSTRAP EXECUTABLES {0}] Try installed specs with query '{1}'"
tty.debug(msg.format(executables_str, query_spec))
installed_specs = spack.store.STORE.db.query(query_spec, installed=True)
if installed_specs:
for concrete_spec in installed_specs:
bin_dir = concrete_spec.prefix.bin
# IF we have a "bin" directory and it contains
# the executables we are looking for
if (
os.path.exists(bin_dir)
and os.path.isdir(bin_dir)
and spack.util.executable.which_string(*executables, path=bin_dir)
):
spack.util.environment.path_put_first("PATH", [bin_dir])
if query_info is not None:
query_info["command"] = spack.util.executable.which(
*executables, path=bin_dir, required=True
)
query_info["spec"] = concrete_spec
return True
return False
def _root_spec(spec_str: str) -> str:
"""Add a proper compiler and target to a spec used during bootstrapping.
Args:
spec_str: spec to be bootstrapped. Must be without compiler and target.
"""
# Add a compiler and platform requirement to the root spec.
platform = str(spack.platforms.host())
spec_str += f" platform={platform}"
target = spack.vendor.archspec.cpu.host().family
spec_str += f" target={target}"
tty.debug(f"[BOOTSTRAP ROOT SPEC] {spec_str}")
return spec_str
| QueryInfo |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 86416,
"end": 88081
} | class ____:
def test_close(self, tree_and_closers):
tree, closers = tree_and_closers
assert not any(closer.closed for closer in closers.values())
tree.close()
assert all(closer.closed for closer in closers.values())
tree.close() # should not error
def test_context_manager(self, tree_and_closers):
tree, closers = tree_and_closers
assert not any(closer.closed for closer in closers.values())
with tree:
pass
assert all(closer.closed for closer in closers.values())
def test_close_child(self, tree_and_closers):
tree, closers = tree_and_closers
assert not any(closer.closed for closer in closers.values())
tree["child"].close() # should only close descendants
assert not closers["/"].closed
assert closers["/child"].closed
assert closers["/child/grandchild"].closed
def test_close_datasetview(self, tree_and_closers):
tree, _ = tree_and_closers
with pytest.raises(
AttributeError,
match=re.escape(
r"cannot close a DatasetView(). Close the associated DataTree node instead"
),
):
tree.dataset.close()
with pytest.raises(
AttributeError, match=re.escape(r"cannot modify a DatasetView()")
):
tree.dataset.set_close(None)
def test_close_dataset(self, tree_and_closers):
tree, closers = tree_and_closers
ds = tree.to_dataset() # should discard closers
ds.close()
assert not closers["/"].closed
# with tree:
# pass
@requires_dask
| TestClose |
python | getsentry__sentry | src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py | {
"start": 543,
"end": 1062
} | class ____:
"""Pattern for matching function and module to ignore SDK crashes.
Use "*" as a wildcard to match any value.
Examples:
- FunctionAndModulePattern("specific.module", "invoke") - matches only "invoke" in "specific.module"
- FunctionAndModulePattern("*", "invoke") - matches "invoke" in any module
- FunctionAndModulePattern("specific.module", "*") - matches any function in "specific.module"
"""
module_pattern: str
function_pattern: str
@dataclass
| FunctionAndModulePattern |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 49292,
"end": 49915
} | class ____(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
| TestMeshGrid |
python | walkccc__LeetCode | solutions/3373. Maximize the Number of Target Nodes After Connecting Trees II/3373.py | {
"start": 0,
"end": 1291
} | class ____:
def maxTargetNodes(
self,
edges1: list[list[int]],
edges2: list[list[int]]
) -> list[int]:
n = len(edges1) + 1
m = len(edges2) + 1
graph1 = self._buildGraph(edges1)
graph2 = self._buildGraph(edges2)
parity1 = [False] * n
parity2 = [False] * m # placeholder (parity2 is not used)
even1 = self._dfs(graph1, 0, -1, parity1, True)
even2 = self._dfs(graph2, 0, -1, parity2, True)
odd1 = n - even1
odd2 = m - even2
# Can connect the current node in tree1 to either an even node or an odd
# node in tree2.
return [(even1 if parity1[i] else odd1) + max(even2, odd2)
for i in range(n)]
def _dfs(
self,
graph: list[list[int]],
u: int,
prev: int,
parity: list[bool],
isEven: bool
) -> int:
"""
Returns the number of nodes that can be reached from u with even steps.
"""
res = 1 if isEven else 0
parity[u] = isEven
for v in graph[u]:
if v != prev:
res += self._dfs(graph, v, u, parity, not isEven)
return res
def _buildGraph(self, edges: list[list[int]]) -> list[list[int]]:
graph = [[] for _ in range(len(edges) + 1)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
return graph
| Solution |
python | paramiko__paramiko | paramiko/client.py | {
"start": 33926,
"end": 34337
} | class ____(MissingHostKeyPolicy):
"""
Policy for logging a Python-style warning for an unknown host key, but
accepting it. This is used by `.SSHClient`.
"""
def missing_host_key(self, client, hostname, key):
warnings.warn(
"Unknown {} host key for {}: {}".format(
key.get_name(), hostname, hexlify(key.get_fingerprint())
)
)
| WarningPolicy |
python | pypa__hatch | tests/project/test_config.py | {
"start": 3990,
"end": 5002
} | class ____:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.env.collectors` must be a table"):
_ = ProjectConfig(isolation, {"env": {"collectors": 9000}}).env_collectors
def test_collector_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.env.collectors.foo` must be a table"):
_ = ProjectConfig(isolation, {"env": {"collectors": {"foo": 9000}}}).env_collectors
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {})
assert project_config.env_collectors == project_config.env_collectors == {"default": {}}
def test_defined(self, isolation):
project_config = ProjectConfig(isolation, {"env": {"collectors": {"foo": {"bar": {"baz": 9000}}}}})
assert project_config.env_collectors == {"default": {}, "foo": {"bar": {"baz": 9000}}}
assert list(project_config.env_collectors) == ["default", "foo"]
| TestEnvCollectors |
python | ray-project__ray | python/ray/dag/compiled_dag_node.py | {
"start": 29767,
"end": 143800
} | class ____:
"""Experimental class for accelerated execution.
This class should not be called directly. Instead, create
a ray.dag and call experimental_compile().
See REP https://github.com/ray-project/enhancements/pull/48 for more
information.
"""
@ray.remote(num_cpus=0)
class DAGDriverProxyActor:
"""
To support the driver as a reader, the output writer needs to be able to invoke
remote functions on the driver. This is necessary so that the output writer can
create a reader ref on the driver node, and later potentially create a larger
reader ref on the driver node if the channel backing store needs to be resized.
However, remote functions cannot be invoked on the driver.
A Compiled Graph creates an actor from this class when the DAG is initialized.
The actor is on the same node as the driver. This class has an empty
implementation, though it serves as a way for the output writer to invoke remote
functions on the driver node.
"""
pass
def __init__(
self,
submit_timeout: Optional[float] = None,
buffer_size_bytes: Optional[int] = None,
enable_asyncio: bool = False,
max_inflight_executions: Optional[int] = None,
max_buffered_results: Optional[int] = None,
overlap_gpu_communication: Optional[bool] = None,
default_communicator: Optional[Union[Communicator, str]] = "create",
):
"""
Args:
submit_timeout: The maximum time in seconds to wait for execute() calls.
None means using default timeout (DAGContext.submit_timeout),
0 means immediate timeout (immediate success or timeout without
blocking), -1 means infinite timeout (block indefinitely).
buffer_size_bytes: The initial buffer size in bytes for messages
that can be passed between tasks in the DAG. The buffers will
be automatically resized if larger messages are written to the
channel.
enable_asyncio: Whether to enable asyncio. If enabled, caller must
be running in an event loop and must use `execute_async` to
invoke the DAG. Otherwise, the caller should use `execute` to
invoke the DAG.
max_inflight_executions: The maximum number of in-flight executions that
can be submitted via `execute` or `execute_async` before consuming
the output using `ray.get()`. If the caller submits more executions,
`RayCgraphCapacityExceeded` is raised.
max_buffered_results: The maximum number of results that can be
buffered at the driver. If more results are buffered,
`RayCgraphCapacityExceeded` is raised. Note that
when result corresponding to an execution is retrieved
(by calling `ray.get()` on a `CompiledDAGRef` or
`CompiledDAGRef` or await on a `CompiledDAGFuture), results
corresponding to earlier executions that have not been retrieved
yet are buffered.
overlap_gpu_communication: (experimental) Whether to overlap GPU
communication with computation during DAG execution. If True, the
communication and computation can be overlapped, which can improve
the performance of the DAG execution. If None, the default value
will be used.
_default_communicator: The default communicator to use to transfer
tensors. Three types of values are valid. (1) Communicator:
For p2p operations, this is the default communicator
to use for nodes annotated with `with_tensor_transport()` and when
shared memory is not the desired option (e.g., when transport="accelerator",
or when transport="auto" for communication between two different GPUs).
For collective operations, this is the default communicator to use
when a custom communicator is not specified.
(2) "create": for each collective operation without a custom communicator
specified, a communicator is created and initialized on its involved actors,
or an already created communicator is reused if the set of actors is the same.
For all p2p operations without a custom communicator specified, it reuses
an already created collective communicator if the p2p actors are a subset.
Otherwise, a new communicator is created.
(3) None: a ValueError will be thrown if a custom communicator is not specified.
Returns:
Channel: A wrapper around ray.ObjectRef.
"""
from ray.dag import DAGContext
ctx = DAGContext.get_current()
self._enable_asyncio: bool = enable_asyncio
self._fut_queue = asyncio.Queue()
self._max_inflight_executions = max_inflight_executions
if self._max_inflight_executions is None:
self._max_inflight_executions = ctx.max_inflight_executions
self._max_buffered_results = max_buffered_results
if self._max_buffered_results is None:
self._max_buffered_results = ctx.max_buffered_results
self._dag_id = uuid.uuid4().hex
self._submit_timeout: Optional[float] = submit_timeout
if self._submit_timeout is None:
self._submit_timeout = ctx.submit_timeout
self._get_timeout: Optional[float] = ctx.get_timeout
self._buffer_size_bytes: Optional[int] = buffer_size_bytes
if self._buffer_size_bytes is None:
self._buffer_size_bytes = ctx.buffer_size_bytes
self._overlap_gpu_communication: Optional[bool] = overlap_gpu_communication
if self._overlap_gpu_communication is None:
self._overlap_gpu_communication = ctx.overlap_gpu_communication
self._create_default_communicator = False
if isinstance(default_communicator, str):
if default_communicator == "create":
self._create_default_communicator = True
default_communicator = None
else:
raise ValueError(
"The only allowed string for default_communicator is 'create', "
f"got {default_communicator}"
)
elif default_communicator is not None and not isinstance(
default_communicator, Communicator
):
raise ValueError(
"The default_communicator must be None, a string, or a Communicator, "
f"got {type(default_communicator)}"
)
self._default_communicator: Optional[Communicator] = default_communicator
# Dict from passed-in communicator to set of type hints that refer to it.
self._communicator_to_type_hints: Dict[
Communicator,
Set["ray.experimental.channel.torch_tensor_type.TorchTensorType"],
] = defaultdict(set)
# Dict from set of actors to created communicator ID.
# These communicators are created by Compiled Graph, rather than passed in.
# Communicators are only created when self._create_default_communicator is True.
self._actors_to_created_communicator_id: Dict[
Tuple["ray.actor.ActorHandle"], str
] = {}
# Set of actors involved in P2P communication using an unresolved communicator.
self._p2p_actors_with_unresolved_communicators: Set[
"ray.actor.ActorHandle"
] = set()
# Set of DAG nodes involved in P2P communication using an unresolved communicator.
self._p2p_dag_nodes_with_unresolved_communicators: Set[
"ray.dag.DAGNode"
] = set()
# Set of collective operations using an unresolved communicator.
self._collective_ops_with_unresolved_communicators: Set[
"ray.dag.collective_node._CollectiveOperation"
] = set()
self._default_type_hint: ChannelOutputType = SharedMemoryType(
buffer_size_bytes=self._buffer_size_bytes,
# We conservatively set num_shm_buffers to _max_inflight_executions.
# It means that the DAG can be underutilized, but it guarantees there's
# no false positive timeouts.
num_shm_buffers=self._max_inflight_executions,
)
if not isinstance(self._buffer_size_bytes, int) or self._buffer_size_bytes <= 0:
raise ValueError(
"`buffer_size_bytes` must be a positive integer, found "
f"{self._buffer_size_bytes}"
)
# Used to ensure that the future returned to the
# caller corresponds to the correct DAG output. I.e.
# order of futures added to fut_queue should match the
# order of inputs written to the DAG.
self._dag_submission_lock = asyncio.Lock()
# idx -> CompiledTask.
self.idx_to_task: Dict[int, "CompiledTask"] = {}
# DAGNode -> idx.
self.dag_node_to_idx: Dict["ray.dag.DAGNode", int] = {}
# idx counter.
self.counter: int = 0
# Attributes that are set during preprocessing.
# Preprocessing identifies the input node and output node.
self.input_task_idx: Optional[int] = None
self.output_task_idx: Optional[int] = None
# List of task indices that are input attribute nodes.
self.input_attr_task_idxs: List[int] = []
# Denotes whether execute/execute_async returns a list of refs/futures.
self._returns_list: bool = False
# Number of expected positional args and kwargs that may be passed to
# dag.execute.
self._input_num_positional_args: Optional[int] = None
self._input_kwargs: Tuple[str, ...] = None
# Cached attributes that are set during compilation.
self.dag_input_channels: Optional[List[ChannelInterface]] = None
self.dag_output_channels: Optional[List[ChannelInterface]] = None
self._dag_submitter: Optional[WriterInterface] = None
self._dag_output_fetcher: Optional[ReaderInterface] = None
# ObjectRef for each worker's task. The task is an infinite loop that
# repeatedly executes the method specified in the DAG.
self.worker_task_refs: Dict["ray.actor.ActorHandle", "ray.ObjectRef"] = {}
self.actor_to_tasks: Dict[
"ray.actor.ActorHandle", List["CompiledTask"]
] = defaultdict(list)
# Mapping from actor handle to its GPU IDs.
# This is used for type hint resolution for with_tensor_transport("auto").
self.actor_to_gpu_ids: Dict["ray.actor.ActorHandle", List[str]] = {}
self.actor_to_executable_tasks: Dict[
"ray.actor.ActorHandle", List["ExecutableTask"]
] = {}
# Mapping from the actor handle to the execution schedule which is a list
# of operations to be executed.
self.actor_to_execution_schedule: Dict[
"ray.actor.ActorHandle", List[_DAGNodeOperation]
] = defaultdict(list)
# Mapping from the actor handle to the node ID that the actor is on.
# A None actor handle means the actor is the driver.
self.actor_to_node_id: Dict[Optional["ray.actor.ActorHandle"], str] = {}
# The index of the current execution. It is incremented each time
# the DAG is executed.
self._execution_index: int = -1
# The maximum index of finished executions.
# All results with higher indexes have not been generated yet.
self._max_finished_execution_index: int = -1
# execution_index -> {channel_index -> result}
self._result_buffer: Dict[int, Dict[int, Any]] = defaultdict(dict)
# channel to possible inner channel
self._channel_dict: Dict[ChannelInterface, ChannelInterface] = {}
def _create_proxy_actor() -> "ray.actor.ActorHandle":
# Creates the driver actor on the same node as the driver.
#
# To support the driver as a reader, the output writer needs to be able to
# invoke remote functions on the driver (e.g., to create the reader ref, to
# create a reader ref for a larger object when the channel backing store is
# resized, etc.). The driver actor serves as a way for the output writer
# to invoke remote functions on the driver node.
return CompiledDAG.DAGDriverProxyActor.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(
ray.get_runtime_context().get_node_id(), soft=False
)
).remote()
self._proxy_actor = _create_proxy_actor()
# Set to True when `teardown` API is called.
self._is_teardown = False
# Execution index to set of channel indices for CompiledDAGRefs
# or CompiledDAGFuture whose destructor has been called. A "None"
# channel index means there is only one channel, and its destructor
# has been called.
self._destructed_ref_idxs: Dict[int, Set[Optional[int]]] = dict()
# Execution index to set of channel indices for CompiledDAGRefs
# or CompiledDAGFuture whose get() has been called. A "None"
# channel index means there is only one channel, and its get()
# has been called.
self._got_ref_idxs: Dict[int, Set[Optional[int]]] = dict()
@property
def is_teardown(self) -> bool:
    """Whether the `teardown` API has been called on this DAG."""
    torn_down = self._is_teardown
    return torn_down
def get_id(self) -> str:
    """Return the unique ID of this compiled DAG."""
    dag_id = self._dag_id
    return dag_id
def __str__(self) -> str:
    """Short, log-friendly description of this compiled DAG."""
    return "CompiledDAG({})".format(self._dag_id)
def _add_node(self, node: "ray.dag.DAGNode") -> None:
    """Register ``node`` under the next free index.

    Records the node in both directions (index -> CompiledTask and
    node -> index) before bumping the counter, so a failure while
    constructing the task leaves the counter untouched.
    """
    node_idx = self.counter
    self.idx_to_task[node_idx] = CompiledTask(node_idx, node)
    self.dag_node_to_idx[node] = node_idx
    self.counter = node_idx + 1
def _preprocess(self) -> None:
    """Before compiling, preprocess the DAG to build an index from task to
    upstream and downstream tasks, and to set the input and output node(s)
    of the DAG.
    This function is idempotent.
    """
    from ray.dag import (
        ClassMethodNode,
        CollectiveOutputNode,
        DAGNode,
        FunctionNode,
        InputAttributeNode,
        InputNode,
        MultiOutputNode,
    )
    # Reset the input/output indices so repeated calls stay idempotent.
    self.input_task_idx, self.output_task_idx = None, None
    input_attributes: Set[str] = set()
    # Find the input node and input attribute nodes in the DAG.
    for idx, task in self.idx_to_task.items():
        if isinstance(task.dag_node, InputNode):
            assert self.input_task_idx is None, "More than one InputNode found"
            self.input_task_idx = idx
            # handle_unused_attributes:
            # Save input attributes in a set.
            input_node = task.dag_node
            input_attributes.update(input_node.input_attribute_nodes.keys())
        elif isinstance(task.dag_node, InputAttributeNode):
            self.input_attr_task_idxs.append(idx)
    # Find the (multi-)output node to the DAG.
    # An output node is one with no downstream tasks that is marked as a
    # cgraph output node.
    for idx, task in self.idx_to_task.items():
        if idx == self.input_task_idx or isinstance(
            task.dag_node, InputAttributeNode
        ):
            continue
        if (
            len(task.downstream_task_idxs) == 0
            and task.dag_node.is_cgraph_output_node
        ):
            assert self.output_task_idx is None, "More than one output node found"
            self.output_task_idx = idx
    assert self.output_task_idx is not None
    output_node = self.idx_to_task[self.output_task_idx].dag_node
    # Add an MultiOutputNode to the end of the DAG if it's not already there.
    if not isinstance(output_node, MultiOutputNode):
        output_node = MultiOutputNode([output_node])
        self._add_node(output_node)
        self.output_task_idx = self.dag_node_to_idx[output_node]
    else:
        # The user supplied a MultiOutputNode explicitly, so results are
        # returned as a list rather than a single value.
        self._returns_list = True
    # TODO: Support no-input DAGs (use an empty object to signal).
    if self.input_task_idx is None:
        raise NotImplementedError(
            "Compiled DAGs currently require exactly one InputNode"
        )
    # Whether the DAG binds directly to the InputNode(), versus binding to
    # a positional arg or kwarg of the input. For example, a.foo.bind(inp)
    # instead of a.foo.bind(inp[0]) or a.foo.bind(inp.key).
    direct_input: Optional[bool] = None
    # Collect the set of InputNode keys bound to DAG node args.
    input_positional_args: Set[int] = set()
    input_kwargs: Set[str] = set()
    # Set of tasks with annotation of with_tensor_transport("auto").
    # These only correspond to ClassMethodNodes, but not InputNodes
    # or InputAttributeNodes.
    auto_transport_tasks: Set["CompiledTask"] = set()
    # For each task node, set its upstream and downstream task nodes.
    # Also collect the set of tasks that produce torch.tensors.
    for task_idx, task in self.idx_to_task.items():
        dag_node = task.dag_node
        # Only Input/InputAttribute/MultiOutput/ClassMethod nodes are
        # supported; reject anything else with a targeted error.
        if not (
            isinstance(dag_node, InputNode)
            or isinstance(dag_node, InputAttributeNode)
            or isinstance(dag_node, MultiOutputNode)
            or isinstance(dag_node, ClassMethodNode)
        ):
            if isinstance(dag_node, FunctionNode):
                # TODO(swang): Support non-actor tasks.
                raise NotImplementedError(
                    "Compiled DAGs currently only support actor method nodes"
                )
            else:
                raise ValueError(f"Found unsupported node of type {type(dag_node)}")
        if isinstance(dag_node, ClassMethodNode) and dag_node.is_class_method_call:
            actor_handle = dag_node._get_actor_handle()
            if actor_handle is None:
                raise ValueError(
                    "Compiled DAGs can only bind methods to an actor "
                    "that is already created with Actor.remote()"
                )
            # Cache the GPU IDs per actor; used later to resolve
            # "auto" transport type hints.
            if actor_handle not in self.actor_to_gpu_ids:
                self.actor_to_gpu_ids[actor_handle] = CompiledDAG._get_gpu_ids(
                    actor_handle
                )
            if isinstance(dag_node.type_hint, AutoTransportType):
                auto_transport_tasks.add(task)
            # Collect actors for accelerator P2P methods.
            if dag_node.type_hint.requires_accelerator():
                self._track_communicator_usage(dag_node, {actor_handle})
            # Collect accelerator collective operations.
            if isinstance(dag_node, CollectiveOutputNode):
                self._track_communicator_usage(
                    dag_node,
                    set(dag_node._collective_op.actor_handles),
                    collective_op=True,
                )
                assert not self._overlap_gpu_communication, (
                    "Currently, the overlap_gpu_communication option is not "
                    "supported for accelerator collective operations. Please set "
                    "overlap_gpu_communication=False."
                )
        elif isinstance(dag_node, InputNode) or isinstance(
            dag_node, InputAttributeNode
        ):
            if dag_node.type_hint.requires_accelerator():
                raise ValueError(
                    "DAG inputs cannot be transferred via accelerator because "
                    "the driver cannot participate in the communicator group"
                )
            if isinstance(dag_node.type_hint, AutoTransportType):
                # Currently driver on GPU is not supported, so we always
                # use shared memory to transfer tensors.
                dag_node.type_hint = TorchTensorType(
                    device=dag_node.type_hint.device
                )
        if type(dag_node.type_hint) is ChannelOutputType:
            # No type hint specified by the user. Replace
            # with the default type hint for this DAG.
            dag_node.type_hint = self._default_type_hint
        for _, val in task.kwargs.items():
            if isinstance(val, DAGNode):
                raise ValueError(
                    "Compiled DAG currently does not support binding to "
                    "other DAG nodes as kwargs"
                )
        for _, arg in enumerate(task.args):
            if not isinstance(arg, DAGNode):
                continue
            upstream_node_idx = self.dag_node_to_idx[arg]
            upstream_task = self.idx_to_task[upstream_node_idx]
            downstream_actor_handle = None
            if (
                isinstance(dag_node, ClassMethodNode)
                and dag_node.is_class_method_call
            ):
                downstream_actor_handle = dag_node._get_actor_handle()
            # Add upstream node as the argument nodes of this task, whose
            # type hints may be updated when resolved lazily.
            task.arg_nodes.append(upstream_task.dag_node)
            if isinstance(upstream_task.dag_node, InputAttributeNode):
                # Record all of the keys used to index the InputNode.
                # During execution, we will check that the user provides
                # the same args and kwargs.
                if isinstance(upstream_task.dag_node.key, int):
                    input_positional_args.add(upstream_task.dag_node.key)
                elif isinstance(upstream_task.dag_node.key, str):
                    input_kwargs.add(upstream_task.dag_node.key)
                else:
                    raise ValueError(
                        "InputNode() can only be indexed using int "
                        "for positional args or str for kwargs."
                    )
                if direct_input is not None and direct_input:
                    raise ValueError(
                        "All tasks must either use InputNode() "
                        "directly, or they must index to specific args or "
                        "kwargs."
                    )
                direct_input = False
                # If the upstream node is an InputAttributeNode, treat the
                # DAG's input node as the actual upstream node
                upstream_task = self.idx_to_task[self.input_task_idx]
            elif isinstance(upstream_task.dag_node, InputNode):
                if direct_input is not None and not direct_input:
                    raise ValueError(
                        "All tasks must either use InputNode() directly, "
                        "or they must index to specific args or kwargs."
                    )
                direct_input = True
            upstream_task.downstream_task_idxs[task_idx] = downstream_actor_handle
            if upstream_task.dag_node.type_hint.requires_accelerator():
                # Here we are processing the args of the DAGNode, so track
                # downstream actors only, upstream actor is already tracked
                # when processing the DAGNode itself.
                self._track_communicator_usage(
                    upstream_task.dag_node,
                    {downstream_actor_handle},
                )
    # Check that all specified input attributes, e.g., InputNode()["x"],
    # are used in the DAG.
    _check_unused_dag_input_attributes(output_node, input_attributes)
    self._check_leaf_nodes()
    self._resolve_auto_transport(auto_transport_tasks)
    self._init_communicators()
    # Record how many positional args and which kwargs the DAG's input
    # expects, so execute() calls can be validated.
    if direct_input:
        self._input_num_positional_args = 1
    elif not input_positional_args:
        self._input_num_positional_args = 0
    else:
        self._input_num_positional_args = max(input_positional_args) + 1
    self._input_kwargs = tuple(input_kwargs)
def _init_communicators(self) -> None:
    """
    Initialize communicators for the DAG.

    Resolution happens in three passes:
    1. Initialize communicators explicitly passed in by the user
       (collected in ``self._communicator_to_type_hints``).
    2. Create communicators for collective operations that still need
       one, reusing an already-created communicator for the same actors.
    3. Create (or reuse) a single communicator for P2P operations.

    Raises:
        ValueError: If a collective operation needs a new communicator
            but default communicator creation is disallowed.
    """
    # First, initialize communicators that are passed in by the user.
    for communicator, type_hints in self._communicator_to_type_hints.items():
        communicator_id = _init_communicator(
            communicator.get_actor_handles(),
            communicator,
            self._overlap_gpu_communication,
        )
        for type_hint in type_hints:
            type_hint.set_communicator_id(communicator_id)
    # Second, get registered accelerator context if any.
    accelerator_module_name = AcceleratorContext.get().module_name
    accelerator_communicator_cls = AcceleratorContext.get().communicator_cls
    # Then, create communicators for collective operations.
    # Reuse an already created communicator for the same set of actors.
    for collective_op in self._collective_ops_with_unresolved_communicators:
        if not self._create_default_communicator:
            raise ValueError(
                "Communicator creation is not allowed for collective operations."
            )
        # using tuple to preserve the order of actors for collective operations
        actors = tuple(collective_op.actor_handles)
        if actors in self._actors_to_created_communicator_id:
            communicator_id = self._actors_to_created_communicator_id[actors]
        else:
            communicator_id = _init_communicator(
                list(actors),
                None,
                self._overlap_gpu_communication,
                accelerator_module_name,
                accelerator_communicator_cls,
            )
            self._actors_to_created_communicator_id[actors] = communicator_id
        collective_op.type_hint.set_communicator_id(communicator_id)
    # Finally, create a communicator for P2P operations.
    # Reuse an already created collective op communicator when p2p actors
    # are a subset of the actors in the collective op communicator.
    p2p_communicator_id = None
    if self._p2p_actors_with_unresolved_communicators:
        for (
            actors,
            communicator_id,
        ) in self._actors_to_created_communicator_id.items():
            if self._p2p_actors_with_unresolved_communicators.issubset(actors):
                p2p_communicator_id = communicator_id
                break
        if p2p_communicator_id is None:
            p2p_communicator_id = _init_communicator(
                list(self._p2p_actors_with_unresolved_communicators),
                None,
                self._overlap_gpu_communication,
                accelerator_module_name,
                accelerator_communicator_cls,
            )
        # Point every unresolved P2P type hint at the shared communicator.
        for dag_node in self._p2p_dag_nodes_with_unresolved_communicators:
            dag_node.type_hint.set_communicator_id(p2p_communicator_id)
def _track_communicator_usage(
    self,
    dag_node: "ray.dag.DAGNode",
    actors: Set["ray.actor.ActorHandle"],
    collective_op: bool = False,
) -> None:
    """
    Track the usage of a communicator.
    This method first determines the communicator to use: if a custom
    communicator is specified, use it; if not and a default communicator
    is available, use it; otherwise, it records necessary information to
    create a new communicator later.
    This method also performs validation checks on the passed-in communicator.
    Args:
        dag_node: The DAG node that uses the communicator, this is the node
            that has the `with_tensor_transport()` type hint for p2p communication,
            or a `CollectiveOutputNode` for collective operations.
        actors: The full or partial set of actors that use the communicator.
            This method should be called one or multiple times so that all actors
            of the communicator are tracked.
        collective_op: Whether the communicator is used for a collective operation.
    Raises:
        ValueError: If the driver (a None actor handle) is among ``actors``,
            if no communicator is specified and none may be created, or if
            the passed-in communicator's actor set does not match the
            operation's actors.
    """
    # A None entry represents the driver, which cannot own a communicator.
    if None in actors:
        raise ValueError("Driver cannot participate in the communicator group.")
    if collective_op:
        type_hint = dag_node._collective_op.type_hint
    else:
        type_hint = dag_node.type_hint
    communicator = type_hint.get_custom_communicator()
    if communicator is None:
        # No custom communicator: fall back to the default one, erroring
        # out when neither a default exists nor creation is allowed.
        if (
            self._default_communicator is None
            and not self._create_default_communicator
        ):
            if dag_node._original_type_hint is not None:
                assert isinstance(dag_node._original_type_hint, AutoTransportType)
                raise ValueError(
                    f"with_tensor_transport(transport='auto') is used for DAGNode {dag_node}, "
                    "This requires specifying a default communicator or 'create' for "
                    "_default_communicator when calling experimental_compile()."
                )
            raise ValueError(
                f"DAGNode {dag_node} has no custom communicator specified. "
                "Please specify a custom communicator for the DAGNode using "
                "`with_tensor_transport()`, or specify a communicator or 'create' for "
                "_default_communicator when calling experimental_compile()."
            )
        communicator = self._default_communicator
    if communicator is None:
        # Still unresolved: remember the op/actors so _init_communicators
        # can create a communicator later.
        if collective_op:
            self._collective_ops_with_unresolved_communicators.add(
                dag_node._collective_op
            )
        else:
            self._p2p_dag_nodes_with_unresolved_communicators.add(dag_node)
            self._p2p_actors_with_unresolved_communicators.update(actors)
    else:
        # Validate the resolved communicator against the operation's actors.
        if collective_op:
            if set(communicator.get_actor_handles()) != actors:
                raise ValueError(
                    "The passed-in communicator must have the same set "
                    "of actors as the collective operation. "
                    f"The passed-in communicator has actors {communicator.get_actor_handles()} "
                    f"while the collective operation has actors {actors}."
                )
        else:
            if not actors.issubset(set(communicator.get_actor_handles())):
                raise ValueError(
                    "The passed-in communicator must include all of the actors "
                    "used in the P2P operation. "
                    f"The passed-in communicator has actors {communicator.get_actor_handles()} "
                    f"while the P2P operation has actors {actors}."
                )
        self._communicator_to_type_hints[communicator].add(type_hint)
def _resolve_auto_transport(
    self,
    auto_transport_tasks: Set["CompiledTask"],
) -> None:
    """
    Resolve the auto transport type hint for the DAG.

    Args:
        auto_transport_tasks: Tasks annotated with
            ``with_tensor_transport("auto")`` whose transport still needs
            to be resolved to a concrete channel type.
    """
    # Resolution is based on which actors have GPUs (collected during
    # _preprocess into self.actor_to_gpu_ids).
    type_hint_resolver = TypeHintResolver(self.actor_to_gpu_ids)
    # Resolve AutoChannelType type hints and track the actors that use accelerator.
    # This is needed so that the communicator group can be initialized for
    # these actors that use accelerator.
    for task in auto_transport_tasks:
        writer = task.dag_node._get_actor_handle()
        readers = task.downstream_task_idxs.values()
        writer_and_node = (writer, self._get_node_id(writer))
        reader_and_node_list = [
            (reader, self._get_node_id(reader)) for reader in readers
        ]
        # Update the type hint to the resolved one. This is needed because
        # the resolved type hint's `register_custom_serializer` will be called
        # in preparation for channel I/O.
        task.dag_node.type_hint = type_hint_resolver.resolve(
            task.dag_node.type_hint,
            writer_and_node,
            reader_and_node_list,
        )
        if task.dag_node.type_hint.requires_accelerator():
            # The resolved transport uses an accelerator, so both the
            # writer and all readers must join a communicator group.
            self._track_communicator_usage(
                task.dag_node,
                set(readers).union({writer}),
            )
def _check_leaf_nodes(self) -> None:
    """
    Check if there are leaf nodes in the DAG and raise an error if there are.

    A leaf node is a ClassMethodNode with no downstream tasks that is not
    an output node. Leaf nodes are rejected because an exception raised by
    a leaf node would never be propagated back to the driver.

    Raises:
        ValueError: If the DAG contains one or more leaf nodes.
    """
    from ray.dag import (
        ClassMethodNode,
        DAGNode,
    )
    leaf_nodes: List[DAGNode] = []
    for _, task in self.idx_to_task.items():
        if not isinstance(task.dag_node, ClassMethodNode):
            continue
        if (
            len(task.downstream_task_idxs) == 0
            and not task.dag_node.is_cgraph_output_node
        ):
            leaf_nodes.append(task.dag_node)
    # Leaf nodes are not allowed because the exception thrown by the leaf
    # node will not be propagated to the driver.
    if len(leaf_nodes) != 0:
        # Fixed message: previously read "to the the MultiOutputNode".
        raise ValueError(
            "Compiled DAG doesn't support leaf nodes, i.e., nodes that don't have "
            "downstream nodes and are not output nodes. There are "
            f"{len(leaf_nodes)} leaf nodes in the DAG. Please add the outputs of "
            f"{[leaf_node.get_method_name() for leaf_node in leaf_nodes]} to "
            "the MultiOutputNode."
        )
@staticmethod
def _get_gpu_ids(actor_handle: "ray.actor.ActorHandle") -> List[str]:
    """Fetch the GPU IDs assigned to the given actor.

    Runs a remote call on the actor to read its runtime-context
    accelerator IDs, then returns the "GPU" entry (empty list if the
    actor has no GPUs).
    """
    ids_by_resource = ray.get(
        actor_handle.__ray_call__.remote(
            lambda self: ray.get_runtime_context().get_accelerator_ids()
        )
    )
    return ids_by_resource.get("GPU", [])
def _get_node_id(self, actor_handle: Optional["ray.actor.ActorHandle"]) -> str:
    """Return (and cache) the ID of the node that ``actor_handle`` is on.

    Args:
        actor_handle: The actor handle, or None if the actor handle is
            the driver.

    Returns:
        The node ID of the actor handle or driver.
    """
    cached = self.actor_to_node_id.get(actor_handle)
    if cached is not None:
        return cached
    if actor_handle == self._proxy_actor or actor_handle is None:
        # The proxy actor is created on the driver's node, so both the
        # driver and the proxy resolve to the local node.
        node_id = ray.get_runtime_context().get_node_id()
    else:
        node_id = ray.get(
            actor_handle.__ray_call__.remote(
                lambda self: ray.get_runtime_context().get_node_id()
            )
        )
    self.actor_to_node_id[actor_handle] = node_id
    return node_id
def _get_or_compile(
    self,
) -> None:
    """Compile an execution path. This allocates channels for adjacent
    tasks to send/receive values. An infinite task is submitted to each
    actor in the DAG that repeatedly receives from input channel(s) and
    sends to output channel(s).
    This function is idempotent and will cache the previously allocated
    channels. After calling this function, _dag_submitter and
    _dag_output_fetcher will be set and can be used to invoke and fetch
    outputs for the DAG.

    Raises:
        ValueError: If a task takes no DAGNode input, or if deadlock
            detection is enabled and reports a potential deadlock.
    """
    from ray.dag import (
        ClassMethodNode,
        DAGNode,
        InputAttributeNode,
        InputNode,
        MultiOutputNode,
    )
    if self.input_task_idx is None:
        self._preprocess()
    assert self.input_task_idx is not None
    if self._dag_submitter is not None:
        # Already compiled; nothing to do (idempotency).
        assert self._dag_output_fetcher is not None
        return
    frontier = [self.input_task_idx]
    visited = set()
    # Create output buffers. This loop does a breadth-first search through the DAG.
    while frontier:
        cur_idx = frontier.pop(0)
        if cur_idx in visited:
            continue
        visited.add(cur_idx)
        task = self.idx_to_task[cur_idx]
        if (
            isinstance(task.dag_node, ClassMethodNode)
            and task.dag_node.is_class_method_call
        ):
            # Create output buffers for the actor method.
            assert len(task.output_channels) == 0
            # `output_to_readers` stores the reader tasks for each output of
            # the current node. If the current node returns one output, the
            # readers are the downstream nodes of the current node. If the
            # current node returns multiple outputs, the readers of each
            # output are the downstream nodes of the ClassMethodNode that
            # is a class method output.
            output_to_readers: Dict[CompiledTask, List[CompiledTask]] = defaultdict(
                list
            )
            for idx in task.downstream_task_idxs:
                downstream_task = self.idx_to_task[idx]
                downstream_node = downstream_task.dag_node
                if (
                    isinstance(downstream_node, ClassMethodNode)
                    and downstream_node.is_class_method_output
                ):
                    output_to_readers[downstream_task] = [
                        self.idx_to_task[idx]
                        for idx in downstream_task.downstream_task_idxs
                    ]
                else:
                    if task not in output_to_readers:
                        output_to_readers[task] = []
                    output_to_readers[task].append(downstream_task)
            fn = task.dag_node._get_remote_method("__ray_call__")
            for output, readers in output_to_readers.items():
                reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]] = []
                # Use reader_handles_set to deduplicate readers on the
                # same actor, because with CachedChannel each actor will
                # only read from the upstream channel once.
                reader_handles_set = set()
                read_by_multi_output_node = False
                for reader in readers:
                    if isinstance(reader.dag_node, MultiOutputNode):
                        read_by_multi_output_node = True
                        # inserting at 0 to make sure driver is first reader as
                        # expected by CompositeChannel read
                        reader_and_node_list.insert(
                            0,
                            (
                                self._proxy_actor,
                                self._get_node_id(self._proxy_actor),
                            ),
                        )
                    else:
                        # Fixed: the handle was previously re-fetched
                        # redundantly inside the dedup branch.
                        reader_handle = reader.dag_node._get_actor_handle()
                        if reader_handle not in reader_handles_set:
                            reader_and_node_list.append(
                                (reader_handle, self._get_node_id(reader_handle))
                            )
                            reader_handles_set.add(reader_handle)
                # if driver is an actual actor, gets driver actor id
                driver_actor_id = (
                    ray.get_runtime_context().get_actor_id()
                    if read_by_multi_output_node
                    else None
                )
                # Create an output channel for each output of the current node.
                output_channel = ray.get(
                    fn.remote(
                        do_allocate_channel,
                        reader_and_node_list,
                        task.dag_node.type_hint,
                        driver_actor_id,
                    )
                )
                output_idx = None
                downstream_node = output.dag_node
                if (
                    isinstance(downstream_node, ClassMethodNode)
                    and downstream_node.is_class_method_output
                ):
                    output_idx = downstream_node.output_idx
                task.output_channels.append(output_channel)
                task.output_idxs.append(output_idx)
                task.output_node_idxs.append(self.dag_node_to_idx[downstream_node])
            actor_handle = task.dag_node._get_actor_handle()
            assert actor_handle is not None
            self.actor_to_tasks[actor_handle].append(task)
        elif (
            isinstance(task.dag_node, ClassMethodNode)
            and task.dag_node.is_class_method_output
        ):
            # A class method output shares the channel that its parent
            # class method call allocated for this output index.
            task_node = task.dag_node
            upstream_node = task_node.class_method_call
            assert upstream_node
            upstream_task = self.idx_to_task[self.dag_node_to_idx[upstream_node]]
            for i in range(len(upstream_task.output_channels)):
                if upstream_task.output_idxs[i] == task_node.output_idx:
                    task.output_channels.append(upstream_task.output_channels[i])
                    task.output_idxs.append(upstream_task.output_idxs[i])
            assert len(task.output_channels) == 1
        elif isinstance(task.dag_node, InputNode):
            # A dictionary that maps an InputNode or InputAttributeNode to its
            # readers and the node on which the reader is running. Use `set` to
            # deduplicate readers on the same actor because with CachedChannel
            # each actor will only read from the shared memory once.
            input_node_to_reader_and_node_set: Dict[
                Union[InputNode, InputAttributeNode],
                Set[Tuple["ray.actor.ActorHandle", str]],
            ] = defaultdict(set)
            for idx in task.downstream_task_idxs:
                reader_task = self.idx_to_task[idx]
                assert isinstance(reader_task.dag_node, ClassMethodNode)
                reader_handle = reader_task.dag_node._get_actor_handle()
                reader_node_id = self._get_node_id(reader_handle)
                for arg in reader_task.args:
                    if isinstance(arg, InputAttributeNode) or isinstance(
                        arg, InputNode
                    ):
                        input_node_to_reader_and_node_set[arg].add(
                            (reader_handle, reader_node_id)
                        )
            # A single channel is responsible for sending the same data to
            # corresponding consumers. Therefore, we create a channel for
            # each InputAttributeNode, or a single channel for the entire
            # input data if there are no InputAttributeNodes.
            task.output_channels = []
            for input_dag_node in input_node_to_reader_and_node_set:
                reader_and_node_list = list(
                    input_node_to_reader_and_node_set[input_dag_node]
                )
                output_channel = do_allocate_channel(
                    self,
                    reader_and_node_list,
                    input_dag_node.type_hint,
                    None,
                )
                task.output_channels.append(output_channel)
                task.output_idxs.append(
                    None
                    if isinstance(input_dag_node, InputNode)
                    else input_dag_node.key
                )
                # Update the InputAttributeNode's `output_channels`, which is
                # used to determine whether to create a CachedChannel.
                if isinstance(input_dag_node, InputAttributeNode):
                    input_attr_idx = self.dag_node_to_idx[input_dag_node]
                    input_attr_task = self.idx_to_task[input_attr_idx]
                    input_attr_task.output_channels.append(output_channel)
                    assert len(input_attr_task.output_channels) == 1
        else:
            assert isinstance(task.dag_node, InputAttributeNode) or isinstance(
                task.dag_node, MultiOutputNode
            )
        for idx in task.downstream_task_idxs:
            frontier.append(idx)
    # Validate input channels for tasks that have not been visited
    for node_idx, task in self.idx_to_task.items():
        if (
            node_idx == self.input_task_idx
            or node_idx == self.output_task_idx
            or isinstance(task.dag_node, InputAttributeNode)
        ):
            continue
        if node_idx not in visited:
            has_at_least_one_channel_input = False
            for arg in task.args:
                if isinstance(arg, DAGNode):
                    has_at_least_one_channel_input = True
            if not has_at_least_one_channel_input:
                raise ValueError(
                    "Compiled DAGs require each task to take a ray.dag.InputNode "
                    "or at least one other DAGNode as an input. "
                    "Invalid task node:\n"
                    f"{task.dag_node}\n"
                    "Please bind the task to proper DAG nodes."
                )
    from ray.dag.constants import RAY_CGRAPH_ENABLE_DETECT_DEADLOCK
    if RAY_CGRAPH_ENABLE_DETECT_DEADLOCK and self._detect_deadlock():
        raise ValueError(
            "This DAG cannot be compiled because it will deadlock on accelerator "
            "calls. If you believe this is a false positive, please disable "
            "the graph verification by setting the environment variable "
            "RAY_CGRAPH_ENABLE_DETECT_DEADLOCK to 0 and file an issue at "
            "https://github.com/ray-project/ray/issues/new/."
        )
    input_task = self.idx_to_task[self.input_task_idx]
    self.dag_input_channels = input_task.output_channels
    assert self.dag_input_channels is not None
    # Create executable tasks for each actor
    for actor_handle, tasks in self.actor_to_tasks.items():
        # Dict from arg to the set of tasks that consume it.
        arg_to_consumers: Dict[DAGNode, Set[CompiledTask]] = defaultdict(set)
        # Step 1: populate `arg_to_consumers` and perform some validation.
        for task in tasks:
            has_at_least_one_channel_input = False
            for arg in task.args:
                if isinstance(arg, DAGNode):
                    has_at_least_one_channel_input = True
                    arg_to_consumers[arg].add(task)
                    arg_idx = self.dag_node_to_idx[arg]
                    upstream_task = self.idx_to_task[arg_idx]
                    assert len(upstream_task.output_channels) == 1
                    arg_channel = upstream_task.output_channels[0]
                    assert arg_channel is not None
            # TODO: Support no-input DAGs (use an empty object to signal).
            if not has_at_least_one_channel_input:
                raise ValueError(
                    "Compiled DAGs require each task to take a "
                    "ray.dag.InputNode or at least one other DAGNode as an "
                    "input"
                )
        # Step 2: create cached channels if needed
        # Dict from original channel to the channel to be used in execution.
        # The value of this dict is either the original channel or a newly
        # created CachedChannel (if the original channel is read more than once).
        for arg, consumers in arg_to_consumers.items():
            arg_idx = self.dag_node_to_idx[arg]
            upstream_task = self.idx_to_task[arg_idx]
            assert len(upstream_task.output_channels) == 1
            arg_channel = upstream_task.output_channels[0]
            assert arg_channel is not None
            if len(consumers) > 1:
                self._channel_dict[arg_channel] = CachedChannel(
                    len(consumers),
                    arg_channel,
                )
            else:
                self._channel_dict[arg_channel] = arg_channel
        # Step 3: create executable tasks for the actor
        executable_tasks = []
        for task in tasks:
            resolved_args: List[Any] = []
            for arg in task.args:
                if isinstance(arg, DAGNode):
                    arg_idx = self.dag_node_to_idx[arg]
                    upstream_task = self.idx_to_task[arg_idx]
                    assert len(upstream_task.output_channels) == 1
                    arg_channel = upstream_task.output_channels[0]
                    assert arg_channel is not None
                    arg_channel = self._channel_dict[arg_channel]
                    resolved_args.append(arg_channel)
                else:
                    # Constant arg
                    resolved_args.append(arg)
            executable_task = ExecutableTask(
                task,
                resolved_args,
                task.kwargs,
            )
            executable_tasks.append(executable_task)
        # Sort executable tasks based on their bind index, i.e., submission order
        # so that they will be executed in that order.
        executable_tasks.sort(key=lambda task: task.bind_index)
        self.actor_to_executable_tasks[actor_handle] = executable_tasks
    from ray.dag.constants import RAY_CGRAPH_ENABLE_PROFILING
    if RAY_CGRAPH_ENABLE_PROFILING:
        exec_task_func = do_profile_tasks
    else:
        exec_task_func = do_exec_tasks
    # Build an execution schedule for each actor
    self.actor_to_execution_schedule = self._build_execution_schedule()
    for actor_handle, executable_tasks in self.actor_to_executable_tasks.items():
        self.worker_task_refs[actor_handle] = actor_handle.__ray_call__.options(
            concurrency_group="_ray_system"
        ).remote(
            exec_task_func,
            executable_tasks,
            self.actor_to_execution_schedule[actor_handle],
            self._overlap_gpu_communication,
        )
    assert self.output_task_idx is not None
    self.dag_output_channels = []
    for output in self.idx_to_task[self.output_task_idx].args:
        assert isinstance(output, DAGNode)
        output_idx = self.dag_node_to_idx[output]
        task = self.idx_to_task[output_idx]
        assert len(task.output_channels) == 1
        self.dag_output_channels.append(task.output_channels[0])
    # Register custom serializers for input, input attribute, and output nodes.
    self._register_input_output_custom_serializer()
    assert self.dag_input_channels
    assert self.dag_output_channels
    # Fixed: the previous `assert [comprehension]` only checked that the
    # list was non-empty; `all(...)` checks every channel is non-None.
    assert all(
        output_channel is not None for output_channel in self.dag_output_channels
    )
    # If no MultiOutputNode was specified during the DAG creation, there is only
    # one output. Return a single output channel instead of a list of
    # channels.
    if not self._returns_list:
        assert len(self.dag_output_channels) == 1
    # Driver should ray.put on input, ray.get/release on output
    self._monitor = self._monitor_failures()
    input_task = self.idx_to_task[self.input_task_idx]
    if self._enable_asyncio:
        self._dag_submitter = AwaitableBackgroundWriter(
            self.dag_input_channels,
            input_task.output_idxs,
            is_input=True,
        )
        self._dag_output_fetcher = AwaitableBackgroundReader(
            self.dag_output_channels,
            self._fut_queue,
        )
    else:
        self._dag_submitter = SynchronousWriter(
            self.dag_input_channels, input_task.output_idxs, is_input=True
        )
        self._dag_output_fetcher = SynchronousReader(self.dag_output_channels)
    self._dag_submitter.start()
    self._dag_output_fetcher.start()
def _generate_dag_operation_graph_node(
    self,
) -> Dict["ray.actor.ActorHandle", List[List[_DAGOperationGraphNode]]]:
    """
    Generate READ, COMPUTE, and WRITE operations for each DAG node.
    Returns:
        A dictionary that maps an actor handle to a list of lists of
        _DAGOperationGraphNode. For the same actor, the index of the
        outer list corresponds to the index of the ExecutableTask in
        the list of `executable_tasks` in `actor_to_executable_tasks`,
        i.e. `exec_task_idx`. In the inner list, the order of operations
        is READ, COMPUTE, and WRITE.
    Example:
        {
            actor1: [
                [READ COMPUTE WRITE] # exec_task_idx 0
                [READ COMPUTE WRITE] # exec_task_idx 1
            ]
        }
    """
    from ray.dag.collective_node import CollectiveOutputNode
    assert self.idx_to_task
    assert self.actor_to_executable_tasks
    actor_to_operation_nodes: Dict[
        "ray.actor.ActorHandle", List[List[_DAGOperationGraphNode]]
    ] = defaultdict(list)
    for actor_handle, executable_tasks in self.actor_to_executable_tasks.items():
        for exec_task_idx, exec_task in enumerate(executable_tasks):
            # Divide a DAG node into three _DAGOperationGraphNodes: READ, COMPUTE,
            # and WRITE. Each _DAGOperationGraphNode has a _DAGNodeOperation.
            task_idx = exec_task.task_idx
            dag_node = self.idx_to_task[task_idx].dag_node
            method_name = exec_task.method_name
            # NOTE(review): this reassigns the outer loop variable
            # `actor_handle`; presumably the node's actor is the same
            # handle as the dict key — confirm before refactoring.
            actor_handle = dag_node._get_actor_handle()
            # READ requires an accelerator if any upstream node writes
            # via an accelerator transport.
            requires_accelerator_read = False
            for upstream_node in dag_node._upstream_nodes:
                if upstream_node.type_hint.requires_accelerator():
                    requires_accelerator_read = True
                    break
            # COMPUTE requires an accelerator only for collective outputs.
            requires_accelerator_compute = isinstance(
                dag_node, CollectiveOutputNode
            )
            # WRITE requires an accelerator if this node's own transport does.
            requires_accelerator_write = dag_node.type_hint.requires_accelerator()
            read_node = _DAGOperationGraphNode(
                _DAGNodeOperation(
                    exec_task_idx, _DAGNodeOperationType.READ, method_name
                ),
                task_idx,
                actor_handle,
                requires_accelerator_read,
            )
            compute_node = _DAGOperationGraphNode(
                _DAGNodeOperation(
                    exec_task_idx, _DAGNodeOperationType.COMPUTE, method_name
                ),
                task_idx,
                actor_handle,
                requires_accelerator_compute,
            )
            write_node = _DAGOperationGraphNode(
                _DAGNodeOperation(
                    exec_task_idx, _DAGNodeOperationType.WRITE, method_name
                ),
                task_idx,
                actor_handle,
                requires_accelerator_write,
            )
            actor_to_operation_nodes[actor_handle].append(
                [read_node, compute_node, write_node]
            )
    return actor_to_operation_nodes
def _build_execution_schedule(
    self,
) -> Dict["ray.actor.ActorHandle", List[_DAGNodeOperation]]:
    """
    Generate an execution schedule for each actor: a list of
    _DAGNodeOperation to run in order.

    Step 1: Build a DAG-node operation graph (see
    `_generate_dag_operation_graph_node` and
    `_build_dag_node_operation_graph`).

    Step 2: Topologically sort it. Several nodes may have zero in-degree
    at once; `_select_next_nodes` breaks those ties so that, as much as
    possible, each actor executes operations in ascending `bind_index`
    order (see `test_execution_schedule` for examples).

    Step 3: Optionally overlap GPU communication, and optionally
    visualize the schedule(s).

    Returns:
        A dictionary mapping each actor handle to the ordered list of
        operations it should execute.
    """
    # Step 1: Build a graph of _DAGOperationGraphNode
    op_nodes = self._generate_dag_operation_graph_node()
    op_graph = _build_dag_node_operation_graph(self.idx_to_task, op_nodes)
    # Step 2: Generate an execution schedule for each actor using topological sort
    base_schedule = _generate_actor_to_execution_schedule(op_graph)
    # Step 3: Overlap GPU communication for the execution schedule if configured
    overlapped_schedule = None
    if self._overlap_gpu_communication:
        overlapped_schedule = _generate_overlapped_execution_schedule(
            base_schedule
        )
    if RAY_CGRAPH_VISUALIZE_SCHEDULE:
        _visualize_execution_schedule(
            base_schedule, overlapped_schedule, op_graph
        )
    chosen = base_schedule if overlapped_schedule is None else overlapped_schedule
    return _extract_execution_schedule(chosen)
def _detect_deadlock(self) -> bool:
    """Best-effort deadlock detection; currently always returns False.

    TODO (kevin85421): Avoid false negatives.

    Currently, a compiled graph may deadlock if there are accelerator channels,
    and the readers have control dependencies on the same actor. For example:

    actor1.a ---> actor2.f1
             |
             ---> actor2.f2

    The control dependency between `actor2.f1` and `actor2.f2` is that `f1` should
    run before `f2`. If `actor1.a` writes to `actor2.f2` before `actor2.f1`, a
    deadlock will occur.

    Currently, the execution schedule is not granular enough to detect this
    deadlock.

    Returns:
        True if a deadlock is detected; otherwise, False.
    """
    # Detection is not implemented yet, so we conservatively report no
    # deadlock (a potential false negative, per the TODO above).
    logger.debug("Deadlock detection has not been implemented yet.")
    return False
def _monitor_failures(self):
    """Start a daemon thread that watches the worker tasks and tears down
    the compiled DAG when they exit (normally, with an exception, or on
    KeyboardInterrupt).

    Returns:
        The started monitor thread; callers can invoke its ``teardown()``.
    """
    # Hold only a weak reference to self so the monitor thread does not
    # keep the CompiledDAG alive after the user drops their last reference.
    get_outer = weakref.ref(self)

    class Monitor(threading.Thread):
        def __init__(self):
            super().__init__(daemon=True)
            self.name = "CompiledGraphMonitorThread"
            # Lock to make sure that we only perform teardown for this DAG
            # once.
            self._in_teardown_lock = threading.Lock()
            self._teardown_done = False

        def _outer_ref_alive(self) -> bool:
            # True iff the CompiledDAG weakref still resolves; logs loudly
            # otherwise, since teardown should happen before destruction.
            if get_outer() is None:
                logger.error(
                    "CompiledDAG has been destructed before teardown. "
                    "This should not occur please report an issue at "
                    "https://github.com/ray-project/ray/issues/new/.",
                    stack_info=True,
                )
                return False
            return True

        def wait_teardown(self, kill_actors: bool = False):
            # Wait for every worker task to finish, warning (and optionally
            # force-killing the actor) when one outlives the configured
            # teardown timeout.
            outer = get_outer()
            if not self._outer_ref_alive():
                return
            from ray.dag import DAGContext

            ctx = DAGContext.get_current()
            teardown_timeout = ctx.teardown_timeout
            for actor, ref in outer.worker_task_refs.items():
                timeout = False
                try:
                    ray.get(ref, timeout=teardown_timeout)
                except ray.exceptions.GetTimeoutError:
                    msg = (
                        f"Compiled DAG actor {actor} is still running "
                        f"{teardown_timeout}s after teardown()."
                    )
                    if kill_actors:
                        msg += (
                            " Force-killing actor. "
                            "Increase RAY_CGRAPH_teardown_timeout if you want "
                            "teardown to wait longer."
                        )
                        ray.kill(actor)
                    else:
                        msg += (
                            " Teardown may hang. "
                            "Call teardown with kill_actors=True if force kill "
                            "is desired."
                        )
                    logger.warning(msg)
                    timeout = True
                except Exception:
                    # We just want to check that the task has finished so
                    # we don't care if the actor task ended in an
                    # exception.
                    pass
                if not timeout:
                    continue
                # The first get timed out; block until the task truly exits
                # (e.g. after the force-kill above takes effect).
                try:
                    ray.get(ref)
                except Exception:
                    pass
            if kill_actors:
                # In the previous loop, we allow the actor tasks to exit first.
                # Now, we force kill the actors if not yet.
                for actor in outer.worker_task_refs:
                    logger.info(f"Killing actor: {actor}")
                    ray.kill(actor)

        def teardown(self, kill_actors: bool = False):
            # Idempotent teardown: close submitter/fetcher, cancel all actor
            # loops, destroy communicators, then wait for tasks to exit.
            with self._in_teardown_lock:
                if self._teardown_done:
                    return
                outer = get_outer()
                if not self._outer_ref_alive():
                    return
                logger.info("Tearing down compiled DAG")
                outer._dag_submitter.close()
                outer._dag_output_fetcher.close()
                for actor in outer.actor_to_executable_tasks.keys():
                    logger.info(f"Cancelling compiled worker on actor: {actor}")
                # Cancel all actor loops in parallel.
                cancel_refs = [
                    actor.__ray_call__.remote(do_cancel_executable_tasks, tasks)
                    for actor, tasks in outer.actor_to_executable_tasks.items()
                ]
                for cancel_ref in cancel_refs:
                    try:
                        ray.get(cancel_ref, timeout=30)
                    except RayChannelError:
                        # Channel error happens when a channel is closed
                        # or timed out. In this case, do not log.
                        pass
                    except Exception:
                        logger.exception("Error cancelling worker task")
                        pass
                for (
                    communicator_id
                ) in outer._actors_to_created_communicator_id.values():
                    _destroy_communicator(communicator_id)
                logger.info("Waiting for worker tasks to exit")
                self.wait_teardown(kill_actors=kill_actors)
                logger.info("Teardown complete")
                self._teardown_done = True

        def run(self):
            # Block on all worker task refs; any completion (or interrupt)
            # triggers teardown.
            try:
                outer = get_outer()
                if not self._outer_ref_alive():
                    return
                ray.get(list(outer.worker_task_refs.values()))
            except KeyboardInterrupt:
                logger.info(
                    "Received KeyboardInterrupt, tearing down with kill_actors=True"
                )
                self.teardown(kill_actors=True)
            except Exception as e:
                logger.debug(f"Handling exception from worker tasks: {e}")
                self.teardown()

    monitor = Monitor()
    monitor.start()
    return monitor
def _raise_if_too_many_inflight_executions(self):
num_inflight_executions = (
self._execution_index - self._max_finished_execution_index
)
if num_inflight_executions >= self._max_inflight_executions:
raise ray.exceptions.RayCgraphCapacityExceeded(
"The compiled graph can't have more than "
f"{self._max_inflight_executions} in-flight executions, and you "
f"currently have {num_inflight_executions} in-flight executions. "
"Retrieve an output using ray.get before submitting more requests or "
"increase `_max_inflight_executions`. "
"`dag.experimental_compile(_max_inflight_executions=...)`"
)
def _has_execution_results(
self,
execution_index: int,
) -> bool:
"""Check whether there are results corresponding to the given execution
index stored in self._result_buffer. This helps avoid fetching and
caching results again.
Args:
execution_index: The execution index corresponding to the result.
Returns:
Whether the result for the given index has been fetched and cached.
"""
return execution_index in self._result_buffer
def _cache_execution_results(
self,
execution_index: int,
result: Any,
):
"""Cache execution results in self._result_buffer. Results are converted
to dictionary format to allow efficient element removal and calculation of
the buffer size. This can only be called once per execution index.
Args:
execution_index: The execution index corresponding to the result.
result: The results from all channels to be cached.
"""
if not self._has_execution_results(execution_index):
for chan_idx, res in enumerate(result):
# avoid caching for any CompiledDAGRef that has already been destructed.
if not (
execution_index in self._destructed_ref_idxs
and chan_idx in self._destructed_ref_idxs[execution_index]
):
self._result_buffer[execution_index][chan_idx] = res
def _get_execution_results(
    self, execution_index: int, channel_index: Optional[int]
) -> List[Any]:
    """Retrieve execution results from self._result_buffer and return the result.

    Results are converted back to original list format ordered by output channel
    index.

    Args:
        execution_index: The execution index to retrieve results from.
        channel_index: The index of the output channel corresponding to the result.
            Channel indexing is consistent with the order of
            self.dag_output_channels. None means that the result wraps outputs from
            all output channels.

    Returns:
        The execution result corresponding to the given execution index and channel
        index.
    """
    # Although CompiledDAGRef and CompiledDAGFuture guarantee that the same
    # execution index and channel index combination will not be requested multiple
    # times and therefore self._result_buffer will always have execution_index as
    # a key, we still do a sanity check to avoid misuses.
    assert execution_index in self._result_buffer
    if channel_index is None:
        # Convert results stored in self._result_buffer back to original
        # list representation
        result = [
            kv[1]
            for kv in sorted(
                self._result_buffer.pop(execution_index).items(),
                key=lambda kv: kv[0],
            )
        ]
    else:
        result = [self._result_buffer[execution_index].pop(channel_index)]
    # Record that this (execution, channel) pair was retrieved; None marks
    # "all channels". This bookkeeping lets _clean_up_buffers release the
    # buffers once every channel has been either retrieved or destructed.
    if execution_index not in self._got_ref_idxs:
        self._got_ref_idxs[execution_index] = set()
    self._got_ref_idxs[execution_index].add(channel_index)
    self._clean_up_buffers(execution_index)
    return result
def _delete_execution_results(self, execution_index: int, channel_index: int):
"""
Delete the execution results for the given execution index and channel index.
This method should be called when a CompiledDAGRef or CompiledDAGFuture is
destructed.
Note that this method maintains metadata for the deleted execution results,
and only actually deletes the buffers lazily when the buffer is not needed
anymore.
Args:
execution_index: The execution index to destruct results from.
channel_index: The index of the output channel corresponding to the result.
"""
if execution_index not in self._destructed_ref_idxs:
self._destructed_ref_idxs[execution_index] = set()
self._destructed_ref_idxs[execution_index].add(channel_index)
self._clean_up_buffers(execution_index)
def _try_release_result_buffer(self, execution_index: int):
"""
Try to release the result buffer for the given execution index.
"""
should_release = False
got_channel_idxs = self._got_ref_idxs.get(execution_index, set())
if None in got_channel_idxs:
assert len(got_channel_idxs) == 1, (
"when None exists in got_channel_idxs, it means all channels, and "
"it should be the only value in the set",
)
should_release = True
else:
destructed_channel_idxs = self._destructed_ref_idxs.get(
execution_index, set()
)
processed_channel_idxs = got_channel_idxs.union(destructed_channel_idxs)
# No more processing is needed for this execution index.
should_release = processed_channel_idxs == set(
range(len(self.dag_output_channels))
)
if not should_release:
return False
self._result_buffer.pop(execution_index, None)
self._destructed_ref_idxs.pop(execution_index, None)
self._got_ref_idxs.pop(execution_index, None)
return True
def _try_release_native_buffer(
self, idx_to_release: int, timeout: Optional[float] = None
) -> bool:
"""
Try to release the native buffer for the given execution index.
Args:
idx_to_release: The execution index to release buffers from.
timeout: The maximum time in seconds to wait for the release.
Returns:
Whether the buffers have been released.
"""
if idx_to_release != self._max_finished_execution_index + 1:
# Native buffer can only be released for the next execution index.
return False
destructed_channel_idxs = self._destructed_ref_idxs.get(idx_to_release, set())
should_release = False
if None in destructed_channel_idxs:
assert len(destructed_channel_idxs) == 1, (
"when None exists in destructed_channel_idxs, it means all channels, "
"and it should be the only value in the set",
)
should_release = True
elif len(destructed_channel_idxs) == len(self.dag_output_channels):
should_release = True
if not should_release:
return False
# refs corresponding to idx_to_release are all destructed,
# and they are never fetched or cached.
assert idx_to_release not in self._result_buffer
assert idx_to_release not in self._got_ref_idxs
try:
self._dag_output_fetcher.release_channel_buffers(timeout)
except RayChannelTimeoutError as e:
raise RayChannelTimeoutError(
"Releasing native buffers corresponding to a stale CompiledDAGRef "
"is taking a long time. If this is expected, increase "
f"RAY_CGRAPH_get_timeout which is currently {self._get_timeout} "
"seconds. Otherwise, this may indicate that the execution "
"is hanging."
) from e
self._destructed_ref_idxs.pop(idx_to_release)
return True
def _try_release_buffer(
self, idx_to_release: int, timeout: Optional[float] = None
) -> bool:
"""
Try to release the buffer for the given execution index.
First try to release the native buffer, then try to release the result buffer.
Args:
idx_to_release: The execution index to release buffers from.
timeout: The maximum time in seconds to wait for the release.
Returns:
Whether the native buffer or result buffer has been released.
"""
if self._try_release_native_buffer(idx_to_release, timeout):
# Releasing native buffer means the corresponding execution result
# is consumed (and discarded).
self._max_finished_execution_index += 1
return True
return self._try_release_result_buffer(idx_to_release)
def _try_release_buffers(self):
"""
Repeatedly release buffer if possible.
This method starts from _max_finished_execution_index + 1 and tries to release
as many buffers as possible. If a native buffer is released,
_max_finished_execution_index will be incremented.
"""
timeout = self._get_timeout
while True:
start_time = time.monotonic()
if not self._try_release_buffer(
self._max_finished_execution_index + 1, timeout
):
break
if timeout != -1:
timeout -= time.monotonic() - start_time
timeout = max(timeout, 0)
def _clean_up_buffers(self, idx_to_release: int):
    """Clean up native and result buffers.

    This method:
    1. Tries to release the buffer for the given execution index.
       This index is the specific one that requires a clean up,
       e.g., right after get() is called or a CompiledDAGRef/CompiledDAGFuture
       is destructed.
    2. Tries to release all buffers starting from _max_finished_execution_index + 1.
       This step is to clean up buffers that are no longer needed.

    Args:
        idx_to_release: The execution index that requires a clean up,
            e.g., right after get() is called or a CompiledDAGRef/CompiledDAGFuture
            is destructed.
    """
    # Targeted release first, then a sweep over all releasable indices.
    self._try_release_buffer(idx_to_release)
    self._try_release_buffers()
def _execute_until(
    self,
    execution_index: int,
    channel_index: Optional[int] = None,
    timeout: Optional[float] = None,
):
    """Repeatedly execute this DAG until the given execution index and
    buffer results for all CompiledDagRef's.

    If the DAG has already been executed up to the given index, it will do nothing.

    Note: If this comes across execution indices for which the corresponding
    CompiledDAGRef's have been destructed, it will release the buffer and not
    cache the result.

    Args:
        execution_index: The execution index to execute until.
        channel_index: The index of the output channel to get the result from.
            Channel indexing is consistent with the order of
            self.dag_output_channels. None means wrapping results from all output
            channels into a single list.
        timeout: The maximum time in seconds to wait for the execution.
            None means using default timeout (DAGContext.get_timeout),
            0 means immediate timeout (immediate success or timeout without
            blocking), -1 means infinite timeout (block indefinitely).

    Raises:
        RayCgraphCapacityExceeded: If the number of buffered results would
            exceed ``_max_buffered_results``.
        RayChannelTimeoutError: If reading the output channels times out.

    TODO(rui): catch the case that user holds onto the CompiledDAGRefs
    """
    if timeout is None:
        timeout = self._get_timeout
    while self._max_finished_execution_index < execution_index:
        # Guard driver memory: refuse to buffer more results than allowed.
        if len(self._result_buffer) >= self._max_buffered_results:
            raise RayCgraphCapacityExceeded(
                "The compiled graph can't have more than "
                f"{self._max_buffered_results} buffered results, and you "
                f"currently have {len(self._result_buffer)} buffered results. "
                "Call `ray.get()` on CompiledDAGRef's (or await on "
                "CompiledDAGFuture's) to retrieve results, or increase "
                f"`_max_buffered_results` if buffering is desired, note that "
                "this will increase driver memory usage."
            )
        start_time = time.monotonic()
        # Fetch results from each output channel up to execution_index and cache
        # them separately to enable individual retrieval
        # If a CompiledDagRef for a specific execution index has been destructed,
        # release the channel buffers for that execution index instead of caching
        try:
            if not self._try_release_native_buffer(
                self._max_finished_execution_index + 1, timeout
            ):
                result = self._dag_output_fetcher.read(timeout)
                self._cache_execution_results(
                    self._max_finished_execution_index + 1,
                    result,
                )
            # We have either released the native buffer or fetched and
            # cached the result buffer, therefore we always increment
            # _max_finished_execution_index.
            self._max_finished_execution_index += 1
        except RayChannelTimeoutError as e:
            raise RayChannelTimeoutError(
                "If the execution is expected to take a long time, increase "
                f"RAY_CGRAPH_get_timeout which is currently {self._get_timeout} "
                "seconds. Otherwise, this may indicate that the execution is "
                "hanging."
            ) from e
        # Deduct the elapsed time from the remaining timeout budget
        # (-1 means infinite), clamping at 0.
        if timeout != -1:
            timeout -= time.monotonic() - start_time
            timeout = max(timeout, 0)
def execute(
    self,
    *args,
    **kwargs,
) -> Union[CompiledDAGRef, List[CompiledDAGRef]]:
    """Execute this DAG using the compiled execution path.

    Args:
        args: Args to the InputNode.
        kwargs: Kwargs to the InputNode.

    Returns:
        A CompiledDAGRef — or a list of CompiledDAGRefs when the DAG has
        multiple outputs — that can be used to read the DAG result.

    Raises:
        RayChannelTimeoutError: If the execution does not complete within
            self._submit_timeout seconds.

    NOTE: Not thread-safe due to _execution_index etc.
    """
    if self._enable_asyncio:
        raise ValueError("Use execute_async if enable_asyncio=True")
    self._get_or_compile()
    self._check_inputs(args, kwargs)
    if len(args) == 1 and len(kwargs) == 0:
        # When serializing a tuple, the Ray serializer invokes pickle5, which adds
        # several microseconds of overhead. One common case for Compiled Graphs is
        # passing a single argument (oftentimes of type `bytes`, which requires
        # no serialization). To avoid imposing this overhead on this common case, we
        # create a fast path for this case that avoids pickle5.
        inp = args[0]
    else:
        inp = CompiledDAGArgs(args=args, kwargs=kwargs)
    # We want to release any buffers we can at this point based on the
    # max_finished_execution_index so that the number of inflight executions
    # is up to date.
    self._try_release_buffers()
    self._raise_if_too_many_inflight_executions()
    try:
        self._dag_submitter.write(inp, self._submit_timeout)
    except RayChannelTimeoutError as e:
        raise RayChannelTimeoutError(
            "If the execution is expected to take a long time, increase "
            f"RAY_CGRAPH_submit_timeout which is currently {self._submit_timeout} "
            "seconds. Otherwise, this may indicate that execution is hanging."
        ) from e
    self._execution_index += 1
    # Hand back one ref per output channel, or a single ref for
    # single-output DAGs.
    if self._returns_list:
        ref = [
            CompiledDAGRef(self, self._execution_index, channel_index)
            for channel_index in range(len(self.dag_output_channels))
        ]
    else:
        ref = CompiledDAGRef(self, self._execution_index)
    return ref
def _check_inputs(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> None:
"""
Helper method to check that the DAG args provided by the user during
execution are valid according to the defined DAG.
"""
if len(args) != self._input_num_positional_args:
raise ValueError(
"dag.execute() or dag.execute_async() must be "
f"called with {self._input_num_positional_args} positional args, got "
f"{len(args)}"
)
for kwarg in self._input_kwargs:
if kwarg not in kwargs:
raise ValueError(
"dag.execute() or dag.execute_async() "
f"must be called with kwarg `{kwarg}`"
)
async def execute_async(
    self,
    *args,
    **kwargs,
) -> Union[CompiledDAGFuture, List[CompiledDAGFuture]]:
    """Execute this DAG using the compiled execution path.

    NOTE: Not thread-safe.

    Args:
        args: Args to the InputNode.
        kwargs: Kwargs to the InputNode.

    Returns:
        A CompiledDAGFuture — or a list of CompiledDAGFutures when the DAG
        has multiple outputs — that can be awaited to read the DAG result.
    """
    if not self._enable_asyncio:
        raise ValueError("Use execute if enable_asyncio=False")
    self._get_or_compile()
    self._check_inputs(args, kwargs)
    # Serialize submissions so input write and future enqueue stay paired.
    async with self._dag_submission_lock:
        if len(args) == 1 and len(kwargs) == 0:
            # When serializing a tuple, the Ray serializer invokes pickle5, which
            # adds several microseconds of overhead. One common case for accelerated
            # DAGs is passing a single argument (oftentimes of type `bytes`,
            # which requires no serialization). To avoid imposing this overhead on
            # this common case, we create a fast path for this case that avoids
            # pickle5.
            inp = args[0]
        else:
            inp = CompiledDAGArgs(args=args, kwargs=kwargs)
        self._raise_if_too_many_inflight_executions()
        await self._dag_submitter.write(inp)
        # Allocate a future that the caller can use to get the result.
        fut = asyncio.Future()
        await self._fut_queue.put(fut)
    self._execution_index += 1
    if self._returns_list:
        fut = [
            CompiledDAGFuture(self, self._execution_index, fut, channel_index)
            for channel_index in range(len(self.dag_output_channels))
        ]
    else:
        fut = CompiledDAGFuture(self, self._execution_index, fut)
    return fut
def _visualize_ascii(self) -> str:
    """
    Visualize the compiled graph in
    ASCII format with directional markers.

    This function generates an ASCII visualization of a Compiled Graph,
    where each task node is labeled,
    and edges use `<` and `>` markers to show data flow direction.

    This method is called by:
    - `compiled_dag.visualize(format="ascii")`

    High-Level Algorithm:
    - Topological Sorting: Sort nodes topologically to organize
      them into layers based on dependencies.
    - Grid Initialization: Set up a 2D grid canvas with dimensions based
      on the number of layers and the maximum number of nodes per layer.
    - Node Placement: Position each node on the grid according to its
      layer and relative position within that layer.
      Spacing is added for readability, and directional markers (`<` and `>`)
      are added to edges to show input/output flow clearly.

    This method should be called
    **after** compiling the graph with `experimental_compile()`.

    Returns:
        ASCII representation of the CG with Nodes Information,
        Edges Information and Graph Built.

    Limitations:
    - Note: This is only used for quick visualization for small graphs.
      For complex graph (i.e. more than 20 tasks), please use graphviz.
    - Scale: Works best for smaller CGs (typically fewer than 20 tasks).
      Larger CGs may result in dense, less readable ASCII
      outputs due to limited space for node and edge rendering.
    - Shape: Ideal for relatively shallow CGs with clear dependency paths.
      For deep, highly branched or densely connected CGs,
      readability may suffer.
    - Edge Overlap: In cases with high fan-out (i.e., nodes with many children)
      or fan-in (nodes with many parents), edge lines may intersect or overlap
      in the ASCII visualization, potentially obscuring some connections.
    - Multi-output Tasks: Multi-output tasks can be visualized, but positioning
      may cause line breaks or overlap when a task has multiple outputs that
      feed into nodes at varying depths.

    Example:
        Basic Visualization:
        ```python
        # Print the CG structure in ASCII format
        print(compiled_dag.visualize(format="ascii"))
        ```

        Example of Ordered Visualization (task is build in order
        to reduce line intersection):
        ```python
        with InputNode() as i:
            o1, o2, o3 = a.return_three.bind(i)
            o4 = b.echo.bind(o1)
            o5 = b.echo.bind(o2)
            o6, o7 = b.return_two.bind(o3)
            dag = MultiOutputNode([o4, o5, o6, o7])
        compiled_dag = dag.experimental_compile()
        compiled_dag.visualize(format="ascii",view=True)

        # Output:
        # 0:InputNode
        # |
        # 1:Actor_54777d:return_three
        # |---------------------------->|---------------------------->|  # noqa
        # 2:Output[0]                   3:Output[1]                   4:Output[2]  # noqa
        # |                             |                             |  # noqa
        # 5:Actor_c927c9:echo           6:Actor_c927c9:echo           7:Actor_c927c9:return_two  # noqa
        # |                             |                             |---------------------------->|  # noqa
        # |                             |                             9:Output[0]                   10:Output[1]  # noqa
        # |<----------------------------|-----------------------------|-----------------------------|  # noqa
        # 8:MultiOutputNode
        ```

        Example of Anti-pattern Visualization (There are intersections):
        # We can switch the nodes ordering to reduce intersections, i.e. swap o2 and o3
        ```python
        with InputNode() as i:
            o1, o2, o3 = a.return_three.bind(i)
            o4 = b.echo.bind(o1)
            o5 = b.echo.bind(o3)
            o6, o7 = b.return_two.bind(o2)
            dag = MultiOutputNode([o4, o5, o6, o7])
        compiled_dag = dag.experimental_compile()
        compiled_dag.visualize(format="ascii",view=True)

        # Output (Nodes 5, 7, 9, 10 should connect to Node 8):
        # 0:InputNode
        # |
        # 1:Actor_84835a:return_three
        # |---------------------------->|---------------------------->|  # noqa
        # 2:Output[0]                   3:Output[1]                   4:Output[2]  # noqa
        # |                             |                             |  # noqa
        # 5:Actor_02a6a1:echo           6:Actor_02a6a1:return_two     7:Actor_02a6a1:echo  # noqa
        # |                             |---------------------------->|  # noqa
        # |                             9:Output[0]                   10:Output[1]  # noqa
        # |<----------------------------------------------------------|  # noqa
        # 8:MultiOutputNode
        ```
    """
    from ray.dag import (
        ClassMethodNode,
        DAGNode,
        InputAttributeNode,
        InputNode,
        MultiOutputNode,
    )

    # Check that the DAG has been compiled
    if not hasattr(self, "idx_to_task") or not self.idx_to_task:
        raise ValueError(
            "The DAG must be compiled before calling 'visualize()'. "
            "Please call 'experimental_compile()' first."
        )

    # Check that each CompiledTask has a valid dag_node
    for idx, task in self.idx_to_task.items():
        if not hasattr(task, "dag_node") or not isinstance(task.dag_node, DAGNode):
            raise ValueError(
                f"Task at index {idx} does not have a valid 'dag_node'. "
                "Ensure that 'experimental_compile()' completed successfully."
            )

    from collections import defaultdict, deque

    # Create adjacency list representation of the DAG
    # Adjacency list for DAG; maps a node index to its downstream nodes.
    adj_list: Dict[int, List[int]] = defaultdict(list)
    # Indegree count for topological sorting; maps a node index to its indegree.
    indegree: Dict[int, int] = defaultdict(int)
    # Tracks whether a node is a multi-output node.
    is_multi_output: Dict[int, bool] = defaultdict(bool)
    # Maps child node indices to their parent node indices.
    child2parent: Dict[int, int] = defaultdict(int)

    ascii_visualization = ""
    # Node information; maps a node index to its descriptive label.
    node_info: Dict[int, str] = {}
    # Edge information; tuples of (upstream_index, downstream_index, edge_label).
    edge_info: List[Tuple[int, int, str]] = []

    for idx, task in self.idx_to_task.items():
        dag_node = task.dag_node
        label = f"Task {idx} "

        # Determine the type and label of the node
        if isinstance(dag_node, InputNode):
            label += "InputNode"
        elif isinstance(dag_node, InputAttributeNode):
            label += f"InputAttributeNode[{dag_node.key}]"
        elif isinstance(dag_node, MultiOutputNode):
            label += "MultiOutputNode"
        elif isinstance(dag_node, ClassMethodNode):
            if dag_node.is_class_method_call:
                method_name = dag_node.get_method_name()
                actor_handle = dag_node._get_actor_handle()
                actor_id = (
                    actor_handle._actor_id.hex()[:6] if actor_handle else "unknown"
                )
                label += f"Actor: {actor_id}... Method: {method_name}"
            elif dag_node.is_class_method_output:
                label += f"ClassMethodOutputNode[{dag_node.output_idx}]"
            else:
                label += "ClassMethodNode"
        else:
            label += type(dag_node).__name__

        node_info[idx] = label

        for arg_index, arg in enumerate(dag_node.get_args()):
            if isinstance(arg, DAGNode):
                upstream_task_idx = self.dag_node_to_idx[arg]
                # Get the type hint for this argument
                if arg_index < len(task.arg_type_hints):
                    if task.arg_type_hints[arg_index].requires_accelerator():
                        type_hint = "Accelerator"
                    else:
                        type_hint = type(task.arg_type_hints[arg_index]).__name__
                else:
                    type_hint = "UnknownType"
                adj_list[upstream_task_idx].append(idx)
                indegree[idx] += 1
                edge_info.append((upstream_task_idx, idx, type_hint))

    width_adjust = 0
    for upstream_task_idx, child_idx_list in adj_list.items():
        # Mark as multi-output if the node has more than one output path
        if len(child_idx_list) > 1:
            for child in child_idx_list:
                is_multi_output[child] = True
                child2parent[child] = upstream_task_idx
            width_adjust = max(width_adjust, len(child_idx_list))

    # Topological sort to determine layers
    layers = defaultdict(list)
    zero_indegree = deque([idx for idx in self.idx_to_task if indegree[idx] == 0])
    layer_index = 0
    while zero_indegree:
        next_layer = deque()
        while zero_indegree:
            task_idx = zero_indegree.popleft()
            layers[layer_index].append(task_idx)
            for downstream in adj_list[task_idx]:
                indegree[downstream] -= 1
                if indegree[downstream] == 0:
                    next_layer.append(downstream)
        zero_indegree = next_layer
        layer_index += 1

    # Print detailed node information
    ascii_visualization += "Nodes Information:\n"
    for idx, info in node_info.items():
        ascii_visualization += f'{idx} [label="{info}"] \n'

    # Print edges
    ascii_visualization += "\nEdges Information:\n"
    for upstream_task, downstream_task, type_hint in edge_info:
        if type_hint == "Accelerator":
            edgs_channel = "+++"
        else:
            edgs_channel = "---"
        ascii_visualization += (
            f"{upstream_task} {edgs_channel}>" f" {downstream_task}\n"
        )

    # Add the legend to the output
    ascii_visualization += "\nLegend:\n"
    ascii_visualization += "+++> : Represents Accelerator-type data channels\n"
    ascii_visualization += "---> : Represents Shared Memory data channels\n"

    # Find the maximum width (number of nodes in any layer)
    max_width = max(len(layer) for layer in layers.values()) + width_adjust
    height = len(layers)

    # Build grid for ASCII visualization
    grid = [[" " for _ in range(max_width * 20)] for _ in range(height * 2 - 1)]

    # Place nodes in the grid with more details
    task_to_pos = {}
    for layer_num, layer_tasks in layers.items():
        layer_y = layer_num * 2  # Every second row is for nodes
        for col_num, task_idx in enumerate(layer_tasks):
            task = self.idx_to_task[task_idx]
            task_info = f"{task_idx}:"

            # Determine if it's an actor method or a regular task
            if isinstance(task.dag_node, ClassMethodNode):
                if task.dag_node.is_class_method_call:
                    method_name = task.dag_node.get_method_name()
                    actor_handle = task.dag_node._get_actor_handle()
                    actor_id = (
                        actor_handle._actor_id.hex()[:6]
                        if actor_handle
                        else "unknown"
                    )
                    task_info += f"Actor_{actor_id}:{method_name}"
                elif task.dag_node.is_class_method_output:
                    task_info += f"Output[{task.dag_node.output_idx}]"
                else:
                    task_info += "UnknownMethod"
            else:
                task_info += type(task.dag_node).__name__

            adjust_col_num = 0
            if task_idx in is_multi_output:
                adjust_col_num = layers[layer_num - 1].index(child2parent[task_idx])
            col_x = (col_num + adjust_col_num) * 30  # Every 30th column for spacing
            # Place the task information into the grid
            for i, char in enumerate(task_info):
                if col_x + i < len(grid[0]):  # Ensure we don't overflow the grid
                    grid[layer_y][col_x + i] = char

            task_to_pos[task_idx] = (layer_y, col_x)

    # Connect the nodes with lines
    for upstream_task, downstream_tasks in adj_list.items():
        upstream_y, upstream_x = task_to_pos[upstream_task]
        for downstream_task in downstream_tasks:
            downstream_y, downstream_x = task_to_pos[downstream_task]

            # Draw vertical line
            for y in range(upstream_y + 1, downstream_y):
                if grid[y][upstream_x] == " ":
                    grid[y][upstream_x] = "|"

            # Draw horizontal line with directional arrows
            if upstream_x != downstream_x:
                for x in range(
                    min(upstream_x, downstream_x) + 1,
                    max(upstream_x, downstream_x),
                ):
                    grid[downstream_y - 1][x] = (
                        "-"
                        if grid[downstream_y - 1][x] == " "
                        else grid[downstream_y - 1][x]
                    )
                # Add arrows to indicate flow direction
                if downstream_x > upstream_x:
                    grid[downstream_y - 1][downstream_x - 1] = ">"
                else:
                    grid[downstream_y - 1][downstream_x + 1] = "<"

            # Draw connection to the next task
            grid[downstream_y - 1][downstream_x] = "|"

    # Ensure proper multi-output task connection
    for idx, task in self.idx_to_task.items():
        if isinstance(task.dag_node, MultiOutputNode):
            output_tasks = task.dag_node.get_args()
            for i, output_task in enumerate(output_tasks):
                if isinstance(output_task, DAGNode):
                    output_task_idx = self.dag_node_to_idx[output_task]
                    if output_task_idx in task_to_pos:
                        output_y, output_x = task_to_pos[output_task_idx]
                        grid[output_y - 1][output_x] = "|"

    # Convert grid to string for printing
    ascii_visualization += "\nGraph Built:\n"
    ascii_visualization += "\n".join("".join(row) for row in grid)
    return ascii_visualization
def get_channel_details(
    self, channel: ChannelInterface, downstream_actor_id: str
) -> str:
    """Describe a channel's outer/inner types and ids for graph visualization.

    Args:
        channel: The channel to get details for.
        downstream_actor_id: The downstream actor ID.

    Returns:
        A string with details about the channel based on its connection
        to the actor provided.
    """
    details = type(channel).__name__
    # Swap in the outer (wrapping) channel when a distinct one is registered.
    outer = self._channel_dict.get(channel, channel)
    if outer != channel:
        channel = outer
        details += f"\n{type(channel).__name__}"
        if type(channel) is CachedChannel:
            details += f", {channel._channel_id[:6]}..."
    # Append the inner channel serving the given downstream actor, if any.
    if (
        type(channel) is CompositeChannel
        and downstream_actor_id in channel._channel_dict
    ):
        inner_channel = channel._channel_dict[downstream_actor_id]
        details += f"\n{type(inner_channel).__name__}"
        if type(inner_channel) is IntraProcessChannel:
            details += f", {inner_channel._channel_id[:6]}..."
    return details
def visualize(
self,
filename="compiled_graph",
format="png",
view=False,
channel_details=False,
) -> str:
"""
Visualize the compiled graph by showing tasks and their dependencies.
This method should be called **after** the graph has been compiled using
`experimental_compile()`.
Args:
filename: For non-ASCII formats, the output file name (without extension).
For ASCII format, the visualization will be printed to the console,
and this argument is ignored.
format: The format of the output file (e.g., 'png', 'pdf', 'ascii').
view: For non-ASCII formats: Whether to open the file with the default
viewer. For ASCII format: Whether to print the visualization and return
None or return the ascii visualization string directly.
channel_details: If True, adds channel details to edges.
Returns:
The string representation of the compiled graph. For Graphviz-based formats
(e.g., 'png', 'pdf', 'jpeg'), returns the Graphviz DOT string representation
of the compiled graph. For ASCII format, returns the ASCII string
representation of the compiled graph.
Raises:
ValueError: If the graph is empty or not properly compiled.
ImportError: If the `graphviz` package is not installed.
"""
if format == "ascii":
if channel_details:
raise ValueError(
"Parameters 'channel_details' are"
" not compatible with 'ascii' format."
)
ascii_visualiztion_str = self._visualize_ascii()
if view:
print(ascii_visualiztion_str)
return ascii_visualiztion_str
try:
import graphviz
except ImportError:
raise ImportError(
"Please install graphviz to visualize the compiled graph. "
"You can install it by running `pip install graphviz`."
)
from ray.dag import (
ClassMethodNode,
DAGNode,
InputAttributeNode,
InputNode,
MultiOutputNode,
)
# Check that the DAG has been compiled
if not hasattr(self, "idx_to_task") or not self.idx_to_task:
raise ValueError(
"The DAG must be compiled before calling 'visualize()'. "
"Please call 'experimental_compile()' first."
)
# Check that each CompiledTask has a valid dag_node
for idx, task in self.idx_to_task.items():
if not hasattr(task, "dag_node") or not isinstance(task.dag_node, DAGNode):
raise ValueError(
f"Task at index {idx} does not have a valid 'dag_node'. "
"Ensure that 'experimental_compile()' completed successfully."
)
# Dot file for debugging
dot = graphviz.Digraph(name="compiled_graph", format=format)
# Give every actor a unique color, colors between 24k -> 40k tested as readable
# other colors may be too dark, especially when wrapping back around to 0
actor_id_to_color = defaultdict(
lambda: f"#{((len(actor_id_to_color) * 2000 + 24000) % 0xFFFFFF):06X}"
)
# Add nodes with task information
for idx, task in self.idx_to_task.items():
dag_node = task.dag_node
# Initialize the label and attributes
label = f"Task {idx}\n"
shape = "oval" # Default shape
style = "filled"
fillcolor = ""
# Handle different types of dag_node
if isinstance(dag_node, InputNode):
label += "InputNode"
shape = "rectangle"
fillcolor = "lightblue"
elif isinstance(dag_node, InputAttributeNode):
label += f"InputAttributeNode[{dag_node.key}]"
shape = "rectangle"
fillcolor = "lightblue"
elif isinstance(dag_node, MultiOutputNode):
label += "MultiOutputNode"
shape = "rectangle"
fillcolor = "yellow"
elif isinstance(dag_node, ClassMethodNode):
if dag_node.is_class_method_call:
# Class Method Call Node
method_name = dag_node.get_method_name()
actor = dag_node._get_actor_handle()
if actor:
class_name = (
actor._ray_actor_creation_function_descriptor.class_name
)
actor_id = actor._actor_id.hex()
label += f"Actor: {class_name}\n"
label += f"ID: {actor_id[:6]}...\n"
label += f"Method: {method_name}"
fillcolor = actor_id_to_color[actor_id]
else:
label += f"Method: {method_name}"
fillcolor = "lightgreen"
shape = "oval"
elif dag_node.is_class_method_output:
# Class Method Output Node
label += f"ClassMethodOutputNode[{dag_node.output_idx}]"
shape = "rectangle"
fillcolor = "orange"
else:
# Unexpected ClassMethodNode
label += "ClassMethodNode"
shape = "diamond"
fillcolor = "red"
else:
# Unexpected node type
label += type(dag_node).__name__
shape = "diamond"
fillcolor = "red"
# Add the node to the graph with attributes
dot.node(str(idx), label, shape=shape, style=style, fillcolor=fillcolor)
channel_type_str = (
(
type(dag_node.type_hint).__name__
if dag_node.type_hint
else "UnknownType"
)
+ "\n"
if channel_details
else None
)
# This logic is built on the assumption that there will only be multiple
# output channels if the task has multiple returns
# case: task with one output
if len(task.output_channels) == 1:
for downstream_node in task.dag_node._downstream_nodes:
downstream_idx = self.dag_node_to_idx[downstream_node]
edge_label = None
if channel_details:
edge_label = channel_type_str
edge_label += self.get_channel_details(
task.output_channels[0],
(
downstream_node._get_actor_handle()._actor_id.hex()
if type(downstream_node) is ClassMethodNode
else self._proxy_actor._actor_id.hex()
),
)
dot.edge(str(idx), str(downstream_idx), label=edge_label)
# case: multi return, output channels connect to class method output nodes
elif len(task.output_channels) > 1:
assert len(task.output_idxs) == len(task.output_channels)
for output_channel, downstream_idx in zip(
task.output_channels, task.output_node_idxs
):
edge_label = None
if channel_details:
edge_label = channel_type_str
edge_label += self.get_channel_details(
output_channel,
task.dag_node._get_actor_handle()._actor_id.hex(),
)
dot.edge(str(idx), str(downstream_idx), label=edge_label)
if type(task.dag_node) is InputAttributeNode:
# Add an edge from the InputAttributeNode to the InputNode
dot.edge(str(self.input_task_idx), str(idx))
dot.render(filename, view=view)
return dot.source
def _register_input_output_custom_serializer(self):
"""
Register custom serializers for input, input attribute, and output nodes.
"""
assert self.input_task_idx is not None
assert self.output_task_idx is not None
# Register custom serializers for input node.
input_task = self.idx_to_task[self.input_task_idx]
input_task.dag_node.type_hint.register_custom_serializer()
# Register custom serializers for input attribute nodes.
for input_attr_task_idx in self.input_attr_task_idxs:
input_attr_task = self.idx_to_task[input_attr_task_idx]
input_attr_task.dag_node.type_hint.register_custom_serializer()
# Register custom serializers for output nodes.
for output in self.idx_to_task[self.output_task_idx].args:
output.type_hint.register_custom_serializer()
def teardown(self, kill_actors: bool = False):
"""
Teardown and cancel all actor tasks for this DAG. After this
function returns, the actors should be available to execute new tasks
or compile a new DAG.
Note: This method is automatically called when the CompiledDAG is destructed
or the script exits. However, this should be explicitly called before compiling
another graph on the same actors. Python may not garbage collect the
CompiledDAG object immediately when you may expect.
"""
if self._is_teardown:
return
monitor = getattr(self, "_monitor", None)
if monitor is not None:
from ray.dag import DAGContext
ctx = DAGContext.get_current()
monitor.teardown(kill_actors=kill_actors)
monitor.join(timeout=ctx.teardown_timeout)
# We do not log a warning here if the thread is still alive because
# wait_teardown already logs upon teardown_timeout.
self._is_teardown = True
def __del__(self):
self.teardown()
@DeveloperAPI
def build_compiled_dag_from_ray_dag(
dag: "ray.dag.DAGNode",
submit_timeout: Optional[float] = None,
buffer_size_bytes: Optional[int] = None,
enable_asyncio: bool = False,
max_inflight_executions: Optional[int] = None,
max_buffered_results: Optional[int] = None,
overlap_gpu_communication: Optional[bool] = None,
default_communicator: Optional[Union[Communicator, str]] = "create",
) -> "CompiledDAG":
compiled_dag = CompiledDAG(
submit_timeout,
buffer_size_bytes,
enable_asyncio,
max_inflight_executions,
max_buffered_results,
overlap_gpu_communication,
default_communicator,
)
def _build_compiled_dag(node):
compiled_dag._add_node(node)
return node
root = dag._find_root()
root.traverse_and_apply(_build_compiled_dag)
compiled_dag._get_or_compile()
global _compiled_dags
_compiled_dags[compiled_dag.get_id()] = compiled_dag
return compiled_dag
| CompiledDAG |
python | coleifer__peewee | tests/models.py | {
"start": 158278,
"end": 158405
} | class ____(TestModel):
seq_id = IntegerField(sequence='seq_id_sequence')
key = TextField()
@requires_pglike
| SequenceModel |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 1381,
"end": 1557
} | class ____:
"""Serializable payload of information for the result of processing a step input."""
input_name: str
type_check_data: TypeCheckData
@record
| StepInputData |
python | jazzband__django-pipeline | pipeline/templatetags/pipeline.py | {
"start": 4362,
"end": 5677
} | class ____(PipelineMixin, template.Node):
def __init__(self, name):
self.name = name
def render(self, context):
super().render(context)
package_name = template.Variable(self.name).resolve(context)
try:
package = self.package_for(package_name, "css")
except PackageNotFound:
w = "Package %r is unknown. Check PIPELINE['STYLESHEETS'] in your settings."
logger.warning(w, package_name)
# fail silently, do not return anything if an invalid group is specified
return ""
return self.render_compressed(package, package_name, "css")
def render_css(self, package, path):
template_name = package.template_name or "pipeline/css.html"
context = package.extra_context
context.update(
{
"type": guess_type(path, "text/css"),
"url": mark_safe(staticfiles_storage.url(path)),
}
)
return render_to_string(template_name, context)
def render_individual_css(self, package, paths, **kwargs):
tags = [self.render_css(package, path) for path in paths]
return "\n".join(tags)
def render_error_css(self, package_name, e):
return super().render_error("CSS", package_name, e)
| StylesheetNode |
python | pypa__warehouse | tests/unit/manage/views/test_organizations.py | {
"start": 124682,
"end": 143862
} | class ____:
def test_manage_organization_publishing_get(self, db_request):
"""Test GET request returns all forms and pending publishers"""
organization = OrganizationFactory.create()
user = UserFactory.create()
db_request.POST = MultiDict()
db_request.user = user
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.manage_organization_publishing()
assert result["organization"] == organization
assert "pending_github_publisher_form" in result
assert "pending_gitlab_publisher_form" in result
assert "pending_google_publisher_form" in result
assert "pending_activestate_publisher_form" in result
assert result["pending_oidc_publishers"] == organization.pending_oidc_publishers
def test_add_pending_github_oidc_publisher_success(self, db_request, monkeypatch):
"""Test successfully adding a pending GitHub OIDC publisher"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
db_request.POST = MultiDict()
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = user
# Mock form
form = pretend.stub(
validate=pretend.call_recorder(lambda: True),
project_name=pretend.stub(data="test-project"),
repository=pretend.stub(data="test-repo"),
normalized_owner="test-owner",
owner_id="12345",
workflow_filename=pretend.stub(data="release.yml"),
normalized_environment="",
)
monkeypatch.setattr(
org_views, "PendingGitHubPublisherForm", lambda *a, **kw: form
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_github_oidc_publisher()
assert isinstance(result, HTTPSeeOther)
assert db_request.session.flash.calls == [
pretend.call(
"Registered a new pending publisher to create the project "
f"'test-project' owned by the '{organization.name}' organization.",
queue="success",
)
]
assert db_request.metrics.increment.calls == [
pretend.call(
"warehouse.oidc.add_pending_publisher.attempt",
tags=["publisher:GitHub", "organization:true"],
),
pretend.call(
"warehouse.oidc.add_pending_publisher.ok",
tags=["publisher:GitHub", "organization:true"],
),
]
def test_add_pending_github_oidc_publisher_disabled(self, db_request):
"""Test adding GitHub publisher when admin flag disables it"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
db_request.user = user
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(
lambda flag: flag == AdminFlagValue.DISALLOW_GITHUB_OIDC
)
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = MultiDict()
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_github_oidc_publisher()
assert result == view.default_response
assert db_request.session.flash.calls == [
pretend.call(
"GitHub-based trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details.",
queue="error",
)
]
def test_add_pending_github_oidc_publisher_over_limit(self, db_request):
"""Adding GitHub publisher fails when org has too many pending publishers"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
# Add 3 existing pending publishers
PendingGitHubPublisherFactory.create_batch(3, organization_id=organization.id)
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = MultiDict()
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_github_oidc_publisher()
assert result == view.default_response
assert db_request.session.flash.calls == [
pretend.call(
"The trusted publisher could not be registered",
queue="error",
)
]
def test_add_pending_gitlab_oidc_publisher_success(self, db_request, monkeypatch):
"""Test successfully adding a pending GitLab OIDC publisher"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda *a: False)
)
db_request.POST = MultiDict()
db_request.path = "/fake/path"
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
db_request.user = user
# Mock form
form = pretend.stub(
validate=pretend.call_recorder(lambda: True),
project_name=pretend.stub(data="test-project"),
namespace=pretend.stub(data="test-namespace"),
project=pretend.stub(data="test-project"),
workflow_filepath=pretend.stub(data=".gitlab-ci.yml"),
environment=pretend.stub(data=""),
issuer_url=pretend.stub(data="https://gitlab.com"),
)
monkeypatch.setattr(
org_views, "PendingGitLabPublisherForm", lambda *a, **kw: form
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_gitlab_oidc_publisher()
assert isinstance(result, HTTPSeeOther)
assert db_request.metrics.increment.calls[-1] == pretend.call(
"warehouse.oidc.add_pending_publisher.ok",
tags=["publisher:GitLab", "organization:true"],
)
def test_gitlab_form_includes_issuer_url_choices(self, db_request, monkeypatch):
"""Test that GitLab form is created with issuer_url_choices"""
organization = OrganizationFactory.create()
user = UserFactory.create()
db_request.POST = MultiDict()
db_request.user = user
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
# Mock GitLabPublisher.get_available_issuer_urls to return multiple issuers
mock_issuers = [
("https://gitlab.com", "GitLab.com"),
("https://gitlab.example.com", "Custom GitLab"),
]
monkeypatch.setattr(
org_views.GitLabPublisher,
"get_available_issuer_urls",
lambda organization: mock_issuers,
)
# Track the form creation to verify issuer_url_choices are passed
form_calls = []
def track_form_creation(*args, **kwargs):
form_calls.append(kwargs)
return pretend.stub(
validate=lambda: False,
project_name=pretend.stub(data=""),
namespace=pretend.stub(data=""),
project=pretend.stub(data=""),
workflow_filepath=pretend.stub(data=""),
environment=pretend.stub(data=""),
issuer_url=pretend.stub(data="", choices=mock_issuers),
)
monkeypatch.setattr(
org_views, "PendingGitLabPublisherForm", track_form_creation
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
# Verify that the form was created with issuer_url_choices
assert len(form_calls) == 1
assert form_calls[0]["issuer_url_choices"] == mock_issuers
# Verify the form has the correct choices
assert view.pending_gitlab_publisher_form.issuer_url.choices == mock_issuers
def test_manage_organization_publishing_get_oidc_disabled(
self, db_request, monkeypatch
):
"""Test GET request when global OIDC is disabled"""
organization = OrganizationFactory.create()
user = UserFactory.create()
db_request.POST = MultiDict()
db_request.user = user
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: True)
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
# Mock all form classes since default_response tries to instantiate them
pending_github_publisher_form_obj = pretend.stub()
monkeypatch.setattr(
org_views,
"PendingGitHubPublisherForm",
lambda *a, **kw: pending_github_publisher_form_obj,
)
pending_gitlab_publisher_form_obj = pretend.stub()
monkeypatch.setattr(
org_views,
"PendingGitLabPublisherForm",
lambda *a, **kw: pending_gitlab_publisher_form_obj,
)
pending_google_publisher_form_obj = pretend.stub()
monkeypatch.setattr(
org_views,
"PendingGooglePublisherForm",
lambda *a, **kw: pending_google_publisher_form_obj,
)
pending_activestate_publisher_form_obj = pretend.stub()
monkeypatch.setattr(
org_views,
"PendingActiveStatePublisherForm",
lambda *a, **kw: pending_activestate_publisher_form_obj,
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.manage_organization_publishing()
assert result["organization"] == organization
assert db_request.session.flash.calls == [
pretend.call(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details.",
queue="error",
)
]
def test_add_pending_github_oidc_publisher_already_exists(
self, db_request, monkeypatch
):
"""Test adding GitHub publisher when it already exists"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
# Create an existing pending publisher with matching attributes
existing_publisher = PendingGitHubPublisherFactory.create(
project_name="test-project",
repository_name="test-repo",
repository_owner="test-owner",
repository_owner_id="12345",
workflow_filename="release.yml",
environment="",
organization_id=organization.id,
)
db_request.db.add(existing_publisher)
db_request.db.flush()
db_request.POST = MultiDict()
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = user
# Mock form with same data as existing publisher
form = pretend.stub(
validate=pretend.call_recorder(lambda: True),
project_name=pretend.stub(data="test-project"),
repository=pretend.stub(data="test-repo"),
normalized_owner="test-owner",
owner_id="12345",
workflow_filename=pretend.stub(data="release.yml"),
normalized_environment="",
)
monkeypatch.setattr(
org_views, "PendingGitHubPublisherForm", lambda *a, **kw: form
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_github_oidc_publisher()
assert result == view.default_response
assert db_request.session.flash.calls == [
pretend.call(
"This publisher has already been registered in your organization. "
"See your existing pending publishers below.",
queue="error",
)
]
def test_add_pending_github_oidc_publisher_unique_violation(
self, db_request, monkeypatch
):
"""Test UniqueViolation exception handling during publisher creation"""
organization = OrganizationFactory.create()
user = UserFactory.create(with_verified_primary_email=True)
db_request.POST = MultiDict()
db_request.path = "/fake/path"
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/fake/route")
db_request.user = user
# Mock db.add to raise UniqueViolation (simulates race condition)
db_request.db.add = pretend.raiser(UniqueViolation("foo", "bar", "baz"))
# Mock form
form = pretend.stub(
validate=pretend.call_recorder(lambda: True),
project_name=pretend.stub(data="test-project"),
repository=pretend.stub(data="test-repo"),
normalized_owner="test-owner",
owner_id="12345",
workflow_filename=pretend.stub(data="release.yml"),
normalized_environment="",
)
monkeypatch.setattr(
org_views, "PendingGitHubPublisherForm", lambda *a, **kw: form
)
view = org_views.ManageOrganizationPublishingViews(organization, db_request)
result = view.add_pending_github_oidc_publisher()
# Should return HTTPSeeOther redirect (double-post protection)
assert isinstance(result, HTTPSeeOther)
assert result.location == "/fake/path"
def test_two_orgs_can_create_pending_publishers_for_same_project_name(
self, db_request, monkeypatch
):
"""
Two different organizations can create pending OIDC publishers
for the same future project name but with different OIDC credentials.
"""
# Create two separate organizations with different owners
org1_owner = UserFactory.create(username="org1-owner")
org2_owner = UserFactory.create(username="org2-owner")
EmailFactory.create(user=org1_owner, verified=True, primary=True)
EmailFactory.create(user=org2_owner, verified=True, primary=True)
org1 = OrganizationFactory.create(name="org1")
org2 = OrganizationFactory.create(name="org2")
OrganizationRoleFactory.create(
organization=org1, user=org1_owner, role_name="Owner"
)
OrganizationRoleFactory.create(
organization=org2, user=org2_owner, role_name="Owner"
)
# Setup request for org1
db_request.user = org1_owner
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.registry = pretend.stub(settings={"github.token": "fake-api-token"})
db_request.POST = MultiDict(
{
"project_name": "same-project-name",
"owner": "org1",
"repository": "repo1",
"workflow_filename": "release.yml",
"environment": "",
}
)
# Mock the GitHub API lookup for org1
monkeypatch.setattr(
org_views.PendingGitHubPublisherForm,
"_lookup_owner",
lambda *a: {"login": "org1", "id": "11111"},
)
# Org1 creates a pending publisher
view1 = org_views.ManageOrganizationPublishingViews(org1, db_request)
result1 = view1.add_pending_github_oidc_publisher()
assert isinstance(result1, HTTPSeeOther)
# Verify org1's pending publisher was created
pending_publisher_1 = (
db_request.db.query(PendingGitHubPublisher)
.filter(
PendingGitHubPublisher.organization_id == org1.id,
PendingGitHubPublisher.project_name == "same-project-name",
)
.one()
)
assert pending_publisher_1.repository_owner == "org1"
assert pending_publisher_1.repository_name == "repo1"
assert pending_publisher_1.organization_id == org1.id
# Setup request for org2 with SAME project name but different credentials
db_request.user = org2_owner
db_request.POST = MultiDict(
{
"project_name": "same-project-name", # SAME PROJECT NAME
"owner": "org2", # DIFFERENT GitHub org
"repository": "repo2", # DIFFERENT repo
"workflow_filename": "release.yml",
"environment": "",
}
)
# Mock the GitHub API lookup for org2
monkeypatch.setattr(
org_views.PendingGitHubPublisherForm,
"_lookup_owner",
lambda *a: {"login": "org2", "id": "22222"},
)
# Org2 creates a pending publisher for the SAME project name
view2 = org_views.ManageOrganizationPublishingViews(org2, db_request)
result2 = view2.add_pending_github_oidc_publisher()
assert isinstance(result2, HTTPSeeOther)
# Verify org2's pending publisher was also created
pending_publisher_2 = (
db_request.db.query(PendingGitHubPublisher)
.filter(
PendingGitHubPublisher.organization_id == org2.id,
PendingGitHubPublisher.project_name == "same-project-name",
)
.one()
)
assert pending_publisher_2.repository_owner == "org2"
assert pending_publisher_2.repository_name == "repo2"
assert pending_publisher_2.organization_id == org2.id
# CRITICAL: Both pending publishers should exist
all_pending_publishers = (
db_request.db.query(PendingGitHubPublisher)
.filter(PendingGitHubPublisher.project_name == "same-project-name")
.all()
)
assert len(all_pending_publishers) == 2
# Verify they have different credentials (different organizations)
assert pending_publisher_1.id != pending_publisher_2.id
assert (
pending_publisher_1.repository_owner != pending_publisher_2.repository_owner
)
assert (
pending_publisher_1.repository_name != pending_publisher_2.repository_name
)
| TestManageOrganizationPublishingViews |
python | kamyu104__LeetCode-Solutions | Python/find-longest-self-contained-substring.py | {
"start": 1141,
"end": 2164
} | class ____(object):
def maxSubstringLength(self, s):
"""
:type s: str
:rtype: int
"""
def check(left, right):
for x in idxs:
if not x:
continue
l = bisect.bisect_left(x, left)
r = bisect.bisect_right(x, right)-1
if not (r-l+1 == len(x) or r-l+1 == 0):
return False
return True
idxs = [[] for _ in xrange(26)]
for i, x in enumerate(s):
idxs[ord(x)-ord('a')].append(i)
result = -1
for x in idxs:
if not x:
continue
left = x[0]
for y in idxs:
if not y:
continue
right = y[-1]
if left <= right and right-left+1 != len(s) and check(left, right):
result = max(result, right-left+1)
return result
# Time: O(26 * n)
# Space: O(26)
# freq table, two pointers
| Solution2 |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/base.py | {
"start": 5443,
"end": 6472
} | class ____:
"""Mixin for tool callbacks."""
def on_tool_end(
self,
output: Any,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
"""Run when the tool ends running.
Args:
output: The output of the tool.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
"""
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
"""Run when tool errors.
Args:
error: The error that occurred.
run_id: The run ID. This is the ID of the current run.
parent_run_id: The parent run ID. This is the ID of the parent run.
**kwargs: Additional keyword arguments.
"""
| ToolManagerMixin |
python | jazzband__django-polymorphic | example/orders/admin.py | {
"start": 464,
"end": 822
} | class ____(StackedPolymorphicInline):
"""
An inline for a polymorphic model.
The actual form appearance of each row is determined by
the child inline that corresponds with the actual model type.
"""
model = Payment
child_inlines = (CreditCardPaymentInline, BankPaymentInline, SepaPaymentInline)
@admin.register(Order)
| PaymentInline |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 7447,
"end": 8538
} | class ____(XYGlyph, LineGlyph, FillGlyph, HatchGlyph):
''' Render annuli.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Annulus.py"
_args = ('x', 'y', 'inner_radius', 'outer_radius')
x = NumberSpec(default=field("x"), help="""
The x-coordinates of the center of the annuli.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates of the center of the annuli.
""")
inner_radius = DistanceSpec(default=field("inner_radius"), help="""
The inner radii of the annuli.
""")
outer_radius = DistanceSpec(default=field("outer_radius"), help="""
The outer radii of the annuli.
""")
line_props = Include(LineProps, help="""
The {prop} values for the annuli.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the annuli.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the annuli.
""")
| Annulus |
python | pandas-dev__pandas | pandas/tests/arrays/integer/test_comparison.py | {
"start": 151,
"end": 1212
} | class ____(NumericOps, ComparisonOps):
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, comparison_op, dtype):
ComparisonOps.test_scalar(self, other, comparison_op, dtype)
def test_compare_to_int(self, dtype, comparison_op):
# GH 28930
op_name = f"__{comparison_op.__name__}__"
s1 = pd.Series([1, None, 3], dtype=dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, op_name)
result = method(2)
method = getattr(s2, op_name)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
tm.assert_series_equal(result, expected)
def test_equals():
# GH-30652
# equals is generally tested in /tests/extension/base/methods, but this
# specifically tests that two arrays of the same class but different dtype
# do not evaluate equal
a1 = pd.array([1, 2, None], dtype="Int64")
a2 = pd.array([1, 2, None], dtype="Int32")
assert a1.equals(a2) is False
| TestComparisonOps |
python | apache__airflow | providers/standard/tests/unit/standard/decorators/test_branch_python.py | {
"start": 1221,
"end": 3546
} | class ____:
# when run in "Parallel" test run environment, sometimes this test runs for a long time
# because creating virtualenv and starting new Python interpreter creates a lot of IO/contention
# possibilities. So we are increasing the timeout for this test to 3x of the default timeout
@pytest.mark.execution_timeout(180)
@pytest.mark.parametrize(
("branch_task_name", "skipped_task_name"), [("task_1", "task_2"), ("task_2", "task_1")]
)
def test_branch_one(self, dag_maker, branch_task_name, skipped_task_name):
@task
def dummy_f():
pass
@task
def task_1():
pass
@task
def task_2():
pass
@task.branch(task_id="branching")
def branch_operator():
return branch_task_name
with dag_maker():
branchoperator = branch_operator()
df = dummy_f()
task_1 = task_1()
task_2 = task_2()
df.set_downstream(branchoperator)
branchoperator.set_downstream(task_1)
branchoperator.set_downstream(task_2)
dr = dag_maker.create_dagrun()
dag_maker.run_ti("dummy_f", dr)
if AIRFLOW_V_3_0_1:
with pytest.raises(DownstreamTasksSkipped) as exc_info:
dag_maker.run_ti("branching", dr)
assert exc_info.value.tasks == [(skipped_task_name, -1)]
else:
dag_maker.run_ti("branching", dr)
dag_maker.run_ti("task_1", dr)
dag_maker.run_ti("task_2", dr)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == "dummy_f":
assert ti.state == State.SUCCESS
if ti.task_id == "branching":
assert ti.state == State.SUCCESS
if ti.task_id == "task_1" and branch_task_name == "task_1":
assert ti.state == State.SUCCESS
elif ti.task_id == "task_1":
assert ti.state == State.SKIPPED
if ti.task_id == "task_2" and branch_task_name == "task_2":
assert ti.state == State.SUCCESS
elif ti.task_id == "task_2":
assert ti.state == State.SKIPPED
| TestBranchPythonDecoratedOperator |
python | wandb__wandb | wandb/_pydantic/pagination.py | {
"start": 559,
"end": 616
} | class ____(GQLResult, Generic[NodeT]):
node: NodeT
| Edge |
python | langchain-ai__langchain | libs/core/tests/unit_tests/tracers/test_async_base_tracer.py | {
"start": 582,
"end": 21915
} | class ____(AsyncBaseTracer):
"""Fake tracer to test async based tracers."""
def __init__(self) -> None:
"""Initialize the tracer."""
super().__init__()
self.runs: list[Run] = []
async def _persist_run(self, run: Run) -> None:
self.runs.append(run)
def _compare_run_with_error(run: Any, expected_run: Any) -> None:
if run.child_runs:
assert len(expected_run.child_runs) == len(run.child_runs)
for received, expected in zip(
run.child_runs, expected_run.child_runs, strict=False
):
_compare_run_with_error(received, expected)
received = run.dict(exclude={"child_runs"})
received_err = received.pop("error")
expected = expected_run.dict(exclude={"child_runs"})
expected_err = expected.pop("error")
assert received == expected
if expected_err is not None:
assert received_err is not None
assert expected_err in received_err
else:
assert received_err is None
@freeze_time("2023-01-01")
async def test_tracer_llm_run() -> None:
"""Test tracer on an LLM run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=uuid,
parent_run_id=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
async def test_tracer_chat_model_run() -> None:
"""Test tracer on a Chat Model run."""
tracer = FakeAsyncTracer()
manager = AsyncCallbackManager(handlers=[tracer])
run_managers = await manager.on_chat_model_start(
serialized=SERIALIZED_CHAT, messages=[[HumanMessage(content="")]]
)
compare_run = Run(
id=str(run_managers[0].run_id), # type: ignore[arg-type]
name="chat_model",
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED_CHAT,
inputs={"prompts": ["Human: "]},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=run_managers[0].run_id,
dotted_order=f"20230101T000000000000Z{run_managers[0].run_id}",
)
for run_manager in run_managers:
await run_manager.on_llm_end(response=LLMResult(generations=[[]]))
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
async def test_tracer_llm_run_errors_no_start() -> None:
"""Test tracer on an LLM run without a start."""
tracer = FakeAsyncTracer()
with pytest.raises(TracerException):
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid4())
@freeze_time("2023-01-01")
async def test_tracer_multiple_llm_runs() -> None:
"""Test the tracer with multiple runs."""
uuid = uuid4()
compare_run = Run(
id=uuid,
name="llm",
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
num_runs = 10
for _ in range(num_runs):
await tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run] * num_runs
@freeze_time("2023-01-01")
async def test_tracer_chain_run() -> None:
"""Test tracer on a Chain run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs={},
error=None,
run_type="chain",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid)
await tracer.on_chain_end(outputs={}, run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
async def test_tracer_tool_run() -> None:
"""Test tracer on a Tool run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs={"output": "test"},
error=None,
run_type="tool",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_tool_start(
serialized={"name": "tool"}, input_str="test", run_id=uuid
)
await tracer.on_tool_end("test", run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
async def test_tracer_nested_run() -> None:
"""Test tracer on a nested run."""
tracer = FakeAsyncTracer()
chain_uuid = uuid4()
tool_uuid = uuid4()
llm_uuid1 = uuid4()
llm_uuid2 = uuid4()
for _ in range(10):
await tracer.on_chain_start(
serialized={"name": "chain"}, inputs={}, run_id=chain_uuid
)
await tracer.on_tool_start(
serialized={"name": "tool"},
input_str="test",
run_id=tool_uuid,
parent_run_id=chain_uuid,
)
await tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid1,
parent_run_id=tool_uuid,
)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1)
await tracer.on_tool_end("test", run_id=tool_uuid)
await tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid2,
parent_run_id=chain_uuid,
)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2)
await tracer.on_chain_end(outputs={}, run_id=chain_uuid)
compare_run = Run( # type: ignore[call-arg]
id=str(chain_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs={},
run_type="chain",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=tool_uuid,
parent_run_id=chain_uuid,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs={"output": "test"},
error=None,
run_type="tool",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid1), # type: ignore[arg-type]
parent_run_id=str(tool_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid1}",
)
],
),
Run( # type: ignore[call-arg]
id=str(llm_uuid2), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}",
),
],
)
assert tracer.runs[0] == compare_run
assert tracer.runs == [compare_run] * 10
@freeze_time("2023-01-01")
async def test_tracer_llm_run_on_error() -> None:
"""Test tracer on an LLM run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=None,
error=repr(exception),
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
await tracer.on_llm_error(exception, run_id=uuid)
assert len(tracer.runs) == 1
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
async def test_tracer_llm_run_on_error_callback() -> None:
"""Test tracer on an LLM run with an error and a callback."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=None,
error=repr(exception),
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
class FakeTracerWithLlmErrorCallback(FakeAsyncTracer):
error_run = None
async def _on_llm_error(self, run: Run) -> None:
self.error_run = run
tracer = FakeTracerWithLlmErrorCallback()
await tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
await tracer.on_llm_error(exception, run_id=uuid)
_compare_run_with_error(tracer.error_run, compare_run)
@freeze_time("2023-01-01")
async def test_tracer_chain_run_on_error() -> None:
"""Test tracer on a Chain run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs=None,
error=repr(exception),
run_type="chain",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid)
await tracer.on_chain_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
async def test_tracer_tool_run_on_error() -> None:
"""Test tracer on a Tool run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs=None,
error=repr(exception),
run_type="tool",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeAsyncTracer()
await tracer.on_tool_start(
serialized={"name": "tool"}, input_str="test", run_id=uuid
)
await tracer.on_tool_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
async def test_tracer_nested_runs_on_error() -> None:
"""Test tracer on a nested run with an error."""
exception = Exception("test")
tracer = FakeAsyncTracer()
chain_uuid = uuid4()
tool_uuid = uuid4()
llm_uuid1 = uuid4()
llm_uuid2 = uuid4()
llm_uuid3 = uuid4()
for _ in range(3):
await tracer.on_chain_start(
serialized={"name": "chain"}, inputs={}, run_id=chain_uuid
)
await tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid1,
parent_run_id=chain_uuid,
)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1)
await tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid2,
parent_run_id=chain_uuid,
)
await tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2)
await tracer.on_tool_start(
serialized={"name": "tool"},
input_str="test",
run_id=tool_uuid,
parent_run_id=chain_uuid,
)
await tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid3,
parent_run_id=tool_uuid,
)
await tracer.on_llm_error(exception, run_id=llm_uuid3)
await tracer.on_tool_error(exception, run_id=tool_uuid)
await tracer.on_chain_error(exception, run_id=chain_uuid)
compare_run = Run( # type: ignore[call-arg]
id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
error=repr(exception),
inputs={},
outputs=None,
run_type="chain",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid1), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=None,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]], llm_output=None), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid1}",
),
Run( # type: ignore[call-arg]
id=str(llm_uuid2), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=None,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]], llm_output=None), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}",
),
Run( # type: ignore[call-arg]
id=str(tool_uuid), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
error=repr(exception),
inputs={"input": "test"},
outputs=None,
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid3), # type: ignore[arg-type]
parent_run_id=str(tool_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=repr(exception),
inputs={"prompts": []},
outputs=None,
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid3}",
)
],
run_type="tool",
),
],
)
assert len(tracer.runs) == 3
for run in tracer.runs:
_compare_run_with_error(run, compare_run)
| FakeAsyncTracer |
python | pytorch__pytorch | torch/distributions/multivariate_normal.py | {
"start": 3469,
"end": 11256
} | class ____(Distribution):
r"""
Creates a multivariate normal (also called Gaussian) distribution
parameterized by a mean vector and a covariance matrix.
The multivariate normal distribution can be parameterized either
in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
diagonal entries, such that
:math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
can be obtained via e.g. Cholesky decomposition of the covariance.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
>>> m.sample() # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
tensor([-0.2102, -0.5429])
Args:
loc (Tensor): mean of the distribution
covariance_matrix (Tensor): positive-definite covariance matrix
precision_matrix (Tensor): positive-definite precision matrix
scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal
Note:
Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
:attr:`scale_tril` can be specified.
Using :attr:`scale_tril` will be more efficient: all computations internally
are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
:attr:`precision_matrix` is passed instead, it is only used to compute
the corresponding lower triangular matrices using a Cholesky decomposition.
"""
# pyrefly: ignore [bad-override]
arg_constraints = {
"loc": constraints.real_vector,
"covariance_matrix": constraints.positive_definite,
"precision_matrix": constraints.positive_definite,
"scale_tril": constraints.lower_cholesky,
}
support = constraints.real_vector
has_rsample = True
def __init__(
self,
loc: Tensor,
covariance_matrix: Optional[Tensor] = None,
precision_matrix: Optional[Tensor] = None,
scale_tril: Optional[Tensor] = None,
validate_args: Optional[bool] = None,
) -> None:
if loc.dim() < 1:
raise ValueError("loc must be at least one-dimensional.")
if (covariance_matrix is not None) + (scale_tril is not None) + (
precision_matrix is not None
) != 1:
raise ValueError(
"Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified."
)
if scale_tril is not None:
if scale_tril.dim() < 2:
raise ValueError(
"scale_tril matrix must be at least two-dimensional, "
"with optional leading batch dimensions"
)
batch_shape = torch.broadcast_shapes(scale_tril.shape[:-2], loc.shape[:-1])
# pyrefly: ignore [read-only]
self.scale_tril = scale_tril.expand(batch_shape + (-1, -1))
elif covariance_matrix is not None:
if covariance_matrix.dim() < 2:
raise ValueError(
"covariance_matrix must be at least two-dimensional, "
"with optional leading batch dimensions"
)
batch_shape = torch.broadcast_shapes(
covariance_matrix.shape[:-2], loc.shape[:-1]
)
# pyrefly: ignore [read-only]
self.covariance_matrix = covariance_matrix.expand(batch_shape + (-1, -1))
else:
assert precision_matrix is not None # helps mypy
if precision_matrix.dim() < 2:
raise ValueError(
"precision_matrix must be at least two-dimensional, "
"with optional leading batch dimensions"
)
batch_shape = torch.broadcast_shapes(
precision_matrix.shape[:-2], loc.shape[:-1]
)
# pyrefly: ignore [read-only]
self.precision_matrix = precision_matrix.expand(batch_shape + (-1, -1))
self.loc = loc.expand(batch_shape + (-1,))
event_shape = self.loc.shape[-1:]
super().__init__(batch_shape, event_shape, validate_args=validate_args)
if scale_tril is not None:
self._unbroadcasted_scale_tril = scale_tril
elif covariance_matrix is not None:
self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix)
else: # precision_matrix is not None
self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(MultivariateNormal, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
cov_shape = batch_shape + self.event_shape + self.event_shape
new.loc = self.loc.expand(loc_shape)
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
if "covariance_matrix" in self.__dict__:
new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
if "scale_tril" in self.__dict__:
new.scale_tril = self.scale_tril.expand(cov_shape)
if "precision_matrix" in self.__dict__:
new.precision_matrix = self.precision_matrix.expand(cov_shape)
super(MultivariateNormal, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
@lazy_property
def scale_tril(self) -> Tensor:
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape + self._event_shape
)
@lazy_property
def covariance_matrix(self) -> Tensor:
return torch.matmul(
self._unbroadcasted_scale_tril, self._unbroadcasted_scale_tril.mT
).expand(self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def precision_matrix(self) -> Tensor:
return torch.cholesky_inverse(self._unbroadcasted_scale_tril).expand(
self._batch_shape + self._event_shape + self._event_shape
)
@property
def mean(self) -> Tensor:
return self.loc
@property
def mode(self) -> Tensor:
return self.loc
@property
def variance(self) -> Tensor:
return (
self._unbroadcasted_scale_tril.pow(2)
.sum(-1)
.expand(self._batch_shape + self._event_shape)
)
def rsample(self, sample_shape: _size = torch.Size()) -> Tensor:
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
diff = value - self.loc
M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
half_log_det = (
self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
)
return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det
def entropy(self):
half_log_det = (
self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
)
H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det
if len(self._batch_shape) == 0:
return H
else:
return H.expand(self._batch_shape)
| MultivariateNormal |
python | ray-project__ray | python/ray/train/v2/_internal/state/schema.py | {
"start": 285,
"end": 1236
} | class ____(str, Enum):
"""Enumeration of the possible statuses for a Train run."""
# ====== Active States ======
# The Train run is currently in the process of initializing.
INITIALIZING = "INITIALIZING"
# The Train run is waiting to be scheduled.
SCHEDULING = "SCHEDULING"
# The Train run is currently in progress.
RUNNING = "RUNNING"
# The Train run is recovering from a failure or restart.
RESTARTING = "RESTARTING"
# The Train run is resizing.
RESIZING = "RESIZING"
# ===== Terminal States ======
# The Train run completed successfully.
FINISHED = "FINISHED"
# The Train run failed due to an error in the training workers.
ERRORED = "ERRORED"
# The Train run was terminated due to system or controller errors.
ABORTED = "ABORTED"
def is_terminal(self) -> bool:
return self in [RunStatus.FINISHED, RunStatus.ERRORED, RunStatus.ABORTED]
@DeveloperAPI
| RunStatus |
python | pappasam__jedi-language-server | tests/lsp_test_client/session.py | {
"start": 621,
"end": 13099
} | class ____(MethodDispatcher):
"""Send and Receive messages over LSP as a test LS Client."""
def __init__(self, cwd=None):
self.cwd = cwd if cwd else os.getcwd()
self._thread_pool = ThreadPoolExecutor()
self._sub = None
self._writer = None
self._reader = None
self._endpoint = None
self._notification_callbacks = {}
def __enter__(self):
"""Context manager entrypoint.
shell=True needed for pytest-cov to work in subprocess.
"""
self._sub = subprocess.Popen(
[
sys.executable,
os.path.join(os.path.dirname(__file__), "lsp_run.py"),
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
bufsize=0,
cwd=self.cwd,
env=os.environ,
shell="WITH_COVERAGE" in os.environ,
)
self._writer = JsonRpcStreamWriter(
os.fdopen(self._sub.stdin.fileno(), "wb")
)
self._reader = JsonRpcStreamReader(
os.fdopen(self._sub.stdout.fileno(), "rb")
)
dispatcher = {
PUBLISH_DIAGNOSTICS: self._publish_diagnostics,
WINDOW_SHOW_MESSAGE: self._window_show_message,
WINDOW_LOG_MESSAGE: self._window_log_message,
}
self._endpoint = Endpoint(dispatcher, self._writer.write)
self._thread_pool.submit(self._reader.listen, self._endpoint.consume)
self._last_cell_id = 0
return self
def __exit__(self, typ, value, _tb):
self.shutdown(True)
try:
self._sub.terminate()
except Exception:
pass
self._endpoint.shutdown()
self._thread_pool.shutdown()
def initialize(
self,
initialize_params=None,
process_server_capabilities=None,
):
"""Sends the initialize request to LSP server."""
server_initialized = Event()
def _after_initialize(fut):
if process_server_capabilities:
process_server_capabilities(fut.result())
self.initialized()
server_initialized.set()
self._send_request(
"initialize",
params=(
initialize_params
if initialize_params is not None
else defaults.VSCODE_DEFAULT_INITIALIZE
),
handle_response=_after_initialize,
)
server_initialized.wait()
def initialized(self, initialized_params=None):
"""Sends the initialized notification to LSP server."""
if initialized_params is None:
initialized_params = {}
self._endpoint.notify("initialized", initialized_params)
def shutdown(self, should_exit, exit_timeout=LSP_EXIT_TIMEOUT):
"""Sends the shutdown request to LSP server."""
def _after_shutdown(_):
if should_exit:
self.exit_lsp(exit_timeout)
self._send_request("shutdown", handle_response=_after_shutdown)
def exit_lsp(self, exit_timeout=LSP_EXIT_TIMEOUT):
"""Handles LSP server process exit."""
self._endpoint.notify("exit")
assert self._sub.wait(exit_timeout) == 0
def text_document_completion(self, completion_params):
"""Sends text document completion request to LSP server."""
fut = self._send_request(
"textDocument/completion", params=completion_params
)
return fut.result()
def text_document_rename(self, rename_params):
"""Sends text document rename request to LSP server."""
fut = self._send_request("textDocument/rename", params=rename_params)
return fut.result()
def text_document_code_action(self, code_action_params):
"""Sends text document code action request to LSP server."""
fut = self._send_request(
"textDocument/codeAction", params=code_action_params
)
return fut.result()
def text_document_hover(self, hover_params):
"""Sends text document hover request to LSP server."""
fut = self._send_request("textDocument/hover", params=hover_params)
return fut.result()
def text_document_signature_help(self, signature_help_params):
"""Sends text document hover request to LSP server."""
fut = self._send_request(
"textDocument/signatureHelp", params=signature_help_params
)
return fut.result()
def text_document_declaration(self, declaration_params):
"""Sends text document declaration request to LSP server."""
fut = self._send_request(
"textDocument/declaration", params=declaration_params
)
return fut.result()
def text_document_definition(self, definition_params):
"""Sends text document definition request to LSP server."""
fut = self._send_request(
"textDocument/definition", params=definition_params
)
return fut.result()
def text_document_symbol(self, document_symbol_params):
"""Sends text document symbol request to LSP server."""
fut = self._send_request(
"textDocument/documentSymbol", params=document_symbol_params
)
return fut.result()
def text_document_highlight(self, document_highlight_params):
"""Sends text document highlight request to LSP server."""
fut = self._send_request(
"textDocument/documentHighlight", params=document_highlight_params
)
return fut.result()
def text_document_references(self, references_params):
"""Sends text document references request to LSP server."""
fut = self._send_request(
"textDocument/references", params=references_params
)
return fut.result()
def text_doc_semantic_tokens_full(self, semantic_tokens_params):
"""Sends text document semantic tokens full request to LSP server."""
fut = self._send_request(
"textDocument/semanticTokens/full", params=semantic_tokens_params
)
return fut.result()
def text_doc_semantic_tokens_range(self, semantic_tokens_range_params):
"""Sends text document semantic tokens range request to LSP server."""
fut = self._send_request(
"textDocument/semanticTokens/range",
params=semantic_tokens_range_params,
)
return fut.result()
def workspace_symbol(self, workspace_symbol_params):
"""Sends workspace symbol request to LSP server."""
fut = self._send_request(
"workspace/symbol", params=workspace_symbol_params
)
return fut.result()
def completion_item_resolve(self, resolve_params):
"""Sends completion item resolve request to LSP server."""
fut = self._send_request(
"completionItem/resolve", params=resolve_params
)
return fut.result()
def notify_did_change_text_document(self, did_change_params):
"""Sends did change text document notification to LSP Server."""
self._send_notification(
"textDocument/didChange", params=did_change_params
)
def notify_did_save_text_document(self, did_save_params):
"""Sends did save text document notification to LSP Server."""
self._send_notification("textDocument/didSave", params=did_save_params)
def notify_did_open_text_document(self, did_open_params):
"""Sends did open text document notification to LSP Server."""
self._send_notification("textDocument/didOpen", params=did_open_params)
def notify_did_close_text_document(self, did_close_params):
"""Sends did close text document notification to LSP Server."""
self._send_notification(
"textDocument/didClose", params=did_close_params
)
def notify_did_change_notebook_document(self, did_change_params):
"""Sends did change notebook document notification to LSP Server."""
self._send_notification(
"notebookDocument/didChange", params=did_change_params
)
def notify_did_save_notebook_document(self, did_save_params):
"""Sends did save notebook document notification to LSP Server."""
self._send_notification(
"notebookDocument/didSave", params=did_save_params
)
def notify_did_open_notebook_document(self, did_open_params):
"""Sends did open notebook document notification to LSP Server."""
self._send_notification(
"notebookDocument/didOpen", params=did_open_params
)
def notify_did_close_notebook_document(self, did_close_params):
"""Sends did close notebook document notification to LSP Server."""
self._send_notification(
"notebookDocument/didClose", params=did_close_params
)
def open_notebook_document(self, path):
"""Opens a notebook document on the LSP Server."""
# Construct did_open_notebook_document params from the notebook file.
notebook = json.loads(path.read_text("utf-8"))
uri = as_uri(path)
lsp_cells = []
lsp_cell_text_documents = []
for cell in notebook["cells"]:
self._last_cell_id += 1
cell_uri = f"{uri}#{self._last_cell_id}"
lsp_cells.append(
{
"kind": 2 if cell["cell_type"] == "code" else 1,
"document": cell_uri,
"metadata": {"metadata": cell["metadata"]},
}
)
lsp_cell_text_documents.append(
{
"uri": cell_uri,
"languageId": "python",
"version": 1,
"text": "".join(cell["source"]),
}
)
# Notify the server.
self.notify_did_open_notebook_document(
{
"notebookDocument": {
"uri": uri,
"notebookType": "jupyter-notebook",
"languageId": "python",
"version": 1,
"cells": lsp_cells,
},
"cellTextDocuments": lsp_cell_text_documents,
}
)
# Return the generated cell URIs.
return [cell["document"] for cell in lsp_cells]
def set_notification_callback(self, notification_name, callback):
"""Set custom LS notification handler."""
self._notification_callbacks[notification_name] = callback
def get_notification_callback(self, notification_name):
"""Gets callback if set or default callback for a given LS notification."""
try:
return self._notification_callbacks[notification_name]
except KeyError:
def _default_handler(_params):
"""Default notification handler."""
return _default_handler
def _publish_diagnostics(self, publish_diagnostics_params):
"""Internal handler for text document publish diagnostics."""
return self._handle_notification(
PUBLISH_DIAGNOSTICS, publish_diagnostics_params
)
def _window_log_message(self, window_log_message_params):
"""Internal handler for window log message."""
return self._handle_notification(
WINDOW_LOG_MESSAGE, window_log_message_params
)
def _window_show_message(self, window_show_message_params):
"""Internal handler for window show message."""
return self._handle_notification(
WINDOW_SHOW_MESSAGE, window_show_message_params
)
def _handle_notification(self, notification_name, params):
"""Internal handler for notifications."""
fut = Future()
def _handler():
callback = self.get_notification_callback(notification_name)
callback(params)
fut.set_result(None)
self._thread_pool.submit(_handler)
return fut
def _send_request(
self, name, params=None, handle_response=lambda f: f.done()
):
"""Sends {name} request to the LSP server."""
fut = self._endpoint.request(name, params)
fut.add_done_callback(handle_response)
return fut
def _send_notification(self, name, params=None):
"""Sends {name} notification to the LSP server."""
self._endpoint.notify(name, params)
| LspSession |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/utils/eks_test_constants.py | {
"start": 7433,
"end": 7809
} | class ____:
"""The names of methods, used when a test is expected to throw an exception."""
CREATE_CLUSTER: str = "CreateCluster"
CREATE_NODEGROUP: str = "CreateNodegroup"
DELETE_CLUSTER: str = "DeleteCluster"
DELETE_NODEGROUP: str = "DeleteNodegroup"
DESCRIBE_CLUSTER: str = "DescribeCluster"
DESCRIBE_NODEGROUP: str = "DescribeNodegroup"
| MethodNames |
python | coleifer__peewee | tests/base_models.py | {
"start": 795,
"end": 911
} | class ____(TestModel):
email = CharField()
user = ForeignKeyField(User, backref='accounts', null=True)
| Account |
python | ray-project__ray | python/ray/experimental/channel/common.py | {
"start": 21147,
"end": 22468
} | class ____(WriterInterface):
def start(self):
for channel in self._output_channels:
channel.ensure_registered_as_writer()
def write(self, val: Any, timeout: Optional[float] = None) -> None:
# If it is an exception, there's only 1 return value.
# We have to send the same data to all channels.
if isinstance(val, Exception):
if len(self._output_channels) > 1:
val = tuple(val for _ in range(len(self._output_channels)))
if not self._is_input:
if len(self._output_channels) > 1:
if not isinstance(val, tuple):
raise ValueError(
f"Expected a tuple of {len(self._output_channels)} outputs, "
f"but got {type(val)}"
)
if len(val) != len(self._output_channels):
raise ValueError(
f"Expected {len(self._output_channels)} outputs, but got "
f"{len(val)} outputs"
)
for i, channel in enumerate(self._output_channels):
idx = self._output_idxs[i]
val_i = _adapt(val, idx, self._is_input)
channel.write(val_i, timeout)
self._num_writes += 1
@DeveloperAPI
| SynchronousWriter |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 68302,
"end": 69055
} | class ____(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a grouped list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
('medium', 'Medium quality'),
),
),
('good', 'Good quality'),
]
)
| TestChoiceFieldWithGroupedChoices |
python | facebook__pyre-check | client/tests/backend_arguments_test.py | {
"start": 701,
"end": 28285
} | class ____(testslide.TestCase):
def test_create_remote_logging(self) -> None:
self.assertIsNone(
RemoteLogging.create(),
)
self.assertIsNone(
RemoteLogging.create(identifier="foo"),
)
self.assertEqual(
RemoteLogging.create(logger="logger"),
RemoteLogging(logger="logger", identifier=""),
)
self.assertEqual(
RemoteLogging.create(logger="logger", identifier="foo"),
RemoteLogging(logger="logger", identifier="foo"),
)
def test_serialize_remote_logging(self) -> None:
self.assertDictEqual(
RemoteLogging(logger="/bin/logger").serialize(),
{"logger": "/bin/logger", "identifier": ""},
)
self.assertDictEqual(
RemoteLogging(logger="/bin/logger", identifier="foo").serialize(),
{"logger": "/bin/logger", "identifier": "foo"},
)
def test_serialize_source_paths(self) -> None:
self.assertDictEqual(
SimpleSourcePath(
[
search_path.SimpleElement("/source0"),
search_path.SimpleElement("/source1"),
]
).serialize(),
{"kind": "simple", "paths": ["/source0", "/source1"]},
)
self.assertDictEqual(
WithUnwatchedDependencySourcePath(
change_indicator_root=Path("/root"),
unwatched_dependency=configuration.UnwatchedDependency(
change_indicator="foo",
files=configuration.UnwatchedFiles(
root="/derp",
checksum_path="CHECKSUMS",
),
),
elements=[
search_path.SimpleElement("/source0"),
search_path.SimpleElement("/source1"),
],
).serialize(),
{
"kind": "with_unwatched_dependency",
"unwatched_dependency": {
"change_indicator": {"root": "/root", "relative": "foo"},
"files": {"root": "/derp", "checksum_path": "CHECKSUMS"},
},
"paths": ["/source0", "/source1"],
},
)
self.assertDictEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source"),
targets=["//foo:bar", "//foo:baz"],
).serialize(),
{
"kind": "buck",
"source_root": "/source",
"artifact_root": "/artifact",
"targets": ["//foo:bar", "//foo:baz"],
"kill_buck_after_build": False,
},
)
self.assertDictEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source"),
targets=["//foo:bar"],
targets_fallback_sources=[search_path.SimpleElement("/source")],
mode="opt",
isolation_prefix=".lsp",
bxl_builder="//foo.bxl:build",
kill_buck_after_build=True,
).serialize(),
{
"kind": "buck",
"source_root": "/source",
"artifact_root": "/artifact",
"targets": ["//foo:bar"],
"targets_fallback_sources": ["/source"],
"mode": "opt",
"isolation_prefix": ".lsp",
"bxl_builder": "//foo.bxl:build",
"kill_buck_after_build": True,
},
)
def test_serialize_base_arguments(self) -> None:
def assert_serialized(
arguments: BaseArguments, items: Iterable[Tuple[str, object]]
) -> None:
serialized = arguments.serialize()
for key, value in items:
if key not in serialized:
self.fail(f"Cannot find key `{key}` in serialized arguments")
else:
self.assertEqual(value, serialized[key])
assert_serialized(
BaseArguments(
log_path="foo",
global_root="bar",
source_paths=SimpleSourcePath([search_path.SimpleElement("source")]),
),
[
("log_path", "foo"),
("global_root", "bar"),
("source_paths", {"kind": "simple", "paths": ["source"]}),
],
)
assert_serialized(
BaseArguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
excludes=["/excludes"],
checked_directory_allowlist=["/allows"],
checked_directory_blocklist=["/blocks"],
extensions=[".typsy"],
),
[
("excludes", ["/excludes"]),
("checked_directory_allowlist", ["/allows"]),
("checked_directory_blocklist", ["/blocks"]),
("extensions", [".typsy"]),
],
)
assert_serialized(
BaseArguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
debug=True,
parallel=True,
number_of_workers=20,
),
[("debug", True), ("parallel", True), ("number_of_workers", 20)],
)
assert_serialized(
BaseArguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
relative_local_root="local",
),
[("local_root", "/project/local")],
)
assert_serialized(
BaseArguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
remote_logging=RemoteLogging(logger="/logger", identifier="baz"),
profiling_output=Path("/derp"),
memory_profiling_output=Path("/derp2"),
),
[
("profiling_output", "/derp"),
("remote_logging", {"logger": "/logger", "identifier": "baz"}),
("memory_profiling_output", "/derp2"),
],
)
def test_find_watchman_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_files_exist(
root_path,
["foo/qux/derp", "foo/bar/.watchmanconfig", "foo/bar/baz/derp"],
)
expected_root = root_path / "foo/bar"
self.assertEqual(
find_watchman_root(root_path / "foo/bar/baz", stop_search_after=3),
expected_root,
)
self.assertEqual(
find_watchman_root(root_path / "foo/bar", stop_search_after=2),
expected_root,
)
self.assertIsNone(
find_watchman_root(root_path / "foo/qux", stop_search_after=2)
)
self.assertIsNone(
find_watchman_root(root_path / "foo", stop_search_after=1)
)
self.assertIsNone(find_watchman_root(root_path, stop_search_after=0))
def test_find_buck2_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_files_exist(
root_path,
[
"foo/.buckconfig",
"foo/qux/derp",
"foo/bar/.buckconfig",
"foo/bar/baz/derp",
],
)
expected_root = root_path / "foo"
self.assertEqual(
find_buck2_root(root_path / "foo/bar/baz", stop_search_after=3),
expected_root,
)
self.assertEqual(
find_buck2_root(root_path / "foo/bar", stop_search_after=2),
expected_root,
)
self.assertEqual(
find_buck2_root(root_path / "foo/qux", stop_search_after=2),
expected_root,
)
self.assertEqual(
find_buck2_root(root_path / "foo", stop_search_after=1), expected_root
)
self.assertIsNone(find_buck2_root(root_path, stop_search_after=0))
def test_get_simple_source_path__exists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "src"])
raw_element = search_path.SimpleRawElement(str(root_path / "src"))
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[raw_element],
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=42,
watchman_root=None,
),
SimpleSourcePath([raw_element.to_element()]),
)
def test_get_simple_source_path__nonexists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre"])
raw_element = search_path.SimpleRawElement(str(root_path / "src"))
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[raw_element],
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=42,
watchman_root=None,
),
SimpleSourcePath([]),
)
def test_get_with_unwatched_dependency_source_path__exists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project/local"])
setup.ensure_files_exist(
root_path, ["src/indicator", "unwatched_root/CHECKSUMS"]
)
watched_root = root_path / "src"
raw_element = search_path.SimpleRawElement(str(root_path / "src"))
unwatched_dependency = configuration.UnwatchedDependency(
change_indicator="indicator",
files=configuration.UnwatchedFiles(
root=str(root_path / "unwatched_root"), checksum_path="CHECKSUMS"
),
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
relative_local_root="local",
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[raw_element],
unwatched_dependency=unwatched_dependency,
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=watched_root,
),
WithUnwatchedDependencySourcePath(
elements=[raw_element.to_element()],
change_indicator_root=watched_root,
unwatched_dependency=unwatched_dependency,
),
)
def test_get_with_unwatched_dependency_source_path__nonexists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project"])
setup.ensure_files_exist(root_path, ["src/indicator"])
watched_root = root_path / "src"
raw_element = search_path.SimpleRawElement(str(root_path / "src"))
unwatched_dependency = configuration.UnwatchedDependency(
change_indicator="indicator",
files=configuration.UnwatchedFiles(
root=str(root_path / "unwatched_root"), checksum_path="CHECKSUMS"
),
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[raw_element],
unwatched_dependency=unwatched_dependency,
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=watched_root,
),
SimpleSourcePath(
elements=[raw_element.to_element()],
),
)
def test_get_with_unwatched_dependency_source_path__not_watched(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project/local"])
setup.ensure_files_exist(
root_path, ["src/indicator", "unwatched_root/CHECKSUMS"]
)
raw_element = search_path.SimpleRawElement(str(root_path / "src"))
unwatched_dependency = configuration.UnwatchedDependency(
change_indicator="indicator",
files=configuration.UnwatchedFiles(
root=str(root_path / "unwatched_root"), checksum_path="CHECKSUMS"
),
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
relative_local_root="local",
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[raw_element],
unwatched_dependency=unwatched_dependency,
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
),
SimpleSourcePath(
elements=[raw_element.to_element()],
),
)
def test_get_buck_source_path__global(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "buck_root"])
setup.ensure_files_exist(root_path, ["buck_root/.buckconfig"])
setup.write_configuration_file(
root_path / "buck_root",
{
"targets": ["//ct:marle", "//ct:lucca"],
"buck_mode": "opt",
"isolation_prefix": ".lsp",
},
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre",
),
root_path / "buck_root",
)
),
artifact_root_name="artifact_root",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
),
BuckSourcePath(
source_root=root_path / "buck_root",
artifact_root=root_path / ".pyre" / "artifact_root",
checked_directory=root_path / "buck_root",
targets=["//ct:marle", "//ct:lucca"],
mode="opt",
isolation_prefix=".lsp",
kill_buck_after_build=False,
),
)
def test_get_buck2_source_path(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "repo_root"])
setup.ensure_files_exist(
root_path, ["repo_root/.buckconfig", "repo_root/buck_root/.buckconfig"]
)
setup.write_configuration_file(
root_path / "repo_root" / "buck_root",
{"targets": ["//ct:lavos"], "bxl_builder": "//ct:robo"},
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre",
),
root_path / "repo_root" / "buck_root",
)
),
artifact_root_name="artifact_root",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=True,
number_of_buck_threads=42,
watchman_root=None,
),
BuckSourcePath(
source_root=root_path / "repo_root",
artifact_root=root_path / ".pyre" / "artifact_root",
checked_directory=root_path / "repo_root" / "buck_root",
targets=["//ct:lavos"],
bxl_builder="//ct:robo",
kill_buck_after_build=True,
number_of_threads=42,
),
)
def test_get_buck_source_path__local(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project/local"])
setup.ensure_files_exist(root_path, ["project/local/.buckconfig"])
setup.write_configuration_file(
root_path / "project",
{
"buck_mode": "opt",
"isolation_prefix": ".lsp",
"bxl_builder": "//ct:robo",
},
)
setup.write_configuration_file(
root_path / "project",
{"targets": ["//ct:chrono"]},
relative="local",
)
self.assertEqual(
get_source_path(
frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
),
root_path / "project",
)
),
artifact_root_name="artifact_root/local",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
),
BuckSourcePath(
source_root=root_path / "project/local",
artifact_root=root_path / ".pyre" / "artifact_root" / "local",
checked_directory=root_path / "project/local",
targets=["//ct:chrono"],
mode="opt",
isolation_prefix=".lsp",
bxl_builder="//ct:robo",
kill_buck_after_build=False,
number_of_threads=None,
),
)
def test_get_buck_source_path__no_buck_root(self) -> None:
# Specify an explicit base directory to make sure the content of parent
# directories will not intervene.
with tempfile.TemporaryDirectory(dir="/tmp") as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project"])
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path / "project",
dot_pyre_directory=(root_path / ".pyre"),
targets=["//ct:frog"],
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
)
def test_get_source_path__no_source_specified(self) -> None:
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=Path("project"),
dot_pyre_directory=Path(".pyre"),
source_directories=None,
targets=None,
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
)
def test_get_source_path__confliciting_source_specified(self) -> None:
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
frontend_configuration.OpenSource(
configuration.Configuration(
global_root=Path("project"),
dot_pyre_directory=Path(".pyre"),
source_directories=[search_path.SimpleRawElement("src")],
targets=["//ct:ayla"],
)
),
artifact_root_name="irrelevant",
flavor=identifiers.PyreFlavor.CLASSIC,
kill_buck_after_build=False,
number_of_buck_threads=None,
watchman_root=None,
)
def test_get_checked_directory_for_simple_source_path(self) -> None:
element0 = search_path.SimpleElement("ozzie")
element1 = search_path.SubdirectoryElement("diva", "flea")
element2 = search_path.SitePackageElement("super", "slash")
self.assertCountEqual(
SimpleSourcePath(
[element0, element1, element2, element0]
).get_checked_directory_allowlist(),
[element0.path(), element1.path(), element2.path()],
)
def test_get_checked_directory_for_buck_source_path(self) -> None:
self.assertCountEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source/ct"),
targets=[
"//ct:robo",
"//ct:magus",
"future//ct/guardia/...",
"//ct/guardia:schala",
],
).get_checked_directory_allowlist(),
["/source/ct"],
)
def test_checked_directory_allowlist(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, ["a", "b/c"])
test_configuration = frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path,
dot_pyre_directory=Path(".pyre"),
only_check_paths=[
str(root_path / "a"),
str(root_path / "b" / "c"),
],
)
)
self.assertCountEqual(
get_checked_directory_allowlist(
test_configuration,
SimpleSourcePath([search_path.SimpleElement("source")]),
),
[
str(root_path / "a"),
str(root_path / "b/c"),
],
)
test_configuration = frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path,
dot_pyre_directory=Path(".pyre"),
only_check_paths=[
str(root_path / "a"),
str(root_path / "b" / "c"),
],
)
)
self.assertCountEqual(
get_checked_directory_allowlist(
test_configuration,
SimpleSourcePath([search_path.SimpleElement(str(root_path))]),
),
[
str(root_path / "a"),
str(root_path / "b/c"),
],
)
test_configuration = frontend_configuration.OpenSource(
configuration.Configuration(
global_root=root_path,
dot_pyre_directory=Path(".pyre"),
only_check_paths=[],
)
)
self.assertCountEqual(
get_checked_directory_allowlist(
test_configuration,
SimpleSourcePath([search_path.SimpleElement(str(root_path))]),
),
[str(root_path)],
)
| ArgumentsTest |
python | getsentry__sentry | src/sentry/grouping/api.py | {
"start": 5088,
"end": 5330
} | class ____(ProjectGroupingConfigLoader):
"""Secondary config to find old groups after config change"""
option_name = "sentry:secondary_grouping_config"
cache_prefix = "secondary-grouping-enhancements:"
| SecondaryGroupingConfigLoader |
python | sqlalchemy__sqlalchemy | examples/dogpile_caching/model.py | {
"start": 2132,
"end": 3034
} | class ____(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
addresses = relationship(Address, collection_class=set)
def __init__(self, name, *addresses):
self.name = name
self.addresses = set(addresses)
def __str__(self):
return self.name
def __repr__(self):
return "Person(name=%r)" % self.name
def format_full(self):
return "\t".join([str(x) for x in [self] + list(self.addresses)])
# Caching options. A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
cache_address_bits = (
RelationshipCache(PostalCode.city, "default")
.and_(RelationshipCache(City.country, "default"))
.and_(RelationshipCache(Address.postal_code, "default"))
)
bootstrap()
| Person |
python | keras-team__keras | keras/src/legacy/preprocessing/image.py | {
"start": 15036,
"end": 18813
} | class ____(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk.
DEPRECATED.
"""
allowed_class_modes = {"categorical", "binary", "sparse", "input", None}
def __init__(
self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode="rgb",
classes=None,
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix="",
save_format="png",
follow_links=False,
subset=None,
interpolation="nearest",
keep_aspect_ratio=False,
dtype=None,
):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
super().set_processing_attrs(
image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation,
keep_aspect_ratio,
)
self.directory = directory
self.classes = classes
if class_mode not in self.allowed_class_modes:
raise ValueError(
"Invalid class_mode: {}; expected one of: {}".format(
class_mode, self.allowed_class_modes
)
)
self.class_mode = class_mode
self.dtype = dtype
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(
_list_valid_filenames_in_directory,
(
dirpath,
self.white_list_formats,
self.split,
self.class_indices,
follow_links,
),
)
)
classes_list = []
for res in results:
classes, filenames = res.get()
classes_list.append(classes)
self.filenames += filenames
self.samples = len(self.filenames)
self.classes = np.zeros((self.samples,), dtype="int32")
for classes in classes_list:
self.classes[i : i + len(classes)] = classes
i += len(classes)
io_utils.print_msg(
f"Found {self.samples} images belonging to "
f"{self.num_classes} classes."
)
pool.close()
pool.join()
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
return self.classes
@property # mixin needs this property to work
def sample_weight(self):
# no sample weights will be returned
return None
@keras_export("keras._legacy.preprocessing.image.NumpyArrayIterator")
| DirectoryIterator |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_Numpy.py | {
"start": 2766,
"end": 3229
} | class ____:
"""test_useless_docs_ignored_argument_names_numpy
Example of a method documenting the return type that an
implementation should return.
"""
def foo(self, arg, _, _ignored): # [useless-type-doc, useless-param-doc]
"""docstring ...
Parameters
----------
arg : int
An argument.
_ : float
Another argument.
_ignored :
Ignored Argument
"""
| Foo |
python | doocs__leetcode | solution/1600-1699/1696.Jump Game VI/Solution.py | {
"start": 0,
"end": 370
} | class ____:
def maxResult(self, nums: List[int], k: int) -> int:
n = len(nums)
f = [0] * n
q = deque([0])
for i in range(n):
if i - q[0] > k:
q.popleft()
f[i] = nums[i] + f[q[0]]
while q and f[q[-1]] <= f[i]:
q.pop()
q.append(i)
return f[-1]
| Solution |
python | fastai__fastai | fastai/callback/tracker.py | {
"start": 503,
"end": 836
} | class ____(Callback):
"A `Callback` that terminates training if loss is NaN."
order=-9
def after_batch(self):
"Test if `last_loss` is NaN and interrupts training."
if torch.isinf(self.loss) or torch.isnan(self.loss): raise CancelFitException
# %% ../../nbs/17_callback.tracker.ipynb 10
| TerminateOnNaNCallback |
python | django-extensions__django-extensions | django_extensions/db/models.py | {
"start": 789,
"end": 1129
} | class ____(models.Model):
"""
TitleDescriptionModel
An abstract base class model that provides title and description fields.
"""
title = models.CharField(_("title"), max_length=255)
description = models.TextField(_("description"), blank=True, null=True)
class Meta:
abstract = True
| TitleDescriptionModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/strategies.py | {
"start": 377,
"end": 439
} | class ____:
MockConnection = MockConnection
| MockEngineStrategy |
python | walkccc__LeetCode | solutions/2374. Node With Highest Edge Score/2374.py | {
"start": 0,
"end": 192
} | class ____:
def edgeScore(self, edges: list[int]) -> int:
scores = [0] * len(edges)
for i, edge in enumerate(edges):
scores[edge] += i
return scores.index(max(scores))
| Solution |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 33007,
"end": 34479
} | class ____(TypedDict, total=False):
type: Required[Literal['bytes']]
max_length: int
min_length: int
strict: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def bytes_schema(
*,
max_length: int | None = None,
min_length: int | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> BytesSchema:
"""
Returns a schema that matches a bytes value, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.bytes_schema(max_length=10, min_length=2)
v = SchemaValidator(schema)
assert v.validate_python(b'hello') == b'hello'
```
Args:
max_length: The value must be at most this length
min_length: The value must be at least this length
strict: Whether the value should be a bytes or a value that can be converted to a bytes
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='bytes',
max_length=max_length,
min_length=min_length,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| BytesSchema |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_hexadecimal.py | {
"start": 121,
"end": 4063
} | class ____(RegexBasedColumnMapExpectation):
"""Expect column values to be valid hexadecimals."""
regex_camel_name = "HexadecimalNumber"
regex = r"^[0-9a-fA-F]+$"
semantic_type_name_plural = "hexadecimals"
map_metric = RegexBasedColumnMapExpectation.register_metric(
regex_camel_name=regex_camel_name,
regex_=regex,
)
library_metadata = {
"maturity": "experimental",
"tags": ["experimental"],
"contributors": [
"@andrewsx",
"@mkopec87",
],
}
examples = [
{
"data": {
"a": ["3", "aa", "ba", "5A", "60F", "Gh"],
"b": ["Verify", "String", "3Z", "X", "yy", "sun"],
"c": ["0", "BB", "21D", "ca", "20", "1521D"],
"d": ["c8", "ffB", "11x", "apple", "ran", "woven"],
"e": ["a8", "21", "2.0", "1B", "4AA", "31"],
"f": ["a8", "41", "ca", "", "0", "31"],
},
"suppress_test_for": ["mssql", "bigquery", "snowflake"],
"tests": [
{
"title": "positive_test_with_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "a", "mostly": 0.6},
"out": {
"success": True,
"unexpected_index_list": [5],
"unexpected_list": ["Gh"],
},
},
{
"title": "negative_test_without_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "b"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2, 3, 4, 5],
"unexpected_list": ["Verify", "String", "3Z", "X", "yy", "sun"],
},
},
{
"title": "positive_test_without_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "c"},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_mostly",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "d", "mostly": 0.6},
"out": {
"success": False,
"unexpected_index_list": [2, 3, 4, 5],
"unexpected_list": ["11x", "apple", "ran", "woven"],
},
},
{
"title": "negative_test_with_float",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "e"},
"out": {
"success": False,
"unexpected_index_list": [2],
"unexpected_list": ["2.0"],
},
},
{
"title": "negative_test_with_empty_value",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "f"},
"out": {
"success": False,
"unexpected_index_list": [3],
"unexpected_list": [""],
},
},
],
}
]
if __name__ == "__main__":
ExpectColumnValuesToBeHexadecimal().print_diagnostic_checklist()
| ExpectColumnValuesToBeHexadecimal |
python | zarr-developers__zarr-python | tests/test_codecs/test_codecs.py | {
"start": 1112,
"end": 11483
} | class ____:
array: AnyAsyncArray
selection: BasicSelection
async def get(self) -> NDArrayLikeOrScalar:
return await self.array.getitem(self.selection)
async def set(self, value: np.ndarray[Any, Any]) -> None:
return await self.array.setitem(self.selection, value)
def order_from_dim(order: MemoryOrder, ndim: int) -> tuple[int, ...]:
if order == "F":
return tuple(ndim - x - 1 for x in range(ndim))
else:
return tuple(range(ndim))
def test_sharding_pickle() -> None:
"""
Test that sharding codecs can be pickled
"""
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("input_order", ["F", "C"])
@pytest.mark.parametrize("store_order", ["F", "C"])
@pytest.mark.parametrize("runtime_write_order", ["F", "C"])
@pytest.mark.parametrize("runtime_read_order", ["F", "C"])
@pytest.mark.parametrize("with_sharding", [True, False])
async def test_order(
store: Store,
input_order: MemoryOrder,
store_order: MemoryOrder,
runtime_write_order: MemoryOrder,
runtime_read_order: MemoryOrder,
with_sharding: bool,
) -> None:
data = np.arange(0, 256, dtype="uint16").reshape((32, 8), order=input_order)
path = "order"
spath = StorePath(store, path=path)
a = await zarr.api.asynchronous.create_array(
spath,
shape=data.shape,
chunks=(16, 8) if with_sharding else (32, 8),
shards=(32, 8) if with_sharding else None,
dtype=data.dtype,
fill_value=0,
chunk_key_encoding={"name": "v2", "separator": "."},
filters=[TransposeCodec(order=order_from_dim(store_order, data.ndim))],
config={"order": runtime_write_order},
)
await _AsyncArrayProxy(a)[:, :].set(data)
read_data = await _AsyncArrayProxy(a)[:, :].get()
assert np.array_equal(data, read_data)
with config.set({"array.order": runtime_read_order}):
a = await AsyncArray.open(
spath,
)
read_data = await _AsyncArrayProxy(a)[:, :].get()
assert np.array_equal(data, read_data)
assert isinstance(read_data, np.ndarray)
if runtime_read_order == "F":
assert read_data.flags["F_CONTIGUOUS"]
assert not read_data.flags["C_CONTIGUOUS"]
else:
assert not read_data.flags["F_CONTIGUOUS"]
assert read_data.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("input_order", ["F", "C"])
@pytest.mark.parametrize("runtime_write_order", ["F", "C"])
@pytest.mark.parametrize("runtime_read_order", ["F", "C"])
@pytest.mark.parametrize("with_sharding", [True, False])
def test_order_implicit(
store: Store,
input_order: MemoryOrder,
runtime_write_order: MemoryOrder,
runtime_read_order: MemoryOrder,
with_sharding: bool,
) -> None:
data = np.arange(0, 256, dtype="uint16").reshape((16, 16), order=input_order)
path = "order_implicit"
spath = StorePath(store, path)
with config.set({"array.order": runtime_write_order}):
a = zarr.create_array(
spath,
shape=data.shape,
chunks=(8, 8) if with_sharding else (16, 16),
shards=(16, 16) if with_sharding else None,
dtype=data.dtype,
fill_value=0,
)
a[:, :] = data
with config.set({"array.order": runtime_read_order}):
a = Array.open(spath)
read_data = a[:, :]
assert np.array_equal(data, read_data)
assert isinstance(read_data, np.ndarray)
if runtime_read_order == "F":
assert read_data.flags["F_CONTIGUOUS"]
assert not read_data.flags["C_CONTIGUOUS"]
else:
assert not read_data.flags["F_CONTIGUOUS"]
assert read_data.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_open(store: Store) -> None:
spath = StorePath(store)
a = zarr.create_array(
spath,
shape=(16, 16),
chunks=(16, 16),
dtype="int32",
fill_value=0,
)
b = Array.open(spath)
assert a.metadata == b.metadata
def test_morton() -> None:
assert list(morton_order_iter((2, 2))) == [(0, 0), (1, 0), (0, 1), (1, 1)]
assert list(morton_order_iter((2, 2, 2))) == [
(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(1, 1, 0),
(0, 0, 1),
(1, 0, 1),
(0, 1, 1),
(1, 1, 1),
]
assert list(morton_order_iter((2, 2, 2, 2))) == [
(0, 0, 0, 0),
(1, 0, 0, 0),
(0, 1, 0, 0),
(1, 1, 0, 0),
(0, 0, 1, 0),
(1, 0, 1, 0),
(0, 1, 1, 0),
(1, 1, 1, 0),
(0, 0, 0, 1),
(1, 0, 0, 1),
(0, 1, 0, 1),
(1, 1, 0, 1),
(0, 0, 1, 1),
(1, 0, 1, 1),
(0, 1, 1, 1),
(1, 1, 1, 1),
]
@pytest.mark.parametrize(
"shape",
[
[2, 2, 2],
[5, 2],
[2, 5],
[2, 9, 2],
[3, 2, 12],
[2, 5, 1],
[4, 3, 6, 2, 7],
[3, 2, 1, 6, 4, 5, 2],
],
)
def test_morton2(shape: tuple[int, ...]) -> None:
order = list(morton_order_iter(shape))
for i, x in enumerate(order):
assert x not in order[:i] # no duplicates
assert all(x[j] < shape[j] for j in range(len(shape))) # all indices are within bounds
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_write_partial_chunks(store: Store) -> None:
data = np.arange(0, 256, dtype="uint16").reshape((16, 16))
spath = StorePath(store)
a = zarr.create_array(
spath,
shape=data.shape,
chunks=(20, 20),
dtype=data.dtype,
fill_value=1,
)
a[0:16, 0:16] = data
assert np.array_equal(a[0:16, 0:16], data)
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_delete_empty_chunks(store: Store) -> None:
data = np.ones((16, 16))
path = "delete_empty_chunks"
spath = StorePath(store, path)
a = await zarr.api.asynchronous.create_array(
spath,
shape=data.shape,
chunks=(32, 32),
dtype=data.dtype,
fill_value=1,
)
await _AsyncArrayProxy(a)[:16, :16].set(np.zeros((16, 16)))
await _AsyncArrayProxy(a)[:16, :16].set(data)
assert np.array_equal(await _AsyncArrayProxy(a)[:16, :16].get(), data)
assert await store.get(f"{path}/c0/0", prototype=default_buffer_prototype()) is None
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_dimension_names(store: Store) -> None:
data = np.arange(0, 256, dtype="uint16").reshape((16, 16))
path = "dimension_names"
spath = StorePath(store, path)
await zarr.api.asynchronous.create_array(
spath,
shape=data.shape,
chunks=(16, 16),
dtype=data.dtype,
fill_value=0,
dimension_names=("x", "y"),
)
assert isinstance(
meta := (await zarr.api.asynchronous.open_array(store=spath)).metadata, ArrayV3Metadata
)
assert meta.dimension_names == (
"x",
"y",
)
path2 = "dimension_names2"
spath2 = StorePath(store, path2)
await zarr.api.asynchronous.create_array(
spath2,
shape=data.shape,
chunks=(16, 16),
dtype=data.dtype,
fill_value=0,
)
assert isinstance(meta := (await AsyncArray.open(spath2)).metadata, ArrayV3Metadata)
assert meta.dimension_names is None
zarr_json_buffer = await store.get(f"{path2}/zarr.json", prototype=default_buffer_prototype())
assert zarr_json_buffer is not None
assert "dimension_names" not in json.loads(zarr_json_buffer.to_bytes())
@pytest.mark.parametrize(
"codecs",
[
(BytesCodec(), TransposeCodec(order=order_from_dim("F", 2))),
(TransposeCodec(order=order_from_dim("F", 2)),),
],
)
def test_invalid_metadata(codecs: tuple[Codec, ...]) -> None:
shape = (16,)
chunks = (16,)
data_type = UInt8()
with pytest.raises(ValueError, match="The `order` tuple must have as many entries"):
ArrayV3Metadata(
shape=shape,
chunk_grid={"name": "regular", "configuration": {"chunk_shape": chunks}},
chunk_key_encoding={"name": "default", "configuration": {"separator": "/"}},
fill_value=0,
data_type=data_type,
codecs=codecs,
attributes={},
dimension_names=None,
)
def test_invalid_metadata_create_array() -> None:
with pytest.warns(
ZarrUserWarning,
match="codec disables partial reads and writes, which may lead to inefficient performance",
):
zarr.create_array(
{},
shape=(16, 16),
chunks=(16, 16),
dtype=np.dtype("uint8"),
fill_value=0,
serializer=ShardingCodec(chunk_shape=(8, 8)),
compressors=[
GzipCodec(),
],
)
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_resize(store: Store) -> None:
data = np.zeros((16, 18), dtype="uint16")
path = "resize"
spath = StorePath(store, path)
a = await zarr.api.asynchronous.create_array(
spath,
shape=data.shape,
chunks=(10, 10),
dtype=data.dtype,
chunk_key_encoding={"name": "v2", "separator": "."},
fill_value=1,
)
await _AsyncArrayProxy(a)[:16, :18].set(data)
assert await store.get(f"{path}/1.1", prototype=default_buffer_prototype()) is not None
assert await store.get(f"{path}/0.0", prototype=default_buffer_prototype()) is not None
assert await store.get(f"{path}/0.1", prototype=default_buffer_prototype()) is not None
assert await store.get(f"{path}/1.0", prototype=default_buffer_prototype()) is not None
await a.resize((10, 12))
assert a.metadata.shape == (10, 12)
assert a.shape == (10, 12)
assert await store.get(f"{path}/0.0", prototype=default_buffer_prototype()) is not None
assert await store.get(f"{path}/0.1", prototype=default_buffer_prototype()) is not None
assert await store.get(f"{path}/1.0", prototype=default_buffer_prototype()) is None
assert await store.get(f"{path}/1.1", prototype=default_buffer_prototype()) is None
| _AsyncArraySelectionProxy |
python | pytorch__pytorch | torch/fx/passes/splitter_base.py | {
"start": 13893,
"end": 14034
} | class ____:
is_acc: bool
nodes: NodeList
device_ordinal: Optional[int] = None
@compatibility(is_backward_compatible=False)
| Subgraph |
python | streamlit__streamlit | lib/tests/streamlit/data_mocks/snowpandas_mocks.py | {
"start": 731,
"end": 1715
} | class ____:
"""This is dummy DataFrame class, which imitates
snowflake.snowpark.modin.pandas.dataframe.DataFrame class for testing purposes.
We use this to make sure that our code does a special handling
if it detects a Snowpark Pandas Dataframe.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "snowflake.snowpark.modin.pandas.dataframe"
def __init__(self, data: pd.DataFrame):
self._data: pd.DataFrame = data
def to_pandas(self) -> pd.DataFrame:
return self._data
def head(self, n: int) -> DataFrame:
"""Returns the top n element of a mock version of Snowpark Pandas DataFrame"""
return DataFrame(self[:n])
def __getitem__(self, key: slice | int) -> DataFrame:
# Allow slicing and integer indexing
return DataFrame(self._data[key])
| DataFrame |
python | pytorch__pytorch | torch/testing/_internal/common_jit.py | {
"start": 5767,
"end": 15860
} | class ____(TestCase):
def createFunctionFromGraph(self, trace):
graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
return torch._C._create_function_from_graph("forward", graph)
def assertExportImport(self, trace, inputs):
m = self.createFunctionFromGraph(trace)
self.assertExportImportModule(m, inputs)
def assertExportImportModule(self, m, inputs):
m_import = self.getExportImportCopy(m)
a = self.runAndSaveRNG(m, inputs)
b = self.runAndSaveRNG(m_import, inputs)
self.assertEqual(a, b, "Results of original model and "
"exported/imported version of model differed")
def runAndSaveRNG(self, func, inputs, kwargs=None):
kwargs = kwargs if kwargs else {}
with freeze_rng_state():
results = func(*inputs, **kwargs)
return results
def getExportImportCopy(self, m, also_test_file=True, map_location=None):
buffer = io.BytesIO()
torch.jit.save(m, buffer)
buffer.seek(0)
imported = torch.jit.load(buffer, map_location=map_location)
if not also_test_file:
return imported
with TemporaryFileName() as fname:
torch.jit.save(imported, fname)
return torch.jit.load(fname, map_location=map_location)
def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
fusion_nodes_not_found, non_fusible_nodes_being_fused,
fusion_nodes_found, nodes_in_diff_graph):
err_msg = "\nFailure in testing nodes' autodifferentiation. "
if should_autodiff_node:
err_msg += "One or more nodes were expected to be autodiffed, " \
"but were not found in specified fusible/nonfusible " \
"DifferentiableGraph groups. \nSpecifically:"
# The node is intended to appear in a differentiable graph but doesn't
diff_nodes_missing = []
# The node is intended to appear in a differentiable graph
# outside of a fusion group but instead is in a fusion group
diff_nodes_in_fusion = []
# The node is intended to appear in a fusion group but doesn't
fusion_nodes_missing = []
# The node is intended to appear in a fusion group but instead
# is just in an outer differentiable graph
fusion_nodes_in_diff = []
for node in nodes_not_in_diff_graph:
if node in non_fusible_nodes_being_fused:
diff_nodes_in_fusion.append(node)
else:
diff_nodes_missing.append(node)
for node in fusion_nodes_not_found:
if node in nodes_in_diff_graph:
fusion_nodes_in_diff.append(node)
else:
fusion_nodes_missing.append(node)
if len(diff_nodes_missing) > 0:
err_msg += f"\n {diff_nodes_missing} were not in one of the " \
"DifferentiableGraphs when they were expected to be. " \
"Did you intend for these nodes to be autodiffed? " \
"If not, remove them from the list of nonfusible nodes."
if len(diff_nodes_in_fusion) > 0:
err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \
"when they were expected to be just in a DifferentiableGraph. If it was " \
"intended for these nodes to be in FusionGroups, reclassify these nodes as " \
"fusible nodes. If these nodes were not intended to be fused, your " \
"autodifferentiation logic might be wrong."
if len(fusion_nodes_missing) > 0:
err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \
"of the DifferentiableGraphs when they were expected to be. " \
"They were also not found in an outer DifferentiableGraph. Did you " \
"intend for these nodes to be autodifferentiated? If not, you should " \
"remove these nodes from the test's fusible nodes. Otherwise your " \
"autodifferentiation logic might be wrong."
if len(fusion_nodes_in_diff) > 0:
err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \
"of the DifferentiableGraphs when they were expected to be, " \
"instead they were found just in an outer DifferentiableGraph. " \
"Did you intend for these nodes to be fused? If not, you should " \
"move these nodes into the test's nonfusible nodes. Otherwise your " \
"autodifferentiation logic might be wrong."
else:
err_msg += "One or more nodes were not expected to be autodiffed " \
"but were found in a DifferentiableGraph or in a FusionGroup " \
"of a DifferentiableGraph. Did you intend for these nodes to be " \
"autodiffed? If so, change this test to expect autodifferentiation. " \
"\nSpecifically:"
if len(fusion_nodes_found) > 0:
err_msg += f"\n {fusion_nodes_found} were not expected to be in " \
"one of the DifferentiableGraphs, but appeared in a FusionGroup " \
"of a DifferentiableGraph. "
if len(nodes_in_diff_graph) > 0:
err_msg += f"\n {nodes_in_diff_graph} were not expected to " \
"be in one of the DifferentiableGraphs but were."
return err_msg
def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]
# Note: currently no tests have fusible_nodes
fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]
# For any non-fusible node, it must show up in one of the DifferentiableGraphs.
nodes_in_diff_graph = []
nodes_not_in_diff_graph = []
non_fusible_nodes_being_fused = []
for node in nonfusible_nodes:
if any(g.findNode(node) is not None for g in diff_subgraphs):
nodes_in_diff_graph.append(node)
else:
nodes_not_in_diff_graph.append(node)
if any(g.findNode(node) is not None for g in fusion_subgraphs):
non_fusible_nodes_being_fused.append(node)
found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)
# For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
fusion_nodes_found = []
fusion_nodes_not_found = []
for node in fusible_nodes:
if any(g.findNode(node) is not None for g in fusion_subgraphs):
fusion_nodes_found.append(node)
else:
fusion_nodes_not_found.append(node)
found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)
if should_autodiff_node is not None:
err_msg = self.autoDiffErrorMessage(should_autodiff_node,
nodes_not_in_diff_graph,
fusion_nodes_not_found,
non_fusible_nodes_being_fused,
fusion_nodes_found,
nodes_in_diff_graph)
self.assertEqual(should_autodiff_node,
found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)
def checkShapeAnalysis(self, out_sizes: Union[list[int], list[list[int]]],
traced_graph, assert_propagation, constant_prop=True):
# repropagte input shapes provided by tracing,
prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
for enable_test_mode in [True, False]:
# here we are testing allowing/disallowing substituting in complete shapes as constants,
# disallowing constants helps stress test partial eval and substitution pipeline
torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
torch._C._jit_erase_non_input_shape_information(traced_graph)
if constant_prop:
torch._C._jit_pass_constant_propagation(traced_graph)
torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
# Add sizes to default tensor type to avoid checking something out of scope
# and difficulties with tracer leaving in other parts of tensor type
output = next(traced_graph.outputs()).type()
def test_type(type, actual_size):
sizes = type.symbolic_sizes()
out_type = TensorType.get().with_sizes(sizes)
actual_type = TensorType.get().with_sizes(actual_size)
# always check actual shape is a subtype of the output
self.assertTrue(actual_type.isSubtypeOf(out_type))
# and then if assertion flag is provided, check shape analysis
# is successful
if assert_propagation:
self.assertEqual(out_type.sizes(), actual_size)
if output.isSubtypeOf(torch._C.TensorType.get()):
test_type(output, out_sizes)
else:
tuple_elements = output.elements()
for i in range(len(tuple_elements)):
test_type(tuple_elements[i], out_sizes[i])
torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
| JitCommonTestCase |
python | huggingface__transformers | src/transformers/models/beit/modeling_beit.py | {
"start": 33611,
"end": 37707
} | class ____(BeitPreTrainedModel):
def __init__(self, config: BeitConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.beit = BeitModel(config, add_pooling_layer=False)
# Classifier head
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return None
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
>>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, logits = outputs.loss, outputs.logits
>>> list(logits.shape)
[1, 196, 8192]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.beit(
pixel_values,
bool_masked_pos=bool_masked_pos,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.layernorm(sequence_output)
prediction_scores = self.lm_head(sequence_output[:, 1:])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
hidden states of the patch tokens) e.g. for ImageNet.
"""
)
| BeitForMaskedImageModeling |
python | google__pytype | pytype/tools/analyze_project/parse_args_test.py | {
"start": 297,
"end": 712
} | class ____(unittest.TestCase):
"""Test parse_args.convert_string."""
def test_int(self):
self.assertEqual(parse_args.convert_string('3'), 3)
def test_bool(self):
self.assertIs(parse_args.convert_string('True'), True)
self.assertIs(parse_args.convert_string('False'), False)
def test_whitespace(self):
self.assertEqual(parse_args.convert_string('err1,\nerr2'), 'err1,err2')
| TestConvertString |
python | walkccc__LeetCode | solutions/95. Unique Binary Search Trees II/95.py | {
"start": 0,
"end": 500
} | class ____:
def generateTrees(self, n: int) -> list[TreeNode]:
if n == 0:
return []
def generateTrees(mn: int, mx: int) -> list[int | None]:
if mn > mx:
return [None]
ans = []
for i in range(mn, mx + 1):
for left in generateTrees(mn, i - 1):
for right in generateTrees(i + 1, mx):
ans.append(TreeNode(i))
ans[-1].left = left
ans[-1].right = right
return ans
return generateTrees(1, n)
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/workers.py | {
"start": 58089,
"end": 59551
} | class ____(Request):
"""
Returns worker statistics metric keys grouped by categories.
:param worker_ids: List of worker ids to collect metrics for. If not provided
or empty then all the company workers metrics are analyzed.
:type worker_ids: Sequence[str]
"""
_service = "workers"
_action = "get_metric_keys"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"worker_ids": {
"description": "List of worker ids to collect metrics for. If not provided or empty then all the company workers metrics are analyzed.",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, worker_ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetMetricKeysRequest, self).__init__(**kwargs)
self.worker_ids = worker_ids
@schema_property("worker_ids")
def worker_ids(self) -> Optional[List[str]]:
return self._property_worker_ids
@worker_ids.setter
def worker_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_worker_ids = None
return
self.assert_isinstance(value, "worker_ids", (list, tuple))
self.assert_isinstance(value, "worker_ids", six.string_types, is_array=True)
self._property_worker_ids = value
| GetMetricKeysRequest |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 207994,
"end": 208785
} | class ____(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
| UDPLITETimeoutTest |
python | getsentry__sentry | src/sentry/core/endpoints/scim/teams.py | {
"start": 3050,
"end": 4585
} | class ____(serializers.Serializer):
# we don't actually use "schemas" for anything atm but its part of the spec
schemas = serializers.ListField(child=serializers.CharField(), required=True)
Operations = serializers.ListField(
child=SCIMTeamPatchOperationSerializer(),
required=True,
source="operations",
help_text="""The list of operations to perform. Valid operations are:
* Renaming a team:
```json
{
"Operations": [{
"op": "replace",
"value": {
"id": 23,
"displayName": "newName"
}
}]
}
```
* Adding a member to a team:
```json
{
"Operations": [{
"op": "add",
"path": "members",
"value": [
{
"value": 23,
"display": "testexample@example.com"
}
]
}]
}
```
* Removing a member from a team:
```json
{
"Operations": [{
"op": "remove",
"path": "members[value eq \"23\"]"
}]
}
```
* Replacing an entire member set of a team:
```json
{
"Operations": [{
"op": "replace",
"path": "members",
"value": [
{
"value": 23,
"display": "testexample2@sentry.io"
},
{
"value": 24,
"display": "testexample3@sentry.io"
}
]
}]
}
```
""",
)
def _team_expand(excluded_attributes):
return None if "members" in excluded_attributes else ["members"]
| SCIMTeamPatchRequestSerializer |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_reflection.py | {
"start": 18054,
"end": 19674
} | class ____(fixtures.TestBase):
__only_on__ = "oracle"
__sparse_driver_backend__ = True
def setup_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("create table my_table (id integer)")
conn.exec_driver_sql(
"create global temporary table my_temp_table (id integer)",
)
conn.exec_driver_sql(
"create table foo_table (id integer) tablespace SYSTEM"
)
def teardown_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop table my_temp_table")
conn.exec_driver_sql("drop table my_table")
conn.exec_driver_sql("drop table foo_table")
def test_table_names_no_system(self):
insp = inspect(testing.db)
eq_(insp.get_table_names(), ["my_table"])
def test_temp_table_names_no_system(self):
insp = inspect(testing.db)
eq_(insp.get_temp_table_names(), ["my_temp_table"])
def test_table_names_w_system(self):
engine = testing_engine(options={"exclude_tablespaces": ["FOO"]})
insp = inspect(engine)
eq_(
set(insp.get_table_names()).intersection(
["my_table", "foo_table"]
),
{"my_table", "foo_table"},
)
def test_reflect_system_table(self):
meta = MetaData()
t = Table("foo_table", meta, autoload_with=testing.db)
assert t.columns.keys() == ["id"]
t = Table("my_temp_table", meta, autoload_with=testing.db)
assert t.columns.keys() == ["id"]
| SystemTableTablenamesTest |
python | huggingface__transformers | src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | {
"start": 23429,
"end": 35958
} | class ____(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
if config.text_embed:
self.embeddings = LayoutLMv3TextEmbeddings(config)
if config.visual_embed:
# use the default pre-training parameters for fine-tuning (e.g., input_size)
# when the input_size is larger in fine-tuning, we will interpolate the position embeddings in forward
self.patch_embed = LayoutLMv3PatchEmbeddings(config)
size = int(config.input_size / config.patch_size)
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size))
self.pos_drop = nn.Dropout(p=0.0)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
self.init_visual_bbox(image_size=(size, size))
self.norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.encoder = LayoutLMv3Encoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
"""
Create the bounding boxes for the visual (patch) tokens.
"""
visual_bbox_x = torch.div(
torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode="trunc"
)
visual_bbox_y = torch.div(
torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode="trunc"
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(image_size[0], 1),
visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(image_size[0], 1),
visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, 4)
cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)
def calculate_visual_bbox(self, device, dtype, batch_size):
visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1)
visual_bbox = visual_bbox.to(device).type(dtype)
return visual_bbox
def forward_image(self, pixel_values):
embeddings = self.patch_embed(pixel_values)
# add [CLS] token
batch_size, seq_len, _ = embeddings.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add position embeddings
if self.pos_embed is not None:
embeddings = embeddings + self.pos_embed
embeddings = self.pos_drop(embeddings)
embeddings = self.norm(embeddings)
return embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, token_sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
token_type_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, token_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif pixel_values is not None:
batch_size = len(pixel_values)
device = pixel_values.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")
if input_ids is not None or inputs_embeds is not None:
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
bbox=bbox,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
final_bbox = final_position_ids = None
patch_height = patch_width = None
if pixel_values is not None:
patch_height, patch_width = (
torch_int(pixel_values.shape[2] / self.config.patch_size),
torch_int(pixel_values.shape[3] / self.config.patch_size),
)
visual_embeddings = self.forward_image(pixel_values)
visual_attention_mask = torch.ones(
(batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device
)
if attention_mask is not None:
attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
else:
attention_mask = visual_attention_mask
if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
if self.config.has_spatial_attention_bias:
visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size)
if bbox is not None:
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
else:
final_bbox = visual_bbox
visual_position_ids = torch.arange(
0, visual_embeddings.shape[1], dtype=torch.long, device=device
).repeat(batch_size, 1)
if input_ids is not None or inputs_embeds is not None:
position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
position_ids = position_ids.expand(input_shape)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
else:
final_position_ids = visual_position_ids
if input_ids is not None or inputs_embeds is not None:
embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)
else:
embedding_output = visual_embeddings
embedding_output = self.LayerNorm(embedding_output)
embedding_output = self.dropout(embedding_output)
elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
if self.config.has_spatial_attention_bias:
final_bbox = bbox
if self.config.has_relative_attention_bias:
position_ids = self.embeddings.position_ids[:, : input_shape[1]]
position_ids = position_ids.expand_as(input_ids)
final_position_ids = position_ids
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, None, device, dtype=embedding_output.dtype
)
encoder_outputs = self.encoder(
embedding_output,
bbox=final_bbox,
position_ids=final_position_ids,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
patch_height=patch_height,
patch_width=patch_width,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| LayoutLMv3Model |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 78494,
"end": 79370
} | class ____(Request):
"""
:param project: Project id
:type project: str
"""
_service = "projects"
_action = "get_by_id"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"project": {"description": "Project id", "type": "string"}},
"required": ["project"],
"type": "object",
}
def __init__(self, project: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.project = project
@schema_property("project")
def project(self) -> str:
return self._property_project
@project.setter
def project(self, value: str) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
| GetByIdRequest |
python | encode__django-rest-framework | tests/test_utils.py | {
"start": 898,
"end": 993
} | class ____(APIView):
def get_view_name(self):
return "Foo"
| CustomNameResourceInstance |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/connectors/asyncio.py | {
"start": 12565,
"end": 14151
} | class ____:
"""Mixin for a AsyncAdapt_dbapi_connection to add terminate support."""
__slots__ = ()
def terminate(self) -> None:
if in_greenlet():
# in a greenlet; this is the connection was invalidated case.
try:
# try to gracefully close; see #10717
await_(asyncio.shield(self._terminate_graceful_close()))
except self._terminate_handled_exceptions() as e:
# in the case where we are recycling an old connection
# that may have already been disconnected, close() will
# fail. In this case, terminate
# the connection without any further waiting.
# see issue #8419
self._terminate_force_close()
if isinstance(e, asyncio.CancelledError):
# re-raise CancelledError if we were cancelled
raise
else:
# not in a greenlet; this is the gc cleanup case
self._terminate_force_close()
def _terminate_handled_exceptions(self) -> Tuple[Type[BaseException], ...]:
"""Returns the exceptions that should be handled when
calling _graceful_close.
"""
return (asyncio.TimeoutError, asyncio.CancelledError, OSError)
async def _terminate_graceful_close(self) -> None:
"""Try to close connection gracefully"""
raise NotImplementedError
def _terminate_force_close(self) -> None:
"""Terminate the connection"""
raise NotImplementedError
| AsyncAdapt_terminate |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 319,
"end": 521
} | class ____(graphene.Enum):
DEPLOYMENT_JOB_CONNECTION_STATE_UNSPECIFIED = 1
NOT_SET_UP = 2
CONNECTED = 3
NOT_FOUND = 4
REQUIRED_PARAMETERS_CHANGED = 5
| MlflowDeploymentJobConnectionState |
python | kamyu104__LeetCode-Solutions | Python/minimum-score-after-removals-on-a-tree.py | {
"start": 48,
"end": 1883
} | class ____(object):
def minimumScore(self, nums, edges):
"""
:type nums: List[int]
:type edges: List[List[int]]
:rtype: int
"""
def is_ancestor(a, b):
return left[a] <= left[b] and right[b] <= right[a]
def iter_dfs():
cnt = 0
left = [0]*len(nums)
right = [0]*len(nums)
stk = [(1, (0, -1))]
while stk:
step, args = stk.pop()
if step == 1:
u, p = args
left[u] = cnt
cnt += 1
stk.append((2, (u, p)))
for v in adj[u]:
if v == p:
continue
stk.append((1, (v, u)))
elif step == 2:
u, p = args
for v in adj[u]:
if v == p:
continue
nums[u] ^= nums[v]
right[u] = cnt
return left, right
adj = [[] for _ in xrange(len(nums))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
left, right = iter_dfs()
result = float("inf")
for i in xrange(1, len(nums)):
for j in xrange(i+1, len(nums)):
if is_ancestor(i, j):
a, b, c = nums[0]^nums[i], nums[i]^nums[j], nums[j]
elif is_ancestor(j, i):
a, b, c = nums[0]^nums[j], nums[j]^nums[i], nums[i]
else:
a, b, c = nums[0]^nums[i]^nums[j], nums[i], nums[j]
result = min(result, max(a, b, c)-min(a, b, c))
return result
# Time: O(n^2)
# Space: O(n)
# dfs with recursion
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_pickle.py | {
"start": 66,
"end": 302
} | class ____:
def test_pickle_after_set_freq(self):
tdi = timedelta_range("1 day", periods=4, freq="s")
tdi = tdi._with_freq(None)
res = tm.round_trip_pickle(tdi)
tm.assert_index_equal(res, tdi)
| TestPickle |
python | getsentry__sentry | src/sentry/notifications/platform/types.py | {
"start": 2423,
"end": 2788
} | class ____:
"""
A rendered action for an integration.
"""
label: str
"""
The text content of the action (usually appears as a button).
This string should not contain any formatting, and will be displayed as is.
"""
link: str
"""
The underlying link of the action.
"""
@dataclass(frozen=True)
| NotificationRenderedAction |
python | pytorch__pytorch | test/torch_np/test_ndarray_methods.py | {
"start": 4444,
"end": 6205
} | class ____(TestCase):
def test_nonzero_trivial(self):
assert_equal(np.nonzero(np.array([])), ([],))
assert_equal(np.array([]).nonzero(), ([],))
assert_equal(np.nonzero(np.array([0])), ([],))
assert_equal(np.array([0]).nonzero(), ([],))
assert_equal(np.nonzero(np.array([1])), ([0],))
assert_equal(np.array([1]).nonzero(), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
assert_equal(x.nonzero(), ([0, 2, 3, 6],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
assert_equal(x.nonzero(), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
assert_equal(x.nonzero(), ([0, 1, 2], [0, 1, 2]))
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
assert_equal(c.nonzero()[0], np.arange(i, 200 + i, 20))
c = np.zeros(400, dtype=bool)
c[10 + i : 20 + i] = True
c[20 + i * 2] = True
assert_equal(
np.nonzero(c)[0],
np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2])),
)
def test_array_method(self):
# Tests that the array method
# call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
@instantiate_parametrized_tests
| TestNonzero |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_group_search_views.py | {
"start": 17833,
"end": 23749
} | class ____(APITestCase):
def create_base_data_with_page_filters(self) -> None:
self.team_1 = self.create_team(organization=self.organization, slug="team-1")
self.team_2 = self.create_team(organization=self.organization, slug="team-2")
# User 1 is on team 1 only
user_1 = self.user
self.create_team_membership(user=user_1, team=self.team_1)
# User 2 is on team 1 and team 2
self.user_2 = self.create_user()
self.create_member(
organization=self.organization, user=self.user_2, teams=[self.team_1, self.team_2]
)
# User 3 has no views and should get the default views
self.user_3 = self.create_user()
self.create_member(organization=self.organization, user=self.user_3, teams=[self.team_1])
# User 4 is part of no teams, should error out
self.user_4 = self.create_user()
self.create_member(organization=self.organization, user=self.user_4)
# This project should NEVER get chosen as a default since it does not belong to any teams
self.project1 = self.create_project(
organization=self.organization, slug="project-a", teams=[]
)
# This project should be User 2's default project since it's the alphabetically the first one
self.project2 = self.create_project(
organization=self.organization, slug="project-b", teams=[self.team_2]
)
# This should be User 1's default project since it's the only one that the user has access to
self.project3 = self.create_project(
organization=self.organization, slug="project-c", teams=[self.team_1, self.team_2]
)
first_issue_view_user_one = GroupSearchView.objects.create(
name="Issue View One",
organization=self.organization,
user_id=user_1.id,
query="is:unresolved",
query_sort="date",
is_all_projects=False,
time_filters={"period": "14d"},
environments=[],
)
GroupSearchViewStarred.objects.create(
organization=self.organization,
user_id=user_1.id,
group_search_view=first_issue_view_user_one,
position=0,
)
first_issue_view_user_one.projects.set([self.project3])
second_issue_view_user_one = GroupSearchView.objects.create(
name="Issue View Two",
organization=self.organization,
user_id=user_1.id,
query="is:resolved",
query_sort="new",
is_all_projects=False,
time_filters={"period": "7d"},
environments=["staging", "production"],
)
GroupSearchViewStarred.objects.create(
organization=self.organization,
user_id=user_1.id,
group_search_view=second_issue_view_user_one,
position=1,
)
second_issue_view_user_one.projects.set([])
third_issue_view_user_one = GroupSearchView.objects.create(
name="Issue View Three",
organization=self.organization,
user_id=user_1.id,
query="is:ignored",
query_sort="freq",
is_all_projects=True,
time_filters={"period": "30d"},
environments=["development"],
)
GroupSearchViewStarred.objects.create(
organization=self.organization,
user_id=user_1.id,
group_search_view=third_issue_view_user_one,
position=2,
)
third_issue_view_user_one.projects.set([])
first_issue_view_user_two = GroupSearchView.objects.create(
name="Issue View One",
organization=self.organization,
user_id=self.user_2.id,
query="is:unresolved",
query_sort="date",
is_all_projects=False,
time_filters={"period": "14d"},
environments=[],
)
GroupSearchViewStarred.objects.create(
organization=self.organization,
user_id=self.user_2.id,
group_search_view=first_issue_view_user_two,
position=0,
)
first_issue_view_user_two.projects.set([])
first_issue_view_user_four = GroupSearchView.objects.create(
name="Issue View One",
organization=self.organization,
user_id=self.user_4.id,
query="is:unresolved",
query_sort="date",
is_all_projects=False,
time_filters={"period": "14d"},
environments=[],
)
GroupSearchViewStarred.objects.create(
organization=self.organization,
user_id=self.user_4.id,
group_search_view=first_issue_view_user_four,
position=0,
)
first_issue_view_user_four.projects.set([])
def setUp(self) -> None:
self.create_base_data_with_page_filters()
self.url = reverse(
"sentry-api-0-organization-group-search-views",
kwargs={"organization_id_or_slug": self.organization.slug},
)
def test_basic_get_page_filters_with_global_filters(self) -> None:
self.login_as(user=self.user)
response = self.client.get(self.url)
assert response.data[0]["timeFilters"] == {"period": "14d"}
assert response.data[0]["projects"] == [self.project3.id]
assert response.data[0]["environments"] == []
assert response.data[1]["timeFilters"] == {"period": "7d"}
assert response.data[1]["projects"] == []
assert response.data[1]["environments"] == ["staging", "production"]
assert response.data[2]["timeFilters"] == {"period": "30d"}
assert response.data[2]["projects"] == [-1]
assert response.data[2]["environments"] == ["development"]
| OrganizationGroupSearchViewsGetPageFiltersTest |
python | django__django | django/template/response.py | {
"start": 143,
"end": 5098
} | class ____(HttpResponse):
rendering_attrs = ["template_name", "context_data", "_post_render_callbacks"]
def __init__(
self,
template,
context=None,
content_type=None,
status=None,
charset=None,
using=None,
headers=None,
):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super().__init__("", content_type, status, charset=charset, headers=headers)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""
Raise an exception if trying to pickle an unrendered response. Pickle
only rendered data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError(
"The response content must be rendered before it can be pickled."
)
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"""Accept a template object, path-to-template, or list of paths."""
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, str):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Return the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
return template.render(context, self._request)
def add_post_render_callback(self, callback):
"""Add a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Render (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Return the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError(
"The response content must be rendered before it can be iterated over."
)
return super().__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError(
"The response content must be rendered before it can be accessed."
)
return super().content
@content.setter
def content(self, value):
"""Set the content for the response."""
HttpResponse.content.fset(self, value)
self._is_rendered = True
| SimpleTemplateResponse |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 29981,
"end": 32310
} | class ____(Widget, Generic[T]):
"""A representation of ``st.selectbox``."""
_value: T | None | InitialValue
proto: SelectboxProto = field(repr=False)
label: str
options: list[str]
help: str
form_id: str
def __init__(self, proto: SelectboxProto, root: ElementTree) -> None:
super().__init__(proto, root)
self._value = InitialValue()
self.type = "selectbox"
self.options = list(proto.options)
@property
def index(self) -> int | None:
"""The index of the current selection. (int)""" # noqa: D400
if self.value is None:
return None
if len(self.options) == 0:
return 0
return self.options.index(self.format_func(self.value))
@property
def value(self) -> T | None:
"""The currently selected value from the options. (Any)""" # noqa: D400
if not isinstance(self._value, InitialValue):
return self._value
state = self.root.session_state
assert state
return cast("T", state[self.id])
@property
def format_func(self) -> Callable[[Any], Any]:
"""The widget's formatting function for displaying options. (callable)""" # noqa: D400
ss = self.root.session_state
return cast("Callable[[Any], Any]", ss[TESTING_KEY][self.id])
def set_value(self, v: T | None) -> Selectbox[T]:
"""Set the selection by value."""
self._value = v
return self
def select(self, v: T | None) -> Selectbox[T]:
"""Set the selection by value."""
return self.set_value(v)
def select_index(self, index: int | None) -> Selectbox[T]:
"""Set the selection by index."""
if index is None:
return self.set_value(None)
return self.set_value(cast("T", self.options[index]))
@property
def _widget_state(self) -> WidgetState:
"""Protobuf message representing the state of the widget, including
any interactions that have happened.
Should be the same as the frontend would produce for those interactions.
"""
ws = WidgetState()
ws.id = self.id
if self.index is not None and len(self.options) > 0:
ws.string_value = self.options[self.index]
return ws
@dataclass(repr=False)
| Selectbox |
python | wandb__wandb | wandb/vendor/pygments/lexers/business.py | {
"start": 11958,
"end": 22098
} | class ____(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
.. versionadded:: 1.1
"""
name = 'ABAP'
aliases = ['abap']
filenames = ['*.abap', '*.ABAP']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'common': [
(r'\s+', Text),
(r'^\*.*$', Comment.Single),
(r'\".*?\n', Comment.Single),
(r'##\w+', Comment.Special),
],
'variable-names': [
(r'<\S+>', Name.Variable),
(r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable),
],
'root': [
include('common'),
# function calls
(r'CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION)',
Keyword),
(r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
r'TRANSACTION|TRANSFORMATION))\b',
Keyword),
(r'(FORM|PERFORM)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Function)),
(r'(PERFORM)(\s+)(\()(\w+)(\))',
bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation)),
(r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
bygroups(Keyword, Text, Name.Function, Text, Keyword)),
# method implementation
(r'(METHOD)(\s+)([\w~]+)',
bygroups(Keyword, Text, Name.Function)),
# method calls
(r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)',
bygroups(Text, Name.Variable, Operator, Name.Function)),
# call methodnames returning style
(r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function),
# text elements
(r'(TEXT)(-)(\d{3})',
bygroups(Keyword, Punctuation, Number.Integer)),
(r'(TEXT)(-)(\w{3})',
bygroups(Keyword, Punctuation, Name.Variable)),
# keywords with dashes in them.
# these need to be first, because for instance the -ID part
# of MESSAGE-ID wouldn't get highlighted if MESSAGE was
# first in the list of keywords.
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
r'INTERFACE-POOL|INVERTED-DATE|'
r'LOAD-OF-PROGRAM|LOG-POINT|'
r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
r'OUTPUT-LENGTH|PRINT-CONTROL|'
r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
r'TYPE-POOL|TYPE-POOLS|NO-DISPLAY'
r')\b', Keyword),
# keyword kombinations
(r'(?<![-\>])(CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
r'(PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
r'(TYPE|LIKE)\s+((LINE\s+OF|REF\s+TO|'
r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|'
r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
r'FREE\s(MEMORY|OBJECT)?|'
r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
r'SKIP|ULINE)|'
r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
r'TO LIST-PROCESSING|TO TRANSACTION)'
r'(ENDING|STARTING)\s+AT|'
r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
r'(BEGIN|END)\s+OF|'
r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
r'COMPARING(\s+ALL\s+FIELDS)?|'
r'(INSERT|APPEND)(\s+INITIAL\s+LINE\s+(IN)?TO|\s+LINES\s+OF)?|'
r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
r'END-OF-(DEFINITION|PAGE|SELECTION)|'
r'WITH\s+FRAME(\s+TITLE)|'
r'(REPLACE|FIND)\s+((FIRST|ALL)\s+OCCURRENCES?\s+OF\s+)?(SUBSTRING|REGEX)?|'
r'MATCH\s+(LENGTH|COUNT|LINE|OFFSET)|'
r'(RESPECTING|IGNORING)\s+CASE|'
r'IN\s+UPDATE\s+TASK|'
r'(SOURCE|RESULT)\s+(XML)?|'
r'REFERENCE\s+INTO|'
# simple kombinations
r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE|COMMON\s+PART)\b', Keyword),
# single word keywords.
(r'(^|(?<=(\s|\.)))(ABBREVIATED|ABSTRACT|ADD|ALIASES|ALIGN|ALPHA|'
r'ASSERT|AS|ASSIGN(ING)?|AT(\s+FIRST)?|'
r'BACK|BLOCK|BREAK-POINT|'
r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|COUNTRY|CURRENCY|'
r'DATA|DATE|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
r'DETAIL|DIRECTORY|DIVIDE|DO|DUMMY|'
r'ELSE(IF)?|ENDAT|ENDCASE|ENDCATCH|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
r'ENDIF|ENDINTERFACE|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|ENDWHILE|'
r'ENHANCEMENT|EVENTS|EXACT|EXCEPTIONS?|EXIT|EXPONENT|EXPORT|EXPORTING|EXTRACT|'
r'FETCH|FIELDS?|FOR|FORM|FORMAT|FREE|FROM|FUNCTION|'
r'HIDE|'
r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
r'LANGUAGE|LEAVE|LENGTH|LINES|LOAD|LOCAL|'
r'JOIN|'
r'KEY|'
r'NEXT|'
r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFIER|MODIFY|MOVE|MULTIPLY|'
r'NODES|NUMBER|'
r'OBLIGATORY|OBJECT|OF|OFF|ON|OTHERS|OVERLAY|'
r'PACK|PAD|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|PF\d\d|'
r'RAISE|RAISING|RANGES?|READ|RECEIVE|REDEFINITION|REFRESH|REJECT|REPORT|RESERVE|'
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|REPLACE|'
r'SCROLL|SEARCH|SELECT|SHIFT|SIGN|SINGLE|SIZE|SKIP|SORT|SPLIT|STATICS|STOP|'
r'STYLE|SUBMATCHES|SUBMIT|SUBTRACT|SUM(?!\()|SUMMARY|SUMMING|SUPPLY|'
r'TABLE|TABLES|TIMESTAMP|TIMES?|TIMEZONE|TITLE|\??TO|'
r'TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
r'ULINE|UNDER|UNPACK|UPDATE|USING|'
r'VALUE|VALUES|VIA|VARYING|VARY|'
r'WAIT|WHEN|WHERE|WIDTH|WHILE|WITH|WINDOW|WRITE|XSD|ZERO)\b', Keyword),
# builtins
(r'(abs|acos|asin|atan|'
r'boolc|boolx|bit_set|'
r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
r'count|count_any_of|count_any_not_of|'
r'dbmaxlen|distance|'
r'escape|exp|'
r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
r'insert|'
r'lines|log|log10|'
r'match|matches|'
r'nmax|nmin|numofchar|'
r'repeat|replace|rescale|reverse|round|'
r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
r'substring|substring_after|substring_from|substring_before|substring_to|'
r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),
(r'&[0-9]', Name),
(r'[0-9]+', Number.Integer),
# operators which look like variable names before
# parsing variable names.
(r'(?<=(\s|.))(AND|OR|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator.Word),
include('variable-names'),
# standard operators after variable names,
# because < and > are part of field symbols.
(r'[?*<>=\-+&]', Operator),
(r"'(''|[^'])*'", String.Single),
(r"`([^`])*`", String.Single),
(r"([|}])([^{}|]*?)([|{])",
bygroups(Punctuation, String.Single, Punctuation)),
(r'[/;:()\[\],.]', Punctuation),
(r'(!)(\w+)', bygroups(Operator, Name)),
],
}
| ABAPLexer |
python | numba__numba | numba/core/dispatcher.py | {
"start": 38726,
"end": 41539
} | class ____(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
"""
Implementation of the hidden dispatcher objects used for lifted code
(a lifted loop is really compiled as a separate function).
"""
_fold_args = False
can_cache = False
def __init__(self, func_ir, typingctx, targetctx, flags, locals):
self.func_ir = func_ir
self.lifted_from = None
self.typingctx = typingctx
self.targetctx = targetctx
self.flags = flags
self.locals = locals
_DispatcherBase.__init__(self, self.func_ir.arg_count,
self.func_ir.func_id.func,
self.func_ir.func_id.pysig,
can_fallback=True,
exact_match_required=False)
def _reduce_states(self):
"""
Reduce the instance for pickling. This will serialize
the original function as well the compilation options and
compiled signatures, but not the compiled code itself.
NOTE: part of ReduceMixin protocol
"""
return dict(
uuid=self._uuid, func_ir=self.func_ir, flags=self.flags,
locals=self.locals, extras=self._reduce_extras(),
)
def _reduce_extras(self):
"""
NOTE: sub-class can override to add extra states
"""
return {}
@classmethod
def _rebuild(cls, uuid, func_ir, flags, locals, extras):
"""
Rebuild an Dispatcher instance after it was __reduce__'d.
NOTE: part of ReduceMixin protocol
"""
try:
return cls._memo[uuid]
except KeyError:
pass
# NOTE: We are assuming that this is must be cpu_target, which is true
# for now.
# TODO: refactor this to not assume on `cpu_target`
from numba.core import registry
typingctx = registry.cpu_target.typing_context
targetctx = registry.cpu_target.target_context
self = cls(func_ir, typingctx, targetctx, flags, locals, **extras)
self._set_uuid(uuid)
return self
def get_source_location(self):
"""Return the starting line number of the loop.
"""
return self.func_ir.loc.line
def _pre_compile(self, args, return_type, flags):
"""Pre-compile actions
"""
pass
@abstractmethod
def compile(self, sig):
"""Lifted code should implement a compilation method that will return
a CompileResult.entry_point for the given signature."""
pass
def _get_dispatcher_for_current_target(self):
# Lifted code does not honor the target switch currently.
# No work has been done to check if this can be allowed.
return self
| LiftedCode |
python | ipython__ipython | IPython/core/interactiveshell.py | {
"start": 159736,
"end": 159889
} | class ____(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
| InteractiveShellABC |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 123463,
"end": 124317
} | class ____(BaseModel):
shard_id: int = Field(..., description="")
to_shard_id: Optional[int] = Field(
default=None,
description="Target shard ID if different than source shard ID Used exclusively with `ReshardStreamRecords` transfer method.",
)
from_: int = Field(..., description="Source peer id", alias="from")
to: int = Field(..., description="Destination peer id")
sync: bool = Field(
...,
description="If `true` transfer is a synchronization of a replicas If `false` transfer is a moving of a shard from one peer to another",
)
method: Optional["ShardTransferMethod"] = Field(default=None, description="")
comment: Optional[str] = Field(
default=None, description="A human-readable report of the transfer progress. Available only on the source peer."
)
| ShardTransferInfo |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 56696,
"end": 58186
} | class ____(PreTrainedModel):
config: ClapConfig
base_model_prefix = "clap"
input_modalities = ("audio", "text")
supports_gradient_checkpointing = False
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, ClapTextEmbeddings):
init.normal_(module.position_embeddings.weight, mean=0.0, std=factor * 0.02)
init.normal_(module.token_type_embeddings.weight, mean=0.0, std=factor * 0.02)
elif isinstance(module, ClapModel):
init.constant_(module.logit_scale_a, math.log(self.config.logit_scale_init_value))
init.constant_(module.logit_scale_t, math.log(self.config.logit_scale_init_value))
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, mean=0.0, std=factor * 0.02)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, (nn.Conv2d, nn.Linear)):
in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor
init.normal_(module.weight, std=in_proj_std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, ClapAudioSelfAttention):
init.zeros_(module.relative_position_bias_table)
| ClapPreTrainedModel |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_marker.py | {
"start": 1377,
"end": 11488
} | class ____(markers.MarkerStyle):
"""
A MarkerStyle where the snap threshold is force-disabled.
This is used to compare to polygon/star/asterisk markers which do not have
any snap threshold set.
"""
def _recache(self):
super()._recache()
self._snap_threshold = None
@check_figures_equal(extensions=['png', 'pdf', 'svg'])
def test_poly_marker(fig_test, fig_ref):
ax_test = fig_test.add_subplot()
ax_ref = fig_ref.add_subplot()
# Note, some reference sizes must be different because they have unit
# *length*, while polygon markers are inscribed in a circle of unit
# *radius*. This introduces a factor of np.sqrt(2), but since size is
# squared, that becomes 2.
size = 20**2
# Squares
ax_test.scatter([0], [0], marker=(4, 0, 45), s=size)
ax_ref.scatter([0], [0], marker='s', s=size/2)
# Diamonds, with and without rotation argument
ax_test.scatter([1], [1], marker=(4, 0), s=size)
ax_ref.scatter([1], [1], marker=UnsnappedMarkerStyle('D'), s=size/2)
ax_test.scatter([1], [1.5], marker=(4, 0, 0), s=size)
ax_ref.scatter([1], [1.5], marker=UnsnappedMarkerStyle('D'), s=size/2)
# Pentagon, with and without rotation argument
ax_test.scatter([2], [2], marker=(5, 0), s=size)
ax_ref.scatter([2], [2], marker=UnsnappedMarkerStyle('p'), s=size)
ax_test.scatter([2], [2.5], marker=(5, 0, 0), s=size)
ax_ref.scatter([2], [2.5], marker=UnsnappedMarkerStyle('p'), s=size)
# Hexagon, with and without rotation argument
ax_test.scatter([3], [3], marker=(6, 0), s=size)
ax_ref.scatter([3], [3], marker='h', s=size)
ax_test.scatter([3], [3.5], marker=(6, 0, 0), s=size)
ax_ref.scatter([3], [3.5], marker='h', s=size)
# Rotated hexagon
ax_test.scatter([4], [4], marker=(6, 0, 30), s=size)
ax_ref.scatter([4], [4], marker='H', s=size)
# Octagons
ax_test.scatter([5], [5], marker=(8, 0, 22.5), s=size)
ax_ref.scatter([5], [5], marker=UnsnappedMarkerStyle('8'), s=size)
ax_test.set(xlim=(-0.5, 5.5), ylim=(-0.5, 5.5))
ax_ref.set(xlim=(-0.5, 5.5), ylim=(-0.5, 5.5))
def test_star_marker():
# We don't really have a strict equivalent to this marker, so we'll just do
# a smoke test.
size = 20**2
fig, ax = plt.subplots()
ax.scatter([0], [0], marker=(5, 1), s=size)
ax.scatter([1], [1], marker=(5, 1, 0), s=size)
ax.set(xlim=(-0.5, 0.5), ylim=(-0.5, 1.5))
# The asterisk marker is really a star with 0-size inner circle, so the ends
# are corners and get a slight bevel. The reference markers are just singular
# lines without corners, so they have no bevel, and we need to add a slight
# tolerance.
@check_figures_equal(extensions=['png', 'pdf', 'svg'], tol=1.45)
def test_asterisk_marker(fig_test, fig_ref, request):
ax_test = fig_test.add_subplot()
ax_ref = fig_ref.add_subplot()
# Note, some reference sizes must be different because they have unit
# *length*, while asterisk markers are inscribed in a circle of unit
# *radius*. This introduces a factor of np.sqrt(2), but since size is
# squared, that becomes 2.
size = 20**2
def draw_ref_marker(y, style, size):
# As noted above, every line is doubled. Due to antialiasing, these
# doubled lines make a slight difference in the .png results.
ax_ref.scatter([y], [y], marker=UnsnappedMarkerStyle(style), s=size)
if request.getfixturevalue('ext') == 'png':
ax_ref.scatter([y], [y], marker=UnsnappedMarkerStyle(style),
s=size)
# Plus
ax_test.scatter([0], [0], marker=(4, 2), s=size)
draw_ref_marker(0, '+', size)
ax_test.scatter([0.5], [0.5], marker=(4, 2, 0), s=size)
draw_ref_marker(0.5, '+', size)
# Cross
ax_test.scatter([1], [1], marker=(4, 2, 45), s=size)
draw_ref_marker(1, 'x', size/2)
ax_test.set(xlim=(-0.5, 1.5), ylim=(-0.5, 1.5))
ax_ref.set(xlim=(-0.5, 1.5), ylim=(-0.5, 1.5))
# The bullet mathtext marker is not quite a circle, so this is not a perfect match, but
# it is close enough to confirm that the text-based marker is centred correctly. But we
# still need a small tolerance to work around that difference.
@check_figures_equal(tol=1.86)
def test_text_marker(fig_ref, fig_test):
ax_ref = fig_ref.add_subplot()
ax_test = fig_test.add_subplot()
ax_ref.plot(0, 0, marker=r'o', markersize=100, markeredgewidth=0)
ax_test.plot(0, 0, marker=r'$\bullet$', markersize=100, markeredgewidth=0)
@check_figures_equal(extensions=['png', 'pdf', 'svg'])
def test_marker_clipping(fig_ref, fig_test):
# Plotting multiple markers can trigger different optimized paths in
# backends, so compare single markers vs multiple to ensure they are
# clipped correctly.
marker_count = len(markers.MarkerStyle.markers)
marker_size = 50
ncol = 7
nrow = marker_count // ncol + 1
width = 2 * marker_size * ncol
height = 2 * marker_size * nrow * 2
fig_ref.set_size_inches((width / fig_ref.dpi, height / fig_ref.dpi))
ax_ref = fig_ref.add_axes((0, 0, 1, 1))
fig_test.set_size_inches((width / fig_test.dpi, height / fig_ref.dpi))
ax_test = fig_test.add_axes((0, 0, 1, 1))
for i, marker in enumerate(markers.MarkerStyle.markers):
x = i % ncol
y = i // ncol * 2
# Singular markers per call.
ax_ref.plot([x, x], [y, y + 1], c='k', linestyle='-', lw=3)
ax_ref.plot(x, y, c='k',
marker=marker, markersize=marker_size, markeredgewidth=10,
fillstyle='full', markerfacecolor='white')
ax_ref.plot(x, y + 1, c='k',
marker=marker, markersize=marker_size, markeredgewidth=10,
fillstyle='full', markerfacecolor='white')
# Multiple markers in a single call.
ax_test.plot([x, x], [y, y + 1], c='k', linestyle='-', lw=3,
marker=marker, markersize=marker_size, markeredgewidth=10,
fillstyle='full', markerfacecolor='white')
ax_ref.set(xlim=(-0.5, ncol), ylim=(-0.5, 2 * nrow))
ax_test.set(xlim=(-0.5, ncol), ylim=(-0.5, 2 * nrow))
ax_ref.axis('off')
ax_test.axis('off')
def test_marker_init_transforms():
"""Test that initializing marker with transform is a simple addition."""
marker = markers.MarkerStyle("o")
t = Affine2D().translate(1, 1)
t_marker = markers.MarkerStyle("o", transform=t)
assert marker.get_transform() + t == t_marker.get_transform()
def test_marker_init_joinstyle():
marker = markers.MarkerStyle("*")
styled_marker = markers.MarkerStyle("*", joinstyle="round")
assert styled_marker.get_joinstyle() == "round"
assert marker.get_joinstyle() != "round"
def test_marker_init_captyle():
marker = markers.MarkerStyle("*")
styled_marker = markers.MarkerStyle("*", capstyle="round")
assert styled_marker.get_capstyle() == "round"
assert marker.get_capstyle() != "round"
@pytest.mark.parametrize("marker,transform,expected", [
(markers.MarkerStyle("o"), Affine2D().translate(1, 1),
Affine2D().translate(1, 1)),
(markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)),
Affine2D().translate(1, 1), Affine2D().translate(2, 2)),
(markers.MarkerStyle("$|||$", transform=Affine2D().translate(1, 1)),
Affine2D().translate(1, 1), Affine2D().translate(2, 2)),
(markers.MarkerStyle(
markers.TICKLEFT, transform=Affine2D().translate(1, 1)),
Affine2D().translate(1, 1), Affine2D().translate(2, 2)),
])
def test_marker_transformed(marker, transform, expected):
new_marker = marker.transformed(transform)
assert new_marker is not marker
assert new_marker.get_user_transform() == expected
assert marker._user_transform is not new_marker._user_transform
def test_marker_rotated_invalid():
marker = markers.MarkerStyle("o")
with pytest.raises(ValueError):
new_marker = marker.rotated()
with pytest.raises(ValueError):
new_marker = marker.rotated(deg=10, rad=10)
@pytest.mark.parametrize("marker,deg,rad,expected", [
(markers.MarkerStyle("o"), 10, None, Affine2D().rotate_deg(10)),
(markers.MarkerStyle("o"), None, 0.01, Affine2D().rotate(0.01)),
(markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)),
10, None, Affine2D().translate(1, 1).rotate_deg(10)),
(markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)),
None, 0.01, Affine2D().translate(1, 1).rotate(0.01)),
(markers.MarkerStyle("$|||$", transform=Affine2D().translate(1, 1)),
10, None, Affine2D().translate(1, 1).rotate_deg(10)),
(markers.MarkerStyle(
markers.TICKLEFT, transform=Affine2D().translate(1, 1)),
10, None, Affine2D().translate(1, 1).rotate_deg(10)),
])
def test_marker_rotated(marker, deg, rad, expected):
new_marker = marker.rotated(deg=deg, rad=rad)
assert new_marker is not marker
assert new_marker.get_user_transform() == expected
assert marker._user_transform is not new_marker._user_transform
def test_marker_scaled():
marker = markers.MarkerStyle("1")
new_marker = marker.scaled(2)
assert new_marker is not marker
assert new_marker.get_user_transform() == Affine2D().scale(2)
assert marker._user_transform is not new_marker._user_transform
new_marker = marker.scaled(2, 3)
assert new_marker is not marker
assert new_marker.get_user_transform() == Affine2D().scale(2, 3)
assert marker._user_transform is not new_marker._user_transform
marker = markers.MarkerStyle("1", transform=Affine2D().translate(1, 1))
new_marker = marker.scaled(2)
assert new_marker is not marker
expected = Affine2D().translate(1, 1).scale(2)
assert new_marker.get_user_transform() == expected
assert marker._user_transform is not new_marker._user_transform
def test_alt_transform():
m1 = markers.MarkerStyle("o", "left")
m2 = markers.MarkerStyle("o", "left", Affine2D().rotate_deg(90))
assert m1.get_alt_transform().rotate_deg(90) == m2.get_alt_transform()
| UnsnappedMarkerStyle |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Data.py | {
"start": 5361,
"end": 5724
} | class ____(QtWidgets.QTextEdit):
def __init__(self, on_update):
super().__init__()
self.on_update = on_update
self.lastText = None
def focusOutEvent(self, ev):
text = self.toPlainText()
if text != self.lastText:
self.lastText = text
self.on_update()
super().focusOutEvent(ev)
| TextEdit |
python | sympy__sympy | sympy/physics/mechanics/actuator.py | {
"start": 22231,
"end": 33203
} | class ____(ActuatorBase):
"""Torque-producing actuator.
Explanation
===========
A ``TorqueActuator`` is an actuator that produces a pair of equal and
opposite torques on a pair of bodies.
Examples
========
To construct a torque actuator, an expression (or symbol) must be supplied
to represent the torque it can produce, alongside a vector specifying the
axis about which the torque will act, and a pair of frames on which the
torque will act.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (ReferenceFrame, RigidBody,
... TorqueActuator)
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> torque = symbols('T')
>>> axis = N.z
>>> parent = RigidBody('parent', frame=N)
>>> child = RigidBody('child', frame=A)
>>> bodies = (child, parent)
>>> actuator = TorqueActuator(torque, axis, *bodies)
>>> actuator
TorqueActuator(T, axis=N.z, target_frame=A, reaction_frame=N)
Note that because torques actually act on frames, not bodies,
``TorqueActuator`` will extract the frame associated with a ``RigidBody``
when one is passed instead of a ``ReferenceFrame``.
Parameters
==========
torque : Expr
The scalar expression defining the torque that the actuator produces.
axis : Vector
The axis about which the actuator applies torques.
target_frame : ReferenceFrame | RigidBody
The primary frame on which the actuator will apply the torque.
reaction_frame : ReferenceFrame | RigidBody | None
The secondary frame on which the actuator will apply the torque. Note
that the (equal and opposite) reaction torque is applied to this frame.
"""
def __init__(self, torque, axis, target_frame, reaction_frame=None):
"""Initializer for ``TorqueActuator``.
Parameters
==========
torque : Expr
The scalar expression defining the torque that the actuator
produces.
axis : Vector
The axis about which the actuator applies torques.
target_frame : ReferenceFrame | RigidBody
The primary frame on which the actuator will apply the torque.
reaction_frame : ReferenceFrame | RigidBody | None
The secondary frame on which the actuator will apply the torque.
Note that the (equal and opposite) reaction torque is applied to
this frame.
"""
self.torque = torque
self.axis = axis
self.target_frame = target_frame
self.reaction_frame = reaction_frame
@classmethod
def at_pin_joint(cls, torque, pin_joint):
"""Alternate constructor to instantiate from a ``PinJoint`` instance.
Examples
========
To create a pin joint the ``PinJoint`` class requires a name, parent
body, and child body to be passed to its constructor. It is also
possible to control the joint axis using the ``joint_axis`` keyword
argument. In this example let's use the parent body's reference frame's
z-axis as the joint axis.
>>> from sympy.physics.mechanics import (PinJoint, ReferenceFrame,
... RigidBody, TorqueActuator)
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> parent = RigidBody('parent', frame=N)
>>> child = RigidBody('child', frame=A)
>>> pin_joint = PinJoint(
... 'pin',
... parent,
... child,
... joint_axis=N.z,
... )
Let's also create a symbol ``T`` that will represent the torque applied
by the torque actuator.
>>> from sympy import symbols
>>> torque = symbols('T')
To create the torque actuator from the ``torque`` and ``pin_joint``
variables previously instantiated, these can be passed to the alternate
constructor class method ``at_pin_joint`` of the ``TorqueActuator``
class. It should be noted that a positive torque will cause a positive
displacement of the joint coordinate or that the torque is applied on
the child body with a reaction torque on the parent.
>>> actuator = TorqueActuator.at_pin_joint(torque, pin_joint)
>>> actuator
TorqueActuator(T, axis=N.z, target_frame=A, reaction_frame=N)
Parameters
==========
torque : Expr
The scalar expression defining the torque that the actuator
produces.
pin_joint : PinJoint
The pin joint, and by association the parent and child bodies, on
which the torque actuator will act. The pair of bodies acted upon
by the torque actuator are the parent and child bodies of the pin
joint, with the child acting as the reaction body. The pin joint's
axis is used as the axis about which the torque actuator will apply
its torque.
"""
if not isinstance(pin_joint, PinJoint):
msg = (
f'Value {repr(pin_joint)} passed to `pin_joint` was of type '
f'{type(pin_joint)}, must be {PinJoint}.'
)
raise TypeError(msg)
return cls(
torque,
pin_joint.joint_axis,
pin_joint.child_interframe,
pin_joint.parent_interframe,
)
@property
def torque(self):
"""The magnitude of the torque produced by the actuator."""
return self._torque
@torque.setter
def torque(self, torque):
if hasattr(self, '_torque'):
msg = (
f'Can\'t set attribute `torque` to {repr(torque)} as it is '
f'immutable.'
)
raise AttributeError(msg)
self._torque = sympify(torque, strict=True)
@property
def axis(self):
"""The axis about which the torque acts."""
return self._axis
@axis.setter
def axis(self, axis):
if hasattr(self, '_axis'):
msg = (
f'Can\'t set attribute `axis` to {repr(axis)} as it is '
f'immutable.'
)
raise AttributeError(msg)
if not isinstance(axis, Vector):
msg = (
f'Value {repr(axis)} passed to `axis` was of type '
f'{type(axis)}, must be {Vector}.'
)
raise TypeError(msg)
self._axis = axis
@property
def target_frame(self):
"""The primary reference frames on which the torque will act."""
return self._target_frame
@target_frame.setter
def target_frame(self, target_frame):
if hasattr(self, '_target_frame'):
msg = (
f'Can\'t set attribute `target_frame` to {repr(target_frame)} '
f'as it is immutable.'
)
raise AttributeError(msg)
if isinstance(target_frame, RigidBody):
target_frame = target_frame.frame
elif not isinstance(target_frame, ReferenceFrame):
msg = (
f'Value {repr(target_frame)} passed to `target_frame` was of '
f'type {type(target_frame)}, must be {ReferenceFrame}.'
)
raise TypeError(msg)
self._target_frame = target_frame
@property
def reaction_frame(self):
"""The primary reference frames on which the torque will act."""
return self._reaction_frame
@reaction_frame.setter
def reaction_frame(self, reaction_frame):
if hasattr(self, '_reaction_frame'):
msg = (
f'Can\'t set attribute `reaction_frame` to '
f'{repr(reaction_frame)} as it is immutable.'
)
raise AttributeError(msg)
if isinstance(reaction_frame, RigidBody):
reaction_frame = reaction_frame.frame
elif (
not isinstance(reaction_frame, ReferenceFrame)
and reaction_frame is not None
):
msg = (
f'Value {repr(reaction_frame)} passed to `reaction_frame` was '
f'of type {type(reaction_frame)}, must be {ReferenceFrame}.'
)
raise TypeError(msg)
self._reaction_frame = reaction_frame
def to_loads(self):
"""Loads required by the equations of motion method classes.
Explanation
===========
``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
passed to the ``loads`` parameters of its ``kanes_equations`` method
when constructing the equations of motion. This method acts as a
utility to produce the correctly-structred pairs of points and vectors
required so that these can be easily concatenated with other items in
the list of loads and passed to ``KanesMethod.kanes_equations``. These
loads are also in the correct form to also be passed to the other
equations of motion method classes, e.g. ``LagrangesMethod``.
Examples
========
The below example shows how to generate the loads produced by a torque
actuator that acts on a pair of bodies attached by a pin joint.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (PinJoint, ReferenceFrame,
... RigidBody, TorqueActuator)
>>> torque = symbols('T')
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> parent = RigidBody('parent', frame=N)
>>> child = RigidBody('child', frame=A)
>>> pin_joint = PinJoint(
... 'pin',
... parent,
... child,
... joint_axis=N.z,
... )
>>> actuator = TorqueActuator.at_pin_joint(torque, pin_joint)
The forces produces by the damper can be generated by calling the
``to_loads`` method.
>>> actuator.to_loads()
[(A, T*N.z), (N, - T*N.z)]
Alternatively, if a torque actuator is created without a reaction frame
then the loads returned by the ``to_loads`` method will contain just
the single load acting on the target frame.
>>> actuator = TorqueActuator(torque, N.z, N)
>>> actuator.to_loads()
[(N, T*N.z)]
"""
loads = [
Torque(self.target_frame, self.torque*self.axis),
]
if self.reaction_frame is not None:
loads.append(Torque(self.reaction_frame, -self.torque*self.axis))
return loads
def __repr__(self):
"""Representation of a ``TorqueActuator``."""
string = (
f'{self.__class__.__name__}({self.torque}, axis={self.axis}, '
f'target_frame={self.target_frame}'
)
if self.reaction_frame is not None:
string += f', reaction_frame={self.reaction_frame})'
else:
string += ')'
return string
| TorqueActuator |
python | viewflow__viewflow | tests/json/test_json__basics.py | {
"start": 3234,
"end": 3341
} | class ____(forms.ModelForm):
class Meta:
model = VIPClient
exclude = ["data"]
| VIPClientForm |
python | getsentry__sentry | src/sentry/issue_detection/detectors/consecutive_db_detector.py | {
"start": 1290,
"end": 11094
} | class ____(PerformanceDetector):
"""
Let X and Y be the consecutive db span count threshold and the span duration threshold respectively,
each defined in the threshold settings.
The detector first looks for X number of consecutive db query spans,
Once these set of spans are found, the detector will compare each db span in the consecutive list
to determine if they are dependant on one another.
If the sum of the durations of the independent spans exceeds Y, then a performance issue is found.
This detector assuming spans are ordered chronologically
"""
type = DetectorType.CONSECUTIVE_DB_OP
settings_key = DetectorType.CONSECUTIVE_DB_OP
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.consecutive_db_spans: list[Span] = []
self.independent_db_spans: list[Span] = []
def visit_span(self, span: Span) -> None:
span_id = span.get("span_id", None)
if not span_id or not self._is_db_query(span) or self._overlaps_last_span(span):
self._validate_and_store_performance_problem()
self._reset_variables()
return
self.consecutive_db_spans.append(span)
def _validate_and_store_performance_problem(self) -> None:
self._set_independent_spans(self.consecutive_db_spans)
if not len(self.independent_db_spans):
return
exceeds_count_threshold = len(self.consecutive_db_spans) >= self.settings.get(
"consecutive_count_threshold"
)
if not exceeds_count_threshold:
return
exceeds_span_duration_threshold = all(
get_span_duration(span).total_seconds() * 1000
> self.settings.get("span_duration_threshold")
for span in self.independent_db_spans
)
if not exceeds_span_duration_threshold:
return
time_saved = self._calculate_time_saved(self.independent_db_spans)
total_time = get_total_span_duration(self.consecutive_db_spans)
exceeds_time_saved_threshold = time_saved >= self.settings.get("min_time_saved")
if not exceeds_time_saved_threshold:
return
exceeds_time_saved_threshold_ratio = False
if total_time > 0:
exceeds_time_saved_threshold_ratio = time_saved / total_time >= self.settings.get(
"min_time_saved_ratio"
)
if not exceeds_time_saved_threshold_ratio:
return
self._store_performance_problem()
def _store_performance_problem(self) -> None:
fingerprint = self._fingerprint()
offender_span_ids = [span["span_id"] for span in self.independent_db_spans]
cause_span_ids = [span["span_id"] for span in self.consecutive_db_spans]
query: str = self.independent_db_spans[0].get("description", "")
self.stored_problems[fingerprint] = PerformanceProblem(
fingerprint=fingerprint,
op="db",
desc=query, # TODO: figure out which query to use for description
type=PerformanceConsecutiveDBQueriesGroupType,
cause_span_ids=cause_span_ids,
parent_span_ids=None,
offender_span_ids=offender_span_ids,
evidence_data={
"op": "db",
"cause_span_ids": cause_span_ids,
"parent_span_ids": None,
"offender_span_ids": offender_span_ids,
"transaction_name": self._event.get("transaction", ""),
"span_evidence_key_value": [
{"key": str(_("Transaction")), "value": self._event.get("transaction", "")},
{"key": str(_("Starting Span")), "value": self._get_starting_span()},
{
"key": str(_("Parallelizable Spans")),
"value": self._get_parallelizable_spans(),
"is_multi_value": True,
},
],
"transaction_duration": self._get_duration(self._event),
"slow_span_duration": self._calculate_time_saved(self.independent_db_spans),
"repeating_spans": get_span_evidence_value(self.independent_db_spans[0]),
"repeating_spans_compact": get_span_evidence_value(
self.independent_db_spans[0], include_op=False
),
},
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
"db",
query,
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
)
self._reset_variables()
def _get_duration(self, item: Mapping[str, Any] | None) -> float:
if not item:
return 0
start = float(item.get("start_timestamp", 0))
end = float(item.get("timestamp", 0))
return (end - start) * 1000
def _get_parallelizable_spans(self) -> list[str]:
if not self.independent_db_spans or len(self.independent_db_spans) < 1:
return [""]
return [span.get("description", "") for span in self.independent_db_spans]
def _get_starting_span(self) -> str:
if not self.consecutive_db_spans or len(self.consecutive_db_spans) < 1:
return ""
return self.consecutive_db_spans[0].get("description", "")
def _set_independent_spans(self, spans: list[Span]) -> None:
"""
Given a list of spans, checks if there is at least a single span that is independent of the rest.
To start, we are just checking for a span in a list of consecutive span without a WHERE clause
"""
independent_spans = []
for span in spans[1:]:
query = span.get("description", None)
if (
query
and contains_complete_query(span)
and "WHERE" not in query.upper()
and not CONTAINS_PARAMETER_REGEX.search(query)
):
independent_spans.append(span)
self.independent_db_spans = independent_spans
def _calculate_time_saved(self, independent_spans: list[Span]) -> float:
"""
Calculates the cost saved by running spans in parallel,
this is the maximum time saved of running all independent queries in parallel
note, maximum means it does not account for db connection times and overhead associated with parallelization,
this is where thresholds come in
"""
consecutive_spans = self.consecutive_db_spans
total_duration = get_total_span_duration(consecutive_spans)
max_independent_span_duration = get_max_span_duration(independent_spans)
sum_of_dependent_span_durations = 0.0
for span in consecutive_spans:
if span not in independent_spans:
sum_of_dependent_span_durations += get_span_duration(span).total_seconds() * 1000
return total_duration - max(max_independent_span_duration, sum_of_dependent_span_durations)
def _overlaps_last_span(self, span: Span) -> bool:
if len(self.consecutive_db_spans) == 0:
return False
last_span = self.consecutive_db_spans[-1]
last_span_ends = timedelta(seconds=last_span.get("timestamp", 0))
current_span_begins = timedelta(seconds=span.get("start_timestamp", 0))
return last_span_ends > current_span_begins
def _reset_variables(self) -> None:
self.consecutive_db_spans = []
self.independent_db_spans = []
def _is_db_query(self, span: Span) -> bool:
op: str = span.get("op", "") or ""
description: str = span.get("description", "") or ""
is_db_op = op.startswith("db")
is_query = description.strip().upper().startswith("SELECT")
return is_db_op and is_query
def _fingerprint(self) -> str:
prior_span_index = self.consecutive_db_spans.index(self.independent_db_spans[0]) - 1
hashed_spans = fingerprint_spans(
[self.consecutive_db_spans[prior_span_index]] + self.independent_db_spans
)
return f"1-{PerformanceConsecutiveDBQueriesGroupType.type_id}-{hashed_spans}"
def on_complete(self) -> None:
self._validate_and_store_performance_problem()
def is_creation_allowed_for_organization(self, organization: Organization) -> bool:
return True
def is_creation_allowed_for_project(self, project: Project) -> bool:
return self.settings["detection_enabled"]
@classmethod
def is_event_eligible(cls, event: dict[str, Any], project: Project | None = None) -> bool:
request = event.get("request", None) or None
sdk_name = get_sdk_name(event) or ""
if request:
url = request.get("url", "") or ""
# TODO(nar): `method` can be removed once SDK adoption has increased and
# we are receiving `http.method` consistently, likely beyond October 2023
method = request.get("http.method", "") or request.get("method", "") or ""
if url.endswith("/graphql") and method.lower() in ["post", "get"]:
return False
return "php" not in sdk_name.lower()
def contains_complete_query(span: Span, is_source: bool | None = False) -> bool:
# Remove the truncation check from the n_plus_one db detector.
query = span.get("description")
if is_source and query:
return True
else:
return bool(query and not query.endswith("..."))
| ConsecutiveDBSpanDetector |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position01.py | {
"start": 315,
"end": 875
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"object_position": 1})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | RaRe-Technologies__gensim | gensim/test/test_glove2word2vec.py | {
"start": 428,
"end": 1604
} | class ____(unittest.TestCase):
def setUp(self):
self.datapath = datapath('test_glove.txt')
self.output_file = get_tmpfile('glove2word2vec.test')
def test_conversion(self):
check_output(args=[
sys.executable, '-m', 'gensim.scripts.glove2word2vec',
'--input', self.datapath, '--output', self.output_file
])
# test that the converted model loads successfully
try:
self.test_model = gensim.models.KeyedVectors.load_word2vec_format(self.output_file)
self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
except Exception:
if os.path.isfile(os.path.join(self.output_file)):
self.fail('model file %s was created but could not be loaded.' % self.output_file)
else:
self.fail(
'model file %s creation failed, check the parameters and input file format.' % self.output_file
)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestGlove2Word2Vec |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 240099,
"end": 251106
} | class ____:
def test_invalid_init(self):
with pytest.raises(TypeError):
x509.Admission(
42, # type:ignore[arg-type]
None,
[],
)
with pytest.raises(TypeError):
x509.Admission(
None,
42, # type:ignore[arg-type]
[],
)
with pytest.raises(TypeError):
x509.Admission(
None,
None,
42, # type:ignore[arg-type]
)
with pytest.raises(TypeError):
x509.Admission(
None,
None,
[42], # type:ignore[list-item]
)
def test_eq(self):
admission1 = x509.Admission(None, None, [])
admission2 = x509.Admission(None, None, [])
assert admission1 == admission2
admission1 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission2 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
assert admission1 == admission2
def test_ne(self):
admission1 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission2 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[],
)
admission3 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
None,
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission4 = x509.Admission(
None,
None,
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission5 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
None,
[],
)
admission6 = x509.Admission(
None,
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[],
)
admission7 = x509.Admission(None, None, [])
assert admission1 != admission2
assert admission1 != admission3
assert admission1 != admission4
assert admission1 != admission5
assert admission1 != admission6
assert admission1 != admission7
assert admission1 != object()
def test_repr(self):
admission = x509.Admission(None, None, [])
assert repr(admission) == (
"<Admission("
"admission_authority=None, "
"naming_authority=None, "
"profession_infos=[])>"
)
admission = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
assert repr(admission) == (
"<Admission("
"admission_authority=<OtherName("
"type_id=<ObjectIdentifier("
"oid=2.5.4.6, name=countryName)>, "
"value=b'\\x04\\x04\\x13\\x02DE')>, "
"naming_authority=<NamingAuthority("
"id=<ObjectIdentifier(oid=1.2.3, name=Unknown OID)>, "
"url=https://example.com, text=spam)>, "
"profession_infos=[<ProfessionInfo("
"naming_authority=<NamingAuthority("
"id=<ObjectIdentifier(oid=1.2.3.4, name=Unknown OID)>, "
"url=https://example.org, text=eggs)>, "
"profession_items=['bacon'], "
"profession_oids=[<ObjectIdentifier("
"oid=1.2.3.4.5, name=Unknown OID)>], "
"registration_number=sausage, "
"add_profession_info=b'\\x01\\x02\\x03')>])>"
)
def test_hash(self):
admission1 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission2 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission3 = x509.Admission(
x509.UniformResourceIdentifier(value="https://www.example.de"),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission4 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(None, None, None),
[
x509.ProfessionInfo(
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3.4"),
"https://example.org",
"eggs",
),
["bacon"],
[x509.ObjectIdentifier("1.2.3.4.5")],
"sausage",
b"\x01\x02\x03",
)
],
)
admission5 = x509.Admission(
x509.OtherName(
type_id=x509.oid.NameOID.COUNTRY_NAME,
value=b"\x04\x04\x13\x02DE",
),
x509.NamingAuthority(
x509.ObjectIdentifier("1.2.3"), "https://example.com", "spam"
),
[],
)
admission6 = x509.Admission(None, None, [])
assert hash(admission1) == hash(admission2)
assert hash(admission1) != hash(admission3)
assert hash(admission1) != hash(admission4)
assert hash(admission1) != hash(admission5)
assert hash(admission1) != hash(admission6)
| TestAdmission |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_compiler.py | {
"start": 27992,
"end": 47046
} | class ____(fixtures.TestBase, AssertsCompiledSQL, CacheKeyFixture):
"""Tests MySQL-dialect specific compilation."""
__dialect__ = mysql.dialect()
def test_precolumns(self):
dialect = self.__dialect__
def gen(distinct=None, prefixes=None):
stmt = select(column("q"))
if distinct:
stmt = stmt.distinct()
if prefixes is not None:
stmt = stmt.prefix_with(*prefixes)
return str(stmt.compile(dialect=dialect))
eq_(gen(None), "SELECT q")
eq_(gen(True), "SELECT DISTINCT q")
eq_(gen(prefixes=["ALL"]), "SELECT ALL q")
eq_(gen(prefixes=["DISTINCTROW"]), "SELECT DISTINCTROW q")
# Interaction with MySQL prefix extensions
eq_(gen(None, ["straight_join"]), "SELECT straight_join q")
eq_(
gen(False, ["HIGH_PRIORITY", "SQL_SMALL_RESULT", "ALL"]),
"SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL q",
)
eq_(
gen(True, ["high_priority", sql.text("sql_cache")]),
"SELECT high_priority sql_cache DISTINCT q",
)
def test_backslash_escaping(self):
self.assert_compile(
sql.column("foo").like("bar", escape="\\"),
"foo LIKE %s ESCAPE '\\\\'",
)
dialect = mysql.dialect()
dialect._backslash_escapes = False
self.assert_compile(
sql.column("foo").like("bar", escape="\\"),
"foo LIKE %s ESCAPE '\\'",
dialect=dialect,
)
def test_limit(self):
t = sql.table("t", sql.column("col1"), sql.column("col2"))
self.assert_compile(
select(t).limit(10).offset(20),
"SELECT t.col1, t.col2 FROM t LIMIT %s, %s",
{"param_1": 20, "param_2": 10},
)
self.assert_compile(
select(t).limit(10),
"SELECT t.col1, t.col2 FROM t LIMIT %s",
{"param_1": 10},
)
self.assert_compile(
select(t).offset(10),
"SELECT t.col1, t.col2 FROM t LIMIT %s, 18446744073709551615",
{"param_1": 10},
)
@testing.combinations(
(String,),
(VARCHAR,),
(String(),),
(VARCHAR(),),
(NVARCHAR(),),
(Unicode,),
(Unicode(),),
)
def test_varchar_raise(self, type_):
type_ = sqltypes.to_instance(type_)
assert_raises_message(
exc.CompileError,
"VARCHAR requires a length on dialect mysql",
type_.compile,
dialect=mysql.dialect(),
)
t1 = Table("sometable", MetaData(), Column("somecolumn", type_))
assert_raises_message(
exc.CompileError,
r"\(in table 'sometable', column 'somecolumn'\)\: "
r"(?:N)?VARCHAR requires a length on dialect mysql",
schema.CreateTable(t1).compile,
dialect=mysql.dialect(),
)
def test_legacy_update_limit(self):
t = sql.table("t", sql.column("col1"), sql.column("col2"))
self.assert_compile(
t.update().values({"col1": 123}), "UPDATE t SET col1=%s"
)
self.assert_compile(
t.update()
.values({"col1": 123})
.with_dialect_options(mysql_limit=5),
"UPDATE t SET col1=%s LIMIT 5",
)
# does not make sense but we want this to compile
self.assert_compile(
t.update()
.values({"col1": 123})
.with_dialect_options(mysql_limit=0),
"UPDATE t SET col1=%s LIMIT 0",
)
self.assert_compile(
t.update()
.values({"col1": 123})
.with_dialect_options(mysql_limit=None),
"UPDATE t SET col1=%s",
)
self.assert_compile(
t.update()
.where(t.c.col2 == 456)
.values({"col1": 123})
.with_dialect_options(mysql_limit=1),
"UPDATE t SET col1=%s WHERE t.col2 = %s LIMIT 1",
)
def test_legacy_delete_limit(self):
t = sql.table("t", sql.column("col1"), sql.column("col2"))
self.assert_compile(t.delete(), "DELETE FROM t")
self.assert_compile(
t.delete().with_dialect_options(mysql_limit=5),
"DELETE FROM t LIMIT 5",
)
# does not make sense but we want this to compile
self.assert_compile(
t.delete().with_dialect_options(mysql_limit=0),
"DELETE FROM t LIMIT 0",
)
self.assert_compile(
t.delete().with_dialect_options(mysql_limit=None),
"DELETE FROM t",
)
self.assert_compile(
t.delete()
.where(t.c.col2 == 456)
.with_dialect_options(mysql_limit=1),
"DELETE FROM t WHERE t.col2 = %s LIMIT 1",
)
@testing.combinations((update,), (delete,))
def test_legacy_update_delete_limit_int_only(self, crud_fn):
t = sql.table("t", sql.column("col1"), sql.column("col2"))
with expect_raises(ValueError):
crud_fn(t).with_dialect_options(mysql_limit="not an int").compile(
dialect=mysql.dialect()
)
def test_utc_timestamp(self):
self.assert_compile(func.utc_timestamp(), "utc_timestamp()")
def test_utc_timestamp_fsp(self):
self.assert_compile(
func.utc_timestamp(5),
"utc_timestamp(%s)",
checkparams={"utc_timestamp_1": 5},
)
def test_sysdate(self):
self.assert_compile(func.sysdate(), "SYSDATE()")
m = mysql
@testing.combinations(
(Integer, "CAST(t.col AS SIGNED INTEGER)"),
(INT, "CAST(t.col AS SIGNED INTEGER)"),
(m.MSInteger, "CAST(t.col AS SIGNED INTEGER)"),
(m.MSInteger(unsigned=True), "CAST(t.col AS UNSIGNED INTEGER)"),
(SmallInteger, "CAST(t.col AS SIGNED INTEGER)"),
(m.MSSmallInteger, "CAST(t.col AS SIGNED INTEGER)"),
(m.MSTinyInteger, "CAST(t.col AS SIGNED INTEGER)"),
# 'SIGNED INTEGER' is a bigint, so this is ok.
(m.MSBigInteger, "CAST(t.col AS SIGNED INTEGER)"),
(m.MSBigInteger(unsigned=False), "CAST(t.col AS SIGNED INTEGER)"),
(m.MSBigInteger(unsigned=True), "CAST(t.col AS UNSIGNED INTEGER)"),
# this is kind of sucky. thank you default arguments!
(NUMERIC, "CAST(t.col AS DECIMAL)"),
(DECIMAL, "CAST(t.col AS DECIMAL)"),
(Numeric, "CAST(t.col AS DECIMAL)"),
(m.MSNumeric, "CAST(t.col AS DECIMAL)"),
(m.MSDecimal, "CAST(t.col AS DECIMAL)"),
(TIMESTAMP, "CAST(t.col AS DATETIME)"),
(DATETIME, "CAST(t.col AS DATETIME)"),
(DATE, "CAST(t.col AS DATE)"),
(TIME, "CAST(t.col AS TIME)"),
(DateTime, "CAST(t.col AS DATETIME)"),
(Date, "CAST(t.col AS DATE)"),
(Time, "CAST(t.col AS TIME)"),
(DateTime, "CAST(t.col AS DATETIME)"),
(Date, "CAST(t.col AS DATE)"),
(m.MSTime, "CAST(t.col AS TIME)"),
(m.MSTimeStamp, "CAST(t.col AS DATETIME)"),
(String, "CAST(t.col AS CHAR)"),
(Unicode, "CAST(t.col AS CHAR)"),
(UnicodeText, "CAST(t.col AS CHAR)"),
(VARCHAR, "CAST(t.col AS CHAR)"),
(NCHAR, "CAST(t.col AS CHAR)"),
(CHAR, "CAST(t.col AS CHAR)"),
(m.CHAR(charset="utf8"), "CAST(t.col AS CHAR CHARACTER SET utf8)"),
(CLOB, "CAST(t.col AS CHAR)"),
(TEXT, "CAST(t.col AS CHAR)"),
(m.TEXT(charset="utf8"), "CAST(t.col AS CHAR CHARACTER SET utf8)"),
(String(32), "CAST(t.col AS CHAR(32))"),
(Unicode(32), "CAST(t.col AS CHAR(32))"),
(CHAR(32), "CAST(t.col AS CHAR(32))"),
(CHAR(0), "CAST(t.col AS CHAR(0))"),
(m.MSString, "CAST(t.col AS CHAR)"),
(m.MSText, "CAST(t.col AS CHAR)"),
(m.MSTinyText, "CAST(t.col AS CHAR)"),
(m.MSMediumText, "CAST(t.col AS CHAR)"),
(m.MSLongText, "CAST(t.col AS CHAR)"),
(m.MSNChar, "CAST(t.col AS CHAR)"),
(m.MSNVarChar, "CAST(t.col AS CHAR)"),
(LargeBinary, "CAST(t.col AS BINARY)"),
(BLOB, "CAST(t.col AS BINARY)"),
(m.MSBlob, "CAST(t.col AS BINARY)"),
(m.MSBlob(32), "CAST(t.col AS BINARY)"),
(m.MSTinyBlob, "CAST(t.col AS BINARY)"),
(m.MSMediumBlob, "CAST(t.col AS BINARY)"),
(m.MSLongBlob, "CAST(t.col AS BINARY)"),
(m.MSBinary, "CAST(t.col AS BINARY)"),
(m.MSBinary(32), "CAST(t.col AS BINARY)"),
(m.MSVarBinary, "CAST(t.col AS BINARY)"),
(m.MSVarBinary(32), "CAST(t.col AS BINARY)"),
(Interval, "CAST(t.col AS DATETIME)"),
)
def test_cast(self, type_, expected):
t = sql.table("t", sql.column("col"))
self.assert_compile(cast(t.c.col, type_), expected)
def test_cast_type_decorator(self):
class MyInteger(sqltypes.TypeDecorator):
impl = Integer
cache_ok = True
type_ = MyInteger()
t = sql.table("t", sql.column("col"))
self.assert_compile(
cast(t.c.col, type_), "CAST(t.col AS SIGNED INTEGER)"
)
def test_cast_literal_bind(self):
expr = cast(column("foo", Integer) + 5, Integer())
self.assert_compile(
expr, "CAST(foo + 5 AS SIGNED INTEGER)", literal_binds=True
)
def test_unsupported_cast_literal_bind(self):
expr = cast(column("foo", Integer) + 5, Float)
with expect_warnings(
"Datatype FLOAT does not support CAST on MySQL/MariaDb;"
):
self.assert_compile(expr, "(foo + 5)", literal_binds=True)
m = mysql
@testing.combinations(
(m.MSBit, "t.col"),
(FLOAT, "t.col"),
(Float, "t.col"),
(m.MSFloat, "t.col"),
(m.MSDouble, "t.col"),
(DOUBLE, "t.col"),
(Double, "t.col"),
(m.MSReal, "t.col"),
(m.MSYear, "t.col"),
(m.MSYear(2), "t.col"),
(Boolean, "t.col"),
(BOOLEAN, "t.col"),
(m.MSEnum, "t.col"),
(m.MSEnum("1", "2"), "t.col"),
(m.MSSet, "t.col"),
(m.MSSet("1", "2"), "t.col"),
)
def test_unsupported_casts(self, type_, expected):
t = sql.table("t", sql.column("col"))
with expect_warnings(
"Datatype .* does not support CAST on MySQL/MariaDb;"
):
self.assert_compile(cast(t.c.col, type_), expected)
@testing.combinations(
(m.FLOAT, "CAST(t.col AS FLOAT)"),
(Float, "CAST(t.col AS FLOAT)"),
(FLOAT, "CAST(t.col AS FLOAT)"),
(Double, "CAST(t.col AS DOUBLE)"),
(DOUBLE, "CAST(t.col AS DOUBLE)"),
(m.DOUBLE, "CAST(t.col AS DOUBLE)"),
(m.FLOAT, "CAST(t.col AS FLOAT)"),
argnames="type_,expected",
)
@testing.combinations(True, False, argnames="maria_db")
def test_float_cast(self, type_, expected, maria_db):
dialect = mysql.dialect()
if maria_db:
dialect.is_mariadb = maria_db
dialect.server_version_info = (10, 4, 5)
else:
dialect.server_version_info = (8, 0, 17)
t = sql.table("t", sql.column("col"))
self.assert_compile(cast(t.c.col, type_), expected, dialect=dialect)
def test_cast_grouped_expression_non_castable(self):
with expect_warnings(
"Datatype FLOAT does not support CAST on MySQL/MariaDb;"
):
self.assert_compile(
cast(sql.column("x") + sql.column("y"), Float), "(x + y)"
)
def test_extract(self):
t = sql.table("t", sql.column("col1"))
for field in "year", "month", "day":
self.assert_compile(
select(extract(field, t.c.col1)),
"SELECT EXTRACT(%s FROM t.col1) AS anon_1 FROM t" % field,
)
# milliseconds to millisecond
self.assert_compile(
select(extract("milliseconds", t.c.col1)),
"SELECT EXTRACT(millisecond FROM t.col1) AS anon_1 FROM t",
)
def test_too_long_index(self):
exp = "ix_zyrenian_zyme_zyzzogeton_zyzzogeton_zyrenian_zyme_zyz_5cd2"
tname = "zyrenian_zyme_zyzzogeton_zyzzogeton"
cname = "zyrenian_zyme_zyzzogeton_zo"
t1 = Table(tname, MetaData(), Column(cname, Integer, index=True))
ix1 = list(t1.indexes)[0]
self.assert_compile(
schema.CreateIndex(ix1),
"CREATE INDEX %s ON %s (%s)" % (exp, tname, cname),
)
def test_innodb_autoincrement(self):
t1 = Table(
"sometable",
MetaData(),
Column(
"assigned_id", Integer(), primary_key=True, autoincrement=False
),
Column("id", Integer(), primary_key=True, autoincrement=True),
mysql_engine="InnoDB",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (assigned_id "
"INTEGER NOT NULL, id INTEGER NOT NULL "
"AUTO_INCREMENT, PRIMARY KEY (id, assigned_id)"
")ENGINE=InnoDB",
)
t1 = Table(
"sometable",
MetaData(),
Column(
"assigned_id", Integer(), primary_key=True, autoincrement=True
),
Column("id", Integer(), primary_key=True, autoincrement=False),
mysql_engine="InnoDB",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (assigned_id "
"INTEGER NOT NULL AUTO_INCREMENT, id "
"INTEGER NOT NULL, PRIMARY KEY "
"(assigned_id, id))ENGINE=InnoDB",
)
def test_innodb_autoincrement_reserved_word_column_name(self):
t1 = Table(
"sometable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=False),
Column("order", Integer(), primary_key=True, autoincrement=True),
mysql_engine="InnoDB",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable ("
"id INTEGER NOT NULL, "
"`order` INTEGER NOT NULL AUTO_INCREMENT, "
"PRIMARY KEY (`order`, id)"
")ENGINE=InnoDB",
)
def test_create_table_with_partition(self):
t1 = Table(
"testtable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=True),
Column(
"other_id", Integer(), primary_key=True, autoincrement=False
),
mysql_partitions="2",
mysql_partition_by="KEY(other_id)",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE testtable ("
"id INTEGER NOT NULL AUTO_INCREMENT, "
"other_id INTEGER NOT NULL, "
"PRIMARY KEY (id, other_id)"
")PARTITION BY KEY(other_id) PARTITIONS 2",
)
def test_create_table_with_subpartition(self):
t1 = Table(
"testtable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=True),
Column(
"other_id", Integer(), primary_key=True, autoincrement=False
),
mysql_partitions="2",
mysql_partition_by="KEY(other_id)",
mysql_subpartition_by="HASH(some_expr)",
mysql_subpartitions="2",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE testtable ("
"id INTEGER NOT NULL AUTO_INCREMENT, "
"other_id INTEGER NOT NULL, "
"PRIMARY KEY (id, other_id)"
")PARTITION BY KEY(other_id) PARTITIONS 2 "
"SUBPARTITION BY HASH(some_expr) SUBPARTITIONS 2",
)
def test_create_table_with_partition_hash(self):
t1 = Table(
"testtable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=True),
Column(
"other_id", Integer(), primary_key=True, autoincrement=False
),
mysql_partitions="2",
mysql_partition_by="HASH(other_id)",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE testtable ("
"id INTEGER NOT NULL AUTO_INCREMENT, "
"other_id INTEGER NOT NULL, "
"PRIMARY KEY (id, other_id)"
")PARTITION BY HASH(other_id) PARTITIONS 2",
)
def test_create_table_with_partition_and_other_opts(self):
t1 = Table(
"testtable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=True),
Column(
"other_id", Integer(), primary_key=True, autoincrement=False
),
mysql_stats_sample_pages="2",
mysql_partitions="2",
mysql_partition_by="HASH(other_id)",
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE testtable ("
"id INTEGER NOT NULL AUTO_INCREMENT, "
"other_id INTEGER NOT NULL, "
"PRIMARY KEY (id, other_id)"
")STATS_SAMPLE_PAGES=2 PARTITION BY HASH(other_id) PARTITIONS 2",
)
def test_create_table_with_collate(self):
# issue #5411
t1 = Table(
"testtable",
MetaData(),
Column("id", Integer(), primary_key=True, autoincrement=True),
mysql_engine="InnoDB",
mysql_collate="utf8_icelandic_ci",
mysql_charset="utf8",
)
first_part = (
"CREATE TABLE testtable ("
"id INTEGER NOT NULL AUTO_INCREMENT, "
"PRIMARY KEY (id))"
)
try:
self.assert_compile(
schema.CreateTable(t1),
first_part
+ "ENGINE=InnoDB CHARSET=utf8 COLLATE utf8_icelandic_ci",
)
except AssertionError:
self.assert_compile(
schema.CreateTable(t1),
first_part
+ "CHARSET=utf8 ENGINE=InnoDB COLLATE utf8_icelandic_ci",
)
def test_inner_join(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
self.assert_compile(
t1.join(t2, t1.c.x == t2.c.y), "t1 INNER JOIN t2 ON t1.x = t2.y"
)
def test_outer_join(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
self.assert_compile(
t1.outerjoin(t2, t1.c.x == t2.c.y),
"t1 LEFT OUTER JOIN t2 ON t1.x = t2.y",
)
def test_full_outer_join(self):
t1 = table("t1", column("x"))
t2 = table("t2", column("y"))
self.assert_compile(
t1.outerjoin(t2, t1.c.x == t2.c.y, full=True),
"t1 FULL OUTER JOIN t2 ON t1.x = t2.y",
)
| SQLTest |
python | simplejson__simplejson | simplejson/tests/test_namedtuple.py | {
"start": 997,
"end": 1041
} | class ____(object):
_asdict = None
| DeadDuck |
python | davidhalter__jedi | test/completion/pep0484_typing.py | {
"start": 3861,
"end": 4822
} | class ____(typing.Dict[str, int]):
def setdud(self):
pass
def testdict(x: TestDict):
#? ["setdud", "setdefault"]
x.setd
for key in x.keys():
#? str()
key
for value in x.values():
#? int()
value
x = TestDict()
#? ["setdud", "setdefault"]
x.setd
for key in x.keys():
#? str()
key
for value in x.values():
#? int()
value
WrappingType = typing.NewType('WrappingType', str) # Chosen arbitrarily
y = WrappingType(0) # Per https://github.com/davidhalter/jedi/issues/1015#issuecomment-355795929
#? str()
y
def testnewtype(y: WrappingType):
#? str()
y
#? ["upper"]
y.u
WrappingType2 = typing.NewType()
def testnewtype2(y: WrappingType2):
#?
y
#? []
y.
# The type of a NewType is equivalent to the type of its underlying type.
MyInt = typing.NewType('MyInt', int)
x = type(MyInt)
#? type.mro
x.mro
PlainInt = int
y = type(PlainInt)
#? type.mro
y.mro
| TestDict |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 19271,
"end": 22512
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
# get all proj
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_values is not None:
# decoder-only roberta can have a simple dynamic cache for example
current_past_key_values = past_key_values
if isinstance(past_key_values, EncoderDecoderCache):
current_past_key_values = past_key_values.self_attention_cache
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
key_layer, value_layer = current_past_key_values.update(
key_layer,
value_layer,
self.layer_idx,
{"cache_position": cache_position},
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.roberta.modeling_roberta.RobertaCrossAttention with Roberta->BridgeTower
| BridgeTowerSelfAttention |
python | Lightning-AI__lightning | tests/tests_fabric/utilities/test_device_dtype_mixin.py | {
"start": 432,
"end": 4017
} | class ____(_DeviceDtypeModuleMixin):
def __init__(self) -> None:
super().__init__()
self.module = SubModule()
@pytest.mark.parametrize(
("dst_device_str", "dst_type"),
[
("cpu", torch.half),
("cpu", torch.float),
("cpu", torch.double),
pytest.param("cuda:0", torch.half, marks=RunIf(min_cuda_gpus=1)),
pytest.param("cuda:0", torch.float, marks=RunIf(min_cuda_gpus=1)),
pytest.param("cuda:0", torch.double, marks=RunIf(min_cuda_gpus=1)),
pytest.param("mps:0", torch.float, marks=RunIf(mps=True)), # double and half are not yet supported.
],
)
@RunIf(min_cuda_gpus=1)
def test_submodules_device_and_dtype(dst_device_str, dst_type):
"""Test that the device and dtype property updates propagate through mixed nesting of regular nn.Modules and the
special modules of type DeviceDtypeModuleMixin (e.g. Metric or LightningModule)."""
dst_device = torch.device(dst_device_str)
model = TopModule()
assert model.device == torch.device("cpu")
model = model.to(device=dst_device, dtype=dst_type)
# nn.Module does not have these attributes
assert not hasattr(model.module, "_device")
assert not hasattr(model.module, "_dtype")
# device and dtype change should propagate down into all children
assert model.device == model.module.module.device == dst_device
assert model.dtype == model.module.module.dtype == dst_type
@pytest.mark.parametrize(
"dst_device_str",
[
"cpu",
pytest.param("cuda:0", marks=RunIf(min_cuda_gpus=1)),
pytest.param("mps:0", marks=RunIf(mps=True)),
],
)
@pytest.mark.parametrize(
"dst_type",
[
torch.float,
pytest.param(torch.half, marks=RunIf(mps=False)),
pytest.param(torch.double, marks=RunIf(mps=False)),
],
)
def test_submodules_context_device_and_dtype(dst_device_str, dst_type):
dst_device = torch.device(dst_device_str)
with _DtypeContextManager(dst_type), dst_device:
model = TopModule()
assert model.device == dst_device
assert model.dtype == dst_type
@pytest.mark.parametrize(
"device",
[
None, # explicitly call without an index to see if the returning device contains an index
0,
torch.device("cuda", 0),
],
)
@RunIf(min_cuda_gpus=1)
def test_cuda_device(device):
model = TopModule()
model.cuda(device)
device = model.device
assert device.type == "cuda"
assert device.index is not None
assert device.index == torch.cuda.current_device()
@RunIf(min_cuda_gpus=1)
def test_cpu_device():
model = SubSubModule().cuda()
assert model.device.type == "cuda"
assert model.device.index == 0
model.cpu()
assert model.device.type == "cpu"
assert model.device.index is None
@RunIf(min_cuda_gpus=2)
def test_cuda_current_device():
"""Test that calling .cuda() moves the model to the correct device and respects current cuda device setting."""
class CudaModule(_DeviceDtypeModuleMixin):
def __init__(self):
super().__init__()
self.layer = nn.Linear(1, 1)
model = CudaModule()
torch.cuda.set_device(0)
model.cuda(1)
assert model.device == torch.device("cuda", 1)
assert model.layer.weight.device == torch.device("cuda", 1)
torch.cuda.set_device(1)
model.cuda() # model is already on device 1, and calling .cuda() without device index should not move model
assert model.device == torch.device("cuda", 1)
assert model.layer.weight.device == torch.device("cuda", 1)
| TopModule |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_apps.py | {
"start": 19321,
"end": 34909
} | class ____(SentryAppsTest):
method = "post"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def assert_sentry_app_status_code(self, sentry_app: SentryApp, status_code: int) -> None:
token = ApiToken.objects.create(
application=sentry_app.application,
user_id=self.user.id,
refresh_token=None,
scope_list=["project:read", "event:read", "org:read"],
)
with assume_test_silo_mode(SiloMode.REGION):
url = reverse("sentry-api-0-organization-projects", args=[self.organization.slug])
response = self.client.get(
url, HTTP_ORIGIN="http://example.com", HTTP_AUTHORIZATION=f"Bearer {token.token}"
)
assert response.status_code == status_code
def test_creates_sentry_app(self) -> None:
response = self.get_success_response(**self.get_data(), status_code=201)
content = orjson.loads(response.content)
for key, value in EXPECTED.items():
assert key in content
if isinstance(value, list):
assert sorted(content[key]) == sorted(value)
else:
assert content[key] == value
def test_non_unique_app_slug_fails(self) -> None:
sentry_app = self.create_sentry_app(name="Foo Bar", organization=self.organization)
deletions.exec_sync(sentry_app)
data = self.get_data(name=sentry_app.name)
response = self.get_error_response(**data, status_code=400)
assert response.data == {"name": ["Name Foo Bar is already taken, please use another."]}
def test_same_name_internal_integration(self) -> None:
self.create_project(organization=self.organization)
sentry_app = self.create_internal_integration(
name="Foo Bar", organization=self.organization
)
data = self.get_data(name=sentry_app.name)
response = self.get_success_response(**data, status_code=201)
assert response.data["name"] == sentry_app.name
assert response.data["slug"] != sentry_app.slug
def test_cannot_create_app_without_organization(self) -> None:
self.create_project(organization=self.organization)
sentry_app = self.create_internal_integration(name="Foo Bar")
data = self.get_data(name=sentry_app.name, organization=None)
response = self.get_error_response(**data, status_code=404)
assert response.data == {
"detail": "Please provide a valid value for the 'organization' field.",
}
def test_cannot_create_app_in_alien_organization(self) -> None:
other_organization = self.create_organization()
self.create_project(organization=other_organization)
sentry_app = self.create_internal_integration(name="Foo Bar")
data = self.get_data(name=sentry_app.name, organization=other_organization.slug)
response = self.get_error_response(**data, status_code=403)
assert response.data["detail"].startswith("User does not belong to")
def test_user_cannot_create_app_in_nonexistent_organization(self) -> None:
self.create_project(organization=self.organization)
sentry_app = self.create_internal_integration(name="Foo Bar")
data = self.get_data(name=sentry_app.name, organization="some-non-existent-org")
response = self.get_error_response(**data, status_code=403)
assert response.data["detail"].startswith("User does not belong to")
def test_nonsuperuser_cannot_create_with_popularity(self) -> None:
    """A regular user's requested popularity is ignored; the default wins."""
    payload = self.get_data(popularity=POPULARITY)
    response = self.get_success_response(**payload, status_code=201)
    body = orjson.loads(response.content)
    assert body.get("popularity") == self.default_popularity
def test_long_name_internal_integration(self) -> None:
    """Names longer than 57 characters are rejected."""
    self.create_project(organization=self.organization)
    too_long_name = "k" * 58
    response = self.get_error_response(**self.get_data(name=too_long_name), status_code=400)
    assert response.data == {"name": ["Cannot exceed 57 characters"]}
def test_invalid_with_missing_webhook_url_scheme(self) -> None:
    """A webhook URL without an http(s) scheme is rejected."""
    payload = self.get_data(webhookUrl="example.com")
    response = self.get_error_response(**payload, status_code=400)
    assert response.data == {"webhookUrl": ["URL must start with http[s]://"]}
def test_cannot_create_app_without_correct_permissions(self) -> None:
    """Issue webhooks require the event:read scope; project:read alone fails."""
    payload = self.get_data(scopes=("project:read",))
    response = self.get_error_response(**payload, status_code=400)
    assert response.data == {"events": ["issue webhooks require the event:read permission."]}
def test_create_alert_rule_action(self) -> None:
    """An app created with an alert-rule-action schema echoes it back."""
    request_schema = {"elements": [self.create_alert_rule_action_schema()]}
    expected = {**EXPECTED, "schema": {"elements": [self.create_alert_rule_action_schema()]}}
    response = self.get_success_response(**self.get_data(schema=request_schema), status_code=201)
    content = orjson.loads(response.content)
    for key, value in expected.items():
        assert key in content
        # List-valued fields may come back in any order; compare sorted.
        if isinstance(value, list):
            assert sorted(content[key]) == sorted(value)
        else:
            assert content[key] == value
@patch("sentry.analytics.record")
def test_wrong_schema_format(self, record: MagicMock) -> None:
    """A malformed UI schema is rejected with a 400 and the validation
    failure is reported as an analytics event."""
    kwargs = {
        "schema": {
            "elements": [
                {
                    "type": "alert-rule-action",
                    "title": "Create task",
                    "settings": {
                        "type": "alert-rule-settings",
                        "uri": "/sentry/alert-rule",
                        "required_fields": [
                            {
                                "type": "select",
                                "label": "Channel",
                                "name": "channel",
                                "options": [
                                    # Option items should have 2 elements
                                    # i.e. ['channel_id', '#general']
                                    ["#general"]
                                ],
                            }
                        ],
                    },
                }
            ]
        }
    }
    response = self.get_error_response(**self.get_data(**kwargs), status_code=400)
    assert response.data == {
        "schema": ["['#general'] is too short for element of type 'alert-rule-action'"]
    }
    # The validation failure must also be recorded to analytics with the
    # offending schema serialized verbatim.
    assert_last_analytics_event(
        record,
        SentryAppSchemaValidationError(
            schema=orjson.dumps(kwargs["schema"]).decode(),
            user_id=self.user.id,
            sentry_app_name="MyApp",
            organization_id=self.organization.id,
            error_message="['#general'] is too short for element of type 'alert-rule-action'",
        ),
    )
@with_feature("organizations:integrations-event-hooks")
def test_can_create_with_error_created_hook_with_flag(self) -> None:
    """With the event-hooks feature enabled, the 'error' event is accepted."""
    expected = {**EXPECTED, "events": ["error"]}
    payload = self.get_data(events=("error",))
    response = self.get_success_response(**payload, status_code=201)
    content = orjson.loads(response.content)
    for key, value in expected.items():
        assert key in content
        # List-valued fields may come back in any order; compare sorted.
        if isinstance(value, list):
            assert sorted(content[key]) == sorted(value)
        else:
            assert content[key] == value
def test_cannot_create_with_error_created_hook_without_flag(self) -> None:
    """Without the event-hooks feature, subscribing to 'error' is forbidden."""
    with Feature({"organizations:integrations-event-hooks": False}):
        payload = self.get_data(events=("error",))
        response = self.get_error_response(**payload, status_code=403)
        expected_message = (
            "Your organization does not have access to the 'error' resource subscription."
        )
        assert response.data == {"non_field_errors": [expected_message]}
def test_allows_empty_schema(self) -> None:
    """An explicitly empty UI schema is valid and the app is created.

    Bug fix: the keyword was previously misspelled ``shema={}``, so the
    empty dict never reached the ``schema`` field and the test did not
    actually exercise the empty-schema path.
    """
    self.get_success_response(**self.get_data(schema={}), status_code=201)
def test_generated_slug_not_entirely_numeric(self) -> None:
    """A purely numeric name gets a suffixed, non-numeric slug."""
    response = self.get_success_response(**self.get_data(name="1234"), status_code=201)
    generated_slug = response.data["slug"]
    assert generated_slug.startswith("1234-")
    assert not generated_slug.isdecimal()
def test_missing_name(self) -> None:
    """Name is required; omitting it yields a field error."""
    payload = self.get_data(name=None)
    response = self.get_error_response(**payload, status_code=400)
    assert "name" in response.data
def test_invalid_events(self) -> None:
    """An unknown event subscription yields a field error."""
    payload = self.get_data(events=["project"])
    response = self.get_error_response(**payload, status_code=400)
    assert "events" in response.data
def test_invalid_scope(self) -> None:
    """An unknown scope string yields a field error."""
    payload = self.get_data(scopes="not:ascope")
    response = self.get_error_response(**payload, status_code=400)
    assert "scopes" in response.data
def test_missing_webhook_url(self) -> None:
    """The webhook URL is required; omitting it yields a field error."""
    payload = self.get_data(webhookUrl=None)
    response = self.get_error_response(**payload, status_code=400)
    assert "webhookUrl" in response.data
def test_allows_empty_permissions(self) -> None:
    """Omitting scopes is allowed and results in an empty scope list."""
    response = self.get_success_response(**self.get_data(scopes=None), status_code=201)
    assert response.data["scopes"] == []
def test_creates_internal_integration(self) -> None:
    """Internal apps get a suffixed slug, internal status, no install
    verification, and no pre-provisioned installation token."""
    self.create_project(organization=self.organization)
    response = self.get_success_response(**self.get_data(isInternal=True), status_code=201)
    slug = response.data["slug"]
    assert re.match(r"myapp\-[0-9a-zA-Z]+", slug)
    assert response.data["status"] == SentryAppStatus.as_str(SentryAppStatus.INTERNAL)
    assert not response.data["verifyInstall"]
    # The installation must exist but carry no token.
    app = SentryApp.objects.get(slug=slug)
    installation = SentryAppInstallation.objects.get(sentry_app=app)
    with pytest.raises(SentryAppInstallationToken.DoesNotExist):
        SentryAppInstallationToken.objects.get(sentry_app_installation=installation)
def test_no_author_public_integration(self) -> None:
    """Public integrations require an author."""
    response = self.get_error_response(**self.get_data(author=None), status_code=400)
    assert response.data == {"author": ["author required for public integrations"]}
def test_no_author_internal_integration(self) -> None:
    """Internal integrations may omit the author."""
    self.create_project(organization=self.organization)
    payload = self.get_data(isInternal=True, author=None)
    self.get_success_response(**payload, status_code=201)
def test_create_integration_with_allowed_origins(self) -> None:
    """allowedOrigins are persisted on the app's underlying API application."""
    origins = ("google.com", "example.com")
    payload = self.get_data(allowedOrigins=origins)
    response = self.get_success_response(**payload, status_code=201)
    app = SentryApp.objects.get(slug=response.data["slug"])
    assert app.application is not None
    assert app.application.get_allowed_origins() == list(origins)
def test_create_internal_integration_with_allowed_origins_and_test_route(self) -> None:
    """An internal app created with allowed origins can use the test route."""
    self.create_project(organization=self.organization)
    payload = self.get_data(
        isInternal=True,
        allowedOrigins=("example.com",),
        scopes=("project:read", "event:read", "org:read"),
    )
    response = self.get_success_response(**payload, status_code=201)
    app = SentryApp.objects.get(slug=response.data["slug"])
    assert app.application is not None
    assert app.application.get_allowed_origins() == ["example.com"]
    self.assert_sentry_app_status_code(app, status_code=200)
def test_create_internal_integration_without_allowed_origins_and_test_route(self) -> None:
    """Without allowed origins, the test route is rejected for internal apps."""
    self.create_project(organization=self.organization)
    payload = self.get_data(isInternal=True, scopes=("project:read", "event:read", "org:read"))
    response = self.get_success_response(**payload, status_code=201)
    app = SentryApp.objects.get(slug=response.data["slug"])
    assert app.application is not None
    assert app.application.get_allowed_origins() == []
    self.assert_sentry_app_status_code(app, status_code=400)
def test_members_cant_create(self) -> None:
    """Organization members (non-owners) may not create sentry apps."""
    # Keep at least one owner around before demoting the acting user.
    self.create_member(organization=self.organization, user=self.create_user(), role="owner")
    with assume_test_silo_mode(SiloMode.REGION):
        membership = OrganizationMember.objects.get(
            user_id=self.user.id, organization=self.organization
        )
        membership.role = "member"
        membership.save()
    self.get_error_response(**self.get_data(), status_code=403)
def test_create_integration_exceeding_scopes(self) -> None:
    """A manager cannot grant an app a scope (org:admin) above their own role."""
    # create extra owner because we are demoting one
    self.create_member(organization=self.organization, user=self.create_user(), role="owner")
    with assume_test_silo_mode(SiloMode.REGION):
        member_om = OrganizationMember.objects.get(
            user_id=self.user.id, organization=self.organization
        )
        member_om.role = "manager"
        member_om.save()
    data = self.get_data(events=(), scopes=("org:read", "org:write", "org:admin"))
    response = self.get_error_response(**data, status_code=400)
    # Only the scope exceeding the requester's own permission is reported.
    assert response.data == {
        "scopes": [
            "Requested permission of org:admin exceeds requester's permission."
            " Please contact an administrator to make the requested change.",
        ]
    }
def test_create_integration_with_token_only_scopes(self) -> None:
    """Scopes that exist only for tokens (e.g. project:distribution) are
    grantable to an integration even though no user role carries them."""
    self.create_project(organization=self.organization)
    payload = self.get_data(
        events=(),
        scopes=("project:read", "project:distribution"),
        isInternal=True,
    )
    response = self.get_success_response(**payload, status_code=201)
    assert response.data["scopes"] == ["project:distribution", "project:read"]
def test_create_internal_integration_with_non_globally_unique_name(self) -> None:
    """Internal integration names only need to be unique per organization."""
    self.create_project(organization=self.organization)

    # Name collides with a public app owned by a different organization.
    first_other_org = self.create_organization()
    public_app = self.create_sentry_app(name="Foo Bar", organization=first_other_org)
    self.get_success_response(
        **self.get_data(name=public_app.name, isInternal=True),
        status_code=201,
    )

    # Name collides with an internal app owned by a different organization.
    second_other_org = self.create_organization()
    self.create_project(organization=second_other_org)
    internal_app = self.create_internal_integration(
        name="Foo Bar 2", organization=second_other_org
    )
    self.get_success_response(
        **self.get_data(name=internal_app.name, isInternal=True),
        status_code=201,
    )
| PostSentryAppsTest |
python | scipy__scipy | scipy/sparse/linalg/_dsolve/linsolve.py | {
"start": 782,
"end": 31179
} | class ____(UserWarning):
"""Warning for exactly singular matrices."""
pass
def use_solver(**kwargs):
    """Select the default sparse direct solver.

    Parameters
    ----------
    useUmfpack : bool, optional
        Prefer UMFPACK over SuperLU. Only effective when
        ``scikits.umfpack`` is installed. Default: True.
    assumeSortedIndices : bool, optional
        Allow UMFPACK to skip sorting the indices of a CSR/CSC matrix.
        Only effective when UMFPACK is in use. Default: False.

    Notes
    -----
    UMFPACK is the default solver whenever ``scikits.umfpack`` is
    available; passing ``useUmfpack=False`` forces the always-present
    SuperLU-based solver. If the matrix is known to have sorted
    column/row indices, ``assumeSortedIndices=True`` saves some work.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import use_solver, spsolve
    >>> from scipy.sparse import csc_array
    >>> R = np.random.randn(5, 5)
    >>> A = csc_array(R)
    >>> b = np.random.randn(5)
    >>> use_solver(useUmfpack=False)  # enforce SuperLU over UMFPACK
    >>> x = spsolve(A, b)
    >>> np.allclose(A.dot(x), b)
    True
    >>> use_solver(useUmfpack=True)  # reset UMFPACK usage to default
    """
    global useUmfpack
    try:
        useUmfpack.u = kwargs['useUmfpack']
    except KeyError:
        pass
    if useUmfpack.u and 'assumeSortedIndices' in kwargs:
        umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
# A.dtype.name can only be "float64" or
# "complex128" in control flow
f_type = getattr(np, A.dtype.name)
# control flow may allow for more index
# types to get through here
i_type = getattr(np, A.indices.dtype.name)
try:
family = _families[(f_type, i_type)]
except KeyError as e:
msg = ('only float64 or complex128 matrices with int32 or int64 '
f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
raise ValueError(msg) from e
# See gh-8278. Considered converting only if
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
# but that didn't always fix the issue.
family = family[0] + "l"
A_new = copy.copy(A)
A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
A_new.indices = np.asarray(A.indices, dtype=np.int64)
return family, A_new
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse array or matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse array or matrix
        The matrix or vector representing the right hand side of the equation.
        If a vector, b.shape must be (n,) or (n, 1).
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')
        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
    use_umfpack : bool, optional
        if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
        [6]_ . This is only referenced if b is a vector and
        ``scikits.umfpack`` is installed.

    Returns
    -------
    x : ndarray or sparse array or matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[1]
        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])

    Notes
    -----
    For solving the matrix expression AX = B, this solver assumes the resulting
    matrix X is sparse, as is often the case for very sparse inputs. If the
    resulting X is dense, the construction of this sparse result will be
    relatively expensive. In that case, consider converting A to a dense
    matrix and using scipy.linalg.solve or its variants.

    References
    ----------
    .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
           COLAMD, an approximate column minimum degree ordering algorithm,
           ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
           :doi:`10.1145/1024074.1024080`
    .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
           minimum degree ordering algorithm, ACM Trans. on Mathematical
           Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
    .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
           multifrontal method with a column pre-ordering strategy, ACM
           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
           https://dl.acm.org/doi/abs/10.1145/992200.992206
    .. [4] T. A. Davis, A column pre-ordering strategy for the
           unsymmetric-pattern multifrontal method, ACM Trans.
           on Mathematical Software, 30(2), 2004, pp. 165--195.
           https://dl.acm.org/doi/abs/10.1145/992200.992205
    .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
           method for unsymmetric sparse matrices, ACM Trans. on
           Mathematical Software, 25(1), 1999, pp. 1--19.
           https://doi.org/10.1145/305658.287640
    .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
           method for sparse LU factorization, SIAM J. Matrix Analysis and
           Computations, 18(1), 1997, pp. 140--158.
           https://doi.org/10.1137/S0895479894246905T.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_array
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).toarray(), B.toarray())
    True
    """
    # pydata/sparse inputs are converted to scipy for the solve; the result
    # is converted back to the caller's class at the end.
    is_pydata_sparse = is_pydata_spmatrix(b)
    pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
    A = convert_pydata_sparse_to_scipy(A)
    b = convert_pydata_sparse_to_scipy(b)
    if not (issparse(A) and A.format in ("csc", "csr")):
        A = csc_array(A)
        warn('spsolve requires A be CSC or CSR matrix format',
             SparseEfficiencyWarning, stacklevel=2)
    # b is a vector only if b have shape (n,) or (n, 1)
    b_is_sparse = issparse(b)
    if not b_is_sparse:
        b = asarray(b)
    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
    # sum duplicates for non-canonical format
    A.sum_duplicates()
    A = A._asfptype()  # upcast to a floating point format
    # Work in a common dtype so A and b agree before the native solve.
    result_dtype = np.promote_types(A.dtype, b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)
    # validate input shapes
    M, N = A.shape
    if (M != N):
        raise ValueError(f"matrix must be square (has shape {(M, N)})")
    if M != b.shape[0]:
        raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
    # Lazily initialize the module-level default (thread-local style flag).
    if not hasattr(useUmfpack, 'u'):
        useUmfpack.u = not noScikit
    use_umfpack = use_umfpack and useUmfpack.u
    if b_is_vector and use_umfpack:
        # UMFPACK path: only taken for vector right-hand sides.
        if b_is_sparse:
            b_vec = b.toarray()
        else:
            b_vec = b
        b_vec = asarray(b_vec, dtype=A.dtype).ravel()
        if noScikit:
            raise RuntimeError('Scikits.umfpack not installed.')
        if A.dtype.char not in 'dD':
            # UMFPACK only handles float64/complex128 data.
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack.u = False")
        umf_family, A = _get_umf_family(A)
        umf = umfpack.UmfpackContext(umf_family)
        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
                         autoTranspose=True)
    else:
        if b_is_vector and b_is_sparse:
            b = b.toarray()
            b_is_sparse = False
        if not b_is_sparse:
            # Dense b: single call into SuperLU's simple driver.
            if A.format == "csc":
                flag = 1  # CSC format
            else:
                flag = 0  # CSR format
            indices = A.indices.astype(np.intc, copy=False)
            indptr = A.indptr.astype(np.intc, copy=False)
            options = dict(ColPerm=permc_spec)
            x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
                                    b, flag, options=options)
            if info != 0:
                # Singular system: warn (don't raise) and return all-NaN.
                warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
                x.fill(np.nan)
            if b_is_vector:
                x = x.ravel()
        else:
            # b is sparse
            Afactsolve = factorized(A)
            if not (b.format == "csc" or is_pydata_spmatrix(b)):
                warn('spsolve is more efficient when sparse b '
                     'is in the CSC matrix format',
                     SparseEfficiencyWarning, stacklevel=2)
                b = csc_array(b)
            # Create a sparse output matrix by repeatedly applying
            # the sparse factorization to solve columns of b.
            data_segs = []
            row_segs = []
            col_segs = []
            for j in range(b.shape[1]):
                bj = b[:, j].toarray().ravel()
                xj = Afactsolve(bj)
                # Keep only the nonzeros of each solved column.
                w = np.flatnonzero(xj)
                segment_length = w.shape[0]
                row_segs.append(w)
                col_segs.append(np.full(segment_length, j, dtype=int))
                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
            sparse_data = np.concatenate(data_segs)
            idx_dtype = get_index_dtype(maxval=max(b.shape))
            sparse_row = np.concatenate(row_segs, dtype=idx_dtype)
            sparse_col = np.concatenate(col_segs, dtype=idx_dtype)
            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
                            shape=b.shape, dtype=A.dtype)
    if is_pydata_sparse:
        x = pydata_sparse_cls.from_scipy_sparse(x)
    return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
         relax=None, panel_size=None, options=None):
    """Compute the LU decomposition of a sparse, square matrix.

    CSC input is most efficient; other formats are converted to CSC first
    (with a `SparseEfficiencyWarning`).

    Parameters
    ----------
    A : sparse array or matrix
        Matrix to factorize.
    permc_spec : str, optional
        Column permutation strategy: ``NATURAL``, ``MMD_ATA``,
        ``MMD_AT_PLUS_A`` or ``COLAMD`` (default).
    diag_pivot_thresh : float, optional
        Threshold for a diagonal entry to be an acceptable pivot.
    relax : int, optional
        Expert option customizing the degree of supernode relaxation.
    panel_size : int, optional
        Expert option customizing the panel size.
    options : dict, optional
        Additional expert SuperLU options, e.g.
        ``options=dict(Equil=False, IterRefine='SINGLE')``.
        See the SuperLU user's guide for details.

    Returns
    -------
    invA : scipy.sparse.linalg.SuperLU
        Factorization object with a ``solve`` method.

    See Also
    --------
    spilu : incomplete LU decomposition

    Notes
    -----
    A factorization of a real matrix cannot be solved against complex
    right-hand sides; cast `A` to complex before factorizing if needed.
    This function uses the SuperLU library.
    """
    # pydata/sparse input round-trips through scipy CSC; the returned
    # factors are rebuilt in the caller's class via csc_construct_func.
    if is_pydata_spmatrix(A):
        A_cls = type(A)
        def csc_construct_func(*a, cls=A_cls):
            return cls.from_scipy_sparse(csc_array(*a))
        A = A.to_scipy_sparse().tocsc()
    else:
        csc_construct_func = csc_array
    if not (issparse(A) and A.format == "csc"):
        A = csc_array(A)
        warn('splu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)
    A.sum_duplicates()  # canonicalize entries before handing to SuperLU
    A = A._asfptype()  # upcast to a floating point format
    n_rows, n_cols = A.shape
    if n_rows != n_cols:
        raise ValueError("can only factor square matrices")
    indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
    superlu_opts = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                        PanelSize=panel_size, Relax=relax)
    if options is not None:
        superlu_opts.update(options)
    if superlu_opts["ColPerm"] == "NATURAL":
        # Natural ordering applies no column permutation.
        superlu_opts["SymmetricMode"] = True
    return _superlu.gstrf(n_cols, A.nnz, A.data, indices, indptr,
                          csc_construct_func=csc_construct_func,
                          ilu=False, options=superlu_opts)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
    """Compute an incomplete LU decomposition of a sparse, square matrix.

    The result approximates the inverse of `A`. CSC input is most
    efficient; other formats are converted to CSC first (with a
    `SparseEfficiencyWarning`).

    Parameters
    ----------
    A : (N, N) array_like
        Matrix to factorize.
    drop_tol : float, optional
        Drop tolerance (0 <= tol <= 1) for the ILU. (default: 1e-4)
    fill_factor : float, optional
        Fill ratio upper bound (>= 1.0). (default: 10)
    drop_rule : str, optional
        Comma-separated string of SuperLU drop rules: ``basic``,
        ``prows``, ``column``, ``area``, ``secondary``, ``dynamic``,
        ``interp``. (default: ``basic,area``)
    permc_spec, diag_pivot_thresh, relax, panel_size, options
        Same meaning as for `splu`.

    Returns
    -------
    invA_approx : scipy.sparse.linalg.SuperLU
        Factorization object with a ``solve`` method.

    See Also
    --------
    splu : complete LU decomposition

    Notes
    -----
    To tighten the approximation, increase `fill_factor` AND decrease
    `drop_tol`. A factorization of a real matrix cannot be solved against
    complex right-hand sides; cast `A` to complex before factorizing.
    This function uses the SuperLU library.
    """
    # pydata/sparse input round-trips through scipy CSC; the returned
    # factors are rebuilt in the caller's class via csc_construct_func.
    if is_pydata_spmatrix(A):
        A_cls = type(A)
        def csc_construct_func(*a, cls=A_cls):
            return cls.from_scipy_sparse(csc_array(*a))
        A = A.to_scipy_sparse().tocsc()
    else:
        csc_construct_func = csc_array
    if not (issparse(A) and A.format == "csc"):
        A = csc_array(A)
        warn('spilu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)
    A.sum_duplicates()  # canonicalize entries before handing to SuperLU
    A = A._asfptype()  # upcast to a floating point format
    n_rows, n_cols = A.shape
    if n_rows != n_cols:
        raise ValueError("can only factor square matrices")
    indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
    superlu_opts = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
                        ILU_FillFactor=fill_factor,
                        DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                        PanelSize=panel_size, Relax=relax)
    if options is not None:
        superlu_opts.update(options)
    if superlu_opts["ColPerm"] == "NATURAL":
        # Natural ordering applies no column permutation.
        superlu_opts["SymmetricMode"] = True
    return _superlu.gstrf(n_cols, A.nnz, A.data, indices, indptr,
                          csc_construct_func=csc_construct_func,
                          ilu=True, options=superlu_opts)
def factorized(A):
    """Pre-factorize ``A`` and return a function that solves ``A x = b``.

    Parameters
    ----------
    A : (N, N) array_like
        Input matrix. CSC format is most efficient; CSR is converted to
        CSC before factorization.

    Returns
    -------
    solve : callable
        ``solve(b)`` returns the solution for a right-hand side of
        shape (N,), reusing the factorization each call.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import factorized
    >>> from scipy.sparse import csc_array
    >>> A = np.array([[ 3. ,  2. , -1. ],
    ...               [ 2. , -2. ,  4. ],
    ...               [-1. ,  0.5, -1. ]])
    >>> solve = factorized(csc_array(A))  # Makes LU decomposition.
    >>> rhs1 = np.array([1, -2, 0])
    >>> solve(rhs1)  # Uses the LU factors.
    array([ 1., -2., -2.])
    """
    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsc()
    # Lazily initialize the module-level solver-selection flag.
    if not hasattr(useUmfpack, 'u'):
        useUmfpack.u = not noScikit
    if not useUmfpack.u:
        # SuperLU path: hand back the factorization's bound solve method.
        return splu(A).solve
    # UMFPACK path.
    if noScikit:
        raise RuntimeError('Scikits.umfpack not installed.')
    if not (issparse(A) and A.format == "csc"):
        A = csc_array(A)
        warn('splu converted its input to CSC format',
             SparseEfficiencyWarning, stacklevel=2)
    A = A._asfptype()  # upcast to a floating point format
    if A.dtype.char not in 'dD':
        raise ValueError("convert matrix data to double, please, using"
                         " .astype(), or set linsolve.useUmfpack.u = False")
    umf_family, A = _get_umf_family(A)
    umf_context = umfpack.UmfpackContext(umf_family)
    umf_context.numeric(A)  # compute the LU factors once, up front
    def solve(b):
        # Ignore divide/invalid warnings with numpy >= 1.23.0, see gh-16523.
        with np.errstate(divide="ignore", invalid="ignore"):
            return umf_context.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
    return solve
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
                       unit_diagonal=False):
    """
    Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.

    Parameters
    ----------
    A : (M, M) sparse array or matrix
        A sparse square triangular matrix. Should be in CSR or CSC format.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``A x = b``
    lower : bool, optional
        Whether `A` is a lower or upper triangular matrix.
        Default is lower triangular matrix.
    overwrite_A : bool, optional
        Allow changing `A`.
        Enabling gives a performance gain. Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b`.
        Enabling gives a performance gain. Default is False.
        If `overwrite_b` is True, it should be ensured that
        `b` has an appropriate dtype to be able to store the result.
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1.
        .. versionadded:: 1.4.0

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``A x = b``. Shape of return matches shape
        of `b`.

    Raises
    ------
    LinAlgError
        If `A` is singular or not triangular.
    ValueError
        If shape of `A` or shape of `b` do not match the requirements.

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_array
    >>> from scipy.sparse.linalg import spsolve_triangular
    >>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve_triangular(A, B)
    >>> np.allclose(A.dot(x), B)
    True
    """
    if is_pydata_spmatrix(A):
        A = A.to_scipy_sparse().tocsc()
    trans = "N"
    if issparse(A) and A.format == "csr":
        # CSR is handled by solving with A^T in CSC form (trans="T"),
        # which flips lower/upper triangularity.
        A = A.T
        trans = "T"
        lower = not lower
    if not (issparse(A) and A.format == "csc"):
        warn('CSC or CSR matrix format is required. Converting to CSC matrix.',
             SparseEfficiencyWarning, stacklevel=2)
        A = csc_array(A)
    elif not overwrite_A:
        # A is modified below (setdiag / rescaling), so copy unless allowed.
        A = A.copy()
    M, N = A.shape
    if M != N:
        raise ValueError(
            f'A must be a square matrix but its shape is {A.shape}.')
    if unit_diagonal:
        with catch_warnings():
            simplefilter('ignore', SparseEfficiencyWarning)
            A.setdiag(1)
    else:
        diag = A.diagonal()
        if np.any(diag == 0):
            raise LinAlgError(
                'A is singular: zero entry on diagonal.')
        invdiag = 1/diag
        # Rescale columns so the diagonal becomes ones; undone after the solve.
        if trans == "N":
            A = A @ diags_array(invdiag)
        else:
            A = (A.T @ diags_array(invdiag)).T
    # sum duplicates for non-canonical format
    A.sum_duplicates()
    b = np.asanyarray(b)
    if b.ndim not in [1, 2]:
        raise ValueError(
            f'b must have 1 or 2 dims but its shape is {b.shape}.')
    if M != b.shape[0]:
        raise ValueError(
            'The size of the dimensions of A must be equal to '
            'the size of the first dimension of b but the shape of A is '
            f'{A.shape} and the shape of b is {b.shape}.'
        )
    # Promote to at least float32 so integer inputs get a floating result.
    result_dtype = np.promote_types(np.promote_types(A.dtype, np.float32), b.dtype)
    if A.dtype != result_dtype:
        A = A.astype(result_dtype)
    if b.dtype != result_dtype:
        b = b.astype(result_dtype)
    elif not overwrite_b:
        b = b.copy()
    # gstrs solves L U x = b; supply the "other" factor as identity/empty.
    if lower:
        L = A
        U = csc_array((N, N), dtype=result_dtype)
    else:
        L = eye_array(N, dtype=result_dtype, format='csc')
        U = A
        U.setdiag(0)
    x, info = _superlu.gstrs(trans,
                             N, L.nnz, L.data, L.indices, L.indptr,
                             N, U.nnz, U.data, U.indices, U.indptr,
                             b)
    if info:
        raise LinAlgError('A is singular.')
    if not unit_diagonal:
        # Undo the diagonal rescaling applied above (broadcast over columns).
        invdiag = invdiag.reshape(-1, *([1] * (len(x.shape) - 1)))
        x = x * invdiag
    return x
def is_sptriangular(A):
"""Returns 2-tuple indicating lower/upper triangular structure for sparse ``A``
Checks for triangular structure in ``A``. The result is summarized in
two boolean values ``lower`` and ``upper`` to designate whether ``A`` is
lower triangular or upper triangular respectively. Diagonal ``A`` will
result in both being True. Non-triangular structure results in False for both.
Only the sparse structure is used here. Values are not checked for zeros.
This function will convert a copy of ``A`` to CSC format if it is not already
CSR or CSC format. So it may be more efficient to convert it yourself if you
have other uses for the CSR/CSC version.
If ``A`` is not square, the portions outside the upper left square of the
matrix do not affect its triangular structure. You probably want to work
with the square portion of the matrix, though it is not requred here.
Parameters
----------
A : SciPy sparse array or matrix
A sparse matrix preferrably in CSR or CSC format.
Returns
-------
lower, upper : 2-tuple of bool
.. versionadded:: 1.15.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array, eye_array
>>> from scipy.sparse.linalg import is_sptriangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> is_sptriangular(A)
(True, False)
>>> D = eye_array(3, format='csr')
>>> is_sptriangular(D)
(True, True)
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok", "lil")):
warn('is_sptriangular needs sparse and not BSR format. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr is better off converting to csr
if A.format == "dia":
return A.offsets.max() <= 0, A.offsets.min() >= 0
elif A.format == "coo":
rows, cols = A.coords
return (cols <= rows).all(), (cols >= rows).all()
elif A.format == "dok":
return all(c <= r for r, c in A.keys()), all(c >= r for r, c in A.keys())
elif A.format == "lil":
lower = all(col <= row for row, cols in enumerate(A.rows) for col in cols)
upper = all(col >= row for row, cols in enumerate(A.rows) for col in cols)
return lower, upper
# format in ("csc", "csr")
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
lower, upper = True, True
# check middle, 1st, last col (treat as CSC and switch at end if CSR)
for col in [N // 2, 0, -1]:
rows = indices[indptr[col]:indptr[col + 1]]
upper = upper and (col >= rows).all()
lower = lower and (col <= rows).all()
if not upper and not lower:
return False, False
# check all cols
cols = np.repeat(np.arange(N), np.diff(indptr))
rows = indices
upper = upper and (cols >= rows).all()
lower = lower and (cols <= rows).all()
if A.format == 'csr':
return upper, lower
return lower, upper
def spbandwidth(A):
"""Return the lower and upper bandwidth of a 2D numeric array.
Computes the lower and upper limits on the bandwidth of the
sparse 2D array ``A``. The result is summarized as a 2-tuple
of positive integers ``(lo, hi)``. A zero denotes no sub/super
diagonal entries on that side (triangular). The maximum value
for ``lo`` (``hi``) is one less than the number of rows(cols).
Only the sparse structure is used here. Values are not checked for zeros.
Parameters
----------
A : SciPy sparse array or matrix
A sparse matrix preferrably in CSR or CSC format.
Returns
-------
below, above : 2-tuple of int
The distance to the farthest non-zero diagonal below/above the
main diagonal.
.. versionadded:: 1.15.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import spbandwidth
>>> from scipy.sparse import csc_array, eye_array
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> spbandwidth(A)
(2, 0)
>>> D = eye_array(3, format='csr')
>>> spbandwidth(D)
(0, 0)
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok")):
warn('spbandwidth needs sparse format not LIL and BSR. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr and lil are better off converting to csr
if A.format == "dia":
return max(0, -A.offsets.min().item()), max(0, A.offsets.max().item())
if A.format in ("csc", "csr"):
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
gap = np.repeat(np.arange(N), np.diff(indptr)) - indices
if A.format == 'csr':
gap = -gap
elif A.format == "coo":
gap = A.coords[1] - A.coords[0]
elif A.format == "dok":
gap = [(c - r) for r, c in A.keys()] + [0]
return -min(gap), max(gap)
return max(-np.min(gap).item(), 0), max(np.max(gap).item(), 0)
| MatrixRankWarning |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 3256,
"end": 3381
} | class ____:
"""Test class docstring."""
pass
def test_function():
"""Test function docstring."""
pass
| TestClass |
python | python-visualization__folium | folium/plugins/timestamped_wmstilelayer.py | {
"start": 207,
"end": 4810
} | class ____(JSCSSMixin, MacroElement):
"""
Creates a TimestampedWmsTileLayer that takes a WmsTileLayer and adds time
control with the Leaflet.TimeDimension plugin.
Parameters
----------
data: WmsTileLayer.
The WmsTileLayer that you want to add time support to.
Must be created like a typical WmsTileLayer and added to the map
before being passed to this class.
transition_time: int, default 200.
The duration in ms of a transition from between timestamps.
loop: bool, default False
Whether the animation shall loop, default is to reduce load on WMS
services.
auto_play: bool, default False
Whether the animation shall start automatically at startup, default
is to reduce load on WMS services.
period: str, default 'P1D'
Used to construct the array of available times starting
from the first available time. Format: ISO8601 Duration
ex: 'P1M' -> 1/month, 'P1D' -> 1/day, 'PT1H' -> 1/hour, and 'PT1M' -> 1/minute
Note: this seems to be overridden by the WMS Tile Layer GetCapabilities.
Examples
--------
>>> w0 = WmsTileLayer(
... "http://this.wms.server/ncWMS/wms",
... name="Test WMS Data",
... styles="",
... fmt="image/png",
... transparent=True,
... layers="test_data",
... COLORSCALERANGE="0,10",
... )
>>> w0.add_to(m)
>>> w1 = WmsTileLayer(
... "http://this.wms.server/ncWMS/wms",
... name="Test WMS Data",
... styles="",
... fmt="image/png",
... transparent=True,
... layers="test_data_2",
... COLORSCALERANGE="0,5",
... )
>>> w1.add_to(m)
>>> # Add WmsTileLayers to time control.
>>> time = TimestampedWmsTileLayers([w0, w1])
>>> time.add_to(m)
See https://github.com/socib/Leaflet.TimeDimension for more information.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
{{ this._parent.get_name() }}.timeDimension = L.timeDimension(
{{ this.options|tojavascript }}
);
{{ this._parent.get_name() }}.timeDimensionControl =
L.control.timeDimension(
{{ this.options_control|tojavascript }}
);
{{ this._parent.get_name() }}.addControl(
{{ this._parent.get_name() }}.timeDimensionControl
);
{% for layer in this.layers %}
var {{ layer.get_name() }} = L.timeDimension.layer.wms(
{{ layer.get_name() }},
{
updateTimeDimension: false,
wmsVersion: {{ layer.options['version']|tojson }},
}
).addTo({{ this._parent.get_name() }});
{% endfor %}
{% endmacro %}
"""
)
default_js = [
(
"jquery3.7.1",
"https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.1/jquery.min.js",
),
(
"jqueryui1.10.2",
"https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js",
),
(
"iso8601",
"https://cdn.jsdelivr.net/npm/iso8601-js-period@0.2.1/iso8601.min.js",
),
(
"leaflet.timedimension",
"https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.min.js",
),
]
default_css = [
(
"highlight.js_css",
"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.4/styles/default.min.css",
),
(
"leaflet.timedimension_css",
"https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.control.css",
),
]
def __init__(
self,
data,
transition_time=200,
loop=False,
auto_play=False,
period="P1D",
time_interval=False,
):
super().__init__()
self._name = "TimestampedWmsTileLayers"
self.options = remove_empty(
period=period,
time_interval=time_interval,
)
self.options_control = dict(
position="bottomleft",
auto_play=auto_play,
player_options={
"transitionTime": int(transition_time),
"loop": loop,
},
)
if isinstance(data, WmsTileLayer):
self.layers = [data]
else:
self.layers = data # Assume iterable
| TimestampedWmsTileLayers |
python | pytorch__pytorch | test/onnx/test_symbolic_helper.py | {
"start": 254,
"end": 2323
} | class ____(common_utils.TestCase):
def setUp(self):
super().setUp()
self._initial_training_mode = GLOBALS.training_mode
def tearDown(self):
GLOBALS.training_mode = self._initial_training_mode
@common_utils.parametrize(
"op_train_mode,export_mode",
[
common_utils.subtest(
[1, torch.onnx.TrainingMode.PRESERVE], name="export_mode_is_preserve"
),
common_utils.subtest(
[0, torch.onnx.TrainingMode.EVAL],
name="modes_match_op_train_mode_0_export_mode_eval",
),
common_utils.subtest(
[1, torch.onnx.TrainingMode.TRAINING],
name="modes_match_op_train_mode_1_export_mode_training",
),
],
)
def test_check_training_mode_does_not_warn_when(
self, op_train_mode: int, export_mode: torch.onnx.TrainingMode
):
GLOBALS.training_mode = export_mode
self.assertNotWarn(
lambda: symbolic_helper.check_training_mode(op_train_mode, "testop")
)
@common_utils.parametrize(
"op_train_mode,export_mode",
[
common_utils.subtest(
[0, torch.onnx.TrainingMode.TRAINING],
name="modes_do_not_match_op_train_mode_0_export_mode_training",
),
common_utils.subtest(
[1, torch.onnx.TrainingMode.EVAL],
name="modes_do_not_match_op_train_mode_1_export_mode_eval",
),
],
)
def test_check_training_mode_warns_when(
self,
op_train_mode: int,
export_mode: torch.onnx.TrainingMode,
):
with self.assertWarnsRegex(
UserWarning, f"ONNX export mode is set to {export_mode}"
):
GLOBALS.training_mode = export_mode
symbolic_helper.check_training_mode(op_train_mode, "testop")
common_utils.instantiate_parametrized_tests(TestHelperFunctions)
if __name__ == "__main__":
common_utils.run_tests()
| TestHelperFunctions |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 7494,
"end": 21071
} | class ____(unittest.TestCase):
def test_moe_and_qkv_conversion(self):
model = DummyRoot()
model.config = PretrainedConfig()
raw_tensors = {
"model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
"model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
"model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
"model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
"model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
"model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
"model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
"model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
"model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
"model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
"model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
"model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
"model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
"model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
"mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
}
state_dict = {k: v.clone() for k, v in raw_tensors.items()}
weight_mapping = [
WeightConverter(
["experts.*.w1.weight", "experts.*.w3.weight"],
"experts.gate_up_proj.weight",
operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
),
WeightConverter(
"experts.*.w2.weight",
"experts.down_proj.weight",
operations=[MergeModulelist(dim=0)],
),
WeightConverter(
"model.layers.0.self_attn.qkv_proj.weight",
[
"model.layers.0.self_attn.q_proj.weight",
"model.layers.0.self_attn.k_proj.weight",
"model.layers.0.self_attn.v_proj.weight",
],
operations=[Chunk(dim=0)],
),
WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
]
missing, unexpected, mismatch, _, misc = convert_and_load_state_dict_in_model(
model, state_dict, weight_mapping, tp_plan=None, hf_quantizer=None
)
self.assertEqual(
missing,
{
"model.layers.1.self_attn.k_proj.weight",
"model.layers.1.self_attn.v_proj.weight",
"model.layers.1.self_attn.q_proj.weight",
},
)
self.assertEqual(unexpected, {"model.layers.1.self_attn.qkv_proj.weight"})
self.assertEqual(mismatch, set())
self.assertEqual(misc, {})
model_state = model.state_dict()
def cat_gate(layer_prefix: str) -> torch.Tensor:
w1 = [
raw_tensors[f"{layer_prefix}.experts.0.w1.weight"],
raw_tensors[f"{layer_prefix}.experts.1.w1.weight"],
]
w3 = [
raw_tensors[f"{layer_prefix}.experts.0.w3.weight"],
raw_tensors[f"{layer_prefix}.experts.1.w3.weight"],
]
return torch.cat([torch.stack(w1, dim=0), torch.stack(w3, dim=0)], dim=1)
torch.testing.assert_close(
model_state["model.layers.0.experts.gate_up_proj.weight"], cat_gate("model.layers.0")
)
torch.testing.assert_close(
model_state["model.layers.1.experts.gate_up_proj.weight"], cat_gate("model.layers.1")
)
def stack_down(layer_prefix: str) -> torch.Tensor:
return torch.stack(
[
raw_tensors[f"{layer_prefix}.experts.0.w2.weight"],
raw_tensors[f"{layer_prefix}.experts.1.w2.weight"],
],
dim=0,
)
torch.testing.assert_close(
model_state["model.layers.0.experts.down_proj.weight"], stack_down("model.layers.0")
)
torch.testing.assert_close(
model_state["model.layers.1.experts.down_proj.weight"], stack_down("model.layers.1")
)
for layer_idx in range(2):
key = f"model.layers.{layer_idx}.self_attn.qkv_proj.weight"
expected_q, expected_k, expected_v = torch.chunk(raw_tensors[key], chunks=3, dim=0)
prefix = f"model.layers.{layer_idx}.self_attn"
if layer_idx == 1:
# These were missing and thus not loaded
continue
torch.testing.assert_close(model_state[f"{prefix}.q_proj.weight"], expected_q)
torch.testing.assert_close(model_state[f"{prefix}.k_proj.weight"], expected_k)
torch.testing.assert_close(model_state[f"{prefix}.v_proj.weight"], expected_v)
torch.testing.assert_close(model_state["mlp.down_proj.weight"], raw_tensors["mlp.w2.weight"])
def test_moe_and_qkv_conversion_reversed(self):
model = DummyRoot()
model.config = PretrainedConfig()
raw_tensors = {
"model.layers.0.experts.0.w1.weight": torch.tensor([[0.0, 1.0], [2.0, 3.0]]),
"model.layers.0.experts.1.w1.weight": torch.tensor([[10.0, 11.0], [12.0, 13.0]]),
"model.layers.0.experts.0.w3.weight": torch.tensor([[4.0, 5.0], [6.0, 7.0]]),
"model.layers.0.experts.1.w3.weight": torch.tensor([[14.0, 15.0], [16.0, 17.0]]),
"model.layers.0.experts.0.w2.weight": torch.tensor([[20.0, 21.0], [22.0, 23.0]]),
"model.layers.0.experts.1.w2.weight": torch.tensor([[24.0, 25.0], [26.0, 27.0]]),
"model.layers.1.experts.0.w1.weight": torch.tensor([[30.0, 31.0], [32.0, 33.0]]),
"model.layers.1.experts.1.w1.weight": torch.tensor([[34.0, 35.0], [36.0, 37.0]]),
"model.layers.1.experts.0.w3.weight": torch.tensor([[38.0, 39.0], [40.0, 41.0]]),
"model.layers.1.experts.1.w3.weight": torch.tensor([[42.0, 43.0], [44.0, 45.0]]),
"model.layers.1.experts.0.w2.weight": torch.tensor([[46.0, 47.0], [48.0, 49.0]]),
"model.layers.1.experts.1.w2.weight": torch.tensor([[50.0, 51.0], [52.0, 53.0]]),
"model.layers.0.self_attn.qkv_proj.weight": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
"model.layers.1.self_attn.qkv_proj.weight": torch.tensor([[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]),
"mlp.w2.weight": torch.tensor([[60.0, 61.0], [62.0, 63.0]]),
}
state_dict = {k: v.clone() for k, v in raw_tensors.items()}
weight_mapping = [
WeightConverter(
["experts.*.w1.weight", "experts.*.w3.weight"],
"experts.gate_up_proj.weight",
operations=[MergeModulelist(dim=0), Concatenate(dim=1)],
),
WeightConverter(
"experts.*.w2.weight",
"experts.down_proj.weight",
operations=[MergeModulelist(dim=0)],
),
WeightConverter(
"self_attn.qkv_proj.weight",
[
"self_attn.q_proj.weight",
"self_attn.k_proj.weight",
"self_attn.v_proj.weight",
],
operations=[Chunk(dim=0)],
),
WeightRenaming("mlp.w2.weight", "mlp.down_proj.weight"),
]
# Use the mapping to load
missing, unexpected, mismatch, _, misc = convert_and_load_state_dict_in_model(
model, state_dict, weight_mapping, tp_plan=None, hf_quantizer=None
)
self.assertTrue(len(missing) == 0)
self.assertTrue(len(unexpected) == 0)
self.assertTrue(len(mismatch) == 0)
self.assertTrue(len(misc) == 0)
# Try to revert the mapping
reversed_state_dict = revert_weight_conversion(model, model.state_dict())
# Make sure both saved state_dict are identical
self.assertTrue(compare_state_dicts(reversed_state_dict, state_dict))
def test_qkv_chunk_rope_permute_with_fp8_quantization(self):
if is_triton_available():
from transformers.integrations.finegrained_fp8 import Fp8Dequantize
else:
self.skipTest("Fine-grained FP8 integration tests require Triton to be installed.")
n_heads = 2
head_dim = 4
in_dim = 4
out_dim = n_heads * head_dim
block_size = (4, 4)
class RopeProjector(nn.Module):
def __init__(self, *, with_scale: bool = False):
super().__init__()
self.weight = nn.Parameter(torch.zeros(out_dim, in_dim))
if with_scale:
scale_shape = (out_dim // block_size[0], in_dim // block_size[1])
self.weight_scale_inv = nn.Parameter(torch.ones(scale_shape))
class RopeSelfAttn(nn.Module):
def __init__(self):
super().__init__()
self.q_proj = RopeProjector(with_scale=True)
self.k_proj = RopeProjector()
self.v_proj = RopeProjector()
class RopeLayer(nn.Module):
def __init__(self):
super().__init__()
self.self_attn = RopeSelfAttn()
class RopeModel(nn.Module):
base_model_prefix = "model"
def __init__(self):
super().__init__()
self.layers = nn.ModuleList([RopeLayer()])
model = RopeModel()
model.config = PretrainedConfig()
model.config.num_attention_heads = n_heads
raw_q = torch.tensor(
[
[1.0, -1.0, 1.0, -1.0],
[0.5, -0.5, 0.5, -0.5],
[-1.0, 1.0, -1.0, 1.0],
[-0.5, 0.5, -0.5, 0.5],
[1.0, 1.0, -1.0, -1.0],
[0.5, 0.5, -0.5, -0.5],
[-1.0, -1.0, 1.0, 1.0],
[-0.5, -0.5, 0.5, 0.5],
],
dtype=torch.float32,
)
raw_k = torch.arange(out_dim * in_dim, dtype=torch.float32).reshape(out_dim, in_dim)
raw_v = torch.arange(out_dim * in_dim, dtype=torch.float32).reshape(out_dim, in_dim) + 100.0
raw_qkv = torch.cat([raw_q, raw_k, raw_v], dim=0)
state_dict = {"model.layers.0.self_attn.qkv_proj.weight": raw_qkv.clone()}
quantizer_cls = type(
"FineGrainedFP8HfQuantizer",
(),
{
"__init__": lambda self, bs=block_size: setattr(
self, "quantization_config", SimpleNamespace(weight_block_size=bs)
),
"param_needs_quantization": lambda self, _model, param_name: param_name.endswith("q_proj.weight"),
"pre_quantized": False,
},
)
quantizer = quantizer_cls()
weight_mapping = [
WeightConverter(
"model.layers.*.self_attn.qkv_proj.weight",
[
"model.layers.*.self_attn.q_proj.weight",
"model.layers.*.self_attn.k_proj.weight",
"model.layers.*.self_attn.v_proj.weight",
],
operations=[Chunk(dim=0), PermuteForRope()],
)
]
missing, unexpected, mismatch, _, misc = convert_and_load_state_dict_in_model(
model, state_dict, weight_mapping, tp_plan=None, hf_quantizer=quantizer
)
self.assertEqual(missing, set())
self.assertEqual(unexpected, set())
self.assertEqual(mismatch, set())
self.assertEqual(misc, {})
permute_op = PermuteForRope()
permute_op.config = model.config
expected_q = permute_op._apply(raw_q)
expected_k = permute_op._apply(raw_k)
expected_v = permute_op._apply(raw_v)
model_state = model.state_dict()
self.assertFalse(torch.allclose(raw_k, expected_k))
torch.testing.assert_close(model_state["model.layers.0.self_attn.k_proj.weight"], expected_k)
torch.testing.assert_close(model_state["model.layers.0.self_attn.v_proj.weight"], expected_v)
q_weight_key = "model.layers.0.self_attn.q_proj.weight"
scale_key = "model.layers.0.self_attn.q_proj.weight_scale_inv"
self.assertIn(scale_key, model_state)
expected_dtype = torch.float8_e4m3fn if hasattr(torch, "float8_e4m3fn") else torch.int8
self.assertEqual(model_state[q_weight_key].dtype, expected_dtype)
self.assertEqual(model_state[q_weight_key].shape, torch.Size((out_dim, in_dim)))
self.assertEqual(model_state[scale_key].dtype, torch.float32)
self.assertEqual(
model_state[scale_key].shape,
torch.Size((out_dim // block_size[0], in_dim // block_size[1])),
)
dequant = Fp8Dequantize(block_size=block_size)
dequantized_q = dequant.convert(
[model_state[q_weight_key], model_state[scale_key]],
context={"quantization_config": quantizer.quantization_config},
)
torch.testing.assert_close(dequantized_q, expected_q, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
unittest.main()
| TestConvertAndLoadStateDict |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset_test.py | {
"start": 10214,
"end": 11860
} | class ____(test.TestCase):
"""Test cases for TfRecordRepresentativeDatasetLoader."""
def test_tf_record_saver_with_generator_dataset(self):
tf_record_path = self.create_tempfile().full_path
path_map = {'serving_default': tf_record_path}
num_samples = 2
def data_gen():
for _ in range(num_samples):
yield {'x': [1, 2]}
repr_ds_map = {'serving_default': data_gen()}
saver = repr_dataset.TfRecordRepresentativeDatasetSaver(path_map)
dataset_file_map = saver.save(repr_ds_map)
self.assertCountEqual(dataset_file_map.keys(), ['serving_default'])
dataset_map = repr_dataset.TfRecordRepresentativeDatasetLoader(
dataset_file_map
).load()
self.assertCountEqual(dataset_map.keys(), ['serving_default'])
samples = dataset_map['serving_default']
for sample in samples:
self.assertCountEqual(sample.keys(), {'x'})
self.assertAllEqual(sample['x'], np.array([1, 2]))
self.assertLen(samples, num_samples)
def test_tf_record_saver_when_signature_def_key_mismatch_raises_error(self):
tf_record_path = self.create_tempfile().full_path
representative_dataset = [{'x': [2]}]
repr_ds_map = {'my_signature_key': representative_dataset}
path_map = {'different_signature_key': tf_record_path}
saver = repr_dataset.TfRecordRepresentativeDatasetSaver(path_map)
with self.assertRaisesRegex(
ValueError,
(
'SignatureDef key does not exist in the provided path_map:'
' my_signature_key'
),
):
saver.save(repr_ds_map)
if __name__ == '__main__':
test.main()
| TfRecordRepresentativeDatasetTest |
python | mlflow__mlflow | mlflow/store/artifact/http_artifact_repo.py | {
"start": 1113,
"end": 9088
} | class ____(ArtifactRepository, MultipartUploadMixin):
"""Stores artifacts in a remote artifact storage using HTTP requests"""
@property
def _host_creds(self):
return get_default_host_creds(self.artifact_uri)
def log_artifact(self, local_file, artifact_path=None):
verify_artifact_path(artifact_path)
# Try to perform multipart upload if the file is large.
# If the server does not support, or if the upload failed, revert to normal upload.
if (
MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD.get()
and os.path.getsize(local_file) >= MLFLOW_MULTIPART_UPLOAD_MINIMUM_FILE_SIZE.get()
):
try:
self._try_multipart_upload(local_file, artifact_path)
return
except _UnsupportedMultipartUploadException:
pass
file_name = os.path.basename(local_file)
mime_type = _guess_mime_type(file_name)
paths = (artifact_path, file_name) if artifact_path else (file_name,)
endpoint = posixpath.join("/", *paths)
extra_headers = {"Content-Type": mime_type}
with open(local_file, "rb") as f:
resp = http_request(
self._host_creds, endpoint, "PUT", data=f, extra_headers=extra_headers
)
augmented_raise_for_status(resp)
def log_artifacts(self, local_dir, artifact_path=None):
local_dir = os.path.abspath(local_dir)
for root, _, filenames in os.walk(local_dir):
if root == local_dir:
artifact_dir = artifact_path
else:
rel_path = os.path.relpath(root, local_dir)
rel_path = relative_path_to_artifact_path(rel_path)
artifact_dir = (
posixpath.join(artifact_path, rel_path) if artifact_path else rel_path
)
for f in filenames:
self.log_artifact(os.path.join(root, f), artifact_dir)
def list_artifacts(self, path=None):
endpoint = "/mlflow-artifacts/artifacts"
url, tail = self.artifact_uri.split(endpoint, maxsplit=1)
root = tail.lstrip("/")
params = {"path": posixpath.join(root, path) if path else root}
host_creds = get_default_host_creds(url)
resp = http_request(host_creds, endpoint, "GET", params=params)
augmented_raise_for_status(resp)
file_infos = []
for f in resp.json().get("files", []):
validated_path = validate_path_is_safe(f["path"])
file_info = FileInfo(
posixpath.join(path, validated_path) if path else validated_path,
f["is_dir"],
int(f["file_size"]) if ("file_size" in f) else None,
)
file_infos.append(file_info)
return sorted(file_infos, key=lambda f: f.path)
def _download_file(self, remote_file_path, local_path):
endpoint = posixpath.join("/", remote_file_path)
resp = http_request(self._host_creds, endpoint, "GET", stream=True)
augmented_raise_for_status(resp)
with open(local_path, "wb") as f:
chunk_size = 1024 * 1024 # 1 MB
for chunk in resp.iter_content(chunk_size=chunk_size):
f.write(chunk)
def delete_artifacts(self, artifact_path=None):
endpoint = posixpath.join("/", artifact_path) if artifact_path else "/"
resp = http_request(self._host_creds, endpoint, "DELETE", stream=True)
augmented_raise_for_status(resp)
def _construct_mpu_uri_and_path(self, base_endpoint, artifact_path):
uri, path = self.artifact_uri.split("/mlflow-artifacts/artifacts", maxsplit=1)
path = path.strip("/")
endpoint = (
posixpath.join(base_endpoint, path, artifact_path)
if artifact_path
else posixpath.join(base_endpoint, path)
)
return uri, endpoint
def create_multipart_upload(self, local_file, num_parts=1, artifact_path=None):
uri, endpoint = self._construct_mpu_uri_and_path(
"/mlflow-artifacts/mpu/create", artifact_path
)
host_creds = get_default_host_creds(uri)
params = {
"path": local_file,
"num_parts": num_parts,
}
resp = http_request(host_creds, endpoint, "POST", json=params)
augmented_raise_for_status(resp)
return CreateMultipartUploadResponse.from_dict(resp.json())
def complete_multipart_upload(self, local_file, upload_id, parts=None, artifact_path=None):
uri, endpoint = self._construct_mpu_uri_and_path(
"/mlflow-artifacts/mpu/complete", artifact_path
)
host_creds = get_default_host_creds(uri)
params = {
"path": local_file,
"upload_id": upload_id,
"parts": [part.to_dict() for part in parts],
}
resp = http_request(host_creds, endpoint, "POST", json=params)
augmented_raise_for_status(resp)
def abort_multipart_upload(self, local_file, upload_id, artifact_path=None):
uri, endpoint = self._construct_mpu_uri_and_path(
"/mlflow-artifacts/mpu/abort", artifact_path
)
host_creds = get_default_host_creds(uri)
params = {
"path": local_file,
"upload_id": upload_id,
}
resp = http_request(host_creds, endpoint, "POST", json=params)
augmented_raise_for_status(resp)
@staticmethod
def _upload_part(credential: MultipartUploadCredential, local_file, size, start_byte):
data = read_chunk(local_file, size, start_byte)
response = requests.put(credential.url, data=data, headers=credential.headers)
augmented_raise_for_status(response)
return MultipartUploadPart(
part_number=credential.part_number,
etag=response.headers.get("ETag", ""),
url=credential.url,
)
def _try_multipart_upload(self, local_file, artifact_path=None):
"""
Attempts to perform multipart upload to log an artifact.
Returns if the multipart upload is successful.
Raises UnsupportedMultipartUploadException if multipart upload is unsupported.
"""
chunk_size = MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get()
num_parts = _compute_num_chunks(local_file, chunk_size)
try:
create = self.create_multipart_upload(local_file, num_parts, artifact_path)
except HTTPError as e:
# return False if server does not support multipart upload
error_message = e.response.json().get("message", "")
if isinstance(error_message, str) and error_message.startswith(
_UnsupportedMultipartUploadException.MESSAGE
):
raise _UnsupportedMultipartUploadException()
raise
try:
futures = {}
for i, credential in enumerate(create.credentials):
future = self.thread_pool.submit(
self._upload_part,
credential=credential,
local_file=local_file,
size=chunk_size,
start_byte=chunk_size * i,
)
futures[future] = credential.part_number
parts, errors = _complete_futures(futures, local_file)
if errors:
raise MlflowException(
f"Failed to upload at least one part of {local_file}. Errors: {errors}"
)
parts = sorted(parts.values(), key=lambda part: part.part_number)
self.complete_multipart_upload(local_file, create.upload_id, parts, artifact_path)
except Exception as e:
self.abort_multipart_upload(local_file, create.upload_id, artifact_path)
_logger.warning(f"Failed to upload file {local_file} using multipart upload: {e}")
raise
| HttpArtifactRepository |
python | tensorflow__tensorflow | tensorflow/python/data/ops/from_tensors_op.py | {
"start": 1017,
"end": 1721
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` with a single element."""
def __init__(self, element, name=None):
"""See `tf.data.Dataset.from_tensors` for details."""
element = structure.normalize_element(element)
self._structure = structure.type_spec_from_value(element)
self._tensors = structure.to_tensor_list(self._structure, element)
self._name = name
variant_tensor = gen_dataset_ops.tensor_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure),
metadata=self._metadata.SerializeToString())
super().__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
| _TensorDataset |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 6062,
"end": 6105
} | class ____(Foo, RootModel[int]):
pass
| Bar |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql.py | {
"start": 4592,
"end": 8811
} | class ____:
def setup_method(self):
self.task_id = "test_task"
self.conn_id = "sql_default"
self._operator = SQLExecuteQueryOperator(task_id=self.task_id, conn_id=self.conn_id, sql="sql")
def _construct_operator(self, sql, **kwargs):
dag = DAG("test_dag", schedule=None, start_date=datetime.datetime(2017, 1, 1))
return SQLExecuteQueryOperator(
task_id="test_task",
conn_id="default_conn",
sql=sql,
**kwargs,
dag=dag,
)
@mock.patch.object(SQLExecuteQueryOperator, "_process_output")
@mock.patch.object(SQLExecuteQueryOperator, "get_db_hook")
@pytest.mark.parametrize("requires_result_fetch", [True, False])
def test_do_xcom_push(self, mock_get_db_hook, mock_process_output, requires_result_fetch):
operator = self._construct_operator(
"SELECT 1;", do_xcom_push=True, requires_result_fetch=requires_result_fetch
)
operator.execute(context=MagicMock())
mock_get_db_hook.return_value.run.assert_called_once_with(
sql="SELECT 1;",
autocommit=False,
handler=fetch_all_handler,
parameters=None,
return_last=True,
)
mock_process_output.assert_called()
@mock.patch.object(SQLExecuteQueryOperator, "_process_output")
@mock.patch.object(SQLExecuteQueryOperator, "get_db_hook")
def test_dont_xcom_push(self, mock_get_db_hook, mock_process_output):
operator = self._construct_operator("SELECT 1;", do_xcom_push=False)
operator.execute(context=MagicMock())
mock_get_db_hook.return_value.run.assert_called_once_with(
sql="SELECT 1;",
autocommit=False,
parameters=None,
handler=None,
return_last=True,
)
mock_process_output.assert_not_called()
@mock.patch.object(SQLExecuteQueryOperator, "_process_output")
@mock.patch.object(SQLExecuteQueryOperator, "get_db_hook")
def test_requires_result_fetch_dont_xcom_push(self, mock_get_db_hook, mock_process_output):
operator = self._construct_operator("SELECT 1;", requires_result_fetch=True, do_xcom_push=False)
operator.execute(context=MagicMock())
mock_get_db_hook.return_value.run.assert_called_once_with(
sql="SELECT 1;",
autocommit=False,
handler=fetch_all_handler,
parameters=None,
return_last=True,
)
mock_process_output.assert_not_called()
@mock.patch.object(SQLExecuteQueryOperator, "get_db_hook")
def test_output_processor(self, mock_get_db_hook):
data = [(1, "Alice"), (2, "Bob")]
mock_hook = MagicMock()
mock_hook.run.return_value = data
mock_hook.descriptions = ("id", "name")
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator(
sql="SELECT * FROM users;",
output_processor=lambda results, descriptions: (descriptions, results),
return_last=False,
)
descriptions, result = operator.execute(context=MagicMock())
assert descriptions == ("id", "name")
assert result == [(1, "Alice"), (2, "Bob")]
@skip_if_force_lowest_dependencies_marker
def test_sql_operator_extra_dejson_fields_to_hook_params(self):
with mock.patch(
"airflow.providers.common.sql.operators.sql.BaseHook.get_connection",
return_value=Connection(conn_id="sql_default", conn_type="postgres"),
) as mock_get_conn:
mock_get_conn.return_value = Connection(
conn_id="google_cloud_bigquery_default",
conn_type="gcpbigquery",
extra={"use_legacy_sql": False, "priority": "INTERACTIVE"},
)
self._operator.hook_params = {"use_legacy_sql": True, "location": "us-east1"}
assert self._operator._hook.conn_type == "gcpbigquery"
assert self._operator._hook.use_legacy_sql is True
assert self._operator._hook.location == "us-east1"
assert self._operator._hook.priority == "INTERACTIVE"
| TestSQLExecuteQueryOperator |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py | {
"start": 2064,
"end": 2366
} | class ____(Enum):
"""Enum of specific views the user tries to access."""
CLUSTER_ACTIVITY = "CLUSTER_ACTIVITY"
DOCS = "DOCS"
IMPORT_ERRORS = "IMPORT_ERRORS"
JOBS = "JOBS"
PLUGINS = "PLUGINS"
PROVIDERS = "PROVIDERS"
TRIGGERS = "TRIGGERS"
WEBSITE = "WEBSITE"
| AccessView |
python | google__pytype | pytype/inspect/graph.py | {
"start": 281,
"end": 2805
} | class ____:
"""Networkx graph builder."""
def __init__(self, program, ignored, only_cfg=False):
self.graph = nx.MultiDiGraph()
self._add_cfg(program, ignored)
if not only_cfg:
self._add_variables(program, ignored)
def add_node(self, obj, **kwargs):
self.graph.add_node(obj_key(obj), **kwargs)
def add_edge(self, obj1, obj2, **kwargs):
self.graph.add_edge(obj_key(obj1), obj_key(obj2), **kwargs)
def to_dot(self):
return nx.nx_pydot.to_pydot(self.graph).to_string()
def _add_cfg(self, program, ignored):
"""Add program cfg nodes."""
for node in program.cfg_nodes:
if node in ignored:
continue
self.add_node(
node, label=f"<{node.id}>{node.name}", shape="polygon", sides=4
)
for other in node.outgoing:
self.add_edge(node, other, penwidth=2.0)
def _add_variables(self, program, ignored):
"""A dd program variables and bindings."""
def _is_constant(val):
return all(origin.where == program.entrypoint for origin in val.origins)
for variable in program.variables:
if variable.id in ignored:
continue
# Ignore "boring" values (a.k.a. constants)
if all(_is_constant(value) for value in variable.bindings):
continue
self.add_node(
variable,
label=f"v{variable.id}",
shape="polygon",
sides=4,
distortion=0.1,
)
for val in variable.bindings:
label = f"{obj_repr(val)}@0x{id(val.data):x}"
color = "white" if val.origins else "red"
self.add_node(val, label=label, fillcolor=color)
self.add_edge(variable, val, arrowhead="none")
for origin in val.origins:
if origin.where == program.entrypoint:
continue
for srcs in origin.source_sets:
self.add_node(srcs, label="")
self.add_edge(val, srcs, color="pink", arrowhead="none", weight=40)
if origin.where not in ignored:
self.add_edge(
origin.where, srcs, arrowhead="none", style="dotted", weight=5
)
for src in srcs:
self.add_edge(src, srcs, color="lightblue", weight=2)
def write_svg_from_dot(svg_file, dot):
with subprocess.Popen(
["/usr/bin/dot", "-T", "svg", "-o", svg_file],
stdin=subprocess.PIPE,
universal_newlines=True,
) as proc:
(_, stderr) = proc.communicate(dot)
if stderr:
log.info("Failed to create %s: %s", svg_file, stderr)
| TypeGraph |
python | mlflow__mlflow | mlflow/tracing/otel/translation/traceloop.py | {
"start": 281,
"end": 3512
} | class ____(OtelSchemaTranslator):
"""
Translator for Traceloop/OpenLLMetry semantic conventions.
Only defines the attribute keys and mappings. All translation logic
is inherited from the base class.
"""
# Traceloop span kind attribute key
# Reference: https://github.com/traceloop/openllmetry/blob/e66894fd7f8324bd7b2972d7f727da39e7d93181/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py#L301
SPAN_KIND_ATTRIBUTE_KEY = "traceloop.span.kind"
# Mapping from Traceloop span kinds to MLflow span types
SPAN_KIND_TO_MLFLOW_TYPE = {
"workflow": SpanType.WORKFLOW,
"task": SpanType.TASK,
"agent": SpanType.AGENT,
"tool": SpanType.TOOL,
"unknown": SpanType.UNKNOWN,
}
# Token usage attribute keys
# Reference: https://github.com/traceloop/openllmetry/blob/e66894fd7f8324bd7b2972d7f727da39e7d93181/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py
INPUT_TOKEN_KEY = "gen_ai.usage.prompt_tokens"
OUTPUT_TOKEN_KEY = "gen_ai.usage.completion_tokens"
TOTAL_TOKEN_KEY = "llm.usage.total_tokens"
# Input/Output attribute keys
# Reference: https://github.com/traceloop/openllmetry/blob/e66894fd7f8324bd7b2972d7f727da39e7d93181/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py
INPUT_VALUE_KEYS = [
"traceloop.entity.input",
# https://github.com/traceloop/openllmetry/blob/cf28145905fcda3f5d90add78dbee16256a96db2/packages/opentelemetry-instrumentation-writer/opentelemetry/instrumentation/writer/span_utils.py#L153
re.compile(r"gen_ai\.prompt\.\d+\.content"),
# https://github.com/traceloop/openllmetry/blob/cf28145905fcda3f5d90add78dbee16256a96db2/packages/opentelemetry-instrumentation-writer/opentelemetry/instrumentation/writer/span_utils.py#L167
re.compile(r"gen_ai\.completion\.\d+\.tool_calls\.\d+\.arguments"),
]
OUTPUT_VALUE_KEYS = ["traceloop.entity.output", re.compile(r"gen_ai\.completion\.\d+\.content")]
def get_attribute_value(
self, attributes: dict[str, Any], valid_keys: list[str | re.Pattern] | None = None
) -> Any:
"""
Get attribute value from OTEL attributes by checking whether
the keys in valid_keys are present in the attributes.
Args:
attributes: Dictionary of span attributes
valid_keys: List of attribute keys to check
Returns:
Attribute value or None if not found
"""
if valid_keys:
for key in valid_keys:
if isinstance(key, str) and (
value := self._get_and_check_attribute_value(attributes, key)
):
return value
elif isinstance(key, re.Pattern):
for attr_key, attr_value in attributes.items():
if (
isinstance(attr_key, str)
and key.match(attr_key)
and (value := self._get_and_check_attribute_value(attributes, attr_key))
):
return value
| TraceloopTranslator |
python | openai__gym | gym/vector/async_vector_env.py | {
"start": 828,
"end": 27608
} | class ____(VectorEnv):
"""Vectorized environment that runs multiple environments in parallel.
It uses ``multiprocessing`` processes, and pipes for communication.
Example::
>>> import gym
>>> env = gym.vector.AsyncVectorEnv([
... lambda: gym.make("Pendulum-v0", g=9.81),
... lambda: gym.make("Pendulum-v0", g=1.62)
... ])
>>> env.reset()
array([[-0.8286432 , 0.5597771 , 0.90249056],
[-0.85009176, 0.5266346 , 0.60007906]], dtype=float32)
"""
def __init__(
self,
env_fns: Sequence[callable],
observation_space: Optional[gym.Space] = None,
action_space: Optional[gym.Space] = None,
shared_memory: bool = True,
copy: bool = True,
context: Optional[str] = None,
daemon: bool = True,
worker: Optional[callable] = None,
):
"""Vectorized environment that runs multiple environments in parallel.
Args:
env_fns: Functions that create the environments.
observation_space: Observation space of a single environment. If ``None``,
then the observation space of the first environment is taken.
action_space: Action space of a single environment. If ``None``,
then the action space of the first environment is taken.
shared_memory: If ``True``, then the observations from the worker processes are communicated back through
shared variables. This can improve the efficiency if the observations are large (e.g. images).
copy: If ``True``, then the :meth:`~AsyncVectorEnv.reset` and :meth:`~AsyncVectorEnv.step` methods
return a copy of the observations.
context: Context for `multiprocessing`_. If ``None``, then the default context is used.
daemon: If ``True``, then subprocesses have ``daemon`` flag turned on; that is, they will quit if
the head process quits. However, ``daemon=True`` prevents subprocesses to spawn children,
so for some environments you may want to have it set to ``False``.
worker: If set, then use that worker in a subprocess instead of a default one.
Can be useful to override some inner vector env logic, for instance, how resets on termination or truncation are handled.
Warnings: worker is an advanced mode option. It provides a high degree of flexibility and a high chance
to shoot yourself in the foot; thus, if you are writing your own worker, it is recommended to start
from the code for ``_worker`` (or ``_worker_shared_memory``) method, and add changes.
Raises:
RuntimeError: If the observation space of some sub-environment does not match observation_space
(or, by default, the observation space of the first sub-environment).
ValueError: If observation_space is a custom space (i.e. not a default space in Gym,
such as gym.spaces.Box, gym.spaces.Discrete, or gym.spaces.Dict) and shared_memory is True.
"""
ctx = mp.get_context(context)
self.env_fns = env_fns
self.shared_memory = shared_memory
self.copy = copy
dummy_env = env_fns[0]()
self.metadata = dummy_env.metadata
if (observation_space is None) or (action_space is None):
observation_space = observation_space or dummy_env.observation_space
action_space = action_space or dummy_env.action_space
dummy_env.close()
del dummy_env
super().__init__(
num_envs=len(env_fns),
observation_space=observation_space,
action_space=action_space,
)
if self.shared_memory:
try:
_obs_buffer = create_shared_memory(
self.single_observation_space, n=self.num_envs, ctx=ctx
)
self.observations = read_from_shared_memory(
self.single_observation_space, _obs_buffer, n=self.num_envs
)
except CustomSpaceError:
raise ValueError(
"Using `shared_memory=True` in `AsyncVectorEnv` "
"is incompatible with non-standard Gym observation spaces "
"(i.e. custom spaces inheriting from `gym.Space`), and is "
"only compatible with default Gym spaces (e.g. `Box`, "
"`Tuple`, `Dict`) for batching. Set `shared_memory=False` "
"if you use custom observation spaces."
)
else:
_obs_buffer = None
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros
)
self.parent_pipes, self.processes = [], []
self.error_queue = ctx.Queue()
target = _worker_shared_memory if self.shared_memory else _worker
target = worker or target
with clear_mpi_env_vars():
for idx, env_fn in enumerate(self.env_fns):
parent_pipe, child_pipe = ctx.Pipe()
process = ctx.Process(
target=target,
name=f"Worker<{type(self).__name__}>-{idx}",
args=(
idx,
CloudpickleWrapper(env_fn),
child_pipe,
parent_pipe,
_obs_buffer,
self.error_queue,
),
)
self.parent_pipes.append(parent_pipe)
self.processes.append(process)
process.daemon = daemon
process.start()
child_pipe.close()
self._state = AsyncState.DEFAULT
self._check_spaces()
def reset_async(
self,
seed: Optional[Union[int, List[int]]] = None,
options: Optional[dict] = None,
):
"""Send calls to the :obj:`reset` methods of the sub-environments.
To get the results of these calls, you may invoke :meth:`reset_wait`.
Args:
seed: List of seeds for each environment
options: The reset option
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: If the environment is already waiting for a pending call to another
method (e.g. :meth:`step_async`). This can be caused by two consecutive
calls to :meth:`reset_async`, with no call to :meth:`reset_wait` in between.
"""
self._assert_is_running()
if seed is None:
seed = [None for _ in range(self.num_envs)]
if isinstance(seed, int):
seed = [seed + i for i in range(self.num_envs)]
assert len(seed) == self.num_envs
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `reset_async` while waiting for a pending call to `{self._state.value}` to complete",
self._state.value,
)
for pipe, single_seed in zip(self.parent_pipes, seed):
single_kwargs = {}
if single_seed is not None:
single_kwargs["seed"] = single_seed
if options is not None:
single_kwargs["options"] = options
pipe.send(("reset", single_kwargs))
self._state = AsyncState.WAITING_RESET
def reset_wait(
self,
timeout: Optional[Union[int, float]] = None,
seed: Optional[int] = None,
options: Optional[dict] = None,
) -> Union[ObsType, Tuple[ObsType, List[dict]]]:
"""Waits for the calls triggered by :meth:`reset_async` to finish and returns the results.
Args:
timeout: Number of seconds before the call to `reset_wait` times out. If `None`, the call to `reset_wait` never times out.
seed: ignored
options: ignored
Returns:
A tuple of batched observations and list of dictionaries
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
NoAsyncCallError: If :meth:`reset_wait` was called without any prior call to :meth:`reset_async`.
TimeoutError: If :meth:`reset_wait` timed out.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_RESET:
raise NoAsyncCallError(
"Calling `reset_wait` without any prior " "call to `reset_async`.",
AsyncState.WAITING_RESET.value,
)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError(
f"The call to `reset_wait` has timed out after {timeout} second(s)."
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
infos = {}
results, info_data = zip(*results)
for i, info in enumerate(info_data):
infos = self._add_info(infos, info, i)
if not self.shared_memory:
self.observations = concatenate(
self.single_observation_space, results, self.observations
)
return (deepcopy(self.observations) if self.copy else self.observations), infos
def step_async(self, actions: np.ndarray):
"""Send the calls to :obj:`step` to each sub-environment.
Args:
actions: Batch of actions. element of :attr:`~VectorEnv.action_space`
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: If the environment is already waiting for a pending call to another
method (e.g. :meth:`reset_async`). This can be caused by two consecutive
calls to :meth:`step_async`, with no call to :meth:`step_wait` in
between.
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `step_async` while waiting for a pending call to `{self._state.value}` to complete.",
self._state.value,
)
actions = iterate(self.action_space, actions)
for pipe, action in zip(self.parent_pipes, actions):
pipe.send(("step", action))
self._state = AsyncState.WAITING_STEP
def step_wait(
self, timeout: Optional[Union[int, float]] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, List[dict]]:
"""Wait for the calls to :obj:`step` in each sub-environment to finish.
Args:
timeout: Number of seconds before the call to :meth:`step_wait` times out. If ``None``, the call to :meth:`step_wait` never times out.
Returns:
The batched environment step information, (obs, reward, terminated, truncated, info)
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
NoAsyncCallError: If :meth:`step_wait` was called without any prior call to :meth:`step_async`.
TimeoutError: If :meth:`step_wait` timed out.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_STEP:
raise NoAsyncCallError(
"Calling `step_wait` without any prior call " "to `step_async`.",
AsyncState.WAITING_STEP.value,
)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError(
f"The call to `step_wait` has timed out after {timeout} second(s)."
)
observations_list, rewards, terminateds, truncateds, infos = [], [], [], [], {}
successes = []
for i, pipe in enumerate(self.parent_pipes):
result, success = pipe.recv()
obs, rew, terminated, truncated, info = result
successes.append(success)
observations_list.append(obs)
rewards.append(rew)
terminateds.append(terminated)
truncateds.append(truncated)
infos = self._add_info(infos, info, i)
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
if not self.shared_memory:
self.observations = concatenate(
self.single_observation_space,
observations_list,
self.observations,
)
return (
deepcopy(self.observations) if self.copy else self.observations,
np.array(rewards),
np.array(terminateds, dtype=np.bool_),
np.array(truncateds, dtype=np.bool_),
infos,
)
def call_async(self, name: str, *args, **kwargs):
"""Calls the method with name asynchronously and apply args and kwargs to the method.
Args:
name: Name of the method or property to call.
*args: Arguments to apply to the method call.
**kwargs: Keyword arguments to apply to the method call.
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: Calling `call_async` while waiting for a pending call to complete
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
"Calling `call_async` while waiting "
f"for a pending call to `{self._state.value}` to complete.",
self._state.value,
)
for pipe in self.parent_pipes:
pipe.send(("_call", (name, args, kwargs)))
self._state = AsyncState.WAITING_CALL
def call_wait(self, timeout: Optional[Union[int, float]] = None) -> list:
"""Calls all parent pipes and waits for the results.
Args:
timeout: Number of seconds before the call to `step_wait` times out.
If `None` (default), the call to `step_wait` never times out.
Returns:
List of the results of the individual calls to the method or property for each environment.
Raises:
NoAsyncCallError: Calling `call_wait` without any prior call to `call_async`.
TimeoutError: The call to `call_wait` has timed out after timeout second(s).
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_CALL:
raise NoAsyncCallError(
"Calling `call_wait` without any prior call to `call_async`.",
AsyncState.WAITING_CALL.value,
)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError(
f"The call to `call_wait` has timed out after {timeout} second(s)."
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
return results
def set_attr(self, name: str, values: Union[list, tuple, object]):
"""Sets an attribute of the sub-environments.
Args:
name: Name of the property to be set in each individual environment.
values: Values of the property to be set to. If ``values`` is a list or
tuple, then it corresponds to the values for each individual
environment, otherwise a single value is set for all environments.
Raises:
ValueError: Values must be a list or tuple with length equal to the number of environments.
AlreadyPendingCallError: Calling `set_attr` while waiting for a pending call to complete.
"""
self._assert_is_running()
if not isinstance(values, (list, tuple)):
values = [values for _ in range(self.num_envs)]
if len(values) != self.num_envs:
raise ValueError(
"Values must be a list or tuple with length equal to the "
f"number of environments. Got `{len(values)}` values for "
f"{self.num_envs} environments."
)
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
"Calling `set_attr` while waiting "
f"for a pending call to `{self._state.value}` to complete.",
self._state.value,
)
for pipe, value in zip(self.parent_pipes, values):
pipe.send(("_setattr", (name, value)))
_, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
def close_extras(
self, timeout: Optional[Union[int, float]] = None, terminate: bool = False
):
"""Close the environments & clean up the extra resources (processes and pipes).
Args:
timeout: Number of seconds before the call to :meth:`close` times out. If ``None``,
the call to :meth:`close` never times out. If the call to :meth:`close`
times out, then all processes are terminated.
terminate: If ``True``, then the :meth:`close` operation is forced and all processes are terminated.
Raises:
TimeoutError: If :meth:`close` timed out.
"""
timeout = 0 if terminate else timeout
try:
if self._state != AsyncState.DEFAULT:
logger.warn(
f"Calling `close` while waiting for a pending call to `{self._state.value}` to complete."
)
function = getattr(self, f"{self._state.value}_wait")
function(timeout)
except mp.TimeoutError:
terminate = True
if terminate:
for process in self.processes:
if process.is_alive():
process.terminate()
else:
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.send(("close", None))
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.recv()
for pipe in self.parent_pipes:
if pipe is not None:
pipe.close()
for process in self.processes:
process.join()
def _poll(self, timeout=None):
self._assert_is_running()
if timeout is None:
return True
end_time = time.perf_counter() + timeout
delta = None
for pipe in self.parent_pipes:
delta = max(end_time - time.perf_counter(), 0)
if pipe is None:
return False
if pipe.closed or (not pipe.poll(delta)):
return False
return True
def _check_spaces(self):
self._assert_is_running()
spaces = (self.single_observation_space, self.single_action_space)
for pipe in self.parent_pipes:
pipe.send(("_check_spaces", spaces))
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
same_observation_spaces, same_action_spaces = zip(*results)
if not all(same_observation_spaces):
raise RuntimeError(
"Some environments have an observation space different from "
f"`{self.single_observation_space}`. In order to batch observations, "
"the observation spaces from all environments must be equal."
)
if not all(same_action_spaces):
raise RuntimeError(
"Some environments have an action space different from "
f"`{self.single_action_space}`. In order to batch actions, the "
"action spaces from all environments must be equal."
)
def _assert_is_running(self):
if self.closed:
raise ClosedEnvironmentError(
f"Trying to operate on `{type(self).__name__}`, after a call to `close()`."
)
def _raise_if_errors(self, successes):
if all(successes):
return
num_errors = self.num_envs - sum(successes)
assert num_errors > 0
for i in range(num_errors):
index, exctype, value = self.error_queue.get()
logger.error(
f"Received the following error from Worker-{index}: {exctype.__name__}: {value}"
)
logger.error(f"Shutting down Worker-{index}.")
self.parent_pipes[index].close()
self.parent_pipes[index] = None
if i == num_errors - 1:
logger.error("Raising the last exception back to the main process.")
raise exctype(value)
def __del__(self):
"""On deleting the object, checks that the vector environment is closed."""
if not getattr(self, "closed", True) and hasattr(self, "_state"):
self.close(terminate=True)
def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is None
env = env_fn()
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation, info = env.reset(**data)
pipe.send(((observation, info), True))
elif command == "step":
(
observation,
reward,
terminated,
truncated,
info,
) = env.step(data)
if terminated or truncated:
old_observation, old_info = observation, info
observation, info = env.reset()
info["final_observation"] = old_observation
info["final_info"] = old_info
pipe.send(((observation, reward, terminated, truncated, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_call":
name, args, kwargs = data
if name in ["reset", "step", "seed", "close"]:
raise ValueError(
f"Trying to call function `{name}` with "
f"`_call`. Use `{name}` directly instead."
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == "_setattr":
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == "_check_spaces":
pipe.send(
(
(data[0] == env.observation_space, data[1] == env.action_space),
True,
)
)
else:
raise RuntimeError(
f"Received unknown command `{command}`. Must "
"be one of {`reset`, `step`, `seed`, `close`, `_call`, "
"`_setattr`, `_check_spaces`}."
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation, info = env.reset(**data)
write_to_shared_memory(
observation_space, index, observation, shared_memory
)
pipe.send(((None, info), True))
elif command == "step":
(
observation,
reward,
terminated,
truncated,
info,
) = env.step(data)
if terminated or truncated:
old_observation, old_info = observation, info
observation, info = env.reset()
info["final_observation"] = old_observation
info["final_info"] = old_info
write_to_shared_memory(
observation_space, index, observation, shared_memory
)
pipe.send(((None, reward, terminated, truncated, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_call":
name, args, kwargs = data
if name in ["reset", "step", "seed", "close"]:
raise ValueError(
f"Trying to call function `{name}` with "
f"`_call`. Use `{name}` directly instead."
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == "_setattr":
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == "_check_spaces":
pipe.send(
((data[0] == observation_space, data[1] == env.action_space), True)
)
else:
raise RuntimeError(
f"Received unknown command `{command}`. Must "
"be one of {`reset`, `step`, `seed`, `close`, `_call`, "
"`_setattr`, `_check_spaces`}."
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
| AsyncVectorEnv |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 95780,
"end": 97445
} | class ____(TestCase):
databases = {"default", "other"}
def setUp(self):
self.user = get_user_model().objects.create(
username="username", email="username@test.com", password="top_secret"
)
def test_history_user_with_fk_in_different_db_raises_value_error(self):
instance = ExternalModel(name="random_name")
instance._history_user = self.user
with self.assertRaises(ValueError):
instance.save()
def test_history_user_with_integer_field(self):
instance = ExternalModelWithCustomUserIdField(name="random_name")
instance._history_user = self.user
instance.save()
self.assertEqual(self.user.id, instance.history.first().history_user_id)
self.assertEqual(self.user, instance.history.first().history_user)
def test_history_user_is_none(self):
instance = ExternalModelWithCustomUserIdField.objects.create(name="random_name")
self.assertIsNone(instance.history.first().history_user_id)
self.assertIsNone(instance.history.first().history_user)
def test_history_user_does_not_exist(self):
instance = ExternalModelWithCustomUserIdField(name="random_name")
instance._history_user = self.user
instance.save()
self.assertEqual(self.user.id, instance.history.first().history_user_id)
self.assertEqual(self.user, instance.history.first().history_user)
user_id = self.user.id
self.user.delete()
self.assertEqual(user_id, instance.history.first().history_user_id)
self.assertIsNone(instance.history.first().history_user)
| MultiDBExplicitHistoryUserIDTest |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 26116,
"end": 33292
} | class ____:
# Comparison numbers are found using R v.1.5.1
# note that length(testcase) = 4
# testmathworks comes from documentation for the
# Statistics Toolbox for Matlab and can be found at both
# https://www.mathworks.com/help/stats/kurtosis.html
# https://www.mathworks.com/help/stats/skewness.html
# Note that both test cases came from here.
testcase = [1,2,3,4]
testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
np.nan])
testcase_2d = ma.array(
np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
[0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
[0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
[0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
[0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
mask=np.array([[True, False, False, True, False],
[True, True, True, False, True],
[False, False, False, False, False],
[True, True, True, True, True],
[False, False, True, False, False]], dtype=bool))
def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
expect = np.asarray(expect)
if shape is not None:
expect = np.broadcast_to(expect, shape)
assert_array_equal(actual, expect)
if dtype is None:
dtype = expect.dtype
assert actual.dtype == dtype
def test_moment(self):
y = mstats.moment(self.testcase,1)
assert_almost_equal(y,0.0,10)
y = mstats.moment(self.testcase,2)
assert_almost_equal(y,1.25)
y = mstats.moment(self.testcase,3)
assert_almost_equal(y,0.0)
y = mstats.moment(self.testcase,4)
assert_almost_equal(y,2.5625)
# check array_like input for moment
y = mstats.moment(self.testcase, [1, 2, 3, 4])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# check moment input consists only of integers
y = mstats.moment(self.testcase, 0.0)
assert_allclose(y, 1.0)
assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# test empty input
y = mstats.moment([])
self._assert_equal(y, np.nan, dtype=np.float64)
y = mstats.moment(np.array([], dtype=np.float32))
self._assert_equal(y, np.nan, dtype=np.float32)
y = mstats.moment(np.zeros((1, 0)), axis=0)
self._assert_equal(y, [], shape=(0,), dtype=np.float64)
y = mstats.moment([[]], axis=1)
self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
y = mstats.moment([[]], moment=[0, 1], axis=0)
self._assert_equal(y, [], shape=(2, 0))
x = np.arange(10.)
x[9] = np.nan
assert_equal(mstats.moment(x, 2), ma.masked) # NaN value is ignored
def test_variation(self):
y = mstats.variation(self.testcase)
assert_almost_equal(y,0.44721359549996, 10)
def test_variation_ddof(self):
# test variation with delta degrees of freedom
# regression test for gh-13341
a = np.array([1, 2, 3, 4, 5])
y = mstats.variation(a, ddof=1)
assert_almost_equal(y, 0.5270462766947299)
def test_skewness(self):
y = mstats.skew(self.testmathworks)
assert_almost_equal(y,-0.29322304336607,10)
y = mstats.skew(self.testmathworks,bias=0)
assert_almost_equal(y,-0.437111105023940,10)
y = mstats.skew(self.testcase)
assert_almost_equal(y,0.0,10)
# test that skew works on multidimensional masked arrays
correct_2d = ma.array(
np.array([0.6882870394455785, 0, 0.2665647526856708,
0, -0.05211472114254485]),
mask=np.array([False, False, False, True, False], dtype=bool)
)
assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.skew(row), correct_2d[i])
correct_2d_bias_corrected = ma.array(
np.array([1.685952043212545, 0.0, 0.3973712716070531, 0,
-0.09026534484117164]),
mask=np.array([False, False, False, True, False], dtype=bool)
)
assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False),
correct_2d_bias_corrected)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.skew(row, bias=False),
correct_2d_bias_corrected[i])
# Check consistency between stats and mstats implementations
assert_allclose(mstats.skew(self.testcase_2d[2, :]),
stats.skew(self.testcase_2d[2, :]))
def test_kurtosis(self):
# Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
# for compatibility with Matlab)
y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
assert_almost_equal(y, 2.1658856802973, 10)
# Note that MATLAB has confusing docs for the following case
# kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
# kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
# The MATLAB docs imply that both should give Fisher's
y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
assert_almost_equal(y, 3.663542721189047, 10)
y = mstats.kurtosis(self.testcase, 0, 0)
assert_almost_equal(y, 1.64)
# test that kurtosis works on multidimensional masked arrays
correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
-1.26979517952]),
mask=np.array([False, False, False, True,
False], dtype=bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
correct_2d)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
correct_2d_bias_corrected = ma.array(
np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
mask=np.array([False, False, False, True, False], dtype=bool))
assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
bias=False),
correct_2d_bias_corrected)
for i, row in enumerate(self.testcase_2d):
assert_almost_equal(mstats.kurtosis(row, bias=False),
correct_2d_bias_corrected[i])
# Check consistency between stats and mstats implementations
assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
stats.kurtosis(self.testcase_2d[2, :]),
nulp=4)
| TestMoments |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.