language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 5092,
"end": 5306
} | class ____(BatchFusion):
def __init__(self, op, **kwargs) -> None:
super().__init__(**kwargs)
self.op = op
@register_fusion("batch_linear_post_grad", pre_grad=False)
| BatchPointwiseOpsFusionFactory |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 6678,
"end": 6871
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.gate_up_proj = DummyParamModule((2, 4, 2))
self.down_proj = DummyParamModule((2, 2, 2))
| DummyExperts |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 108256,
"end": 123449
} | class ____(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
pdf(x, df, scale)
Probability density function.
logpdf(x, df, scale)
Log of the probability density function.
rvs(df, scale, size=1, random_state=None)
Draw random samples from an inverse Wishart distribution.
entropy(df, scale)
Differential entropy of the distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Raises
------
scipy.linalg.LinAlgError
If the scale matrix `scale` is not positive definite.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported. Symmetry is not checked; only the lower triangular
portion is used.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
Instead of inverting a randomly generated Wishart matrix as described in [2],
here the algorithm in [4] is used to directly generate a random inverse-Wishart
matrix without inversion.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
.. [3] Gupta, M. and Srivastava, S. "Parametric Bayesian Estimation of
Differential Entropy and Relative Entropy". Entropy 12, 818 - 843.
2010.
.. [4] S.D. Axen, "Efficiently generating inverse-Wishart matrices and
their Cholesky factors", :arXiv:`2310.15884v1`. 2023.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
>>> plt.show()
The input quantiles can be any shape of array, as long as the last
axis labels the components.
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
>>> rv = invwishart(df=1, scale=1)
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, log_det_scale, C):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# Retrieve tr(scale x^{-1})
log_det_x = np.empty(x.shape[-1])
tr_scale_x_inv = np.empty(x.shape[-1])
trsm = get_blas_funcs(('trsm'), (x,))
if dim > 1:
for i in range(x.shape[-1]):
Cx, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
A = trsm(1., Cx, C, side=0, lower=True)
tr_scale_x_inv[i] = np.linalg.norm(A)**2
else:
log_det_x[:] = np.log(x[0, 0])
tr_scale_x_inv[:] = C[0, 0]**2 / x[0, 0]
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _inv_standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
A : ndarray
Random variates of shape (`shape`) + (``dim``, ``dim``).
Each slice `A[..., :, :]` is lower-triangular, and its
inverse is the lower Cholesky factor of a draw from
`invwishart(df, np.eye(dim))`.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
A = np.zeros(shape + (dim, dim))
# Random normal variates for off-diagonal elements
tri_rows, tri_cols = np.tril_indices(dim, k=-1)
n_tril = dim * (dim-1) // 2
A[..., tri_rows, tri_cols] = random_state.normal(
size=(*shape, n_tril),
)
# Random chi variates for diagonal elements
rows = np.arange(dim)
chi_dfs = (df - dim + 1) + rows
A[..., rows, rows] = random_state.chisquare(
df=chi_dfs, size=(*shape, dim),
)**0.5
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that inv(A) ~ iW(df, I)
A = self._inv_standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
trsm = get_blas_funcs(('trsm'), (A,))
trmm = get_blas_funcs(('trmm'), (A,))
for index in np.ndindex(A.shape[:-2]):
if dim > 1:
# Calculate CA
# Get CA = C A^{-1} via triangular solver
CA = trsm(1., A[index], C, side=1, lower=True)
# get SA
A[index] = trmm(1., CA, CA, side=1, lower=True, trans_a=True)
else:
A[index][0, 0] = (C[0, 0] / A[index][0, 0])**2
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (``dim``, ``dim``), where
``dim`` is the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
# reference: eq. (17) from ref. 3
psi_eval_points = [0.5 * (df - dim + i) for i in range(1, dim + 1)]
psi_eval_points = np.asarray(psi_eval_points)
return multigammaln(0.5 * df, dim) + 0.5 * dim * df + \
0.5 * (dim + 1) * (log_det_scale - _LOG_2) - \
0.5 * (df + dim + 1) * \
psi(psi_eval_points, out=psi_eval_points).sum()
def entropy(self, df, scale):
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
invwishart = invwishart_gen()
| invwishart_gen |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 29465,
"end": 29708
} | class ____(torch.nn.Module):
torchdynamo_force_dynamic = True # forced to be a UnspecializedNNModule
def forward(self, x):
if x.sum() > 0:
return x + 1
else:
return x - 1
| UnspecNonInlinableModule |
python | django__django | tests/admin_filters/tests.py | {
"start": 7185,
"end": 7638
} | class ____(ModelAdmin):
empty_value_display = "???"
list_filter = (
"author",
DecadeListFilterWithTitleAndParameter,
"is_best_seller",
"category",
"date_registered",
("author__email", AllValuesFieldListFilter),
("contributors", RelatedOnlyFieldListFilter),
("category", EmptyFieldListFilter),
DepartmentOwnershipListFilter,
)
ordering = ("-id",)
| DecadeFilterBookAdmin |
python | ray-project__ray | python/ray/autoscaler/_private/cluster_dump.py | {
"start": 1797,
"end": 19664
} | class ____:
"""Archive object to collect and compress files into a single file.
Objects of this class can be passed around to different data collection
functions. These functions can use the :meth:`subdir` method to add
files to a sub directory of the archive.
"""
def __init__(self, file: Optional[str] = None):
self.file = file or tempfile.mkstemp(prefix="ray_logs_", suffix=".tar.gz")[1]
self.tar = None
self._lock = threading.Lock()
@property
def is_open(self):
return bool(self.tar)
def open(self):
self.tar = tarfile.open(self.file, "w:gz")
def close(self):
self.tar.close()
self.tar = None
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@contextmanager
def subdir(self, subdir: str, root: Optional[str] = "/"):
"""Open a context to add files to the archive.
Example:
.. code-block:: python
with Archive("file.tar.gz") as archive:
with archive.subdir("logfiles", root="/tmp/logs") as sd:
# Will be added as `logfiles/nested/file.txt`
sd.add("/tmp/logs/nested/file.txt")
Args:
subdir: Subdir to which to add files to. Calling the
``add(path)`` command will place files into the ``subdir``
directory of the archive.
root: Root path. Files without an explicit ``arcname``
will be named relatively to this path.
Yields:
A context object that can be used to add files to the archive.
"""
root = os.path.abspath(root)
class _Context:
@staticmethod
def add(path: str, arcname: Optional[str] = None):
path = os.path.abspath(path)
arcname = arcname or os.path.join(subdir, os.path.relpath(path, root))
self._lock.acquire()
self.tar.add(path, arcname=arcname)
self._lock.release()
yield _Context()
###
# Functions to gather logs and information on the local node
###
def get_local_ray_logs(
archive: Archive,
exclude: Optional[Sequence[str]] = None,
session_log_dir: str = "/tmp/ray/session_latest",
) -> Archive:
"""Copy local log files into an archive.
Args:
archive: Archive object to add log files to.
exclude (Sequence[str]): Sequence of regex patterns. Files that match
any of these patterns will not be included in the archive.
session_dir: Path to the Ray session files. Defaults to
``/tmp/ray/session_latest``
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
exclude = exclude or []
session_log_dir = os.path.join(os.path.expanduser(session_log_dir), "logs")
with archive.subdir("logs", root=session_log_dir) as sd:
for root, dirs, files in os.walk(session_log_dir):
for file in files:
file_path = os.path.join(root, file)
rel_path = os.path.relpath(file_path, start=session_log_dir)
# Skip file if it matches any pattern in `exclude`
if any(re.match(pattern, rel_path) for pattern in exclude):
continue
sd.add(file_path)
return archive
def get_local_debug_state(
archive: Archive, session_dir: str = "/tmp/ray/session_latest"
) -> Archive:
"""Copy local log files into an archive.
Args:
archive: Archive object to add log files to.
session_dir: Path to the Ray session files. Defaults to
``/tmp/ray/session_latest``
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
session_dir = os.path.expanduser(session_dir)
debug_state_file = os.path.join(session_dir, "logs/debug_state.txt")
if not os.path.exists(debug_state_file):
raise LocalCommandFailed("No `debug_state.txt` file found.")
with archive.subdir("", root=session_dir) as sd:
sd.add(debug_state_file)
return archive
def get_local_pip_packages(archive: Archive):
"""Get currently installed pip packages and write into an archive.
Args:
archive: Archive object to add meta files to.
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
try:
from pip._internal.operations import freeze
except ImportError: # pip < 10.0
from pip.operations import freeze
with tempfile.NamedTemporaryFile("wt") as fp:
for line in freeze.freeze():
fp.writelines([line, "\n"])
fp.flush()
with archive.subdir("") as sd:
sd.add(fp.name, "pip_packages.txt")
return archive
def get_local_ray_processes(
archive: Archive,
processes: Optional[List[Tuple[str, bool]]] = None,
verbose: bool = False,
):
"""Get the status of all the relevant ray processes.
Args:
archive: Archive object to add process info files to.
processes: List of processes to get information on. The first
element of the tuple is a string to filter by, and the second
element is a boolean indicating if we should filter by command
name (True) or command line including parameters (False)
verbose: If True, show entire executable command line.
If False, show just the first term.
Returns:
Open archive object.
"""
if not processes:
# local import to avoid circular dependencies
from ray.autoscaler._private.constants import RAY_PROCESSES
processes = RAY_PROCESSES
process_infos = []
for process in psutil.process_iter(["pid", "name", "cmdline", "status"]):
try:
with process.oneshot():
cmdline = " ".join(process.cmdline())
process_infos.append(
(
{
"executable": cmdline
if verbose
else cmdline.split("--", 1)[0][:-1],
"name": process.name(),
"pid": process.pid,
"status": process.status(),
},
process.cmdline(),
)
)
except Exception as exc:
raise LocalCommandFailed(exc) from exc
relevant_processes = {}
for process_dict, cmdline in process_infos:
for keyword, filter_by_cmd in processes:
if filter_by_cmd:
corpus = process_dict["name"]
else:
corpus = subprocess.list2cmdline(cmdline)
if keyword in corpus and process_dict["pid"] not in relevant_processes:
relevant_processes[process_dict["pid"]] = process_dict
with tempfile.NamedTemporaryFile("wt") as fp:
for line in relevant_processes.values():
fp.writelines([yaml.dump(line), "\n"])
fp.flush()
with archive.subdir("meta") as sd:
sd.add(fp.name, "process_info.txt")
return archive
def get_all_local_data(archive: Archive, parameters: GetParameters):
"""Get all local data.
Gets:
- The Ray logs of the latest session
- The currently installed pip packages
Args:
archive: Archive object to add meta files to.
parameters: Parameters (settings) for getting data.
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
if parameters.logs:
try:
get_local_ray_logs(archive=archive)
except LocalCommandFailed as exc:
cli_logger.error(exc)
if parameters.debug_state:
try:
get_local_debug_state(archive=archive)
except LocalCommandFailed as exc:
cli_logger.error(exc)
if parameters.pip:
try:
get_local_pip_packages(archive=archive)
except LocalCommandFailed as exc:
cli_logger.error(exc)
if parameters.processes:
try:
get_local_ray_processes(
archive=archive,
processes=parameters.processes_list,
verbose=parameters.processes_verbose,
)
except LocalCommandFailed as exc:
cli_logger.error(exc)
return archive
###
# Functions to invoke remote scripts and gather data from remote nodes
###
def _wrap(items: List[str], quotes="'"):
return f"{quotes}{' '.join(items)}{quotes}"
def create_and_get_archive_from_remote_node(
remote_node: Node, parameters: GetParameters, script_path: str = "ray"
) -> Optional[str]:
"""Create an archive containing logs on a remote node and transfer.
This will call ``ray local-dump --stream`` on the remote
node. The resulting file will be saved locally in a temporary file and
returned.
Args:
remote_node: Remote node to gather archive from.
script_path: Path to this script on the remote node.
parameters: Parameters (settings) for getting data.
Returns:
Path to a temporary file containing the node's collected data.
"""
cmd = [
"ssh",
"-o StrictHostKeyChecking=no",
"-o UserKnownHostsFile=/dev/null",
"-o LogLevel=ERROR",
"-i",
remote_node.ssh_key,
f"{remote_node.ssh_user}@{remote_node.host}",
]
if remote_node.docker_container:
cmd += [
"docker",
"exec",
remote_node.docker_container,
]
collect_cmd = [script_path, "local-dump", "--stream"]
collect_cmd += ["--logs"] if parameters.logs else ["--no-logs"]
collect_cmd += ["--debug-state"] if parameters.debug_state else ["--no-debug-state"]
collect_cmd += ["--pip"] if parameters.pip else ["--no-pip"]
collect_cmd += ["--processes"] if parameters.processes else ["--no-processes"]
if parameters.processes:
collect_cmd += (
["--processes-verbose"]
if parameters.processes_verbose
else ["--no-proccesses-verbose"]
)
cmd += ["/bin/bash", "-c", _wrap(collect_cmd, quotes='"')]
cat = "node" if not remote_node.is_head else "head"
cli_logger.print(f"Collecting data from remote node: {remote_node.host}")
tmp = tempfile.mkstemp(prefix=f"ray_{cat}_{remote_node.host}_", suffix=".tar.gz")[1]
with open(tmp, "wb") as fp:
try:
subprocess.check_call(cmd, stdout=fp, stderr=sys.stderr)
except subprocess.CalledProcessError as exc:
raise RemoteCommandFailed(
f"Gathering logs from remote node failed: {' '.join(cmd)}"
) from exc
return tmp
def create_and_add_remote_data_to_local_archive(
archive: Archive, remote_node: Node, parameters: GetParameters
):
"""Create and get data from remote node and add to local archive.
Args:
archive: Archive object to add remote data to.
remote_node: Remote node to gather archive from.
parameters: Parameters (settings) for getting data.
Returns:
Open archive object.
"""
tmp = create_and_get_archive_from_remote_node(remote_node, parameters)
if not archive.is_open:
archive.open()
cat = "node" if not remote_node.is_head else "head"
with archive.subdir("", root=os.path.dirname(tmp)) as sd:
sd.add(tmp, arcname=f"ray_{cat}_{remote_node.host}.tar.gz")
return archive
def create_and_add_local_data_to_local_archive(
archive: Archive, parameters: GetParameters
):
"""Create and get data from this node and add to archive.
Args:
archive: Archive object to add remote data to.
parameters: Parameters (settings) for getting data.
Returns:
Open archive object.
"""
with Archive() as local_data_archive:
get_all_local_data(local_data_archive, parameters)
if not archive.is_open:
archive.open()
with archive.subdir("", root=os.path.dirname(local_data_archive.file)) as sd:
sd.add(local_data_archive.file, arcname="local_node.tar.gz")
os.remove(local_data_archive.file)
return archive
def create_archive_for_remote_nodes(
archive: Archive, remote_nodes: Sequence[Node], parameters: GetParameters
):
"""Create an archive combining data from the remote nodes.
This will parallelize calls to get data from remote nodes.
Args:
archive: Archive object to add remote data to.
remote_nodes (Sequence[Node]): Sequence of remote nodes.
parameters: Parameters (settings) for getting data.
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
with ThreadPoolExecutor(max_workers=MAX_PARALLEL_SSH_WORKERS) as executor:
for remote_node in remote_nodes:
executor.submit(
create_and_add_remote_data_to_local_archive,
archive=archive,
remote_node=remote_node,
parameters=parameters,
)
return archive
def create_archive_for_local_and_remote_nodes(
archive: Archive, remote_nodes: Sequence[Node], parameters: GetParameters
):
"""Create an archive combining data from the local and remote nodes.
This will parallelize calls to get data from remote nodes.
Args:
archive: Archive object to add data to.
remote_nodes (Sequence[Node]): Sequence of remote nodes.
parameters: Parameters (settings) for getting data.
Returns:
Open archive object.
"""
if not archive.is_open:
archive.open()
try:
create_and_add_local_data_to_local_archive(archive, parameters)
except CommandFailed as exc:
cli_logger.error(exc)
create_archive_for_remote_nodes(archive, remote_nodes, parameters)
cli_logger.print(
f"Collected data from local node and {len(remote_nodes)} " f"remote nodes."
)
return archive
###
# Ray cluster info
###
def get_info_from_ray_cluster_config(
cluster_config: str,
) -> Tuple[List[str], str, str, Optional[str], Optional[str]]:
"""Get information from Ray cluster config.
Return list of host IPs, ssh user, ssh key file, and optional docker
container.
Args:
cluster_config: Path to ray cluster config.
Returns:
Tuple of list of host IPs, ssh user name, ssh key file path,
optional docker container name, optional cluster name.
"""
from ray.autoscaler._private.commands import _bootstrap_config
cli_logger.print(
f"Retrieving cluster information from ray cluster file: " f"{cluster_config}"
)
cluster_config = os.path.expanduser(cluster_config)
config = yaml.safe_load(open(cluster_config).read())
config = _bootstrap_config(config, no_config_cache=True)
provider = _get_node_provider(config["provider"], config["cluster_name"])
head_nodes = provider.non_terminated_nodes({TAG_RAY_NODE_KIND: NODE_KIND_HEAD})
worker_nodes = provider.non_terminated_nodes({TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
hosts = [provider.external_ip(node) for node in head_nodes + worker_nodes]
ssh_user = config["auth"]["ssh_user"]
ssh_key = config["auth"]["ssh_private_key"]
docker = None
docker_config = config.get("docker", None)
if docker_config:
docker = docker_config.get("container_name", None)
cluster_name = config.get("cluster_name", None)
return hosts, ssh_user, ssh_key, docker, cluster_name
def _info_from_params(
cluster: Optional[str] = None,
host: Optional[str] = None,
ssh_user: Optional[str] = None,
ssh_key: Optional[str] = None,
docker: Optional[str] = None,
):
"""Parse command line arguments.
Note: This returns a list of hosts, not a comma separated string!
"""
if not host and not cluster:
bootstrap_config = os.path.expanduser("~/ray_bootstrap_config.yaml")
if os.path.exists(bootstrap_config):
cluster = bootstrap_config
cli_logger.warning(
f"Detected cluster config file at {cluster}. "
f"If this is incorrect, specify with "
f"`ray cluster-dump <config>`"
)
elif cluster:
cluster = os.path.expanduser(cluster)
cluster_name = None
if cluster:
h, u, k, d, cluster_name = get_info_from_ray_cluster_config(cluster)
ssh_user = ssh_user or u
ssh_key = ssh_key or k
docker = docker or d
hosts = host.split(",") if host else h
if not hosts:
raise LocalCommandFailed(
f"Invalid cluster file or cluster has no running nodes: " f"{cluster}"
)
elif host:
hosts = host.split(",")
else:
raise LocalCommandFailed(
"You need to either specify a `<cluster_config>` or `--host`."
)
if not ssh_user:
ssh_user = DEFAULT_SSH_USER
cli_logger.warning(
f"Using default SSH user `{ssh_user}`. "
f"If this is incorrect, specify with `--ssh-user <user>`"
)
if not ssh_key:
for cand_key in DEFAULT_SSH_KEYS:
cand_key_file = os.path.expanduser(cand_key)
if os.path.exists(cand_key_file):
ssh_key = cand_key_file
cli_logger.warning(
f"Auto detected SSH key file: {ssh_key}. "
f"If this is incorrect, specify with `--ssh-key <key>`"
)
break
return cluster, hosts, ssh_user, ssh_key, docker, cluster_name
| Archive |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 7823,
"end": 14887
} | class ____:
"""
Used for AOTDispatch.
This dataclass gives us the information we need to reconstruct a tensor subclass
from our flat inputs.
Why is this important? The graph that we'd like to trace out contains flat tensor inputs,
But the user's original model may have subclass inputs and outputs.
So we need to wrap/unwrap subclasses as necessary to translate between the user's
view (subclass inps/outs), and the backend compiler's view (graph with no subclass args).
Complications arise mostly from the fact that a subclass can hold more than one inner tensor;
So for a given subclass input/output, we need to carefully track which indices map
to the subclass tensor in the corresponding "dense-tensor-only" graph.
"""
# In the inner graph that only takes in dense tensor inputs,
# this maps to the first index of "tensors that should go in this subclass wrapper"
flat_tensor_start_idx: int
# arg_count is inclusive of the arg_counts of any
# inner tensor subclasses: If I have a TwoTensor and
# both of its inner elements are TwoTensors, then the
# arg_count of the outer-most subclass will be 4
arg_count: int
# Mark where or not symints were included. This flag is only used in one assertion
# in "wrap_tensor_subclasses"
included_subclass_symints: bool
# meta and attrs are produced by the subclass's __tensor_flatten__.
# We need to keep them around along with outer_size / outer_stride to plumb them
# into __tensor_unflatten__
attrs: dict[str, Union[SubclassCreationMeta, PlainTensorMeta]]
outer_size: Iterable[Union[None, int, torch.SymInt]]
outer_stride: Iterable[Union[None, int, torch.SymInt]]
meta: Any
# Stores the original subclass itself.
# This is needed because we need the autograd metadata on the original subclass
# (this is guaranteed to be a wrapper subclass that holds a fake tensor,
# so holding onto this at runtime shouldn't leak memory)
# This field is nulled out after calling make_runtime_safe()
original_subclass: Optional[torch.Tensor]
# Used at runtime to determine the subclass type, so we don't need to save the original subclass
original_subclass_type: Optional[type] = None
memory_format: Optional[MemoryFormatMeta] = None
def compute_outer_size_and_stride(
self,
all_args,
*,
curr_start_idx: int,
):
from .subclass_utils import compute_symint_placeholders
def compute(outer, start_idx):
placeholders = compute_symint_placeholders(outer)
has_symbolic = any(placeholders)
if has_symbolic:
start = curr_start_idx
end = start_idx + sum(placeholders)
it_args = iter(all_args[start:end])
it_placeholders = iter(placeholders)
return pytree.tree_map_only(
lambda _: next(it_placeholders), lambda _: next(it_args), outer
), start + len(placeholders)
else:
return outer, start_idx
outer_size, next_idx = compute(self.outer_size, curr_start_idx)
outer_stride, _ = compute(self.outer_stride, next_idx)
return outer_size, outer_stride
def creation_fn(
self,
all_args,
*,
is_runtime: bool,
):
inner_tensors = {}
curr_start_idx = self.flat_tensor_start_idx
for attr, creation_meta in self.attrs.items():
if isinstance(creation_meta, PlainTensorMeta):
subclass = all_args[curr_start_idx]
curr_start_idx += 1
else:
subclass = creation_meta.creation_fn(
all_args,
is_runtime=is_runtime,
)
curr_start_idx += creation_meta.arg_count
inner_tensors[attr] = subclass
if is_runtime:
assert self.original_subclass_type is not None
original_subclass_type = self.original_subclass_type
else:
original_subclass_type = type(self.original_subclass)
if is_runtime:
outer_size, outer_stride = self.compute_outer_size_and_stride(
all_args,
curr_start_idx=curr_start_idx,
)
else:
outer_size, outer_stride = self.outer_size, self.outer_stride
rebuilt = original_subclass_type.__tensor_unflatten__( # type: ignore[attr-defined]
inner_tensors, self.meta, outer_size, outer_stride
)
if not is_runtime:
# After wrapping up the inner dense tensors into a subclass, we need to make sure that our new wrapper
# has correct autograd metadata, since we'll be tracing through the autograd engine with the subclass.
# We don't trace through the autograd engine at runtime though, so no need
# to compute this extra metadata then!
torch._mirror_autograd_meta_to(self.original_subclass, rebuilt) # type: ignore[attr-defined]
return rebuilt
def make_runtime_safe(self):
def _make_size_runtime_safe(x: Union[None, int, torch.SymInt]) -> Optional[int]:
dummy = -1
if isinstance(x, torch.SymInt):
# Replace nested ints by a dummy value (-1) as NJT ignores
# the outer_size/outer_stride at runtime.
return dummy if x.node.is_nested_int() else None
return x
assert self.original_subclass is not None
self.original_subclass_type = type(self.original_subclass)
self.original_subclass = None
# Note: NJT outer_size in AOTDispatcher
# `_make_size_runtime_safe` replaces any nested int with a dummy value (-1)
# to prevent serializing a SymInt at runtime. Internally, nested tensor __tensor_unflatten__
# is designed to safely ignore this dummy value.
# For more details, see: https://github.com/pytorch/pytorch/blob/5141ade8e30c64e873e14dcc8de233da45d15025/torch/nested/_internal/nested_tensor.py#L266-L299 # noqa: B950
self.outer_size = tuple(map(_make_size_runtime_safe, self.outer_size))
self.outer_stride = tuple(map(_make_size_runtime_safe, self.outer_stride))
# Recurse on nested subclass info
for creation_meta in self.attrs.values():
if isinstance(creation_meta, SubclassCreationMeta):
creation_meta.make_runtime_safe()
def __post_init__(self):
# sanity assert to make sure we don't leak memory
assert is_fake(self.original_subclass)
# This class encapsulates all aliasing + mutation info we need about the forward graph
# See a more detailed overview of the edge case handling at
# https://docs.google.com/document/d/19UoIh_SVrMy_b2Sx5ZaeOJttm6P0Qmyss2rdBuyfoic/edit
# NOTE: This class is saved in AOTAutogradCache, If you are adding elements, make sure
# they are covered by warm cache tests.
@dataclass(eq=False)
| SubclassCreationMeta |
python | kamyu104__LeetCode-Solutions | Python/path-with-maximum-minimum-value.py | {
"start": 81,
"end": 1415
} | class ____(object):
def maximumMinimumPath(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(A, val, r, c, lookup):
if r == len(A)-1 and c == len(A[0])-1:
return True
lookup.add((r, c))
for d in directions:
nr, nc = r + d[0], c + d[1]
if 0 <= nr < len(A) and \
0 <= nc < len(A[0]) and \
(nr, nc) not in lookup and \
A[nr][nc] >= val and \
check(A, val, nr, nc, lookup):
return True
return False
vals, ceil = [], min(A[0][0], A[-1][-1])
for i in xrange(len(A)):
for j in xrange(len(A[0])):
if A[i][j] <= ceil:
vals.append(A[i][j])
vals = list(set(vals))
vals.sort()
left, right = 0, len(vals)-1
while left <= right:
mid = left + (right-left)//2
if not check(A, vals[mid], 0, 0, set()):
right = mid-1
else:
left = mid+1
return vals[right]
# Time: O(m * n * log(m * n))
# Space: O(m * n)
import heapq
# Dijkstra algorithm solution
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_capacity_requirements.py | {
"start": 383,
"end": 6129
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'requests': 'dict(str, str)'
}
attribute_map = {
'requests': 'requests'
}
def __init__(self, requests=None, local_vars_configuration=None): # noqa: E501
"""V1CapacityRequirements - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._requests = None
self.discriminator = None
if requests is not None:
self.requests = requests
@property
def requests(self):
"""Gets the requests of this V1CapacityRequirements. # noqa: E501
Requests represent individual device resource requests for distinct resources, all of which must be provided by the device. This value is used as an additional filtering condition against the available capacity on the device. This is semantically equivalent to a CEL selector with `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`. For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0. When a requestPolicy is defined, the requested amount is adjusted upward to the nearest valid value based on the policy. If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows— the device is considered ineligible for allocation. For any capacity that is not explicitly requested: - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity (i.e., the whole device is claimed). - If a requestPolicy is set, the default consumed capacity is determined according to that policy. If the device allows multiple allocation, the aggregated amount across all requests must not exceed the capacity value. The consumed capacity, which may be adjusted based on the requestPolicy if defined, is recorded in the resource claim’s status.devices[*].consumedCapacity field. # noqa: E501
:return: The requests of this V1CapacityRequirements. # noqa: E501
:rtype: dict(str, str)
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1CapacityRequirements.
Requests represent individual device resource requests for distinct resources, all of which must be provided by the device. This value is used as an additional filtering condition against the available capacity on the device. This is semantically equivalent to a CEL selector with `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`. For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0. When a requestPolicy is defined, the requested amount is adjusted upward to the nearest valid value based on the policy. If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows— the device is considered ineligible for allocation. For any capacity that is not explicitly requested: - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity (i.e., the whole device is claimed). - If a requestPolicy is set, the default consumed capacity is determined according to that policy. If the device allows multiple allocation, the aggregated amount across all requests must not exceed the capacity value. The consumed capacity, which may be adjusted based on the requestPolicy if defined, is recorded in the resource claim’s status.devices[*].consumedCapacity field. # noqa: E501
:param requests: The requests of this V1CapacityRequirements. # noqa: E501
:type: dict(str, str)
"""
self._requests = requests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CapacityRequirements):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CapacityRequirements):
return True
return self.to_dict() != other.to_dict()
| V1CapacityRequirements |
python | dagster-io__dagster | python_modules/libraries/dagster-postgres/dagster_postgres_tests/test_event_log.py | {
"start": 734,
"end": 5034
} | class ____(TestEventLogStorage):
__test__ = True
@pytest.fixture(name="instance", scope="function")
def instance(self, conn_string):
PostgresEventLogStorage.create_clean_storage(conn_string)
with instance_for_test(
overrides={"storage": {"postgres": {"postgres_url": conn_string}}}
) as instance:
yield instance
@pytest.fixture(scope="function", name="storage")
def event_log_storage(self, instance):
event_log_storage = instance.event_log_storage
assert isinstance(event_log_storage, PostgresEventLogStorage)
yield event_log_storage
def can_wipe_asset_partitions(self) -> bool:
return False
def test_event_log_storage_two_watchers(self, conn_string):
with _clean_storage(conn_string) as storage:
run_id = make_new_run_id()
watched_1 = []
watched_2 = []
def watch_one(event, _cursor):
watched_1.append(event)
def watch_two(event, _cursor):
watched_2.append(event)
assert len(storage.get_logs_for_run(run_id)) == 0
storage.store_event(create_test_event_log_record(str(1), run_id=run_id))
assert len(storage.get_logs_for_run(run_id)) == 1
assert len(watched_1) == 0
storage.watch(run_id, str(EventLogCursor.from_storage_id(1)), watch_one)
storage.store_event(create_test_event_log_record(str(2), run_id=run_id))
storage.store_event(create_test_event_log_record(str(3), run_id=run_id))
storage.watch(run_id, str(EventLogCursor.from_storage_id(3)), watch_two)
storage.store_event(create_test_event_log_record(str(4), run_id=run_id))
attempts = 10
while (len(watched_1) < 3 or len(watched_2) < 1) and attempts > 0:
time.sleep(0.5)
attempts -= 1
assert len(watched_1) == 3
assert len(watched_2) == 1
assert len(storage.get_logs_for_run(run_id)) == 4
storage.end_watch(run_id, watch_one)
time.sleep(0.3) # this value scientifically selected from a range of attractive values
storage.store_event(create_test_event_log_record(str(5), run_id=run_id))
attempts = 10
while len(watched_2) < 2 and attempts > 0:
time.sleep(0.5)
attempts -= 1
assert len(watched_1) == 3
assert len(watched_2) == 2
storage.end_watch(run_id, watch_two)
assert len(storage.get_logs_for_run(run_id)) == 5
storage.delete_events(run_id)
assert len(storage.get_logs_for_run(run_id)) == 0
assert len(watched_1) == 3
assert len(watched_2) == 2
assert [int(evt.message) for evt in watched_1] == [2, 3, 4]
assert [int(evt.message) for evt in watched_2] == [4, 5]
assert len(objgraph.by_type("SqlPollingEventWatcher")) == 1
# ensure we clean up poller on exit
gc.collect()
assert len(objgraph.by_type("SqlPollingEventWatcher")) == 0
def test_load_from_config(self, hostname):
url_cfg = f"""
event_log_storage:
module: dagster_postgres.event_log
class: PostgresEventLogStorage
config:
postgres_url: postgresql://test:test@{hostname}:5432/test
"""
explicit_cfg = f"""
event_log_storage:
module: dagster_postgres.event_log
class: PostgresEventLogStorage
config:
postgres_db:
username: test
password: test
hostname: {hostname}
db_name: test
"""
with instance_for_test(overrides=yaml.safe_load(url_cfg)) as from_url_instance:
from_url = from_url_instance._event_storage # noqa: SLF001
with instance_for_test(overrides=yaml.safe_load(explicit_cfg)) as explicit_instance:
from_explicit = explicit_instance._event_storage # noqa: SLF001
assert from_url.postgres_url == from_explicit.postgres_url # pyright: ignore[reportAttributeAccessIssue]
| TestPostgresEventLogStorage |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_teams.py | {
"start": 396,
"end": 6939
} | class ____(APITestCase):
def test_simple(self) -> None:
user = self.create_user()
org = self.create_organization(owner=self.user)
team1 = self.create_team(organization=org, name="foo")
team2 = self.create_team(organization=org, name="bar")
self.create_member(organization=org, user=user, has_global_access=False, teams=[team1])
path = f"/api/0/organizations/{org.slug}/teams/"
self.login_as(user=user)
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert response.data[0]["id"] == str(team2.id)
assert not response.data[0]["isMember"]
assert response.data[1]["id"] == str(team1.id)
assert response.data[1]["isMember"]
def test_simple_results_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=self.user)
team1 = self.create_team(organization=org, name="foo")
self.create_team(organization=org, name="bar")
self.create_member(organization=org, user=user, has_global_access=False, teams=[team1])
path = f"/api/0/organizations/{org.slug}/teams/?detailed=0"
self.login_as(user=user)
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert "projects" not in response.data[0]
assert "projects" not in response.data[1]
def test_search(self) -> None:
user = self.create_user()
org = self.create_organization(owner=self.user)
team = self.create_team(organization=org, name="bar", slug="bar")
self.create_member(organization=org, user=user, has_global_access=False, teams=[team])
self.login_as(user=user)
path = f"/api/0/organizations/{org.slug}/teams/?query=bar"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["id"] == str(team.id)
path = f"/api/0/organizations/{org.slug}/teams/?query=baz"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_list_external_teams(self) -> None:
self.external_team = self.create_external_team(
self.team, external_name="@getsentry/ecosystem"
)
path = f"/api/0/organizations/{self.organization.slug}/teams/?detailed=1"
self.login_as(user=self.user)
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.team.id)
assert len(response.data[0]["externalTeams"]) == 1
assert response.data[0]["externalTeams"][0] == {
"id": str(self.external_team.id),
"integrationId": str(self.external_team.integration_id),
"provider": get_provider_string(self.external_team.provider),
"externalName": self.external_team.external_name,
"teamId": str(self.team.id),
}
def test_has_external_teams_query(self) -> None:
team = self.create_team(organization=self.organization, name="foo")
self.login_as(user=self.user)
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=hasExternalTeams:true"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 0
self.create_external_team(team, external_name="@getsentry/ecosystem")
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["id"] == str(team.id)
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=hasExternalTeams:false"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_query_by_slug(self) -> None:
self.create_team(organization=self.organization, name="foo")
self.create_team(organization=self.organization, name="bar")
self.login_as(user=self.user)
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=slug:foo"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=slug:foo+slug:bar"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 2
def test_query_by_id(self) -> None:
team1 = self.create_team(organization=self.organization, name="foo")
team2 = self.create_team(organization=self.organization, name="bar")
self.login_as(user=self.user)
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=id:undefined"
response = self.client.get(path)
assert response.status_code == 400, response.content
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=id:{team1.id}"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 1
path = f"/api/0/organizations/{self.organization.slug}/teams/?query=id:{team1.id}+id:{team2.id}"
response = self.client.get(path)
assert response.status_code == 200, response.content
assert len(response.data) == 2
def test_hanging_project_team(self) -> None:
user = self.create_user()
org = self.create_organization(owner=self.user)
external_org = self.create_organization()
team1 = self.create_team(organization=org, name="foo")
external_team = self.create_team(organization=external_org, name="bar")
self.create_member(organization=org, user=user, has_global_access=False, teams=[team1])
ProjectTeam.objects.create(project=self.project, team=team1)
ProjectTeam.objects.create(project=self.project, team=external_team)
self.login_as(user=user)
path = f"/api/0/organizations/{org.slug}/teams/"
response = self.client.get(path)
assert response.status_code == 200, response.content
| OrganizationTeamsListTest |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 2140,
"end": 2327
} | class ____(CookiecutterException):
"""
Exception for existing output directory.
Raised when the output directory of the project exists already.
"""
| OutputDirExistsException |
python | pytorch__pytorch | test/distributed/_composable/test_replicate.py | {
"start": 924,
"end": 2619
} | class ____(MultiProcessTestCase):
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _init_pg(self):
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=dist.FileStore(self.file_name, self.world_size),
)
def _check_state_dict_parity(self, sd_1, sd_2):
for k1, k2 in zip(sd_1.keys(), sd_2.keys()):
self.assertEqual(k1, k2)
for v1, v2 in zip(sd_1.values(), sd_2.values()):
self.assertEqual(v1, v2)
def test_replicate_single_module_save_load(self):
"""
Tests that replicate() on a single module state_dict
matches local module state_dict.
"""
self._init_pg()
model = Net()
replicate_model = replicate(deepcopy(model))
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
def test_replicate_non_root_multiple_save_load(self):
"""
Tests the replicate() on multiple submodules matches
local module state_dict.
"""
self._init_pg()
model = Net()
replicate_model = deepcopy(model)
replicate(replicate_model.fc1)
replicate(replicate_model.fc2)
replicate(replicate_model.fc3)
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
| ReplicateStateDictTest |
python | huggingface__transformers | tests/generation/test_fsdp.py | {
"start": 5702,
"end": 7394
} | class ____(TestCasePlus):
nproc_per_node = 2
def test_generic_task_model_can_be_sharded(self):
script_to_run = textwrap.dedent(
"""
import torch
from torch.distributed.fsdp import fully_shard
from transformers import AutoModelForTokenClassification
torch.distributed.init_process_group(
backend="nccl" if torch.cuda.is_available() else "gloo", init_method="env://"
)
rank = torch.distributed.get_rank()
if torch.cuda.is_available():
torch.cuda.set_device(rank)
# Make sure it works
model = AutoModelForTokenClassification.from_pretrained("Qwen/Qwen2-0.5B")
module = fully_shard(model)
torch.distributed.destroy_process_group()
"""
)
torchrun(script_to_run, self.nproc_per_node, env=self.get_env())
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/generation/test_fsdp.py --fsdp
class CLIArgs(argparse.Namespace):
fsdp: bool
fsdp2: bool
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--fsdp", action="store_true")
group.add_argument("--fsdp2", action="store_true")
args = parser.parse_args(namespace=CLIArgs())
if args.fsdp:
fsdp_generate()
elif args.fsdp2:
fsdp2_generate()
else:
raise ValueError("Missing test selection")
| TestFSDPGenericTaskModel |
python | numba__numba | numba/tests/test_unsafe_intrinsics.py | {
"start": 4123,
"end": 4723
} | class ____(TestCase):
"""Tests for numba.unsafe.bytes
"""
def test_memcpy_region(self):
@njit
def foo(dst, dst_index, src, src_index, nbytes):
# last arg is assume 1 byte alignment
memcpy_region(dst.ctypes.data, dst_index,
src.ctypes.data, src_index, nbytes, 1)
d = np.zeros(10, dtype=np.int8)
s = np.arange(10, dtype=np.int8)
# copy s[1:6] to d[4:9]
foo(d, 4, s, 1, 5)
expected = [0, 0, 0, 0, 1, 2, 3, 4, 5, 0]
np.testing.assert_array_equal(d, expected)
| TestBytesIntrinsic |
python | dask__dask | dask/dataframe/dask_expr/_shuffle.py | {
"start": 6423,
"end": 8340
} | class ____(ShuffleBase):
def _lower(self):
frame = self.frame
partitioning_index = self.partitioning_index
npartitions_out = self.npartitions_out
ignore_index = self.ignore_index
options = self.options
index_shuffle = self.index_shuffle
# Normalize partitioning_index
if isinstance(partitioning_index, str):
partitioning_index = [partitioning_index]
if index_shuffle:
pass
elif not isinstance(partitioning_index, (list, Expr)):
raise ValueError(
f"{type(partitioning_index)} not a supported type for partitioning_index"
)
if not isinstance(partitioning_index, Expr) and not index_shuffle:
cs = [col for col in partitioning_index if col not in frame.columns]
if len(cs) == 1:
frame = Assign(frame, "_partitions_0", frame.index)
partitioning_index = partitioning_index.copy()
partitioning_index[partitioning_index.index(cs[0])] = "_partitions_0"
# Assign new "_partitions" column
index_added = AssignPartitioningIndex(
frame,
partitioning_index,
"_partitions",
npartitions_out,
frame._meta,
index_shuffle,
)
# Apply shuffle
shuffled = Shuffle(
index_added,
"_partitions",
npartitions_out,
ignore_index,
self.method,
options,
original_partitioning_index=self._partitioning_index,
)
if frame.ndim == 1:
# Reduce back to series
return shuffled[index_added.columns[0]]
# Drop "_partitions" column and return
return shuffled[
[c for c in shuffled.columns if c not in ["_partitions", "_partitions_0"]]
]
| RearrangeByColumn |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 43750,
"end": 44695
} | class ____(PipesBlobStoreMessageWriter):
"""Message writer that writes messages by periodically writing message chunks to a GCS bucket.
Args:
client (google.cloud.storage.Client): A google.cloud.storage.Client object.
interval (float): interval in seconds between upload chunk uploads
"""
def __init__(self, client: "GCSClient", *, interval: float = 10):
super().__init__(interval=interval)
self._client = client
def make_channel(
self,
params: PipesParams,
) -> "PipesGCSMessageWriterChannel":
bucket = _assert_env_param_type(params, "bucket", str, self.__class__)
key_prefix = _assert_opt_env_param_type(params, "key_prefix", str, self.__class__)
return PipesGCSMessageWriterChannel(
client=self._client,
bucket=bucket,
key_prefix=key_prefix,
interval=self.interval,
)
| PipesGCSMessageWriter |
python | tensorflow__tensorflow | tensorflow/examples/speech_commands/generate_streaming_test_wav_test.py | {
"start": 890,
"end": 1306
} | class ____(test.TestCase):
def testMixInAudioSample(self):
track_data = np.zeros([10000])
sample_data = np.ones([1000])
generate_streaming_test_wav.mix_in_audio_sample(
track_data, 2000, sample_data, 0, 1000, 1.0, 100, 100)
self.assertNear(1.0, track_data[2500], 0.0001)
self.assertNear(0.0, track_data[3500], 0.0001)
if __name__ == "__main__":
test.main()
| GenerateStreamingTestWavTest |
python | encode__django-rest-framework | rest_framework/test.py | {
"start": 13375,
"end": 13467
} | class ____(testcases.TransactionTestCase):
client_class = APIClient
| APITransactionTestCase |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_serverless_job.py | {
"start": 1790,
"end": 2448
} | class ____(TestEmrServerlessJobSensor):
@pytest.mark.parametrize(
("state", "expected_result"),
[
("PENDING", False),
("RUNNING", False),
("SCHEDULED", False),
("SUBMITTED", False),
("SUCCESS", True),
],
)
def test_poke_returns_expected_result_for_states(self, state, expected_result):
get_job_run_return_value = {"jobRun": {"state": state}}
self.set_get_job_run_return_value(get_job_run_return_value)
assert self.sensor.poke(None) == expected_result
self.assert_get_job_run_was_called_once_with_app_and_run_id()
| TestPokeReturnValue |
python | doocs__leetcode | solution/3700-3799/3732.Maximum Product of Three Elements After One Replacement/Solution.py | {
"start": 0,
"end": 223
} | class ____:
def maxProduct(self, nums: List[int]) -> int:
nums.sort()
a, b = nums[0], nums[1]
c, d = nums[-2], nums[-1]
x = 10**5
return max(a * b * x, c * d * x, a * d * -x)
| Solution |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_select_rates_regression.py | {
"start": 294,
"end": 3654
} | class ____(unittest.TestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(SelectRegressionRates)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], 4)
self.assertFalse((transformation == 0).all())
transformation, original = _test_preprocessing(
SelectRegressionRates, make_sparse=True
)
self.assertTrue(scipy.sparse.issparse(transformation))
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))
# Makes sure that the features are reduced, not the number of samples
X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
original_X_train = X_train.copy()
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = SelectRegressionRates.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectRegressionRates(
random_state=1,
**{
hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None
},
)
transformer = preprocessor.fit(X_train, Y_train)
transformation, original = transformer.transform(X_train), original_X_train
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], 21)
def test_default_configuration_regression(self):
transformation, original = _test_preprocessing(
SelectRegressionRates,
dataset="boston",
)
self.assertEqual(transformation.shape[0], original.shape[0])
# From 13 to 12 features
self.assertEqual(transformation.shape[1], 12)
self.assertFalse((transformation == 0).all())
def test_preprocessing_dtype_regression(self):
# Dense
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset("boston")
self.assertEqual(X_train.dtype, np.float32)
dataset_properties = {"target_type": "regression"}
configuration_space = SelectRegressionRates.get_hyperparameter_search_space(
dataset_properties
)
default = configuration_space.get_default_configuration()
preprocessor = SelectRegressionRates(
random_state=1, **{hp_name: default[hp_name] for hp_name in default}
)
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset("boston")
X_train = X_train.astype(np.float64)
configuration_space = SelectRegressionRates.get_hyperparameter_search_space(
dataset_properties
)
default = configuration_space.get_default_configuration()
preprocessor = SelectRegressionRates(
random_state=1, **{hp_name: default[hp_name] for hp_name in default}
)
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
| SelectRegressionRatesComponentTest |
python | tornadoweb__tornado | tornado/test/queues_test.py | {
"start": 12627,
"end": 13114
} | class ____(QueueJoinTest):
queue_class = queues.LifoQueue
@gen_test
def test_order(self):
q = self.queue_class(maxsize=2)
q.put_nowait(1)
q.put_nowait(0)
self.assertTrue(q.full())
q.put(3)
q.put(2)
self.assertEqual(3, q.get_nowait())
self.assertEqual(2, (yield q.get()))
self.assertEqual(0, q.get_nowait())
self.assertEqual(1, (yield q.get()))
self.assertTrue(q.empty())
| LifoQueueJoinTest |
python | kamyu104__LeetCode-Solutions | Python/permutation-in-string.py | {
"start": 50,
"end": 628
} | class ____(object):
def checkInclusion(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
counts = collections.Counter(s1)
l = len(s1)
for i in xrange(len(s2)):
if counts[s2[i]] > 0:
l -= 1
counts[s2[i]] -= 1
if l == 0:
return True
start = i + 1 - len(s1)
if start >= 0:
counts[s2[start]] += 1
if counts[s2[start]] > 0:
l += 1
return False
| Solution |
python | huggingface__transformers | src/transformers/models/mllama/image_processing_mllama.py | {
"start": 19933,
"end": 37456
} | class ____(BaseImageProcessor):
"""
Constructs a Mllama image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
Only has an effect if the input image is in the PIL format.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
The height and width values should be equal.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to 0.0):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether or not to pad the images to the largest height and width in the batch.
max_image_tiles (`int`, *optional*, defaults to 4):
The maximum number of tiles to split the image into.
"""
model_input_names = ["pixel_values", "num_tiles", "aspect_ratio_ids", "aspect_ratio_mask"]
valid_kwargs = MllamaImageProcessorKwargs
def __init__(
self,
do_convert_rgb: bool = True,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: bool = True,
max_image_tiles: int = 4,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_convert_rgb = do_convert_rgb
self.do_resize = do_resize
self.size = size if size is not None else {"height": 224, "width": 224}
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.max_image_tiles = max_image_tiles
_validate_mllama_preprocess_arguments(self.do_resize, self.size, self.do_pad, self.max_image_tiles)
def preprocess(
    self,
    images: ImageInput,
    do_convert_rgb: Optional[bool] = None,
    do_resize: Optional[bool] = None,
    size: Optional[dict[str, int]] = None,
    resample: Optional[PILImageResampling] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    do_pad: Optional[bool] = None,
    max_image_tiles: Optional[int] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
):
    """
    Preprocess a batch of images.

    Args:
        images (`ImageInput`):
            A list of images to preprocess.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the image to RGB.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the image.
        size (`dict[str, int]`, *optional*, defaults to `self.size`):
            Size of the image tile. Should be a dictionary containing 'height' and 'width' keys, both with integer values.
            The height and width values should be equal.
        resample (`int`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
            has an effect if `do_resize` is set to `True`.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image.
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Rescale factor to rescale the image by if `do_rescale` is set to `True`.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
            `True`.
        do_pad (`bool`, *optional*, defaults to `self.do_pad`):
            Whether or not to pad the images to the largest height and width in the batch.
        max_image_tiles (`int`, *optional*, defaults to `self.max_image_tiles`):
            The maximum number of tiles to split the image into.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. If unset, the channel dimension format is inferred
            from the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        return_tensors (`str` or `TensorType`, *optional*):
            The type of tensors to return. Can be one of:
            - Unset: Return a list of `np.ndarray`.
            - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
            - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.

    Returns:
        `BatchFeature` of the following structure:
            - **pixel_values** (`TensorType`): The preprocessed pixel values.
            - **aspect_ratio_ids** (`TensorType`): The aspect ratio ids of the images.
            - **num_tiles** (`list[list[int]]`): The number of tiles for each image in the batch.
    """
    # Resolve per-call overrides against the instance defaults.
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_pad = do_pad if do_pad is not None else self.do_pad
    max_image_tiles = max_image_tiles if max_image_tiles is not None else self.max_image_tiles

    validate_preprocess_arguments(
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_resize=do_resize,
        size=size,
        resample=resample,
    )
    # extra validation
    _validate_mllama_preprocess_arguments(do_resize, size, do_pad, max_image_tiles)

    images = self.fetch_images(images)
    images_list = make_nested_list_of_images(images)

    # FIX: honor the per-call `do_convert_rgb` override resolved above; previously
    # this read `self.do_convert_rgb`, silently ignoring the caller's argument.
    if do_convert_rgb:
        images_list = [[convert_to_rgb(image) for image in images] for images in images_list]

    batch_images = []
    batch_aspect_ratios = []

    # iterate over batch samples
    for images in images_list:
        sample_images = []
        sample_aspect_ratios = []

        # iterate over images in a batch sample
        for image in images:
            # default PIL images to channels_last
            if input_data_format is None and isinstance(image, Image.Image):
                input_data_format = ChannelDimension.LAST

            # convert to numpy array for processing
            image = to_numpy_array(image)

            # convert images to channels first format for faster processing
            # LAST is slower for `pad` and not supported by `split_to_tiles`
            data_format = ChannelDimension.FIRST
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)

            # do_resize=False is not supported, validated
            image, aspect_ratio = self.resize(
                image=image,
                size=size,
                resample=resample,
                max_image_tiles=max_image_tiles,
                input_data_format=data_format,
                data_format=data_format,
            )

            # do_pad=False is not supported, validated
            image = self.pad(
                image=image,
                size=size,
                aspect_ratio=aspect_ratio,
                input_data_format=data_format,
                data_format=data_format,
            )

            if do_rescale:
                image = self.rescale(
                    image=image,
                    scale=rescale_factor,
                    input_data_format=data_format,
                    data_format=data_format,
                )

            if do_normalize:
                image = self.normalize(
                    image=image,
                    mean=image_mean,
                    std=image_std,
                    input_data_format=data_format,
                    data_format=data_format,
                )

            # Split the padded canvas into its (num_tiles_height x num_tiles_width) tiles.
            num_tiles_height, num_tiles_width = aspect_ratio
            image = split_to_tiles(image, num_tiles_height, num_tiles_width)

            sample_images.append(image)
            sample_aspect_ratios.append((num_tiles_height, num_tiles_width))

        batch_images.append(sample_images)
        batch_aspect_ratios.append(sample_aspect_ratios)

    images, num_tiles = pack_images(batch_images, max_image_tiles)
    aspect_ratio_ids = convert_aspect_ratios_to_ids(batch_aspect_ratios, max_image_tiles=max_image_tiles)
    aspect_ratio_mask = build_aspect_ratio_mask(batch_aspect_ratios, max_image_tiles=max_image_tiles)

    # images (np.ndarray) with shape (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width)
    # aspect_ratio_ids (np.ndarray) with shape (batch_size, max_num_images) - aspect ratio ids for each image, padded to max_num_images with 0
    # num_tiles (list[list[int]]) with (batch_size, num_images_in_batch) - real number of tiles for each image, not padded
    # aspect_ratio_mask (np.ndarray) with shape (batch_size, max_num_images, max_image_tiles) - number of tiles for each image, padded to max_num_images with 0
    encoded_inputs = BatchFeature(
        data={
            "pixel_values": images,
            "aspect_ratio_ids": aspect_ratio_ids,
            "aspect_ratio_mask": aspect_ratio_mask,
        },
        tensor_type=return_tensors,
    )
    # `num_tiles` is a ragged list of ints, so it is attached after construction
    # rather than passed through tensor conversion.
    encoded_inputs["num_tiles"] = num_tiles

    return encoded_inputs
def pad(
    self,
    image: np.ndarray,
    size: dict[str, int],
    aspect_ratio: tuple[int, int],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
    """
    Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is
    (1, 2), the image will be padded to 224x448.

    Padding is applied with zeros on the bottom and right edges only, so the
    original image content stays anchored at the top-left corner.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`dict[str, int]`):
            Size of the output image.
        aspect_ratio (`tuple[int, int]`):
            The aspect ratio of the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.

    Returns:
        `np.ndarray`: The padded image.
    """
    _validate_size(size)

    num_tiles_height, num_tiles_width = aspect_ratio
    current_height, current_width = get_image_size(image, channel_dim=input_data_format)

    # Target canvas is an integer grid of tiles.
    target_height = num_tiles_height * size["height"]
    target_width = num_tiles_width * size["width"]

    # Pad only at the bottom/right so the image stays top-left anchored.
    padding = ((0, target_height - current_height), (0, target_width - current_width))
    return pad(
        image,
        padding,
        mode=PaddingMode.CONSTANT,
        constant_values=0,
        data_format=data_format,
        input_data_format=input_data_format,
    )
def resize(
    self,
    image: np.ndarray,
    size: dict[str, int],
    max_image_tiles: int,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Union[np.ndarray, tuple[int, int]]:
    """
    Resizes an image to fit within a tiled canvas while maintaining its aspect ratio.
    The optimal canvas size is calculated based on the maximum number of tiles and the tile size.

    The function first determines the best tile arrangement for the image, then resizes the image
    to fit within this canvas. The resized image and the number of tiles along the height and width
    dimensions are returned.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`dict[str, int]`):
            Size of the output image.
        max_image_tiles (`int`):
            The maximum number of tiles to split the image into.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.

    Returns:
        `Union[np.ndarray, tuple[int, int]]`: The resized image and a tuple containing the number of tiles
        along the height and width dimensions.
    """
    _validate_size(size)

    image_height, image_width = get_image_size(image, channel_dim=input_data_format)
    # Tile edge length; tiles are presumably square (`_validate_size` is expected
    # to enforce height == width) — confirm against `_validate_size`.
    tile_size = size["height"]

    # Choose the canvas (a whole-number grid of tiles, at most `max_image_tiles`
    # tiles) that the image will be fitted into.
    canvas_height, canvas_width = get_optimal_tiled_canvas(
        image_height=image_height,
        image_width=image_width,
        max_image_tiles=max_image_tiles,
        tile_size=tile_size,
    )
    num_tiles_height = canvas_height // tile_size
    num_tiles_width = canvas_width // tile_size

    # Scale the image to fit inside the canvas; any gap up to the full canvas
    # size is filled later by `pad`.
    new_height, new_width = get_image_size_fit_to_canvas(
        image_height=image_height,
        image_width=image_width,
        canvas_height=canvas_height,
        canvas_width=canvas_width,
        tile_size=tile_size,
    )

    # Calls the module-level `resize` helper (not this method).
    image = resize(
        image,
        (new_height, new_width),
        resample=resample,
        data_format=data_format,
        input_data_format=input_data_format,
    )

    return image, (num_tiles_height, num_tiles_width)
# Public API of this module.
__all__ = ["MllamaImageProcessor"]
| MllamaImageProcessor |
python | TheAlgorithms__Python | conversions/convert_number_to_words.py | {
"start": 51,
"end": 1326
} | class ____(Enum):
SHORT = (
(15, "quadrillion"),
(12, "trillion"),
(9, "billion"),
(6, "million"),
(3, "thousand"),
(2, "hundred"),
)
LONG = (
(15, "billiard"),
(9, "milliard"),
(6, "million"),
(3, "thousand"),
(2, "hundred"),
)
INDIAN = (
(14, "crore crore"),
(12, "lakh crore"),
(7, "crore"),
(5, "lakh"),
(3, "thousand"),
(2, "hundred"),
)
@classmethod
def max_value(cls, system: str) -> int:
"""
Gets the max value supported by the given number system.
>>> NumberingSystem.max_value("short") == 10**18 - 1
True
>>> NumberingSystem.max_value("long") == 10**21 - 1
True
>>> NumberingSystem.max_value("indian") == 10**19 - 1
True
"""
match system_enum := cls[system.upper()]:
case cls.SHORT:
max_exp = system_enum.value[0][0] + 3
case cls.LONG:
max_exp = system_enum.value[0][0] + 6
case cls.INDIAN:
max_exp = 19
case _:
raise ValueError("Invalid numbering system")
return 10**max_exp - 1
| NumberingSystem |
python | numpy__numpy | numpy/linalg/lapack_lite/clapack_scrub.py | {
"start": 3385,
"end": 3854
} | class ____:
def __init__(self):
object.__init__(self)
self._queue = []
def add(self, line):
self._queue.append(line)
def clear(self):
self._queue = []
def flushTo(self, other_queue):
for line in self._queue:
other_queue.add(line)
self.clear()
def getValue(self):
q = LineQueue()
self.flushTo(q)
s = ''.join(q._queue)
self.clear()
return s
| LineQueue |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_detector_anomaly_data.py | {
"start": 452,
"end": 5782
} | class ____(BaseWorkflowTest, APITestCase):
endpoint = "sentry-api-0-organization-detector-anomaly-data"
def setUp(self):
super().setUp()
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
self.data_condition_group = self.create_data_condition_group()
with self.tasks():
self.snuba_query = self.create_snuba_query()
self.subscription = QuerySubscription.objects.create(
project=self.project,
status=QuerySubscription.Status.ACTIVE.value,
subscription_id="123",
snuba_query=self.snuba_query,
)
self.data_source = self.create_data_source(
organization=self.organization, source_id=self.subscription.id
)
self.detector = self.create_detector(
project_id=self.project.id,
name="Test Detector",
type=MetricIssue.slug,
workflow_condition_group=self.data_condition_group,
)
self.data_source_detector = self.create_data_source_detector(
data_source=self.data_source, detector=self.detector
)
@with_feature("organizations:anomaly-detection-threshold-data")
def test_missing_parameters(self):
response = self.get_error_response(
self.organization.slug, self.detector.id, end="1729179000.0", status_code=400
)
assert response.data["detail"] == "start and end parameters are required"
response = self.get_error_response(
self.organization.slug, self.detector.id, start="1729178100.0", status_code=400
)
assert response.data["detail"] == "start and end parameters are required"
@with_feature("organizations:anomaly-detection-threshold-data")
def test_invalid_parameters(self):
response = self.get_error_response(
self.organization.slug,
self.detector.id,
start="invalid",
end="1729179000.0",
status_code=400,
)
assert response.data["detail"] == "start and end must be valid timestamps"
@with_feature("organizations:anomaly-detection-threshold-data")
def test_no_subscription_found(self):
# Delete the data source to simulate missing subscription
DataSourceDetector.objects.filter(detector=self.detector).delete()
response = self.get_error_response(
self.organization.slug,
self.detector.id,
start="1729178100.0",
end="1729179000.0",
status_code=500,
)
assert response.data["detail"] == "Could not find detector, data source not found"
@with_feature("organizations:anomaly-detection-threshold-data")
@patch(
"sentry.workflow_engine.endpoints.organization_detector_anomaly_data.get_anomaly_threshold_data_from_seer"
)
def test_seer_error(self, mock_get_data):
mock_get_data.return_value = None
response = self.get_error_response(
self.organization.slug,
self.detector.id,
start="1729178100.0",
end="1729179000.0",
status_code=400,
)
assert response.data["detail"] == "Unable to fetch anomaly detection threshold data"
@with_feature("organizations:anomaly-detection-threshold-data")
@patch(
"sentry.workflow_engine.endpoints.organization_detector_anomaly_data.get_anomaly_threshold_data_from_seer"
)
def test_successful_fetch(self, mock_get_data):
mock_data = [
{
"external_alert_id": 24,
"timestamp": 1729178100.0,
"value": 0,
"yhat_lower": 10.5,
"yhat_upper": 20.5,
}
]
mock_get_data.return_value = mock_data
response = self.get_success_response(
self.organization.slug,
self.detector.id,
start="1729178100.0",
end="1729179000.0",
)
assert response.data == {"data": mock_data}
assert mock_get_data.call_args.kwargs["start"] == 1729178100.0
assert mock_get_data.call_args.kwargs["end"] == 1729179000.0
@with_feature("organizations:anomaly-detection-threshold-data")
def test_permission_denied(self):
self.login_as(self.create_user())
self.get_error_response(
self.organization.slug,
self.detector.id,
start="1729178100.0",
end="1729179000.0",
status_code=403,
)
def test_feature_flag_disabled(self):
"""Test that endpoint returns 404 when feature flag is disabled"""
self.get_error_response(
self.organization.slug,
self.detector.id,
start="1729178100.0",
end="1729179000.0",
status_code=404,
)
@with_feature("organizations:anomaly-detection-threshold-data")
def test_invalid_detector_id(self):
"""Test that non-numeric detector IDs return 404"""
self.get_error_response(
self.organization.slug,
"not-a-number",
start="1729178100.0",
end="1729179000.0",
status_code=404,
)
| OrganizationDetectorAnomalyDataEndpointTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_cond_format12.py | {
"start": 345,
"end": 4495
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 1)
worksheet.write("A2", 2)
worksheet.write("A3", 3)
worksheet.write("A4", 4)
worksheet.write("A5", 5)
worksheet.write("A6", 6)
worksheet.write("A7", 7)
worksheet.write("A8", 8)
worksheet.write("A9", 9)
worksheet.write("A10", 10)
worksheet.write("A11", 11)
worksheet.write("A12", 12)
worksheet.conditional_format("A1:A12", {"type": "2_color_scale"})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A12"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>3</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>4</v>
</c>
</row>
<row r="5" spans="1:1">
<c r="A5">
<v>5</v>
</c>
</row>
<row r="6" spans="1:1">
<c r="A6">
<v>6</v>
</c>
</row>
<row r="7" spans="1:1">
<c r="A7">
<v>7</v>
</c>
</row>
<row r="8" spans="1:1">
<c r="A8">
<v>8</v>
</c>
</row>
<row r="9" spans="1:1">
<c r="A9">
<v>9</v>
</c>
</row>
<row r="10" spans="1:1">
<c r="A10">
<v>10</v>
</c>
</row>
<row r="11" spans="1:1">
<c r="A11">
<v>11</v>
</c>
</row>
<row r="12" spans="1:1">
<c r="A12">
<v>12</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A12">
<cfRule type="colorScale" priority="1">
<colorScale>
<cfvo type="min" val="0"/>
<cfvo type="max" val="0"/>
<color rgb="FFFF7128"/>
<color rgb="FFFFEF9C"/>
</colorScale>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | doocs__leetcode | solution/0200-0299/0261.Graph Valid Tree/Solution2.py | {
"start": 0,
"end": 454
} | class ____:
def validTree(self, n: int, edges: List[List[int]]) -> bool:
def dfs(i: int):
vis.add(i)
for j in g[i]:
if j not in vis:
dfs(j)
if len(edges) != n - 1:
return False
g = [[] for _ in range(n)]
for a, b in edges:
g[a].append(b)
g[b].append(a)
vis = set()
dfs(0)
return len(vis) == n
| Solution |
python | huggingface__transformers | tests/trainer/test_trainer_fsdp.py | {
"start": 3786,
"end": 4706
} | class ____(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
"--auto_find_batch_size",
"True",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
| TestFSDPTrainerWrap |
python | pydantic__pydantic | pydantic/types.py | {
"start": 36082,
"end": 36376
} | class ____(BaseModel):
uuid5: UUID5
Model(uuid5=uuid.uuid5(uuid.NAMESPACE_DNS, 'pydantic.org'))
```
"""
UUID6 = Annotated[UUID, UuidVersion(6)]
"""A [UUID](https://docs.python.org/3/library/uuid.html) that must be version 6.
```python
import uuid
from pydantic import UUID6, BaseModel
| Model |
python | getsentry__sentry | tests/sentry/sentry_apps/tasks/test_sentry_apps.py | {
"start": 68510,
"end": 70311
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user, id=1)
self.sentry_app = self.create_sentry_app(
name="Test App",
organization=self.organization,
events=["issue.resolved", "issue.ignored", "issue.assigned"],
webhook_url="https://example.com",
)
with assume_test_silo_mode_of(SentryApp):
self.sentry_app.update(status=SentryAppStatus.PUBLISHED)
self.install = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app.slug
)
self.issue = self.create_group(project=self.project)
self.buffer = SentryAppWebhookRequestsBuffer(self.sentry_app)
@patch(
"sentry.utils.sentry_apps.webhooks.safe_urlopen",
return_value=MockResponseWithHeadersInstance,
)
def test_saves_error_event_id_if_in_header(self, safe_urlopen: MagicMock) -> None:
data = {"issue": serialize(self.issue)}
with pytest.raises(ClientError):
send_webhooks(
installation=self.install, event="issue.assigned", data=data, actor=self.user
)
requests = self.buffer.get_requests()
first_request = requests[0]
assert safe_urlopen.called
assert len(requests) == 1
assert first_request["response_code"] == 400
assert first_request["event_type"] == "issue.assigned"
assert first_request["organization_id"] == self.install.organization_id
assert first_request["error_id"] == "d5111da2c28645c5889d072017e3445d"
assert first_request["project_id"] == "1"
@patch("sentry.utils.sentry_apps.webhooks.safe_urlopen", return_value=MockResponseInstance)
| TestWebhookRequests |
python | ray-project__ray | python/ray/data/preprocessors/serialization_handlers.py | {
"start": 2067,
"end": 3107
} | class ____(SerializationHandler):
"""Handler for CloudPickle serialization format."""
MAGIC_CLOUDPICKLE = b"CPKL:"
def serialize(
self, data: Union["Preprocessor", Dict[str, Any]] # noqa: F821
) -> bytes:
"""Serialize to CloudPickle format with magic prefix."""
return self.MAGIC_CLOUDPICKLE + cloudpickle.dumps(data)
def deserialize(self, serialized: bytes) -> Dict[str, Any]:
"""Deserialize from CloudPickle format."""
if not isinstance(serialized, bytes):
raise ValueError(
f"Expected bytes for CloudPickle deserialization, got {type(serialized)}"
)
if not serialized.startswith(self.MAGIC_CLOUDPICKLE):
raise ValueError(f"Invalid CloudPickle magic bytes: {serialized[:10]}")
cloudpickle_data = self.strip_magic_bytes(serialized)
return cloudpickle.loads(cloudpickle_data)
def get_magic_bytes(self) -> bytes:
return self.MAGIC_CLOUDPICKLE
@DeveloperAPI
| CloudPickleSerializationHandler |
python | walkccc__LeetCode | solutions/410. Split Array Largest Sum/410-3.py | {
"start": 0,
"end": 517
} | class ____:
def splitArray(self, nums: list[int], k: int) -> int:
l = max(nums)
r = sum(nums) + 1
def numGroups(maxSumInGroup: int) -> int:
groupCount = 1
sumInGroup = 0
for num in nums:
if sumInGroup + num <= maxSumInGroup:
sumInGroup += num
else:
groupCount += 1
sumInGroup = num
return groupCount
while l < r:
m = (l + r) // 2
if numGroups(m) > k:
l = m + 1
else:
r = m
return l
| Solution |
python | kamyu104__LeetCode-Solutions | Python/longest-substring-of-one-repeating-character.py | {
"start": 157,
"end": 976
} | class ____(object):
def __init__(self, N,
build_fn=lambda _: float("inf"),
query_fn=lambda x, y: x if y is None else min(x, y),
update_fn=lambda x: x):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
# segment tree
| SegmentTree |
python | walkccc__LeetCode | solutions/3546. Equal Sum Grid Partition I/3546.py | {
"start": 0,
"end": 380
} | class ____:
def canPartitionGrid(self, grid: list[list[int]]) -> bool:
totalSum = sum(map(sum, grid))
def canPartition(grid: list[list[int]]) -> bool:
runningSum = 0
for row in grid:
runningSum += sum(row)
if runningSum * 2 == totalSum:
return True
return False
return canPartition(grid) or canPartition(zip(*grid))
| Solution |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 29522,
"end": 29706
} | class ____(Constant):
__slots__ = ()
def validate(self):
if self.value is None:
raise InvalidLiteral("`None` is not a valid vyper value!", self)
| NameConstant |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/line_api_scrollbars.py | {
"start": 414,
"end": 1108
} | class ____(App):
CSS = """
Screen {
align: center middle;
}
RichLog {
width:13;
height:10;
}
VerticalScroll {
width:13;
height: 10;
overflow: scroll;
overflow-x: auto;
}
MyWidget {
width:13;
height:auto;
}
"""
def compose(self) -> ComposeResult:
yield RichLog()
yield VerticalScroll(MyWidget())
def on_ready(self) -> None:
self.query_one(RichLog).write("\n".join(f"{n} 0123456789" for n in range(20)))
self.query_one(VerticalScroll).scroll_end(animate=False)
if __name__ == "__main__":
app = ScrollViewApp()
app.run()
| ScrollViewApp |
python | networkx__networkx | networkx/readwrite/gexf.py | {
"start": 9329,
"end": 26035
} | class ____(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(
self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft"
):
self.construct_types()
self.prettyprint = prettyprint
self.encoding = encoding
self.set_version(version)
self.xml = Element(
"gexf",
{
"xmlns": self.NS_GEXF,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
"version": self.VERSION,
},
)
# Make meta element a non-graph element
# Also add lastmodifieddate as attribute, not tag
meta_element = Element("meta")
subelement_text = f"NetworkX {nx.__version__}"
SubElement(meta_element, "creator").text = subelement_text
meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d"))
self.xml.append(meta_element)
register_namespace("viz", self.NS_VIZ)
# counters for edge and attribute identifiers
self.edge_id = itertools.count()
self.attr_id = itertools.count()
self.all_edge_ids = set()
# default attributes are stored in dictionaries
self.attr = {}
self.attr["node"] = {}
self.attr["edge"] = {}
self.attr["node"]["dynamic"] = {}
self.attr["node"]["static"] = {}
self.attr["edge"]["dynamic"] = {}
self.attr["edge"]["static"] = {}
if graph is not None:
self.add_graph(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s = tostring(self.xml).decode(self.encoding)
return s
def add_graph(self, G):
# first pass through G collecting edge ids
for u, v, dd in G.edges(data=True):
eid = dd.get("id")
if eid is not None:
self.all_edge_ids.add(str(eid))
# set graph attributes
if G.graph.get("mode") == "dynamic":
mode = "dynamic"
else:
mode = "static"
# Add a graph element to the XML
if G.is_directed():
default = "directed"
else:
default = "undirected"
name = G.graph.get("name", "")
graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name)
self.graph_element = graph_element
self.add_nodes(G, graph_element)
self.add_edges(G, graph_element)
self.xml.append(graph_element)
def add_nodes(self, G, graph_element):
nodes_element = Element("nodes")
for node, data in G.nodes(data=True):
node_data = data.copy()
node_id = str(node_data.pop("id", node))
kw = {"id": node_id}
label = str(node_data.pop("label", node))
kw["label"] = label
try:
pid = node_data.pop("pid")
kw["pid"] = str(pid)
except KeyError:
pass
try:
start = node_data.pop("start")
kw["start"] = str(start)
self.alter_graph_mode_timeformat(start)
except KeyError:
pass
try:
end = node_data.pop("end")
kw["end"] = str(end)
self.alter_graph_mode_timeformat(end)
except KeyError:
pass
# add node element with attributes
node_element = Element("node", **kw)
# add node element and attr subelements
default = G.graph.get("node_default", {})
node_data = self.add_parents(node_element, node_data)
if self.VERSION == "1.1":
node_data = self.add_slices(node_element, node_data)
else:
node_data = self.add_spells(node_element, node_data)
node_data = self.add_viz(node_element, node_data)
node_data = self.add_attributes("node", node_element, node_data, default)
nodes_element.append(node_element)
graph_element.append(nodes_element)
def add_edges(self, G, graph_element):
def edge_key_data(G):
# helper function to unify multigraph and graph edge iterator
if G.is_multigraph():
for u, v, key, data in G.edges(data=True, keys=True):
edge_data = data.copy()
edge_data.update(key=key)
edge_id = edge_data.pop("id", None)
if edge_id is None:
edge_id = next(self.edge_id)
while str(edge_id) in self.all_edge_ids:
edge_id = next(self.edge_id)
self.all_edge_ids.add(str(edge_id))
yield u, v, edge_id, edge_data
else:
for u, v, data in G.edges(data=True):
edge_data = data.copy()
edge_id = edge_data.pop("id", None)
if edge_id is None:
edge_id = next(self.edge_id)
while str(edge_id) in self.all_edge_ids:
edge_id = next(self.edge_id)
self.all_edge_ids.add(str(edge_id))
yield u, v, edge_id, edge_data
edges_element = Element("edges")
for u, v, key, edge_data in edge_key_data(G):
kw = {"id": str(key)}
try:
edge_label = edge_data.pop("label")
kw["label"] = str(edge_label)
except KeyError:
pass
try:
edge_weight = edge_data.pop("weight")
kw["weight"] = str(edge_weight)
except KeyError:
pass
try:
edge_type = edge_data.pop("type")
kw["type"] = str(edge_type)
except KeyError:
pass
try:
start = edge_data.pop("start")
kw["start"] = str(start)
self.alter_graph_mode_timeformat(start)
except KeyError:
pass
try:
end = edge_data.pop("end")
kw["end"] = str(end)
self.alter_graph_mode_timeformat(end)
except KeyError:
pass
source_id = str(G.nodes[u].get("id", u))
target_id = str(G.nodes[v].get("id", v))
edge_element = Element("edge", source=source_id, target=target_id, **kw)
default = G.graph.get("edge_default", {})
if self.VERSION == "1.1":
edge_data = self.add_slices(edge_element, edge_data)
else:
edge_data = self.add_spells(edge_element, edge_data)
edge_data = self.add_viz(edge_element, edge_data)
edge_data = self.add_attributes("edge", edge_element, edge_data, default)
edges_element.append(edge_element)
graph_element.append(edges_element)
def add_attributes(self, node_or_edge, xml_obj, data, default):
# Add attrvalues to node or edge
attvalues = Element("attvalues")
if len(data) == 0:
return data
mode = "static"
for k, v in data.items():
# rename generic multigraph key to avoid any name conflict
if k == "key":
k = "networkx_key"
val_type = type(v)
if val_type not in self.xml_type:
raise TypeError(f"attribute value type is not allowed: {val_type}")
if isinstance(v, list):
# dynamic data
for val, start, end in v:
val_type = type(val)
if start is not None or end is not None:
mode = "dynamic"
self.alter_graph_mode_timeformat(start)
self.alter_graph_mode_timeformat(end)
break
attr_id = self.get_attr_id(
str(k), self.xml_type[val_type], node_or_edge, default, mode
)
for val, start, end in v:
e = Element("attvalue")
e.attrib["for"] = attr_id
e.attrib["value"] = str(val)
# Handle nan, inf, -inf differently
if val_type is float:
if e.attrib["value"] == "inf":
e.attrib["value"] = "INF"
elif e.attrib["value"] == "nan":
e.attrib["value"] = "NaN"
elif e.attrib["value"] == "-inf":
e.attrib["value"] = "-INF"
if start is not None:
e.attrib["start"] = str(start)
if end is not None:
e.attrib["end"] = str(end)
attvalues.append(e)
else:
# static data
mode = "static"
attr_id = self.get_attr_id(
str(k), self.xml_type[val_type], node_or_edge, default, mode
)
e = Element("attvalue")
e.attrib["for"] = attr_id
if isinstance(v, bool):
e.attrib["value"] = str(v).lower()
else:
e.attrib["value"] = str(v)
# Handle float nan, inf, -inf differently
if val_type is float:
if e.attrib["value"] == "inf":
e.attrib["value"] = "INF"
elif e.attrib["value"] == "nan":
e.attrib["value"] = "NaN"
elif e.attrib["value"] == "-inf":
e.attrib["value"] = "-INF"
attvalues.append(e)
xml_obj.append(attvalues)
return data
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
# find the id of the attribute or generate a new id
try:
return self.attr[edge_or_node][mode][title]
except KeyError:
# generate new id
new_id = str(next(self.attr_id))
self.attr[edge_or_node][mode][title] = new_id
attr_kwargs = {"id": new_id, "title": title, "type": attr_type}
attribute = Element("attribute", **attr_kwargs)
# add subelement for data default value if present
default_title = default.get(title)
if default_title is not None:
default_element = Element("default")
default_element.text = str(default_title)
attribute.append(default_element)
# new insert it into the XML
attributes_element = None
for a in self.graph_element.findall("attributes"):
# find existing attributes element by class and mode
a_class = a.get("class")
a_mode = a.get("mode", "static")
if a_class == edge_or_node and a_mode == mode:
attributes_element = a
if attributes_element is None:
# create new attributes element
attr_kwargs = {"mode": mode, "class": edge_or_node}
attributes_element = Element("attributes", **attr_kwargs)
self.graph_element.insert(0, attributes_element)
attributes_element.append(attribute)
return new_id
def add_viz(self, element, node_data):
viz = node_data.pop("viz", False)
if viz:
color = viz.get("color")
if color is not None:
if self.VERSION == "1.1":
e = Element(
f"{{{self.NS_VIZ}}}color",
r=str(color.get("r")),
g=str(color.get("g")),
b=str(color.get("b")),
)
else:
e = Element(
f"{{{self.NS_VIZ}}}color",
r=str(color.get("r")),
g=str(color.get("g")),
b=str(color.get("b")),
a=str(color.get("a", 1.0)),
)
element.append(e)
size = viz.get("size")
if size is not None:
e = Element(f"{{{self.NS_VIZ}}}size", value=str(size))
element.append(e)
thickness = viz.get("thickness")
if thickness is not None:
e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness))
element.append(e)
shape = viz.get("shape")
if shape is not None:
if shape.startswith("http"):
e = Element(
f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape)
)
else:
e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape))
element.append(e)
position = viz.get("position")
if position is not None:
e = Element(
f"{{{self.NS_VIZ}}}position",
x=str(position.get("x")),
y=str(position.get("y")),
z=str(position.get("z")),
)
element.append(e)
return node_data
def add_parents(self, node_element, node_data):
parents = node_data.pop("parents", False)
if parents:
parents_element = Element("parents")
for p in parents:
e = Element("parent")
e.attrib["for"] = str(p)
parents_element.append(e)
node_element.append(parents_element)
return node_data
def add_slices(self, node_or_edge_element, node_or_edge_data):
slices = node_or_edge_data.pop("slices", False)
if slices:
slices_element = Element("slices")
for start, end in slices:
e = Element("slice", start=str(start), end=str(end))
slices_element.append(e)
node_or_edge_element.append(slices_element)
return node_or_edge_data
def add_spells(self, node_or_edge_element, node_or_edge_data):
spells = node_or_edge_data.pop("spells", False)
if spells:
spells_element = Element("spells")
for start, end in spells:
e = Element("spell")
if start is not None:
e.attrib["start"] = str(start)
self.alter_graph_mode_timeformat(start)
if end is not None:
e.attrib["end"] = str(end)
self.alter_graph_mode_timeformat(end)
spells_element.append(e)
node_or_edge_element.append(spells_element)
return node_or_edge_data
def alter_graph_mode_timeformat(self, start_or_end):
# If 'start' or 'end' appears, set timeformat
if start_or_end is not None:
if isinstance(start_or_end, str):
timeformat = "date"
elif isinstance(start_or_end, float):
timeformat = "double"
elif isinstance(start_or_end, int):
timeformat = "long"
else:
raise nx.NetworkXError(
"timeformat should be of the type int, float or str"
)
self.graph_element.set("timeformat", timeformat)
# If Graph mode is static, alter to dynamic
if self.graph_element.get("mode") == "static":
self.graph_element.set("mode", "dynamic")
def write(self, fh):
# Serialize graph G in GEXF to the open fh
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
document.write(fh, encoding=self.encoding, xml_declaration=True)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + " " * level
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
| GEXFWriter |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/graphql_context_test_suite.py | {
"start": 12095,
"end": 17802
} | class ____:
@staticmethod
def managed_grpc(target=None, location_name="test_location"):
@contextmanager
def _mgr_fn(instance, read_only):
"""Relies on webserver to load the code location in a subprocess and manage its lifecyle."""
loadable_target_origin = (
target if target is not None else get_main_loadable_target_origin()
)
with WorkspaceProcessContext(
instance,
(
PythonFileTarget(
python_file=loadable_target_origin.python_file,
attribute=loadable_target_origin.attribute,
working_directory=loadable_target_origin.working_directory,
location_name=location_name,
)
if loadable_target_origin.python_file
else ModuleTarget(
module_name=loadable_target_origin.module_name, # pyright: ignore[reportArgumentType]
attribute=loadable_target_origin.attribute,
working_directory=loadable_target_origin.working_directory,
location_name=location_name,
)
),
version="",
read_only=read_only,
) as workspace_process_context:
yield workspace_process_context
return MarkedManager(_mgr_fn, [Marks.managed_grpc_env])
@staticmethod
def deployed_grpc(target=None, location_name="test_location"):
"""Launches a code server in a "dagster api grpc" subprocess."""
@contextmanager
def _mgr_fn(instance, read_only):
with GrpcServerProcess(
instance_ref=instance.get_ref(),
location_name=location_name,
loadable_target_origin=(
target if target is not None else get_main_loadable_target_origin()
),
wait_on_exit=True,
) as server_process:
api_client = server_process.create_client()
with WorkspaceProcessContext(
instance,
GrpcServerTarget(
port=api_client.port,
socket=api_client.socket,
host=api_client.host, # pyright: ignore[reportArgumentType]
location_name=location_name,
),
version="",
read_only=read_only,
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.deployed_grpc_env])
@staticmethod
def code_server_cli_grpc(target=None, location_name="test_location"):
"""Launches a code server in a "dagster code-server start" subprocess (which will
in turn open up a `dagster api grpc` subprocess that actually loads the code location).
"""
@contextmanager
def _mgr_fn(instance, read_only):
loadable_target_origin = target or get_main_loadable_target_origin()
with safe_tempfile_path() as socket:
subprocess_args = [ # pyright: ignore[reportOperatorIssue]
"dagster",
"code-server",
"start",
"--socket",
socket,
] + loadable_target_origin.get_cli_args()
server_process = open_ipc_subprocess(subprocess_args)
client = DagsterGrpcClient(port=None, socket=socket, host="localhost")
try:
wait_for_grpc_server(server_process, client, subprocess_args)
with WorkspaceProcessContext(
instance,
GrpcServerTarget(
port=None,
socket=socket,
host="localhost",
location_name=location_name,
),
version="",
read_only=read_only,
) as workspace:
yield workspace
finally:
client.shutdown_server()
server_process.wait(timeout=30)
return MarkedManager(_mgr_fn, [Marks.code_server_cli_grpc_env])
@staticmethod
def multi_location():
@contextmanager
def _mgr_fn(instance, read_only):
"""Goes out of process but same process as host process."""
with WorkspaceProcessContext(
instance,
WorkspaceFileTarget(paths=[file_relative_path(__file__, "multi_location.yaml")]),
version="",
read_only=read_only,
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.multi_location])
@staticmethod
def lazy_repository():
@contextmanager
def _mgr_fn(instance, read_only):
"""Goes out of process but same process as host process."""
with WorkspaceProcessContext(
instance,
PythonFileTarget(
python_file=file_relative_path(__file__, "repo.py"),
attribute="test_dict_repo",
working_directory=None,
location_name="test_location",
),
version="",
read_only=read_only,
) as workspace:
yield workspace
return MarkedManager(_mgr_fn, [Marks.lazy_repository])
| EnvironmentManagers |
python | encode__django-rest-framework | tests/test_serializer_lists.py | {
"start": 23588,
"end": 26134
} | class ____:
"""
Tests the behavior of ListSerializers when max_length and min_length are used
"""
def setup_method(self):
class IntegerSerializer(serializers.Serializer):
some_int = serializers.IntegerField()
class MaxLengthSerializer(serializers.Serializer):
many_int = IntegerSerializer(many=True, max_length=5)
class MinLengthSerializer(serializers.Serializer):
many_int = IntegerSerializer(many=True, min_length=3)
class MaxMinLengthSerializer(serializers.Serializer):
many_int = IntegerSerializer(many=True, min_length=3, max_length=5)
self.MaxLengthSerializer = MaxLengthSerializer
self.MinLengthSerializer = MinLengthSerializer
self.MaxMinLengthSerializer = MaxMinLengthSerializer
def test_min_max_length_two_items(self):
input_data = {'many_int': [{'some_int': i} for i in range(2)]}
max_serializer = self.MaxLengthSerializer(data=input_data)
min_serializer = self.MinLengthSerializer(data=input_data)
max_min_serializer = self.MaxMinLengthSerializer(data=input_data)
assert max_serializer.is_valid()
assert max_serializer.validated_data == input_data
assert not min_serializer.is_valid()
assert not max_min_serializer.is_valid()
def test_min_max_length_four_items(self):
input_data = {'many_int': [{'some_int': i} for i in range(4)]}
max_serializer = self.MaxLengthSerializer(data=input_data)
min_serializer = self.MinLengthSerializer(data=input_data)
max_min_serializer = self.MaxMinLengthSerializer(data=input_data)
assert max_serializer.is_valid()
assert max_serializer.validated_data == input_data
assert min_serializer.is_valid()
assert min_serializer.validated_data == input_data
assert max_min_serializer.is_valid()
assert min_serializer.validated_data == input_data
def test_min_max_length_six_items(self):
input_data = {'many_int': [{'some_int': i} for i in range(6)]}
max_serializer = self.MaxLengthSerializer(data=input_data)
min_serializer = self.MinLengthSerializer(data=input_data)
max_min_serializer = self.MaxMinLengthSerializer(data=input_data)
assert not max_serializer.is_valid()
assert min_serializer.is_valid()
assert min_serializer.validated_data == input_data
assert not max_min_serializer.is_valid()
@pytest.mark.django_db()
| TestMaxMinLengthListSerializer |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/docs_publisher.py | {
"start": 1397,
"end": 5052
} | class ____:
"""Documentation builder for Airflow Docs Publishing."""
def __init__(self, package_name: str, output: Output | None, verbose: bool):
self.package_name = package_name
self.output = output
self.verbose = verbose
@property
def is_versioned(self):
"""Is current documentation package versioned?"""
# Disable versioning. This documentation does not apply to any released product and we can update
# it as needed, i.e. with each new package of providers.
return self.package_name not in ("apache-airflow-providers", "docker-stack")
@property
def _build_dir(self) -> str:
if self.is_versioned:
version = "stable"
return f"{GENERATED_PATH}/_build/docs/{self.package_name}/{version}"
return f"{GENERATED_PATH}/_build/docs/{self.package_name}"
@property
def _current_version(self):
if not self.is_versioned:
msg = (
"This documentation package is not versioned. "
"Make sure to add version in `provider.yaml` for the package."
)
raise RuntimeError(msg)
if self.package_name == "apache-airflow":
return get_airflow_version()
if self.package_name.startswith("apache-airflow-providers-"):
provider = get_provider_distributions_metadata().get(get_short_package_name(self.package_name))
return provider["versions"][0]
if self.package_name == "task-sdk":
return get_task_sdk_version()
if self.package_name == "helm-chart":
return chart_version()
if self.package_name == "apache-airflow-ctl":
return get_airflowctl_version()
raise SystemExit(f"Unsupported package: {self.package_name}")
@property
def _publish_dir(self) -> str:
if self.is_versioned:
return f"docs-archive/{self.package_name}/{self._current_version}"
return f"docs-archive/{self.package_name}"
def publish(self, override_versioned: bool, airflow_site_dir: str):
"""Copy documentation packages files to airflow-site repository."""
get_console(output=self.output).print(f"Publishing docs for {self.package_name}")
output_dir = os.path.join(airflow_site_dir, self._publish_dir)
pretty_source = pretty_format_path(self._build_dir, os.getcwd())
pretty_target = pretty_format_path(output_dir, airflow_site_dir)
get_console(output=self.output).print(f"Copy directory: {pretty_source} => {pretty_target}")
if os.path.exists(output_dir):
if self.is_versioned:
if override_versioned:
get_console(output=self.output).print(f"Overriding previously existing {output_dir}! ")
else:
get_console(output=self.output).print(
f"Skipping previously existing {output_dir}! "
f"Delete it manually if you want to regenerate it!"
)
get_console(output=self.output).print()
return 1, f"Skipping {self.package_name}: Previously existing directory"
# If output directory exists and is not versioned, delete it
shutil.rmtree(output_dir)
shutil.copytree(self._build_dir, output_dir)
if self.is_versioned:
with open(os.path.join(output_dir, "..", "stable.txt"), "w") as stable_file:
stable_file.write(self._current_version)
get_console(output=self.output).print()
return 0, f"Docs published: {self.package_name}"
| DocsPublisher |
python | great-expectations__great_expectations | great_expectations/datasource/datasource_dict.py | {
"start": 1029,
"end": 5166
} | class ____(UserDict):
"""
An abstraction around the DatasourceStore to enable easy retrieval and storage of Datasource objects
using dictionary syntactic sugar.
Example:
```
d = DatasourceDict(...)
d["my_fds"] = pandas_fds # Underlying DatasourceStore makes a `set()` call
pandas_fds = d["my_fds"] # Underlying DatasourceStore makes a `get()` call
```
""" # noqa: E501 # FIXME CoP
def __init__(
self,
context: AbstractDataContext,
datasource_store: DatasourceStore,
):
self._context = context # If possible, we should avoid passing the context through - once block-style is removed, we can extract this # noqa: E501 # FIXME CoP
self._datasource_store = datasource_store
self._in_memory_data_assets: dict[str, DataAsset] = {}
@staticmethod
def _get_in_memory_data_asset_name(datasource_name: str, data_asset_name: str) -> str:
return f"{datasource_name}-{data_asset_name}"
@override
@property
def data(self) -> dict[str, FluentDatasource]: # type: ignore[override] # `data` is meant to be a writeable attr (not a read-only property)
"""
`data` is referenced by the parent `UserDict` and enables the class to fulfill its various dunder methods
(__setitem__, __getitem__, etc)
This is generated just-in-time as the contents of the store may have changed.
""" # noqa: E501 # FIXME CoP
datasources: dict[str, FluentDatasource] = {}
configs = self._datasource_store.get_all()
for config in configs:
name = config.name
try:
datasources[name] = self._init_fluent_datasource(name=name, ds=config)
except gx_exceptions.DatasourceInitializationError as e:
logger.warning(f"Cannot initialize datasource {name}: {e}")
return datasources
def set_datasource(self, name: str, ds: FluentDatasource) -> FluentDatasource | None:
config = self._prep_fds_config_for_set(name=name, ds=ds)
datasource = self._datasource_store.set(key=None, value=config)
return self._init_fluent_datasource(name=name, ds=datasource)
@override
def __setitem__(self, name: str, ds: FluentDatasource) -> None:
self.set_datasource(name=name, ds=ds)
def _prep_fds_config_for_set(self, name: str, ds: FluentDatasource) -> FluentDatasource:
if isinstance(ds, SupportsInMemoryDataAssets):
for asset in ds.assets:
if asset.type == _IN_MEMORY_DATA_ASSET_TYPE:
in_memory_asset_name: str = DatasourceDict._get_in_memory_data_asset_name(
datasource_name=name,
data_asset_name=asset.name,
)
self._in_memory_data_assets[in_memory_asset_name] = asset
return ds
def _get_ds_from_store(self, name: str) -> FluentDatasource:
try:
return self._datasource_store.retrieve_by_name(name)
except ValueError:
raise KeyError(f"Could not find a datasource named '{name}'") # noqa: TRY003 # FIXME CoP
@override
def __delitem__(self, name: str) -> None:
ds = self._get_ds_from_store(name)
self._datasource_store.delete(ds)
@override
def __getitem__(self, name: str) -> FluentDatasource:
ds = self._get_ds_from_store(name)
return self._init_fluent_datasource(name=name, ds=ds)
def _init_fluent_datasource(self, name: str, ds: FluentDatasource) -> FluentDatasource:
ds._data_context = self._context
ds._rebuild_asset_data_connectors()
if isinstance(ds, SupportsInMemoryDataAssets):
for asset in ds.assets:
if asset.type == _IN_MEMORY_DATA_ASSET_TYPE:
in_memory_asset_name: str = DatasourceDict._get_in_memory_data_asset_name(
datasource_name=name,
data_asset_name=asset.name,
)
self._in_memory_data_assets[in_memory_asset_name] = asset
return ds
| DatasourceDict |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {
"start": 27867,
"end": 43590
} | class ____(nn.Module):
def __init__(self, config: ProphetNetConfig, layer_idx=None):
super().__init__()
self.hidden_size = config.hidden_size
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.num_attn_heads = config.num_decoder_attention_heads
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
self.head_dim = config.hidden_size // self.num_attn_heads
self.ngram = config.ngram
self.layer_idx = layer_idx
assert self.head_dim * self.num_attn_heads == config.hidden_size, (
"config.hidden_size must be divisible by num_attn_heads"
)
# key, value, query projection
self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
# out projection
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
# rel position embeddings
self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
# for onnx runtime
self.onnx_trace = False
def _shape(self, tensor, seq_len, batch_size):
return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
hidden_states,
past_key_values: Optional[Cache] = None,
attention_mask=None,
extended_predict_attention_mask=None,
main_relative_position_buckets=None,
predict_relative_position_buckets=None,
position_ids=None,
cache_position=None,
):
batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
f" {hidden_states.shape}"
)
# project
query_states = self.query_proj(hidden_states)
key_states = self.key_proj(hidden_states)
value_states = self.value_proj(hidden_states)
# normalize
query_states = query_states / (self.head_dim**0.5)
# reshape
query_states = self._shape(query_states, ngram_sequence_length, batch_size)
key_states = self._shape(key_states, -1, batch_size)
value_states = self._shape(value_states, -1, batch_size)
proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
# chunk into main stream and predict stream
hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
query_states_list = query_states.chunk(1 + self.ngram, dim=2)
key_states_list = key_states.chunk(1 + self.ngram, dim=2)
value_states_list = value_states.chunk(1 + self.ngram, dim=2)
main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
# ProphetNet has two separate attention layers, one for self and one for cross attention
# We need to obtain the self attention only for this module, if `EncoderDecoderCache`
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
main_key_states, main_value_states = curr_past_key_values.update(
main_key_states, main_value_states, self.layer_idx, {"cache_position": cache_position}
)
# get seq_length of main stream only
sequence_length = ngram_sequence_length // (1 + self.ngram)
# MAIN-STREAM
# main attn weights
# [batch_size, number_heads, sequence_length, head_dimesion]
# x [batch_size, number_heads, head_dimesion, sequence_length]
# -> [batch_size, number_heads, sequence_length, sequence_length]
main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
# retrieve relative position embeddings for each layer -> see paper for more details
main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
)
main_attn_weights = main_attn_weights + main_relative_pos_embeddings
if attention_mask is not None:
main_attn_weights = main_attn_weights + attention_mask
main_attn_probs = softmax(
main_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(main_attn_weights)
main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
# project to attn_output
# [batch_size, number_heads, sequence_length, sequence_length]
# x [batch_size, number_heads, sequence_length, head_dimesion]
# -> [batch_size, number_heads, sequence_length, head_dimesion]
main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
# reshape so that num_heads dim is merged into last `head_dim` axis
main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
main_attn_output = self.out_proj(main_attn_output)
# PREDICT-STREAM
# [batch_size, ngram, number_heads, sequence_length, head_dimesion]
predict_query_states = torch.stack(predict_query_states_list, 1).view(
batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
)
# [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion]
predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
# [batch_size, sequence_length, ngram, hidden_size]
predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
# [batch_size, number_heads, ngram, 2*sequence_length, head_dimesion]
predict_value_states = torch.cat(
[torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
)
# [batch_size, ngram, number_heads, sequence_length, head_dimesion]
# x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion]
# -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
# retrieve relative position embeddings for each layer -> see paper for more details
# [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
)
# [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
if extended_predict_attention_mask is not None:
# Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
predict_attn_probs = softmax(
predict_attn_weights,
dim=-1,
onnx_trace=self.onnx_trace,
).type_as(predict_attn_weights)
predict_attn_probs = nn.functional.dropout(
predict_attn_probs, p=self.attention_dropout, training=self.training
)
# project to attention output
# [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
# x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion]
# -> [batch_size, ngram, number_heads, sequence_length, head_dimesion]
predict_attn_output = torch.einsum(
"bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
)
# reshape so that num_heads dim is merged into last `head_dim` axis
# [batch_size, ngram, number_heads, sequence_length, head_dimesion] -> [batch_size, ngram, sequence_length, hidden_size]
predict_attn_output = predict_attn_output.transpose(2, 3)
predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
predict_attn_output = self.out_proj(predict_attn_output)
# concat to single attn output
# [batch_size, (1+ngram)*sequence_length, hidden_size]
attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
# reshape into better form for `config.output_attentions`
main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
return attn_output, main_attn_probs, predict_attn_probs
def get_main_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
):
# input hidden_states [batch_size, sequence_length, hidden_size]
# input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
# input position_ids [batch_size, sequence_length] or [1,1]
batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
if main_relative_position_buckets is None:
batch_size, sequence_length = hidden_states.shape[:2]
relative_positions = (
torch.arange(1, attn_weights.shape[-1] + 1)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
# [batch_size, sequence_length, sequence_length+1]
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
main_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
# [batch_size, sequence_length, num_buckets * num_heads]
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
rel_pos_embeddings = rel_pos_embeddings.view(
rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
)
rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
# [batch_size, num_heads, sequence_length, num_buckets]
rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
# [batch_size * num_heads * sequence_length, sequence_length]
main_relative_position_buckets = main_relative_position_buckets.view(
-1, main_relative_position_buckets.shape[-1]
)
main_relative_position_buckets = main_relative_position_buckets.long()
# [batch_size * num_heads * sequence_length, sequence_length]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
return main_relative_pos_embeddings
def get_predict_relative_pos_embeddings(
self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
):
# input hidden_states [batch_size, sequence_length, ngram, hidden_size]
# input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
# input position_ids [batch_size, sequence_length] or [1,1]
# input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
batch_size, sequence_length = hidden_states.shape[0:2]
if predict_relative_position_buckets is None:
key_sequence_length = attn_weights.shape[-1]
assert position_ids[0][0] == key_sequence_length - 1, (
"`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
)
relative_positions = (
torch.arange(0, key_sequence_length)
.unsqueeze(0)
.unsqueeze(0)
.repeat(batch_size, sequence_length, 1)
.to(position_ids.device)
)
relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
predict_relative_position_buckets = compute_relative_buckets(
self.num_buckets, self.relative_max_distance, relative_positions, False
)
# [batch_size, ngram, sequence_length, hidden_size]
hidden_states = hidden_states.transpose(1, 2)
rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
# [batch_size, ngram, sequence_length, num_buckets, num_heads]
rel_pos_embeddings = rel_pos_embeddings.view(
hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
)
rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
# [batch_size * ngram * sequence_length * num_heads, num_buckets]
rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
# [ngram, batch_size, num_heads * sequence_length, -1]
predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
predict_relative_position_buckets = predict_relative_position_buckets.repeat(
self.ngram, 1, self.num_attn_heads, 1
)
# [ngram * batch_size * num_heads * sequence_length, -1]
predict_relative_position_buckets = predict_relative_position_buckets.view(
-1, predict_relative_position_buckets.size(-1)
).long()
predict_relative_pos_embeddings = torch.gather(
rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
)
# [batch_size, gram, num_heads, sequence_length, -1]
predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
)
return predict_relative_pos_embeddings
| ProphetNetNgramSelfAttention |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/tool_emulator.py | {
"start": 586,
"end": 7364
} | class ____(AgentMiddleware):
"""Emulates specified tools using an LLM instead of executing them.
This middleware allows selective emulation of tools for testing purposes.
By default (when `tools=None`), all tools are emulated. You can specify which
tools to emulate by passing a list of tool names or `BaseTool` instances.
Examples:
!!! example "Emulate all tools (default behavior)"
```python
from langchain.agents.middleware import LLMToolEmulator
middleware = LLMToolEmulator()
agent = create_agent(
model="openai:gpt-4o",
tools=[get_weather, get_user_location, calculator],
middleware=[middleware],
)
```
!!! example "Emulate specific tools by name"
```python
middleware = LLMToolEmulator(tools=["get_weather", "get_user_location"])
```
!!! example "Use a custom model for emulation"
```python
middleware = LLMToolEmulator(
tools=["get_weather"], model="anthropic:claude-sonnet-4-5-20250929"
)
```
!!! example "Emulate specific tools by passing tool instances"
```python
middleware = LLMToolEmulator(tools=[get_weather, get_user_location])
```
"""
def __init__(
self,
*,
tools: list[str | BaseTool] | None = None,
model: str | BaseChatModel | None = None,
) -> None:
"""Initialize the tool emulator.
Args:
tools: List of tool names (`str`) or `BaseTool` instances to emulate.
If `None`, ALL tools will be emulated.
If empty list, no tools will be emulated.
model: Model to use for emulation.
Defaults to `'anthropic:claude-sonnet-4-5-20250929'`.
Can be a model identifier string or `BaseChatModel` instance.
"""
super().__init__()
# Extract tool names from tools
# None means emulate all tools
self.emulate_all = tools is None
self.tools_to_emulate: set[str] = set()
if not self.emulate_all and tools is not None:
for tool in tools:
if isinstance(tool, str):
self.tools_to_emulate.add(tool)
else:
# Assume BaseTool with .name attribute
self.tools_to_emulate.add(tool.name)
# Initialize emulator model
if model is None:
self.model = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=1)
elif isinstance(model, BaseChatModel):
self.model = model
else:
self.model = init_chat_model(model, temperature=1)
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command],
) -> ToolMessage | Command:
"""Emulate tool execution using LLM if tool should be emulated.
Args:
request: Tool call request to potentially emulate.
handler: Callback to execute the tool (can be called multiple times).
Returns:
ToolMessage with emulated response if tool should be emulated,
otherwise calls handler for normal execution.
"""
tool_name = request.tool_call["name"]
# Check if this tool should be emulated
should_emulate = self.emulate_all or tool_name in self.tools_to_emulate
if not should_emulate:
# Let it execute normally by calling the handler
return handler(request)
# Extract tool information for emulation
tool_args = request.tool_call["args"]
tool_description = request.tool.description if request.tool else "No description available"
# Build prompt for emulator LLM
prompt = (
f"You are emulating a tool call for testing purposes.\n\n"
f"Tool: {tool_name}\n"
f"Description: {tool_description}\n"
f"Arguments: {tool_args}\n\n"
f"Generate a realistic response that this tool would return "
f"given these arguments.\n"
f"Return ONLY the tool's output, no explanation or preamble. "
f"Introduce variation into your responses."
)
# Get emulated response from LLM
response = self.model.invoke([HumanMessage(prompt)])
# Short-circuit: return emulated result without executing real tool
return ToolMessage(
content=response.content,
tool_call_id=request.tool_call["id"],
name=tool_name,
)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
) -> ToolMessage | Command:
"""Async version of `wrap_tool_call`.
Emulate tool execution using LLM if tool should be emulated.
Args:
request: Tool call request to potentially emulate.
handler: Async callback to execute the tool (can be called multiple times).
Returns:
ToolMessage with emulated response if tool should be emulated,
otherwise calls handler for normal execution.
"""
tool_name = request.tool_call["name"]
# Check if this tool should be emulated
should_emulate = self.emulate_all or tool_name in self.tools_to_emulate
if not should_emulate:
# Let it execute normally by calling the handler
return await handler(request)
# Extract tool information for emulation
tool_args = request.tool_call["args"]
tool_description = request.tool.description if request.tool else "No description available"
# Build prompt for emulator LLM
prompt = (
f"You are emulating a tool call for testing purposes.\n\n"
f"Tool: {tool_name}\n"
f"Description: {tool_description}\n"
f"Arguments: {tool_args}\n\n"
f"Generate a realistic response that this tool would return "
f"given these arguments.\n"
f"Return ONLY the tool's output, no explanation or preamble. "
f"Introduce variation into your responses."
)
# Get emulated response from LLM (using async invoke)
response = await self.model.ainvoke([HumanMessage(prompt)])
# Short-circuit: return emulated result without executing real tool
return ToolMessage(
content=response.content,
tool_call_id=request.tool_call["id"],
name=tool_name,
)
| LLMToolEmulator |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 27922,
"end": 29021
} | class ____(TypeDefinition):
__slots__ = ('loc', 'name', 'types', 'directives',)
_fields = ('name', 'types',)
def __init__(self, name, types, loc=None, directives=None):
self.loc = loc
self.name = name
self.types = types
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, UnionTypeDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.types == other.types and
self.directives == other.directives
)
)
def __repr__(self):
return ('UnionTypeDefinition('
'name={self.name!r}'
', types={self.types!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.types,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
| UnionTypeDefinition |
python | apache__airflow | providers/jenkins/tests/unit/jenkins/hooks/test_jenkins.py | {
"start": 1190,
"end": 3838
} | class ____:
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_client_created_default_http(self, get_connection_mock):
"""tests `init` method to validate http client creation when all parameters are passed"""
default_connection_id = "jenkins_default"
connection_host = "test.com"
connection_port = 8080
get_connection_mock.return_value = mock.Mock(
connection_id=default_connection_id,
login="test",
password="test",
schema="",
extra_dejson={"use_https": False},
host=connection_host,
port=connection_port,
)
complete_url = f"http://{connection_host}:{connection_port}/"
hook = JenkinsHook(default_connection_id)
assert hook.jenkins_server is not None
assert hook.jenkins_server.server == complete_url
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_client_created_default_https(self, get_connection_mock):
"""tests `init` method to validate https client creation when all
parameters are passed"""
default_connection_id = "jenkins_default"
connection_host = "test.com"
connection_port = 8080
get_connection_mock.return_value = mock.Mock(
connection_id=default_connection_id,
login="test",
password="test",
schema="",
extra_dejson={"use_https": True},
host=connection_host,
port=connection_port,
)
complete_url = f"https://{connection_host}:{connection_port}/"
hook = JenkinsHook(default_connection_id)
assert hook.jenkins_server is not None
assert hook.jenkins_server.server == complete_url
@pytest.mark.parametrize("param_building", [True, False])
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
@mock.patch("jenkins.Jenkins.get_job_info")
@mock.patch("jenkins.Jenkins.get_build_info")
def test_get_build_building_state(
self, mock_get_build_info, mock_get_job_info, get_connection_mock, param_building
):
get_connection_mock.return_value = mock.Mock(
connection_id="test_connection",
login="test",
password="test",
schema="",
extra_dejson={"use_https": False},
host="test.com",
port=8080,
)
mock_get_build_info.return_value = {"building": param_building}
hook = JenkinsHook("none_connection_id")
result = hook.get_build_building_state("some_job", 1)
assert result == param_building
| TestJenkinsHook |
python | plotly__plotly.py | tests/test_optional/test_tools/test_figure_factory.py | {
"start": 44984,
"end": 57629
} | class ____(TestCaseNoTemplate, NumpyTestUtilsMixin):
def test_fontcolor_input(self):
# check: ValueError if fontcolor input is incorrect
kwargs = {
"table_text": [["one", "two"], [1, 2], [1, 2], [1, 2]],
"fontcolor": "#000000",
}
self.assertRaises(ValueError, ff.create_table, **kwargs)
kwargs = {
"table_text": [["one", "two"], [1, 2], [1, 2], [1, 2]],
"fontcolor": ["red", "blue"],
}
self.assertRaises(ValueError, ff.create_table, **kwargs)
def test_simple_table(self):
# we should be able to create a striped table by supplying a text matrix
text = [
["Country", "Year", "Population"],
["US", 2000, 282200000],
["Canada", 2000, 27790000],
["US", 1980, 226500000],
]
table = ff.create_table(text)
expected_table = {
"data": [
{
"colorscale": [[0, "#00083e"], [0.5, "#ededee"], [1, "#ffffff"]],
"hoverinfo": "none",
"opacity": 0.75,
"showscale": False,
"type": "heatmap",
"z": [[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1], [0.5, 0.5, 0.5]],
}
],
"layout": {
"annotations": [
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Country</b>",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Year</b>",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Population</b>",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "US",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "2000",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "282200000",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "Canada",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "2000",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "27790000",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "US",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 3,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "1980",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 3,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "226500000",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 3,
"yref": "y",
},
],
"height": 170,
"margin": {"b": 0, "l": 0, "r": 0, "t": 0},
"xaxis": {
"dtick": 1,
"gridwidth": 2,
"showticklabels": False,
"tick0": -0.5,
"ticks": "",
"zeroline": False,
},
"yaxis": {
"autorange": "reversed",
"dtick": 1,
"gridwidth": 2,
"showticklabels": False,
"tick0": 0.5,
"ticks": "",
"zeroline": False,
},
},
}
self.assert_fig_equal(table["data"][0], expected_table["data"][0])
self.assert_fig_equal(table["layout"], expected_table["layout"])
def test_table_with_index(self):
# we should be able to create a striped table where the first column
# matches the coloring of the header
text = [
["Country", "Year", "Population"],
["US", 2000, 282200000],
["Canada", 2000, 27790000],
]
index_table = ff.create_table(text, index=True, index_title="Title")
exp_index_table = {
"data": [
{
"colorscale": [[0, "#00083e"], [0.5, "#ededee"], [1, "#ffffff"]],
"hoverinfo": "none",
"opacity": 0.75,
"showscale": False,
"type": "heatmap",
"z": [[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]],
}
],
"layout": {
"annotations": [
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Country</b>",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Year</b>",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Population</b>",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 0,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>US</b>",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "2000",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "282200000",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 1,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#ffffff"},
"showarrow": False,
"text": "<b>Canada</b>",
"x": -0.45,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "2000",
"x": 0.55,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
{
"align": "left",
"font": {"color": "#000000"},
"showarrow": False,
"text": "27790000",
"x": 1.55,
"xanchor": "left",
"xref": "x",
"y": 2,
"yref": "y",
},
],
"height": 140,
"margin": {"b": 0, "l": 0, "r": 0, "t": 0},
"xaxis": {
"dtick": 1,
"gridwidth": 2,
"showticklabels": False,
"tick0": -0.5,
"ticks": "",
"zeroline": False,
},
"yaxis": {
"autorange": "reversed",
"dtick": 1,
"gridwidth": 2,
"showticklabels": False,
"tick0": 0.5,
"ticks": "",
"zeroline": False,
},
},
}
self.assert_fig_equal(index_table["data"][0], exp_index_table["data"][0])
self.assert_fig_equal(index_table["layout"], exp_index_table["layout"])
| TestTable |
python | numba__numba | numba/tests/test_parfors.py | {
"start": 122941,
"end": 136344
} | class ____(TestPrangeBase):
""" Tests Prange """
def test_prange01(self):
def test_impl():
n = 4
A = np.zeros(n)
for i in range(n):
A[i] = 2.0 * i
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange02(self):
def test_impl():
n = 4
A = np.zeros(n - 1)
for i in range(1, n):
A[i - 1] = 2.0 * i
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03(self):
def test_impl():
s = 10
for i in range(10):
s += 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03mul(self):
def test_impl():
s = 3
for i in range(10):
s *= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03sub(self):
def test_impl():
s = 100
for i in range(10):
s -= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03div(self):
def test_impl():
s = 10
for i in range(10):
s /= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange04(self):
def test_impl():
a = 2
b = 3
A = np.empty(4)
for i in range(4):
if i == a:
A[i] = b
else:
A[i] = 0
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange05(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(1, n - 1, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange06(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(1, 1, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange07(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(n, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange08(self):
def test_impl():
n = 4
A = np.ones((n))
acc = 0
for i in range(len(A)):
for j in range(len(A)):
acc += A[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange08_1(self):
def test_impl():
n = 4
A = np.ones((n))
acc = 0
for i in range(4):
for j in range(4):
acc += A[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange09(self):
def test_impl():
n = 4
acc = 0
for i in range(n):
for j in range(n):
acc += 1
return acc
# patch inner loop to 'prange'
self.prange_tester(test_impl, patch_instance=[1],
scheduler_type='unsigned',
check_fastmath=True)
def test_prange10(self):
def test_impl():
n = 4
acc2 = 0
for j in range(n):
acc1 = 0
for i in range(n):
acc1 += 1
acc2 += acc1
return acc2
# patch outer loop to 'prange'
self.prange_tester(test_impl, patch_instance=[0],
scheduler_type='unsigned',
check_fastmath=True)
@unittest.skip("list append is not thread-safe yet (#2391, #2408)")
def test_prange11(self):
def test_impl():
n = 4
return [np.sin(j) for j in range(n)]
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange12(self):
def test_impl():
acc = 0
n = 4
X = np.ones(n)
for i in range(-len(X)):
acc += X[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange13(self):
def test_impl(n):
acc = 0
for i in range(n):
acc += 1
return acc
self.prange_tester(test_impl, np.int32(4), scheduler_type='unsigned',
check_fastmath=True)
def test_prange14(self):
def test_impl(A):
s = 3
for i in range(len(A)):
s += A[i]*2
return s
# this tests reduction detection well since the accumulated variable
# is initialized before the parfor and the value accessed from the array
# is updated before accumulation
self.prange_tester(test_impl, np.random.ranf(4),
scheduler_type='unsigned',
check_fastmath=True)
def test_prange15(self):
# from issue 2587
# test parfor type inference when there is multi-dimensional indexing
def test_impl(N):
acc = 0
for i in range(N):
x = np.ones((1, 1))
acc += x[0, 0]
return acc
self.prange_tester(test_impl, 1024, scheduler_type='unsigned',
check_fastmath=True)
# Tests for negative ranges
def test_prange16(self):
def test_impl(N):
acc = 0
for i in range(-N, N):
acc += 2
return acc
self.prange_tester(test_impl, 1024, scheduler_type='signed',
check_fastmath=True)
def test_prange17(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-N, N):
acc += X[i]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange18(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-N, 5):
acc += X[i]
for j in range(-4, N):
acc += X[j]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange19(self):
def test_impl(N):
acc = 0
M = N + 4
X = np.ones((N, M))
for i in range(-N, N):
for j in range(-M, M):
acc += X[i, j]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange20(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-1, N):
acc += X[i]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange21(self):
def test_impl(N):
acc = 0
for i in range(-3, -1):
acc += 3
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange22(self):
def test_impl():
a = 0
b = 3
A = np.empty(4)
for i in range(-2, 2):
if i == a:
A[i] = b
elif i < 1:
A[i] = -1
else:
A[i] = 7
return A
self.prange_tester(test_impl, scheduler_type='signed',
check_fastmath=True, check_fastmath_result=True)
def test_prange23(self):
# test non-contig input
def test_impl(A):
for i in range(len(A)):
A[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange24(self):
# test non-contig input, signed range
def test_impl(A):
for i in range(-len(A), 0):
A[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='signed',
check_fastmath=True, check_fastmath_result=True)
def test_prange25(self):
def test_impl(A):
n = len(A)
buf = [np.zeros_like(A) for _ in range(n)]
for i in range(n):
buf[i] = A + i
return buf
A = np.ones((10,))
self.prange_tester(test_impl, A, patch_instance=[1],
scheduler_type='unsigned', check_fastmath=True,
check_fastmath_result=True)
cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),))
diagnostics = cpfunc.metadata['parfor_diagnostics']
hoisted_allocs = diagnostics.hoisted_allocations()
self.assertEqual(len(hoisted_allocs), 0)
def test_prange26(self):
def test_impl(A):
B = A[::3]
for i in range(len(B)):
B[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange27(self):
# issue5597: usedef error in parfor
def test_impl(a, b, c):
for j in range(b[0]-1):
for k in range(2):
z = np.abs(a[c-1:c+1])
return 0
# patch inner loop to 'prange'
self.prange_tester(test_impl,
np.arange(20),
np.asarray([4,4,4,4,4,4,4,4,4,4]),
0,
patch_instance=[1],
scheduler_type='unsigned',
check_fastmath=True)
def test_prange28(self):
# issue7105: label conflict in nested parfor
def test_impl(x, y):
out = np.zeros(len(y))
for idx in range(0, len(y)):
i0 = y[idx, 0]
i1 = y[idx, 1]
Pt1 = x[i0]
Pt2 = x[i1]
v = Pt1 - Pt2
vl2 = v[0] + v[1]
out[idx] = vl2
return out
X = np.array([[-1., -1.],
[-1., 1.],
[ 0., 0.],
[ 1., -1.],
[ 1., 0.],
[ 1., 1.]])
Y = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5]])
self.prange_tester(test_impl, X, Y, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange29(self):
# issue7630: SSA renaming in prange header
def test_impl(flag):
result = 0
if flag:
for i in range(1):
result += 1
else:
for i in range(1):
result -= 3
return result
self.prange_tester(test_impl, True)
self.prange_tester(test_impl, False)
def test_prange30(self):
# issue7675: broadcast setitem
def test_impl(x, par, numthreads):
n_par = par.shape[0]
n_x = len(x)
result = np.zeros((n_par, n_x), dtype=np.float64)
chunklen = (len(x) + numthreads - 1) // numthreads
for i in range(numthreads):
start = i * chunklen
stop = (i + 1) * chunklen
result[:, start:stop] = x[start:stop] * par[:]
return result
x = np.array(np.arange(0, 6, 1.0))
par = np.array([1.0, 2.0, 3.0])
self.prange_tester(test_impl, x, par, 2)
@register_jitable
def test_call_hoisting_outcall(a,b):
return (a, b)
@skip_parfors_unsupported
| TestPrangeBasic |
python | conda__conda | tests/plugins/test_pre_commands.py | {
"start": 247,
"end": 1967
} | class ____:
def pre_command_action(self, command: str) -> None:
pass
@plugins.hookimpl
def conda_pre_commands(self):
yield CondaPreCommand(
name="custom-pre-command",
action=self.pre_command_action,
run_for={"install", "create", "info"},
)
@pytest.fixture()
def pre_command_plugin(mocker, plugin_manager):
mocker.patch.object(PreCommandPlugin, "pre_command_action")
pre_command_plugin = PreCommandPlugin()
plugin_manager.register(pre_command_plugin)
plugin_manager.load_plugins(*reporter_backends_plugins)
return pre_command_plugin
def test_pre_command_invoked(pre_command_plugin, conda_cli):
"""
Makes sure that we successfully invoked our "pre-command" action.
"""
conda_cli("info")
assert len(pre_command_plugin.pre_command_action.mock_calls) == 1
def test_pre_command_not_invoked(pre_command_plugin, conda_cli):
"""
Makes sure that we successfully did not invoke our "pre-command" action.
"""
conda_cli("config")
assert len(pre_command_plugin.pre_command_action.mock_calls) == 0
def test_pre_command_action_raises_exception(pre_command_plugin, conda_cli):
"""
When the plugin action fails or raises an exception, we want to make sure
that it bubbles up to the top and isn't caught anywhere. This will ensure that it
goes through our normal exception catching/reporting mechanism.
"""
exc_message = "💥"
pre_command_plugin.pre_command_action.side_effect = [Exception(exc_message)]
with pytest.raises(Exception, match=exc_message):
conda_cli("info")
assert len(pre_command_plugin.pre_command_action.mock_calls) == 1
| PreCommandPlugin |
python | conda__conda | conda/core/subdir_data.py | {
"start": 4538,
"end": 28754
} | class ____(metaclass=SubdirDataType):
"""
A class representing the SubdirData.
This class provides functionality for managing and caching SubdirData instances.
:param channel: The channel object
:param repodata_fn: The name of the repodata file. Defaults to REPODATA_FN
:return: A SubdirData instance.
"""
_cache_: dict[tuple[str, str], PackageRecordList | SubdirData] = {}
@classmethod
def clear_cached_local_channel_data(cls, exclude_file: bool = True) -> None:
"""
Clear the cached local channel data.
This method is used to clear the cached local channel data. It is primarily used during
unit tests to handle changes in the CONDA_USE_ONLY_TAR_BZ2 environment variable during the
process lifetime.
:param exclude_file: A flag indicating whether to exclude file:// URLs from the cache.
"""
# This should only ever be needed during unit tests, when
# CONDA_USE_ONLY_TAR_BZ2 may change during process lifetime.
if exclude_file:
cls._cache_ = {
k: v for k, v in cls._cache_.items() if not k[0].startswith("file://")
}
else:
cls._cache_.clear()
@staticmethod
def query_all(
package_ref_or_match_spec: MatchSpec | str,
channels: Iterable[Channel | str] | None = None,
subdirs: Iterable[str] | None = None,
repodata_fn: str = REPODATA_FN,
) -> tuple[PackageRecord, ...]:
"""
Execute a query against all repodata instances in the channel/subdir
matrix.
:param package_ref_or_match_spec: A `MatchSpec` query object. A `str`
will be turned into a `MatchSpec` automatically.
:param channels: An iterable of urls for channels or `Channel` objects.
If None, will fall back to `context.channels`.
:param subdirs: If None, will fall back to context.subdirs.
:param repodata_fn: The filename of the repodata.
:return: A tuple of `PackageRecord` objects.
"""
# ensure that this is not called by threaded code
create_cache_dir()
if channels is None:
channels = context.channels
channel_urls = all_channel_urls(channels, subdirs=subdirs)
if context.offline:
grouped_urls = groupby(lambda url: url.startswith("file://"), channel_urls)
ignored_urls = grouped_urls.get(False, ())
if ignored_urls:
log.info(
"Ignoring the following channel urls because mode is offline.%s",
dashlist(ignored_urls),
)
channel_urls = IndexedSet(grouped_urls.get(True, ()))
def subdir_query(url: str) -> tuple[PackageRecord, ...]:
"""
Queries the SubdirData for a given URL and returns a tuple of PackageRecord objects.
:param url: The URL of the SubdirData to query.
:return: A tuple of PackageRecord objects representing the query results.
"""
return tuple(
SubdirData(Channel(url), repodata_fn=repodata_fn).query(
package_ref_or_match_spec
)
)
# TODO test timing with ProcessPoolExecutor
Executor = (
DummyExecutor
if context.debug or context.repodata_threads == 1
else partial(
ThreadLimitedThreadPoolExecutor, max_workers=context.repodata_threads
)
)
with Executor() as executor:
result = tuple(
chain.from_iterable(executor.map(subdir_query, channel_urls))
)
return result
def query(
self, package_ref_or_match_spec: str | MatchSpec
) -> Iterator[PackageRecord]:
"""
A function that queries for a specific package reference or MatchSpec object.
:param package_ref_or_match_spec: The package reference or MatchSpec object to query.
:yields: PackageRecord objects.
"""
if not self._loaded:
self.load()
param = package_ref_or_match_spec
if isinstance(param, str):
param = MatchSpec(param) # type: ignore
if isinstance(param, MatchSpec):
if param.get_exact_value("name"):
package_name = param.get_exact_value("name")
for prec in self._iter_records_by_name(package_name):
if param.match(prec):
yield prec
else:
for prec in self.iter_records():
if param.match(prec):
yield prec
else:
if not isinstance(param, PackageRecord):
raise TypeError("Query did not result in a record.")
for prec in self._iter_records_by_name(param.name):
if prec == param:
yield prec
def __init__(
self,
channel: Channel,
repodata_fn: str = REPODATA_FN,
RepoInterface: type[RepoInterface] = CondaRepoInterface,
):
"""
Initializes a new instance of the SubdirData class.
:param channel: The channel object.
:param repodata_fn: The repodata filename.
:param RepoInterface: The RepoInterface class.
"""
if not channel.subdir:
raise ValueError("SubdirData requires a subdir-aware Channel.")
# metaclass __init__ asserts no package_filename
if channel.package_filename: # pragma: no cover
parts = channel.dump()
del parts["package_filename"]
channel = Channel(**parts)
self.channel = channel
# disallow None (typing)
self.url_w_subdir = self.channel.url(with_credentials=False) or ""
self.url_w_credentials = self.channel.url(with_credentials=True) or ""
# these can be overriden by repodata.json v2
self._base_url = self.url_w_subdir
self._base_url_w_credentials = self.url_w_credentials
# whether or not to try using the new, trimmed-down repodata
self.repodata_fn = repodata_fn
self.RepoInterface = RepoInterface
self._loaded = False
self._key_mgr = None
@property
def _repo(self) -> RepoInterface:
"""
Changes as we mutate self.repodata_fn.
"""
return self.repo_fetch._repo
@property
def repo_cache(self) -> RepodataCache:
"""
Returns the `RepodataCache` object associated with the current instance of `SubdirData`.
"""
return self.repo_fetch.repo_cache
@property
def repo_fetch(self) -> RepodataFetch:
"""
Object to get repodata. Not cached since self.repodata_fn is mutable.
Replaces self._repo & self.repo_cache.
"""
return RepodataFetch(
Path(self.cache_path_base),
self.channel,
self.repodata_fn,
repo_interface_cls=self.RepoInterface,
)
def reload(self) -> Self:
"""
Update the instance with new information.
"""
self._loaded = False
self.load()
return self
@property
def cache_path_base(self) -> str:
"""
Get the base path for caching the repodata.json file.
This method returns the base path for caching the repodata.json file. It is used to
construct the full path for caching the file.
"""
return join(
create_cache_dir(),
splitext(cache_fn_url(self.url_w_credentials, self.repodata_fn))[0],
)
@property
def url_w_repodata_fn(self) -> str:
"""
Get the URL with the repodata filename.
This method returns the URL with the repodata filename.
"""
return self.url_w_subdir + "/" + self.repodata_fn
@property
def cache_path_json(self) -> Path:
"""
Get the path to the cache file.
This method returns the path to the cache file.
"""
return Path(
self.cache_path_base + ("1" if context.use_only_tar_bz2 else "") + ".json"
)
@property
def cache_path_state(self) -> Path:
"""
Out-of-band etag and other state needed by the RepoInterface.
Get the path to the cache state file.
This method returns the path to the cache state file.
"""
return Path(
self.cache_path_base
+ ("1" if context.use_only_tar_bz2 else "")
+ CACHE_STATE_SUFFIX
)
@property
def cache_path_pickle(self) -> str:
"""
Get the path to the cache pickle file.
This method returns the path to the cache pickle file.
"""
return self.cache_path_base + ("1" if context.use_only_tar_bz2 else "") + ".q"
def load(self) -> Self:
"""
Load the internal state of the SubdirData instance.
This method loads the internal state of the SubdirData instance.
"""
_internal_state = self._load()
if _internal_state.get("repodata_version", 0) > MAX_REPODATA_VERSION:
raise CondaUpgradeError(
dals(
"""
The current version of conda is too old to read repodata from
%s
(This version only supports repodata_version 1 and 2.)
Please update conda to use this channel.
"""
)
% self.url_w_repodata_fn
)
self._base_url = _internal_state.get("base_url", self.url_w_subdir)
self._base_url_w_credentials = _internal_state.get(
"base_url_w_credentials", self.url_w_credentials
)
self._internal_state = _internal_state
self._package_records = _internal_state["_package_records"]
self._names_index = _internal_state["_names_index"]
# Unused since early 2023:
self._track_features_index = _internal_state["_track_features_index"]
self._loaded = True
return self
def iter_records(self) -> Iterator[PackageRecord]:
"""
A function that iterates over package records.
This function checks if the package records are loaded. If not loaded, it loads them. It
returns an iterator over the package records. The package_records attribute could
potentially be replaced with fully-converted UserList data after going through the entire
list.
"""
if not self._loaded:
self.load()
return iter(self._package_records)
# could replace self._package_records with fully-converted UserList.data
# after going through entire list
def _iter_records_by_name(self, name: str) -> Iterator[PackageRecord]:
"""
A function that iterates over package records by name.
This function iterates over package records and yields those whose name matches the given
name. If include_self is True, it also yields the record with the given name.
"""
for i in self._names_index[name]:
yield self._package_records[i]
def _load(self) -> dict[str, Any]:
    """
    Try to load repodata. If e.g. we are downloading
    `current_repodata.json`, fall back to `repodata.json` when the former
    is unavailable.

    Returns:
        The processed internal-state dict produced by
        ``_process_raw_repodata``.
    """
    try:
        fetcher = self.repo_fetch
        repodata, state = fetcher.fetch_latest_parsed()
        return self._process_raw_repodata(repodata, state)
    except UnavailableInvalidChannel:
        if self.repodata_fn != REPODATA_FN:
            # Retry exactly once with the standard repodata.json filename;
            # the recursion terminates because repodata_fn now equals
            # REPODATA_FN.
            self.repodata_fn = REPODATA_FN
            return self._load()
        else:
            # Already using the default filename; nothing to fall back to.
            raise
def _pickle_me(self) -> None:
    """
    Best-effort: pickle the processed internal state to the cache.

    Failures are logged and swallowed deliberately — the pickle is purely
    an optimization and the JSON cache remains authoritative.
    """
    try:
        log.debug(
            "Saving pickled state for %s at %s",
            self.url_w_repodata_fn,
            self.cache_path_pickle,
        )
        with open(self.cache_path_pickle, "wb") as fh:
            pickle.dump(self._internal_state, fh, pickle.HIGHEST_PROTOCOL)
    except Exception:
        log.debug("Failed to dump pickled repodata.", exc_info=True)
def _read_local_repodata(self, state: RepodataState) -> dict[str, Any]:
    """
    Load repodata from the local cache, preferring the pickled form.

    Falls back to parsing the cached JSON when no valid pickle exists,
    then re-pickles the freshly processed state for next time.
    """
    # first try reading pickled data
    _pickled_state = self._read_pickled(state)
    if _pickled_state:
        return _pickled_state
    raw_repodata_str, state = self.repo_fetch.read_cache()
    _internal_state = self._process_raw_repodata_str(raw_repodata_str, state)
    # taken care of by _process_raw_repodata(): it must have stored the
    # very same dict on self; identity check guards against regressions.
    if self._internal_state is not _internal_state:
        raise RuntimeError("Internal state out of sync.")
    self._pickle_me()
    return _internal_state
def _pickle_valid_checks(
    self, pickled_state: dict[str, Any], mod: str, etag: str
) -> Iterator[tuple[str, Any, Any]]:
    """
    Yield ``(check_name, pickled_value, current_value)`` triples.

    A cached pickle is thrown away unless every yielded pair matches.

    :param pickled_state: The pickled state to compare against.
    :param mod: The modification information to check.
    :param etag: The etag to compare against.
    """
    # Table of (key, expected current value); yielded in a fixed order so
    # callers that format the tuples get stable output.
    expected = (
        ("_url", self.url_w_credentials),
        ("_schannel", self.channel.canonical_name),
        ("_add_pip", context.add_pip_as_python_dependency),
        ("_mod", mod),
        ("_etag", etag),
        ("_pickle_version", REPODATA_PICKLE_VERSION),
        ("fn", self.repodata_fn),
    )
    for key, current in expected:
        yield key, pickled_state.get(key), current
def _read_pickled(self, state: RepodataState) -> dict[str, Any] | None:
    """
    Load and validate the pickled repodata cache.

    :param state: The repodata state; a plain mapping is wrapped in a
        RepodataState.
    :return: The unpickled internal-state dict, or None when the pickle is
        missing, unreadable, or fails any freshness/consistency check.
    """
    if not isinstance(state, RepodataState):
        # Tolerate a plain mapping by wrapping it.
        state = RepodataState(
            self.cache_path_json,
            self.cache_path_state,
            self.repodata_fn,
            dict=state,
        )
    if not isfile(self.cache_path_pickle) or not isfile(self.cache_path_json):
        # Don't trust pickled data if there is no accompanying json data
        return None
    try:
        if isfile(self.cache_path_pickle):
            log.debug("found pickle file %s", self.cache_path_pickle)
        with open(self.cache_path_pickle, "rb") as fh:
            _pickled_state = pickle.load(fh)
    except Exception:
        # Corrupt/unreadable pickle: remove it and fall back to JSON.
        log.debug("Failed to load pickled repodata.", exc_info=True)
        rm_rf(self.cache_path_pickle)
        return None

    def checks() -> Iterator[tuple[str, str | None, str]]:
        """Triples of (name, pickled value, current value) that must match."""
        return self._pickle_valid_checks(_pickled_state, state.mod, state.etag)

    def _check_pickled_valid() -> Iterator[bool]:
        """Yield one boolean per check; all must be True to keep the pickle."""
        for _, left, right in checks():
            yield left == right

    if not all(_check_pickled_valid()):
        log.debug(
            "Pickle load validation failed for %s at %s. %r",
            self.url_w_repodata_fn,
            self.cache_path_json,
            tuple(checks()),
        )
        return None
    return _pickled_state
def _process_raw_repodata_str(
self,
raw_repodata_str: str,
state: RepodataState | None = None,
) -> dict[str, Any]:
"""State contains information that was previously in-band in raw_repodata_str.
Process the raw repodata string and return the processed repodata.
:param raw_repodata_str: The raw repodata string.
:return: A dictionary containing the processed repodata.
"""
json_obj = json.loads(raw_repodata_str or "{}")
return self._process_raw_repodata(json_obj, state=state)
def _process_raw_repodata(
    self, repodata: dict[str, Any], state: RepodataState | None = None
) -> dict[str, Any]:
    """
    Build this instance's internal state from a parsed repodata dict.

    Populates the package-record list and the name index, resolves the
    effective base URL, and returns the assembled internal-state dict
    (also stored on ``self._internal_state``).

    :param repodata: The raw repodata dictionary.
    :param state: The repodata state. Defaults to None.
    :return: A dictionary containing the processed repodata.
    :raises ValueError: if the repodata subdir does not match the channel.
    :raises CondaUpgradeError: if repodata_version is newer than supported.
    """
    if not isinstance(state, RepodataState):
        # Tolerate a plain mapping by wrapping it.
        state = RepodataState(
            self.cache_path_json,
            self.cache_path_state,
            self.repodata_fn,
            dict=state,
        )
    subdir = repodata.get("info", {}).get("subdir") or self.channel.subdir
    if subdir != self.channel.subdir:
        raise ValueError(
            f"Repodata subdir ({subdir}) does not match channel ({self.channel.subdir})"
        )
    add_pip = context.add_pip_as_python_dependency
    schannel = self.channel.canonical_name
    self._package_records = _package_records = PackageRecordList()
    self._names_index = _names_index = defaultdict(list)
    # Unused since early 2023; kept for pickle/state compatibility.
    self._track_features_index = _track_features_index = defaultdict(list)
    # Resolve base URL both with and without credentials (CEP-15 aware).
    base_url = self._get_base_url(repodata, with_credentials=False)
    base_url_w_credentials = self._get_base_url(repodata, with_credentials=True)
    _internal_state = {
        "channel": self.channel,
        "url_w_subdir": self.url_w_subdir,
        "url_w_credentials": self.url_w_credentials,
        "base_url": base_url,
        "base_url_w_credentials": base_url_w_credentials,
        "cache_path_base": self.cache_path_base,
        "fn": self.repodata_fn,
        "_package_records": _package_records,
        "_names_index": _names_index,
        "_track_features_index": _track_features_index,
        "_etag": state.get("_etag"),
        "_mod": state.get("_mod"),
        "_cache_control": state.get("_cache_control"),
        "_url": state.get("_url"),
        "_add_pip": add_pip,
        "_pickle_version": REPODATA_PICKLE_VERSION,
        "_schannel": schannel,
        "repodata_version": state.get("repodata_version", 0),
    }
    if _internal_state["repodata_version"] > MAX_REPODATA_VERSION:
        raise CondaUpgradeError(
            dals(
                """
                The current version of conda is too old to read repodata from
                %s
                (This version only supports repodata_version 1 and 2.)
                Please update conda to use this channel.
                """
            )
            % self.url_w_subdir
        )
    meta_in_common = {  # just need to make this once, then apply with .update()
        "arch": repodata.get("info", {}).get("arch"),
        "channel": self.channel,
        "platform": repodata.get("info", {}).get("platform"),
        "schannel": schannel,
        "subdir": subdir,
    }
    legacy_packages = repodata.get("packages", {})
    # When only .tar.bz2 artifacts are allowed, ignore .conda packages.
    conda_packages = (
        {} if context.use_only_tar_bz2 else repodata.get("packages.conda", {})
    )
    _tar_bz2 = CONDA_PACKAGE_EXTENSION_V1
    # Keep a legacy .tar.bz2 entry only when no .conda twin exists
    # (k[:-6] strips the ".conda" suffix).
    use_these_legacy_keys = set(legacy_packages.keys()) - {
        k[:-6] + _tar_bz2 for k in conda_packages.keys()
    }
    for group, copy_legacy_md5 in (
        (conda_packages.items(), True),
        (((k, legacy_packages[k]) for k in use_these_legacy_keys), False),
    ):
        for fn, info in group:
            if copy_legacy_md5:
                # Carry the .tar.bz2 twin's md5/size onto the .conda record.
                counterpart = f"{fn[: -len('.conda')]}.tar.bz2"
                if counterpart in legacy_packages:
                    info["legacy_bz2_md5"] = legacy_packages[counterpart].get("md5")
                    info["legacy_bz2_size"] = legacy_packages[counterpart].get(
                        "size"
                    )
            if (
                add_pip
                and info["name"] == "python"
                and info["version"].startswith(("2.", "3."))
            ):
                info["depends"].append("pip")
            info.update(meta_in_common)
            # Skip individual records too new for this client.
            if info.get("record_version", 0) > 1:
                log.debug(
                    "Ignoring record_version %d from %s",
                    info["record_version"],
                    info["url"],
                )
                continue
            # lazy
            # package_record = PackageRecord(**info)
            info["fn"] = fn
            info["url"] = join_url(base_url_w_credentials, fn)
            _package_records.append(info)
            record_index = len(_package_records) - 1
            _names_index[info["name"]].append(record_index)
    self._internal_state = _internal_state
    return _internal_state
def _get_base_url(self, repodata: dict, with_credentials: bool = True) -> str:
    """
    Resolve the base URL that package filenames are joined onto.

    In repodata_version=1, .tar.bz2 and .conda artifacts are assumed to
    be colocated next to repodata.json, in the same server and directory.
    In repodata_version=2, repodata.json files can define a 'base_url'
    field to override that default assumption. See CEP-15 for more
    details. This method deals with both cases.

    :param repodata: The parsed repodata document.
    :param with_credentials: Whether the returned URL should carry the
        channel's auth/token credentials.
    :return: The base URL, honoring 'base_url' when present.
    :raises ChannelError: if 'base_url' is invalid or embeds credentials.
    """
    maybe_base_url = repodata.get("info", {}).get("base_url")
    if maybe_base_url:  # repodata defines base_url field
        try:
            base_url_parts = Channel(maybe_base_url).dump()
        except ValueError as exc:
            raise ChannelError(
                f"Subdir for {self.channel.canonical_name} at url '{self.url_w_subdir}' "
                "has invalid 'base_url'"
            ) from exc
        if with_credentials and self.url_w_credentials != self.url_w_subdir:
            # We don't check for .token or .auth because those are not well defined
            # in multichannel objects. It's safer to compare the resulting URLs.
            # Note that base_url is assumed to have the same authentication as the repodata
            channel_parts = self.channel.dump()
            for key in ("auth", "token"):
                if base_url_parts.get(key):
                    raise ChannelError(
                        f"'{self.url_w_subdir}' has 'base_url' with credentials. "
                        "This is not supported."
                    )
                channel_creds = channel_parts.get(key)
                if channel_creds:
                    base_url_parts[key] = channel_creds
            return Channel(**base_url_parts).url(with_credentials=True)
        return maybe_base_url
    if with_credentials:
        return self.url_w_credentials
    return self.url_w_subdir
| SubdirData |
python | mitmproxy__pdoc | test/testdata/demopackage/child_f.py | {
"start": 49,
"end": 413
} | class ____:
"""
This class defined in .child_f links to subpackage's G which is re-exposed as `G` directly in demopackage.
We want to make sure that these links render:
- demopackage.G
- demopackage.subpackage.G
- demopackage.subpackage.child_g.G
"""
def g(self) -> subpackage.G:
return subpackage.G()
G = subpackage.G
| F |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 2671,
"end": 3092
} | class ____(BaseModel):
scroll_x: int
"""The horizontal scroll distance."""
scroll_y: int
"""The vertical scroll distance."""
type: Literal["scroll"]
"""Specifies the event type.
For a scroll action, this property is always set to `scroll`.
"""
x: int
"""The x-coordinate where the scroll occurred."""
y: int
"""The y-coordinate where the scroll occurred."""
| ActionScroll |
python | django__django | tests/template_tests/syntax_tests/i18n/test_translate.py | {
"start": 10276,
"end": 10367
} | class ____(TranslationTransTagTests):
tag_name = "translate"
| TranslationTranslateTagTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-sftp-json/destination_sftp_json/client.py | {
"start": 687,
"end": 2686
} | class ____:
def __init__(
self,
host: str,
username: str,
password: str,
destination_path: str,
port: int = 22,
):
self.host = host
self.port = port
self.username = username
self.password = password
self.destination_path = destination_path
self._files: Dict[str, TextIO] = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_path(self, stream: str) -> str:
return f"{self.destination_path}/airbyte_json_{stream}.jsonl"
def _get_uri(self, stream: str) -> str:
path = self._get_path(stream)
return f"sftp://{self.username}:{self.password}@{self.host}:{self.port}/{path}"
def _open(self, stream: str) -> TextIO:
uri = self._get_uri(stream)
return smart_open.open(uri, mode="a+")
def close(self):
for file in self._files.values():
file.close()
def write(self, stream: str, record: Dict) -> None:
if stream not in self._files:
self._files[stream] = self._open(stream)
text = json.dumps(record)
self._files[stream].write(f"{text}\n")
def read_data(self, stream: str) -> List[Dict]:
with self._open(stream) as file:
pos = file.tell()
file.seek(0)
lines = file.readlines()
file.seek(pos)
data = [json.loads(line.strip()) for line in lines]
return data
def delete(self, stream: str) -> None:
with sftp_client(self.host, self.port, self.username, self.password) as sftp:
try:
path = self._get_path(stream)
sftp.remove(path)
except IOError as err:
# Ignore the case where the file doesn't exist, only raise the
# exception if it's something else
if err.errno != errno.ENOENT:
raise
| SftpClient |
python | sqlalchemy__sqlalchemy | test/ext/test_automap.py | {
"start": 17982,
"end": 20296
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"single",
metadata,
Column("id", Integer, primary_key=True),
Column("type", String(10)),
test_needs_fk=True,
)
Table(
"joined_base",
metadata,
Column("id", Integer, primary_key=True),
Column("type", String(10)),
test_needs_fk=True,
)
Table(
"joined_inh",
metadata,
Column(
"id", Integer, ForeignKey("joined_base.id"), primary_key=True
),
test_needs_fk=True,
)
FixtureTest.define_tables(metadata)
def test_single_inheritance_reflect(self):
Base = automap_base()
class Single(Base):
__tablename__ = "single"
type = Column(String)
__mapper_args__ = {
"polymorphic_identity": "u0",
"polymorphic_on": type,
}
class SubUser1(Single):
__mapper_args__ = {"polymorphic_identity": "u1"}
class SubUser2(Single):
__mapper_args__ = {"polymorphic_identity": "u2"}
Base.prepare(autoload_with=testing.db)
assert SubUser2.__mapper__.inherits is Single.__mapper__
def test_joined_inheritance_reflect(self):
Base = automap_base()
class Joined(Base):
__tablename__ = "joined_base"
type = Column(String)
__mapper_args__ = {
"polymorphic_identity": "u0",
"polymorphic_on": type,
}
class SubJoined(Joined):
__tablename__ = "joined_inh"
__mapper_args__ = {"polymorphic_identity": "u1"}
Base.prepare(autoload_with=testing.db)
assert SubJoined.__mapper__.inherits is Joined.__mapper__
assert not Joined.__mapper__.relationships
assert not SubJoined.__mapper__.relationships
def test_conditional_relationship(self):
Base = automap_base()
def _gen_relationship(*arg, **kw):
return None
Base.prepare(
autoload_with=testing.db,
generate_relationship=_gen_relationship,
)
| AutomapInhTest |
python | PyCQA__pylint | tests/functional/m/misplaced_format_function.py | {
"start": 1218,
"end": 1711
} | class ____:
def __init__(self, string):
self.string = string
def get_string(self):
return self.string
def format_string(self):
self.string.format()
self.get_string().format()
print(self.get_string()).format() # [misplaced-format-function]
print(self.get_string().format())
obj = FakeClass('Voila!!!')
obj.get_string().format()
print(obj.get_string().format())
print(obj.get_string()).format() # [misplaced-format-function]
| FakeClass |
python | mlflow__mlflow | mlflow/store/artifact/s3_artifact_repo.py | {
"start": 4090,
"end": 23709
} | class ____(ArtifactRepository, MultipartUploadMixin):
"""
Stores artifacts on Amazon S3.
This repository provides MLflow artifact storage using Amazon S3 as the backend.
It supports both single-file uploads and multipart uploads for large files,
with automatic content type detection and configurable upload parameters.
The repository uses boto3 for S3 operations and supports various authentication
methods including AWS credentials, IAM roles, and environment variables.
Environment Variables:
AWS_ACCESS_KEY_ID: AWS access key ID for authentication
AWS_SECRET_ACCESS_KEY: AWS secret access key for authentication
AWS_SESSION_TOKEN: AWS session token for temporary credentials
AWS_DEFAULT_REGION: Default AWS region for S3 operations
MLFLOW_S3_ENDPOINT_URL: Custom S3 endpoint URL (for S3-compatible storage)
MLFLOW_S3_IGNORE_TLS: Set to 'true' to disable TLS verification
MLFLOW_S3_UPLOAD_EXTRA_ARGS: JSON string of extra arguments for S3 uploads
MLFLOW_BOTO_CLIENT_ADDRESSING_STYLE: S3 addressing style ('path' or 'virtual')
Note:
This class inherits from both ArtifactRepository and MultipartUploadMixin,
providing full artifact management capabilities including efficient large file uploads.
"""
def __init__(
self,
artifact_uri: str,
access_key_id=None,
secret_access_key=None,
session_token=None,
tracking_uri: str | None = None,
registry_uri: str | None = None,
) -> None:
"""
Initialize an S3 artifact repository.
Args:
artifact_uri: S3 URI in the format 's3://bucket-name/path/to/artifacts'.
The URI must be a valid S3 URI with a bucket that exists and is accessible.
access_key_id: Optional AWS access key ID. If None, uses default AWS credential
resolution (environment variables, IAM roles, etc.).
secret_access_key: Optional AWS secret access key. Must be provided if
access_key_id is provided.
session_token: Optional AWS session token for temporary credentials.
Used with STS tokens or IAM roles.
tracking_uri: Optional URI for the MLflow tracking server.
If None, uses the current tracking URI context.
"""
super().__init__(artifact_uri, tracking_uri, registry_uri)
self._access_key_id = access_key_id
self._secret_access_key = secret_access_key
self._session_token = session_token
self._bucket_owner_params = (
{"ExpectedBucketOwner": owner}
if (owner := MLFLOW_S3_EXPECTED_BUCKET_OWNER.get())
else {}
)
def _get_s3_client(self):
return _get_s3_client(
access_key_id=self._access_key_id,
secret_access_key=self._secret_access_key,
session_token=self._session_token,
)
def parse_s3_compliant_uri(self, uri):
"""
Parse an S3 URI into bucket and path components.
Args:
uri: S3 URI in the format 's3://bucket-name/path/to/object'
Returns:
A tuple containing (bucket_name, object_path) where:
- bucket_name: The S3 bucket name
- object_path: The path within the bucket (without leading slash)
"""
parsed = urllib.parse.urlparse(uri)
if parsed.scheme != "s3":
raise Exception(f"Not an S3 URI: {uri}")
path = parsed.path
path = path.removeprefix("/")
return parsed.netloc, path
@staticmethod
def get_s3_file_upload_extra_args():
"""
Get additional S3 upload arguments from environment variables.
Returns:
Dictionary of extra arguments for S3 uploads, or None if not configured.
These arguments are passed to boto3's upload_file method.
Environment Variables:
MLFLOW_S3_UPLOAD_EXTRA_ARGS: JSON string containing extra arguments
for S3 uploads (e.g., '{"ServerSideEncryption": "AES256"}')
"""
if s3_file_upload_extra_args := MLFLOW_S3_UPLOAD_EXTRA_ARGS.get():
return json.loads(s3_file_upload_extra_args)
else:
return None
def _upload_file(self, s3_client, local_file, bucket, key):
extra_args = {}
guessed_type, guessed_encoding = guess_type(local_file)
if guessed_type is not None:
extra_args["ContentType"] = guessed_type
if guessed_encoding is not None:
extra_args["ContentEncoding"] = guessed_encoding
extra_args.update(self._bucket_owner_params)
environ_extra_args = self.get_s3_file_upload_extra_args()
if environ_extra_args is not None:
extra_args.update(environ_extra_args)
s3_client.upload_file(Filename=local_file, Bucket=bucket, Key=key, ExtraArgs=extra_args)
def log_artifact(self, local_file, artifact_path=None):
"""
Log a local file as an artifact to S3.
This method uploads a single file to S3 with automatic content type detection
and optional extra upload arguments from environment variables.
Args:
local_file: Absolute path to the local file to upload. The file must
exist and be readable.
artifact_path: Optional relative path within the S3 bucket where the
artifact should be stored. If None, the file is stored in the root
of the configured S3 path. Use forward slashes (/) for path separators.
"""
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
dest_path = posixpath.join(dest_path, os.path.basename(local_file))
self._upload_file(
s3_client=self._get_s3_client(), local_file=local_file, bucket=bucket, key=dest_path
)
def log_artifacts(self, local_dir, artifact_path=None):
"""
Log all files in a local directory as artifacts to S3.
This method recursively uploads all files in the specified directory,
preserving the directory structure in S3. Each file is uploaded with
automatic content type detection.
Args:
local_dir: Absolute path to the local directory containing files to upload.
The directory must exist and be readable.
artifact_path: Optional relative path within the S3 bucket where the
artifacts should be stored. If None, files are stored in the root
of the configured S3 path. Use forward slashes (/) for path separators.
"""
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
s3_client = self._get_s3_client()
local_dir = os.path.abspath(local_dir)
for root, _, filenames in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = os.path.relpath(root, local_dir)
rel_path = relative_path_to_artifact_path(rel_path)
upload_path = posixpath.join(dest_path, rel_path)
for f in filenames:
self._upload_file(
s3_client=s3_client,
local_file=os.path.join(root, f),
bucket=bucket,
key=posixpath.join(upload_path, f),
)
def _iterate_s3_paginated_results(self, bucket, prefix):
"""
Iterate over paginated S3 list_objects_v2 results with error handling.
This helper method isolates the S3 client operations that can raise ClientError
and provides appropriate error handling and mapping to MLflow exceptions.
The ClientError can occur during both paginator setup and iteration, because
the botocore library makes lazy calls.
Args:
bucket: S3 bucket name
prefix: S3 prefix to list objects under
Yields:
Individual result pages from S3 list_objects_v2 operation
Raises:
MlflowException: If S3 client operations fail
"""
from botocore.exceptions import ClientError
try:
s3_client = self._get_s3_client()
paginator = s3_client.get_paginator("list_objects_v2")
results = paginator.paginate(
Bucket=bucket, Prefix=prefix, Delimiter="/", **self._bucket_owner_params
)
for result in results:
yield result
except ClientError as error:
error_code = error.response["Error"]["Code"]
mlflow_error_code = BOTO_TO_MLFLOW_ERROR.get(error_code, INTERNAL_ERROR)
error_message = error.response["Error"]["Message"]
raise MlflowException(
f"Failed to list artifacts in {self.artifact_uri}: {error_message}",
error_code=mlflow_error_code,
)
def list_artifacts(self, path=None):
"""
List all artifacts directly under the specified S3 path.
This method uses S3's list_objects_v2 API with pagination to efficiently
list artifacts. It treats S3 prefixes as directories and returns both
files and directories as FileInfo objects.
Args:
path: Optional relative path within the S3 bucket to list. If None,
lists artifacts in the root of the configured S3 path. If the path
refers to a single file, returns an empty list per MLflow convention.
Returns:
A list of FileInfo objects representing artifacts directly under the
specified path. Each FileInfo contains:
- path: Relative path of the artifact from the repository root
- is_dir: True if the artifact represents a directory (S3 prefix)
- file_size: Size in bytes for files, None for directories
"""
(bucket, artifact_path) = self.parse_s3_compliant_uri(self.artifact_uri)
dest_path = artifact_path
if path:
dest_path = posixpath.join(dest_path, path)
dest_path = dest_path.rstrip("/") if dest_path else ""
infos = []
prefix = dest_path + "/" if dest_path else ""
for result in self._iterate_s3_paginated_results(bucket, prefix):
# Subdirectories will be listed as "common prefixes"
# due to the way we made the request
for obj in result.get("CommonPrefixes", []):
subdir_path = obj.get("Prefix")
self._verify_listed_object_contains_artifact_path_prefix(
listed_object_path=subdir_path, artifact_path=artifact_path
)
subdir_rel_path = posixpath.relpath(path=subdir_path, start=artifact_path)
subdir_rel_path = subdir_rel_path.removesuffix("/")
infos.append(FileInfo(subdir_rel_path, True, None))
# Objects listed directly will be files
for obj in result.get("Contents", []):
file_path = obj.get("Key")
self._verify_listed_object_contains_artifact_path_prefix(
listed_object_path=file_path, artifact_path=artifact_path
)
file_rel_path = posixpath.relpath(path=file_path, start=artifact_path)
file_size = int(obj.get("Size"))
infos.append(FileInfo(file_rel_path, False, file_size))
return sorted(infos, key=lambda f: f.path)
@staticmethod
def _verify_listed_object_contains_artifact_path_prefix(listed_object_path, artifact_path):
if not listed_object_path.startswith(artifact_path):
raise MlflowException(
"The path of the listed S3 object does not begin with the specified"
f" artifact path. Artifact path: {artifact_path}. Object path:"
f" {listed_object_path}."
)
def _download_file(self, remote_file_path, local_path):
"""
Download a file from S3 to the local filesystem.
This method downloads a single file from S3 to the specified local path.
It's used internally by the download_artifacts method.
Args:
remote_file_path: Relative path of the file within the S3 bucket,
relative to the repository's root path.
local_path: Absolute path where the file should be saved locally.
The parent directory must exist.
"""
(bucket, s3_root_path) = self.parse_s3_compliant_uri(self.artifact_uri)
s3_full_path = posixpath.join(s3_root_path, remote_file_path)
s3_client = self._get_s3_client()
download_kwargs = (
{"ExtraArgs": self._bucket_owner_params} if self._bucket_owner_params else {}
)
s3_client.download_file(bucket, s3_full_path, local_path, **download_kwargs)
def delete_artifacts(self, artifact_path=None):
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
dest_path = dest_path.rstrip("/") if dest_path else ""
s3_client = self._get_s3_client()
paginator = s3_client.get_paginator("list_objects_v2")
results = paginator.paginate(Bucket=bucket, Prefix=dest_path, **self._bucket_owner_params)
for result in results:
keys = []
for to_delete_obj in result.get("Contents", []):
file_path = to_delete_obj.get("Key")
self._verify_listed_object_contains_artifact_path_prefix(
listed_object_path=file_path, artifact_path=dest_path
)
keys.append({"Key": file_path})
if keys:
s3_client.delete_objects(
Bucket=bucket, Delete={"Objects": keys}, **self._bucket_owner_params
)
def create_multipart_upload(self, local_file, num_parts=1, artifact_path=None):
"""
Initiate a multipart upload for efficient large file uploads to S3.
This method creates a multipart upload session in S3 and generates
presigned URLs for uploading each part. This is more efficient than
single-part uploads for large files and provides better error recovery.
Args:
local_file: Absolute path to the local file to upload. The file must
exist and be readable.
num_parts: Number of parts to split the upload into. Must be between
1 and 10,000 (S3 limit). More parts allow greater parallelism
but increase overhead.
artifact_path: Optional relative path within the S3 bucket where the
artifact should be stored. If None, the file is stored in the root
of the configured S3 path.
Returns:
CreateMultipartUploadResponse containing:
- credentials: List of MultipartUploadCredential objects with presigned URLs
- upload_id: S3 upload ID for tracking this multipart upload
"""
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
dest_path = posixpath.join(dest_path, os.path.basename(local_file))
s3_client = self._get_s3_client()
create_response = s3_client.create_multipart_upload(
Bucket=bucket,
Key=dest_path,
**self._bucket_owner_params,
)
upload_id = create_response["UploadId"]
credentials = []
for i in range(1, num_parts + 1): # part number must be in [1, 10000]
url = s3_client.generate_presigned_url(
"upload_part",
Params={
"Bucket": bucket,
"Key": dest_path,
"PartNumber": i,
"UploadId": upload_id,
**self._bucket_owner_params,
},
)
credentials.append(
MultipartUploadCredential(
url=url,
part_number=i,
headers={},
)
)
return CreateMultipartUploadResponse(
credentials=credentials,
upload_id=upload_id,
)
def complete_multipart_upload(self, local_file, upload_id, parts=None, artifact_path=None):
"""
Complete a multipart upload by combining all parts into a single S3 object.
This method should be called after all parts have been successfully uploaded
using the presigned URLs from create_multipart_upload. It tells S3 to combine
all the parts into the final object.
Args:
local_file: Absolute path to the local file that was uploaded. Must match
the local_file used in create_multipart_upload.
upload_id: The S3 upload ID returned by create_multipart_upload.
parts: List of MultipartUploadPart objects containing metadata for each
successfully uploaded part. Must include part_number and etag for each part.
Parts must be provided in order (part 1, part 2, etc.).
artifact_path: Optional relative path where the artifact should be stored.
Must match the artifact_path used in create_multipart_upload.
"""
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
dest_path = posixpath.join(dest_path, os.path.basename(local_file))
parts = [{"PartNumber": part.part_number, "ETag": part.etag} for part in parts]
s3_client = self._get_s3_client()
s3_client.complete_multipart_upload(
Bucket=bucket,
Key=dest_path,
UploadId=upload_id,
MultipartUpload={"Parts": parts},
**self._bucket_owner_params,
)
def abort_multipart_upload(self, local_file, upload_id, artifact_path=None):
"""
Abort a multipart upload and clean up any uploaded parts.
This method should be called if a multipart upload fails or is cancelled.
It cleans up any parts that were successfully uploaded and cancels the
multipart upload session in S3.
Args:
local_file: Absolute path to the local file that was being uploaded.
Must match the local_file used in create_multipart_upload.
upload_id: The S3 upload ID returned by create_multipart_upload.
artifact_path: Optional relative path where the artifact would have been stored.
Must match the artifact_path used in create_multipart_upload.
"""
(bucket, dest_path) = self.parse_s3_compliant_uri(self.artifact_uri)
if artifact_path:
dest_path = posixpath.join(dest_path, artifact_path)
dest_path = posixpath.join(dest_path, os.path.basename(local_file))
s3_client = self._get_s3_client()
s3_client.abort_multipart_upload(
Bucket=bucket,
Key=dest_path,
UploadId=upload_id,
**self._bucket_owner_params,
)
| S3ArtifactRepository |
python | networkx__networkx | networkx/algorithms/tests/test_matching.py | {
"start": 16837,
"end": 18319
} | class ____:
"""Unit tests for the
:func:`~networkx.algorithms.matching.maximal_matching`.
"""
def test_valid_matching(self):
edges = [(1, 2), (1, 5), (2, 3), (2, 5), (3, 4), (3, 6), (5, 6)]
G = nx.Graph(edges)
matching = nx.maximal_matching(G)
assert nx.is_maximal_matching(G, matching)
def test_single_edge_matching(self):
# In the star graph, any maximal matching has just one edge.
G = nx.star_graph(5)
matching = nx.maximal_matching(G)
assert 1 == len(matching)
assert nx.is_maximal_matching(G, matching)
def test_self_loops(self):
# Create the path graph with two self-loops.
G = nx.path_graph(3)
G.add_edges_from([(0, 0), (1, 1)])
matching = nx.maximal_matching(G)
assert len(matching) == 1
# The matching should never include self-loops.
assert not any(u == v for u, v in matching)
assert nx.is_maximal_matching(G, matching)
def test_ordering(self):
"""Tests that a maximal matching is computed correctly
regardless of the order in which nodes are added to the graph.
"""
for nodes in permutations(range(3)):
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from([(0, 1), (0, 2)])
matching = nx.maximal_matching(G)
assert len(matching) == 1
assert nx.is_maximal_matching(G, matching)
| TestMaximalMatching |
python | optuna__optuna | optuna/visualization/_contour.py | {
"start": 1161,
"end": 1345
} | class ____(NamedTuple):
name: str
range: tuple[float, float]
is_log: bool
is_cat: bool
indices: list[str | int | float]
values: list[str | float | None]
| _AxisInfo |
python | numba__numba | numba/tests/test_nrt.py | {
"start": 1122,
"end": 1927
} | class ____(unittest.TestCase):
"""
Unit test for checking the use of the NRT fails if the
initialization sequence has not been run.
"""
_numba_parallel_test_ = False
def test_init_fail(self):
methods = {'library': (),
'meminfo_new': ((), ()),
'meminfo_alloc': ((),),
}
for meth, args in methods.items():
try:
with self.assertRaises(RuntimeError) as raises:
rtsys._init = False
fn = getattr(rtsys, meth)
fn(*args)
msg = "Runtime must be initialized before use."
self.assertIn(msg, str(raises.exception))
finally:
rtsys._init = True
| TestNrtMemInfoNotInitialized |
python | getsentry__sentry | src/sentry/auth/elevated_mode.py | {
"start": 291,
"end": 584
} | class ____(str, Enum):
INVALID_IP = "invalid-ip"
INCOMPLETE_SSO = "incomplete-sso"
# Indicates the request should be allowed
NONE = None
def __bool__(self) -> bool:
return self.value is not None
def __str__(self) -> str:
return self.value
| InactiveReason |
python | crytic__slither | slither/tools/upgradeability/__main__.py | {
"start": 6124,
"end": 6415
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self, parser: Any, *args: Any, **kwargs: Any
) -> None: # pylint: disable=signature-differs
checks = _get_checks()
output_detectors(checks)
parser.exit()
| ListDetectors |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_organization_release_assemble.py | {
"start": 571,
"end": 7334
} | class ____(APITestCase):
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
self.token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
self.team = self.create_team(organization=self.organization)
self.release = self.create_release(version="my-unique-release.1")
self.url = reverse(
"sentry-api-0-organization-release-assemble",
args=[self.organization.slug, self.release.version],
)
def test_assemble_json_schema(self) -> None:
response = self.client.post(
self.url, data={"lol": "test"}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400, response.content
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={"checksum": "invalid"},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
response = self.client.post(
self.url,
data={"checksum": checksum},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
response = self.client.post(
self.url,
data={"checksum": checksum, "chunks": []},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.NOT_FOUND
@patch("sentry.tasks.assemble.assemble_artifacts")
def test_assemble(self, mock_assemble_artifacts: MagicMock) -> None:
bundle_file = self.create_artifact_bundle_zip(
org=self.organization.slug, release=self.release.version
)
total_checksum = sha1(bundle_file).hexdigest()
blob1 = FileBlob.from_file_with_organization(ContentFile(bundle_file), self.organization)
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob1)
response = self.client.post(
self.url,
data={"checksum": total_checksum, "chunks": [blob1.checksum]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
assert set(response.data["missingChunks"]) == set()
mock_assemble_artifacts.apply_async.assert_called_once_with(
kwargs={
"org_id": self.organization.id,
"version": self.release.version,
"chunks": [blob1.checksum],
"checksum": total_checksum,
"project_ids": [self.project.id],
"is_release_bundle_migration": True,
}
)
def test_assemble_response(self) -> None:
bundle_file = self.create_artifact_bundle_zip(
org=self.organization.slug, release=self.release.version
)
total_checksum = sha1(bundle_file).hexdigest()
blob1 = FileBlob.from_file_with_organization(ContentFile(bundle_file), self.organization)
assemble_artifacts(
org_id=self.organization.id,
version=self.release.version,
checksum=total_checksum,
chunks=[blob1.checksum],
project_ids=[self.project.id],
is_release_bundle_migration=True,
)
response = self.client.post(
self.url,
data={"checksum": total_checksum, "chunks": [blob1.checksum]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.OK
def test_dif_error_response(self) -> None:
bundle_file = b"invalid"
total_checksum = sha1(bundle_file).hexdigest()
blob1 = FileBlob.from_file_with_organization(ContentFile(bundle_file), self.organization)
assemble_artifacts(
org_id=self.organization.id,
version=self.release.version,
checksum=total_checksum,
chunks=[blob1.checksum],
project_ids=[self.project.id],
is_release_bundle_migration=True,
)
response = self.client.post(
self.url,
data={"checksum": total_checksum, "chunks": [blob1.checksum]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.ERROR
@patch("sentry.tasks.assemble.assemble_artifacts")
def test_assemble_as_artifact_bundle(self, mock_assemble_artifacts: MagicMock) -> None:
bundle_file = self.create_artifact_bundle_zip(
org=self.organization.slug, release=self.release.version
)
total_checksum = sha1(bundle_file).hexdigest()
blob1 = FileBlob.from_file_with_organization(ContentFile(bundle_file), self.organization)
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob1)
response = self.client.post(
self.url,
data={"checksum": total_checksum, "chunks": [blob1.checksum]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.CREATED
assert set(response.data["missingChunks"]) == set()
# assert that we are uploading as artifact bundle
kwargs = {
"org_id": self.organization.id,
"version": self.release.version,
"checksum": total_checksum,
"chunks": [blob1.checksum],
"project_ids": [self.project.id],
"is_release_bundle_migration": True,
}
mock_assemble_artifacts.apply_async.assert_called_once_with(kwargs=kwargs)
# actually call through to assemble :-)
assemble_artifacts(**kwargs)
response = self.client.post(
self.url,
data={"checksum": total_checksum, "chunks": [blob1.checksum]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data["state"] == ChunkFileState.OK
# make sure that we have an artifact bundle now
artifact_bundles = ArtifactBundle.objects.filter(
organization_id=self.organization.id,
)
assert len(artifact_bundles) == 1
| OrganizationReleaseAssembleTest |
python | python__mypy | mypy/nodes.py | {
"start": 96918,
"end": 97287
} | class ____(Expression):
"""Type alias expression (rvalue)."""
__slots__ = ("node",)
__match_args__ = ("node",)
node: TypeAlias
def __init__(self, node: TypeAlias) -> None:
super().__init__()
self.node = node
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_alias_expr(self)
| TypeAliasExpr |
python | apache__airflow | providers/ydb/src/airflow/providers/ydb/operators/ydb.py | {
"start": 997,
"end": 2125
} | class ____(SQLExecuteQueryOperator):
"""
Executes sql code in a specific YDB database.
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
Template references are recognized by str ending in '.sql'
:param ydb_conn_id: The :ref:`ydb conn id <howto/connection:ydb>`
reference to a specific YDB cluster and database.
:param parameters: (optional) the parameters to render the SQL query with.
"""
ui_color = "#ededed"
def __init__(
self,
sql: str | list[str],
is_ddl: bool = False,
ydb_conn_id: str = "ydb_default",
parameters: Mapping | Iterable | None = None,
**kwargs,
) -> None:
if parameters is not None:
raise AirflowException("parameters are not supported yet")
if is_ddl:
hook_params = kwargs.pop("hook_params", {})
kwargs["hook_params"] = {"is_ddl": is_ddl, **hook_params}
super().__init__(conn_id=ydb_conn_id, sql=sql, parameters=parameters, **kwargs)
| YDBExecuteQueryOperator |
python | google__jax | jax/_src/interpreters/mlir.py | {
"start": 24066,
"end": 25829
} | class ____:
# A mapping between primitives and user-defined LoweringRules.
# When lowering a primitive, give priority to the rule in this map over
# existing Jax rules.
override_lowering_rules: tuple[tuple[core.Primitive, LoweringRule]] | None = None
# Signals that the entire computation being lowered operates on global
# constants. This will result in adding jax.global_constant attributes
# to the arguments of all functions that are created, e.g., floor_divide.
# This is used only in export and jax2tf in presence of shape polymorphism
# or multi-platform lowering.
global_constant_computation: bool = False
# Signals that we are lowering for exporting.
for_export: bool = False
# See usage in https://docs.jax.dev/en/latest/export/export.html#ensuring-forward-and-backward-compatibility
# We have this here to ensure it is reflected in the cache keys
export_ignore_forward_compatibility: bool = False
# During lowering hoist the core.Literal constants as args for the main MLIR
# function and all the intermediate functions that need them.
# See https://docs.jax.dev/en/latest/internals/constants.html
# TODO(necula): perhaps we can use `for_export` instead of this additional
# field.
hoist_constants_as_args: bool = config.use_simplified_jaxpr_constants.value
def _code_to_filename(code: types.CodeType) -> str | None:
"""Returns the canonicalized filename of a code object.
Returns None if the filename should be omitted in tracebacks.
"""
if not source_info_util.is_user_filename(code.co_filename):
return None
pattern = config.hlo_source_file_canonicalization_regex.value
return re.sub(pattern, '', code.co_filename) if pattern else code.co_filename
@dataclasses.dataclass
| LoweringParameters |
python | getsentry__sentry | src/sentry/integrations/cursor/models.py | {
"start": 163,
"end": 256
} | class ____(BaseModel):
text: str
images: list[dict] = []
| CursorAgentLaunchRequestPrompt |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units12.py | {
"start": 315,
"end": 1402
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [93550464, 93548544]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
worksheet.write_column(0, 1, data[0])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
chart.set_y_axis({"display_units": "hundreds", "display_units_visible": False})
chart.set_x_axis({"display_units": "thousands", "display_units_visible": False})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/backends/_coreml/preprocess.py | {
"start": 985,
"end": 4301
} | class ____:
LINEAR = "linear"
LINEAR_SYMMETRIC = "linear_symmetric"
NONE = "none"
def TensorSpec(shape, dtype=ScalarType.Float):
return (shape, dtype)
def CompileSpec(
inputs,
outputs,
backend=CoreMLComputeUnit.CPU,
allow_low_precision=True,
quantization_mode=CoreMLQuantizationMode.NONE,
mlmodel_export_path=None,
convert_to=None,
):
return (
inputs,
outputs,
backend,
allow_low_precision,
quantization_mode,
mlmodel_export_path,
convert_to,
)
def _check_enumerated_shape(shape):
for s in shape:
if not isinstance(s, (list, tuple)):
return False
return True
def _convert_to_mil_type(shape, dtype, name: str):
mil_shape = shape
if _check_enumerated_shape(shape):
mil_shape = ct.EnumeratedShapes(shape)
ml_type = TensorType(shape=mil_shape, dtype=torch_to_mil_types[dtype])
ml_type.name = name
return ml_type
def preprocess(script_module: torch._C.ScriptObject, compile_spec: dict[str, tuple]):
spec = compile_spec["forward"]
(
input_specs,
output_specs,
backend,
allow_low_precision,
quantization_mode,
mlmodel_export_path,
convert_to,
) = spec
mil_inputs = []
inputs = []
for index, input in enumerate(input_specs):
shape, dtype = input
name = "input_" + str(index)
inputs.append([name, str(dtype), str(shape)])
ml_type = _convert_to_mil_type(shape, dtype, name)
mil_inputs.append(ml_type)
model = torch.jit.RecursiveScriptModule._construct(script_module, lambda x: None)
mlmodel = ct.convert(model, inputs=mil_inputs, convert_to=convert_to)
if quantization_mode != CoreMLQuantizationMode.NONE:
quant_model_spec = quantization_utils.quantize_weights(
mlmodel, nbits=8, quantization_mode=quantization_mode
)
mlmodel = ct.models.MLModel(quant_model_spec)
spec = mlmodel.get_spec()
assert len(spec.description.output) == len(output_specs) # type: ignore[attr-defined]
outputs = []
for index, output in enumerate(output_specs):
shape, dtype = output
name = spec.description.output[index].name # type: ignore[attr-defined]
outputs.append([name, str(dtype), str(shape)])
mlmodel = ct.models.model.MLModel(spec)
print(mlmodel)
if mlmodel_export_path is not None:
print(f"Saving CoreML .mlmodel file to {mlmodel_export_path}")
mlmodel.save(mlmodel_export_path)
config = {
"spec_ver": str(spec.specificationVersion), # type: ignore[attr-defined]
"backend": backend,
"allow_low_precision": str(allow_low_precision),
}
metadata = {
"coremltool_ver": mlmodel.user_defined_metadata[CT_METADATA_VERSION],
"torch_ver": mlmodel.user_defined_metadata[CT_METADATA_SOURCE],
}
coreml_compile_spec = {
"inputs": inputs,
"outputs": outputs,
"config": config,
"metadata": metadata,
}
mlmodel = spec.SerializeToString() # type: ignore[attr-defined]
return {
"model": mlmodel,
"hash": str(hashlib.sha256(mlmodel).hexdigest()),
"extra": json.dumps(coreml_compile_spec),
}
| CoreMLQuantizationMode |
python | getsentry__sentry | tests/sentry/receivers/test_releases.py | {
"start": 1722,
"end": 9539
} | class ____(TestCase):
def assertResolvedFromCommit(self, group, commit):
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
assert Group.objects.filter(
id=group.id, status=GroupStatus.RESOLVED, resolved_at__isnull=False
).exists()
assert not GroupInbox.objects.filter(group=group).exists()
assert GroupHistory.objects.filter(
group=group,
status=GroupHistoryStatus.SET_RESOLVED_IN_COMMIT,
).exists()
def assertNotResolvedFromCommit(self, group, commit):
assert not GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
assert not Group.objects.filter(id=group.id, status=GroupStatus.RESOLVED).exists()
assert GroupInbox.objects.filter(group=group).exists()
# TODO(dcramer): pull out short ID matching and expand regexp tests
@receivers_raise_on_send()
def test_simple_no_author(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
)
self.assertResolvedFromCommit(group, commit)
@receivers_raise_on_send()
def test_updating_commit(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
)
self.assertNotResolvedFromCommit(group, commit)
commit.message = f"Foo Biz\n\nFixes {group.qualified_short_id}"
commit.save()
self.assertResolvedFromCommit(group, commit)
@receivers_raise_on_send()
def test_updating_commit_with_existing_grouplink(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
)
self.assertResolvedFromCommit(group, commit)
commit.message = f"Foo Bar Biz\n\nFixes {group.qualified_short_id}"
commit.save()
self.assertResolvedFromCommit(group, commit)
@receivers_raise_on_send()
def test_removes_group_link_when_message_changes(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
)
self.assertResolvedFromCommit(group, commit)
commit.message = "no groups here"
commit.save()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
self.assertNotResolvedFromCommit(group, commit)
@receivers_raise_on_send()
def test_no_matching_group(self) -> None:
repo = Repository.objects.create(name="example", organization_id=self.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=self.organization.id,
message=f"Foo Biz\n\nFixes {self.project.slug.upper()}-12F",
)
assert not GroupLink.objects.filter(
linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
@receivers_raise_on_send()
def test_matching_author_with_assignment(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
user = self.create_user(name="Foo Bar", email="foo@example.com", is_active=True)
with assume_test_silo_mode(SiloMode.CONTROL):
email = UserEmail.objects.get_primary_email(user=user)
email.is_verified = True
with assume_test_silo_mode(SiloMode.CONTROL):
email.save()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
OrganizationMember.objects.create(organization=group.project.organization, user_id=user.id)
with assume_test_silo_mode(SiloMode.CONTROL):
UserOption.objects.set_value(user=user, key="self_assign_issue", value="1")
author = CommitAuthor.objects.create(
organization_id=group.organization.id, name=user.name, email=user.email
)
author.preload_users()
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
organization_id=group.organization.id,
repository_id=repo.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
author=author,
)
self.assertResolvedFromCommit(group, commit)
assert GroupAssignee.objects.filter(group=group, user_id=user.id).exists()
assert Activity.objects.filter(
project=group.project, group=group, type=ActivityType.ASSIGNED.value, user_id=user.id
)[0].data == {
"assignee": str(user.id),
"assigneeEmail": user.email,
"assigneeName": user.name,
"assigneeType": "user",
}
assert GroupSubscription.objects.filter(group=group, user_id=user.id).exists()
@receivers_raise_on_send()
def test_matching_author_without_assignment(self) -> None:
group = self.create_group()
add_group_to_inbox(group, GroupInboxReason.MANUAL)
user = self.create_user(name="Foo Bar", email="foo@example.com", is_active=True)
with assume_test_silo_mode(SiloMode.CONTROL):
email = UserEmail.objects.get_primary_email(user=user)
email.is_verified = True
email.save()
UserOption.objects.set_value(user=user, key="self_assign_issue", value="0")
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
OrganizationMember.objects.create(organization=group.project.organization, user_id=user.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
organization_id=group.organization.id,
repository_id=repo.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}",
author=CommitAuthor.objects.create(
organization_id=group.organization.id, name=user.name, email=user.email
),
)
self.assertResolvedFromCommit(group, commit)
assert not Activity.objects.filter(
project=group.project, group=group, type=ActivityType.ASSIGNED.value, user_id=user.id
).exists()
assert GroupSubscription.objects.filter(group=group, user_id=user.id).exists()
| ResolvedInCommitTest |
python | agronholm__apscheduler | src/apscheduler/datastores/sqlalchemy.py | {
"start": 2005,
"end": 2454
} | class ____(TypeDecorator[datetime]):
impl = Unicode(32)
cache_ok = True
def process_bind_param(
self, value: datetime | None, dialect: Dialect
) -> str | None:
return value.isoformat() if value is not None else None
def process_result_value(
self, value: str | None, dialect: Dialect
) -> datetime | None:
return datetime.fromisoformat(value) if value is not None else None
| EmulatedTimestampTZ |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_real_transforms.py | {
"start": 13323,
"end": 16863
} | class ____:
dec = 14
dct_type = [1, 2, 3, 4]
norms = [None, 'backward', 'ortho', 'forward']
rstate = np.random.RandomState(1234)
shape = (32, 16)
data = rstate.randn(*shape)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [None,
1, (1,), [1],
0, (0,), [0],
(0, 1), [0, 1],
(-2, -1), [-2, -1]])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', ['ortho'])
def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
assert_array_almost_equal(self.data, tmp, decimal=12)
@pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', norms)
def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm):
y1 = funcn(self.data, type=dct_type, axes=None, norm=norm)
y2 = ref_2d(func, self.data, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)])
@pytest.mark.parametrize('dct_type', dct_type)
@pytest.mark.parametrize('norm', norms)
def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm):
fdata = dctn(self.data, type=dct_type, norm=norm)
y1 = funcn(fdata, type=dct_type, norm=norm)
y2 = ref_2d(func, fdata, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
def test_axes_and_shape(self, fforward, finverse):
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, s=self.data.shape[0], axes=(0, 1))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fforward(self.data, s=self.data.shape, axes=0)
@pytest.mark.parametrize('fforward', [dctn, dstn])
def test_shape(self, fforward):
tmp = fforward(self.data, s=(128, 128), axes=None)
assert_equal(tmp.shape, (128, 128))
@pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
(dstn, idstn)])
@pytest.mark.parametrize('axes', [1, (1,), [1],
0, (0,), [0]])
def test_shape_is_none_with_axes(self, fforward, finverse, axes):
tmp = fforward(self.data, s=None, axes=axes, norm='ortho')
tmp = finverse(tmp, s=None, axes=axes, norm='ortho')
assert_array_almost_equal(self.data, tmp, decimal=self.dec)
@pytest.mark.parametrize('func', [dct, dctn, idct, idctn,
dst, dstn, idst, idstn])
def test_swapped_byte_order(func):
rng = np.random.RandomState(1234)
x = rng.rand(10)
swapped_dt = x.dtype.newbyteorder('S')
assert_allclose(func(x.astype(swapped_dt)), func(x))
| Test_DCTN_IDCTN |
python | nedbat__coveragepy | coverage/plugin.py | {
"start": 13462,
"end": 21509
} | class ____(CoveragePluginBase):
"""Support needed for files during the analysis and reporting phases.
File tracer plug-ins implement a subclass of `FileReporter`, and return
instances from their :meth:`CoveragePlugin.file_reporter` method.
There are many methods here, but only :meth:`lines` is required, to provide
the set of executable lines in the file.
See :ref:`howitworks` for details of the different coverage.py phases.
"""
def __init__(self, filename: str) -> None:
"""Simple initialization of a `FileReporter`.
The `filename` argument is the path to the file being reported. This
will be available as the `.filename` attribute on the object. Other
method implementations on this base class rely on this attribute.
"""
self.filename = filename
def __repr__(self) -> str:
return f"<{self.__class__.__name__} filename={self.filename!r}>"
def relative_filename(self) -> str:
"""Get the relative file name for this file.
This file path will be displayed in reports. The default
implementation will supply the actual project-relative file path. You
only need to supply this method if you have an unusual syntax for file
paths.
"""
return files.relative_filename(self.filename)
def source(self) -> str:
"""Get the source for the file.
Returns a Unicode string.
The base implementation simply reads the `self.filename` file and
decodes it as UTF-8. Override this method if your file isn't readable
as a text file, or if you need other encoding support.
"""
with open(self.filename, encoding="utf-8") as f:
return f.read()
def lines(self) -> set[TLineNo]:
"""Get the executable lines in this file.
Your plug-in must determine which lines in the file were possibly
executable. This method returns a set of those line numbers.
Returns a set of line numbers.
"""
_needs_to_implement(self, "lines")
def excluded_lines(self) -> set[TLineNo]:
"""Get the excluded executable lines in this file.
Your plug-in can use any method it likes to allow the user to exclude
executable lines from consideration.
Returns a set of line numbers.
The base implementation returns the empty set.
"""
return set()
def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
"""Translate recorded lines into reported lines.
Some file formats will want to report lines slightly differently than
they are recorded. For example, Python records the last line of a
multi-line statement, but reports are nicer if they mention the first
line.
Your plug-in can optionally define this method to perform these kinds
of adjustment.
`lines` is a sequence of integers, the recorded line numbers.
Returns a set of integers, the adjusted line numbers.
The base implementation returns the numbers unchanged.
"""
return set(lines)
def arcs(self) -> set[TArc]:
"""Get the executable arcs in this file.
To support branch coverage, your plug-in needs to be able to indicate
possible execution paths, as a set of line number pairs. Each pair is
a `(prev, next)` pair indicating that execution can transition from the
`prev` line number to the `next` line number.
Returns a set of pairs of line numbers. The default implementation
returns an empty set.
"""
return set()
def no_branch_lines(self) -> set[TLineNo]:
"""Get the lines excused from branch coverage in this file.
Your plug-in can use any method it likes to allow the user to exclude
lines from consideration of branch coverage.
Returns a set of line numbers.
The base implementation returns the empty set.
"""
return set()
def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
"""Translate recorded arcs into reported arcs.
Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
line number pairs.
Returns a set of line number pairs.
The default implementation returns `arcs` unchanged.
"""
return set(arcs)
def exit_counts(self) -> dict[TLineNo, int]:
"""Get a count of exits from that each line.
To determine which lines are branches, coverage.py looks for lines that
have more than one exit. This function creates a dict mapping each
executable line number to a count of how many exits it has.
To be honest, this feels wrong, and should be refactored. Let me know
if you attempt to implement this method in your plug-in...
"""
return {}
def missing_arc_description(
self,
start: TLineNo,
end: TLineNo,
executed_arcs: Iterable[TArc] | None = None, # pylint: disable=unused-argument
) -> str:
"""Provide an English sentence describing a missing arc.
The `start` and `end` arguments are the line numbers of the missing
arc. Negative numbers indicate entering or exiting code objects.
The `executed_arcs` argument is a set of line number pairs, the arcs
that were executed in this file.
By default, this simply returns the string "Line {start} didn't jump
to {end}".
"""
return f"Line {start} didn't jump to line {end}"
def arc_description(
self,
start: TLineNo, # pylint: disable=unused-argument
end: TLineNo,
) -> str:
"""Provide an English description of an arc's effect."""
return f"jump to line {end}"
def source_token_lines(self) -> TSourceTokenLines:
"""Generate a series of tokenized lines, one for each line in `source`.
These tokens are used for syntax-colored reports.
Each line is a list of pairs, each pair is a token::
[("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]
Each pair has a token class, and the token text. The token classes
are:
* ``"com"``: a comment
* ``"key"``: a keyword
* ``"nam"``: a name, or identifier
* ``"num"``: a number
* ``"op"``: an operator
* ``"str"``: a string literal
* ``"ws"``: some white space
* ``"txt"``: some other kind of text
If you concatenate all the token texts, and then join them with
newlines, you should have your original source back.
The default implementation simply returns each line tagged as
``"txt"``.
"""
for line in self.source().splitlines():
yield [("txt", line)]
def code_regions(self) -> Iterable[CodeRegion]:
"""Identify regions in the source file for finer reporting than by file.
Returns an iterable of :class:`CodeRegion` objects. The kinds reported
should be in the possibilities returned by :meth:`code_region_kinds`.
"""
return []
def code_region_kinds(self) -> Iterable[tuple[str, str]]:
"""Return the kinds of code regions this plugin can find.
The returned pairs are the singular and plural forms of the kinds::
[
("function", "functions"),
("class", "classes"),
]
This will usually be hard-coded, but could also differ by the specific
source file involved.
"""
return []
def __eq__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename == other.filename
def __lt__(self, other: Any) -> bool:
return isinstance(other, FileReporter) and self.filename < other.filename
# This object doesn't need to be hashed.
__hash__ = None # type: ignore[assignment]
| FileReporter |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py | {
"start": 666,
"end": 5312
} | class ____(CustomLLM):
"""
Replicate LLM.
Examples:
`pip install llama-index-llms-replicate`
```python
from llama_index.llms.replicate import Replicate
# Set up the Replicate API token
import os
os.environ["REPLICATE_API_TOKEN"] = "<your API key>"
# Initialize the Replicate class
llm = Replicate(
model="replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b"
)
# Example of calling the 'complete' method with a prompt
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
model: str = Field(description="The Replicate model to use.")
temperature: float = Field(
default=DEFAULT_REPLICATE_TEMP,
description="The temperature to use for sampling.",
ge=0.01,
le=1.0,
)
image: str = Field(
default="", description="The image file for multimodal model to use. (optional)"
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
prompt_key: str = Field(
default="prompt", description="The key to use for the prompt in API calls."
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Replicate API."
)
is_chat_model: bool = Field(
default=False, description="Whether the model is a chat model."
)
@classmethod
def class_name(cls) -> str:
return "Replicate_llm"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=DEFAULT_NUM_OUTPUTS,
model_name=self.model,
is_chat_model=self.is_chat_model,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs: Dict[str, Any] = {
"temperature": self.temperature,
"max_length": self.context_window,
}
if self.image != "":
try:
base_kwargs["image"] = open(self.image, "rb")
except FileNotFoundError:
raise FileNotFoundError(
"Could not load image file. Please check whether the file exists"
)
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_input_dict(self, prompt: str, **kwargs: Any) -> Dict[str, Any]:
return {self.prompt_key: prompt, **self._model_kwargs, **kwargs}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
prompt = self.messages_to_prompt(messages)
completion_response = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response_gen = self.stream_complete(prompt, formatted=formatted, **kwargs)
response_list = list(response_gen)
final_response = response_list[-1]
final_response.delta = None
return final_response
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
try:
import replicate
except ImportError:
raise ImportError(
"Could not import replicate library."
"Please install replicate with `pip install replicate`"
)
if not formatted:
prompt = self.completion_to_prompt(prompt)
input_dict = self._get_input_dict(prompt, **kwargs)
response_iter = replicate.stream(self.model, input=input_dict)
def gen() -> CompletionResponseGen:
text = ""
for server_event in response_iter:
delta = str(server_event)
text += delta
yield CompletionResponse(
delta=delta,
text=text,
)
return gen()
| Replicate |
python | great-expectations__great_expectations | tests/core/test_expectation_suite.py | {
"start": 41643,
"end": 42560
} | class ____:
def __init__(self):
self.messages = []
def send_usage_message(
self,
event,
event_payload,
success,
):
self.messages.append(
{
"event": event,
"event_payload": event_payload,
"success": success,
}
)
@pytest.mark.unit
def test_add_expectation_fails_validation(empty_suite_with_meta: ExpectationSuite):
suite = empty_suite_with_meta
expectation_type = "my_fake_expectation"
kwargs = {"foo": "bar"}
expectation_configuration = ExpectationConfiguration(
type=expectation_type,
kwargs=kwargs,
)
with pytest.raises(gx_exceptions.InvalidExpectationConfigurationError) as e:
suite.add_expectation_configuration(expectation_configuration)
assert f"{expectation_type} not found" in str(e)
| DataContextSendUsageMessageSpy |
python | getsentry__sentry | tests/sentry/sentry_apps/api/bases/test_sentryapps.py | {
"start": 5773,
"end": 8661
} | class ____(TestCase):
def setUp(self) -> None:
self.permission = SentryAppInstallationPermission()
self.sentry_app = self.create_sentry_app(name="foo", organization=self.organization)
self.installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization, user=self.user
)
self.superuser = self.create_user(is_superuser=True)
def test_missing_request_user(self) -> None:
request = drf_request_from_request(self.make_request(user=AnonymousUser(), method="GET"))
assert not self.permission.has_object_permission(request, APIView(), self.installation)
def test_request_user_in_organization(self) -> None:
request = drf_request_from_request(self.make_request(user=self.user, method="GET"))
assert self.permission.has_object_permission(request, APIView(), self.installation)
def test_request_user_not_in_organization(self) -> None:
user = self.create_user()
request = drf_request_from_request(self.make_request(user=user, method="GET"))
with pytest.raises(SentryAppError):
self.permission.has_object_permission(request, APIView(), self.installation)
def test_superuser_has_permission(self) -> None:
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.installation)
request._request.method = "POST"
assert self.permission.has_object_permission(request, APIView(), self.installation)
@override_options({"superuser.read-write.ga-rollout": True})
@override_settings(SENTRY_SELF_HOSTED=False)
def test_superuser_has_permission_read_only(self) -> None:
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.installation)
request._request.method = "POST"
with pytest.raises(SentryAppError):
self.permission.has_object_permission(request, APIView(), self.installation)
@override_options({"superuser.read-write.ga-rollout": True})
@override_settings(SENTRY_SELF_HOSTED=False)
def test_superuser_has_permission_write(self) -> None:
self.add_user_permission(self.superuser, "superuser.write")
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.installation)
request._request.method = "POST"
self.permission.has_object_permission(request, APIView(), self.installation)
@control_silo_test
| SentryAppInstallationPermissionTest |
python | ray-project__ray | python/ray/tests/test_autoscaling_policy.py | {
"start": 2045,
"end": 3304
} | class ____:
def __init__(self, resources, in_cluster, node_type, start_time):
self.total_resources = copy.deepcopy(resources)
self.available_resources = copy.deepcopy(resources)
self.in_cluster = in_cluster
self.node_type = node_type
self.start_time = start_time
self.node_id = mock_node_id()
def bundle_fits(self, bundle):
if not self.in_cluster:
return False
for resource, quantity in bundle.items():
if self.available_resources.get(resource, -1) < quantity:
return False
return True
def feasible(self, bundle):
if not self.in_cluster:
return False
for resource, quantity in bundle.items():
if self.total_resources.get(resource, -1) < quantity:
return False
return True
def allocate(self, bundle):
assert self.bundle_fits(bundle) and self.in_cluster
for resource, quantity in bundle.items():
self.available_resources[resource] -= quantity
def free(self, bundle):
for resource, quantity in bundle.items():
self.available_resources[resource] += quantity
assert self.feasible(self.available_resources)
| Node |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 80748,
"end": 91198
} | class ____:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self) -> Literal[False]:
return False
def __call__(self, fullpath: object):
return iter(())
def safe_listdir(path: StrOrBytesPath):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
# Ignore the directory if does not exist, not a directory or
# permission denied
if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
raise
return ()
def distributions_from_metadata(path: str):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata: _MetadataType = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root,
entry,
metadata,
precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
for line in _read_utf8_with_fallback(path).splitlines():
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
if hasattr(pkgutil, 'ImpImporter'):
register_finder(pkgutil.ImpImporter, find_on_path)
register_finder(importlib.machinery.FileFinder, find_on_path)
_namespace_handlers: dict[type, _NSHandlerType[Any]] = _declare_state(
'dict', '_namespace_handlers', {}
)
_namespace_packages: dict[str | None, list[str]] = _declare_state(
'dict', '_namespace_packages', {}
)
def register_namespace_handler(
importer_type: type[_T], namespace_handler: _NSHandlerType[_T]
) -> None:
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# use find_spec (PEP 451) and fall-back to find_module (PEP 302)
try:
spec = importer.find_spec(packageName)
except AttributeError:
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
else:
loader = spec.loader if spec else None
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
importlib.import_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module: types.ModuleType) -> None:
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName: str) -> None:
"""Declare that package 'packageName' is a namespace package"""
msg = (
f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n"
"Implementing implicit namespace packages (as specified in PEP 420) "
"is preferred to `pkg_resources.declare_namespace`. "
"See https://setuptools.pypa.io/en/latest/references/"
"keywords.html#keyword-namespace-packages"
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path: MutableSequence[str] = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError as e:
raise TypeError("Not a package:", parent) from e
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item: str, parent: str | None = None) -> None:
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(
importer: object,
path_item: StrPath,
packageName: str,
module: types.ModuleType,
):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
if hasattr(pkgutil, 'ImpImporter'):
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
register_namespace_handler(importlib.machinery.FileFinder, file_ns_handler)
def null_ns_handler(
importer: object,
path_item: str | None,
packageName: str | None,
module: _ModuleLike | None,
) -> None:
return None
register_namespace_handler(object, null_ns_handler)
@overload
def normalize_path(filename: StrPath) -> str: ...
@overload
def normalize_path(filename: BytesPath) -> bytes: ...
def normalize_path(filename: StrOrBytesPath) -> str | bytes:
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename: StrOrBytesPath): # pragma: nocover
"""
Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
symlink components. Using
os.path.abspath() works around this limitation. A fix in os.getcwd()
would probably better, in Cygwin even more so, except
that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
if TYPE_CHECKING:
# https://github.com/python/mypy/issues/16261
# https://github.com/python/typeshed/issues/6347
@overload
def _normalize_cached(filename: StrPath) -> str: ...
@overload
def _normalize_cached(filename: BytesPath) -> bytes: ...
def _normalize_cached(filename: StrOrBytesPath) -> str | bytes: ...
else:
@functools.cache
def _normalize_cached(filename):
return normalize_path(filename)
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return _is_zip_egg(path) or _is_unpacked_egg(path)
def _is_zip_egg(path):
return (
path.lower().endswith('.egg')
and os.path.isfile(path)
and zipfile.is_zipfile(path)
)
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return path.lower().endswith('.egg') and os.path.isfile(
os.path.join(path, 'EGG-INFO', 'PKG-INFO')
)
def _set_parent_ns(packageName) -> None:
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
| NoDists |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 101957,
"end": 112447
} | class ____(TestCase):
# These tests are for use by the Numba project maintainers to check that
# package builds for which they are responsible are producing artifacts in
# the expected way. If you are a package maintainer and these tests are
# running, they shouldn't be by default. The only way they will run is if
# the environment variable LLVMLITE_DIST_TEST is set. The things they are
# checking are based on how the Numba project maintainers want to ship the
# packages, this may be entirely different to how other maintainers wish to
# ship. Basically, don't enable these tests unless you are sure they are
# suitable for your use case.
#
# The llvmlite DSO is the foundation of Numba's JIT compiler stack and is
# also used by other similar projects. It has to link against LLVM as that
# is what provides the tooling to do e.g. IR generation and JIT compilation.
# There are various options surrounding how to build LLVM and then how to
# link it into llvmlite. There have been many occurences of surprising
# linkages, symbol collisions and various other issues.
#
# The following tests are designed to try and test out some of the more
# common combinations of package type and linkage.
#
# NOTE: For Numba project maintainers on packaging formats and expected
# linkage. The following dictionaries capture the state of linkage as of
# llvmlite release 0.44. This is not an indication that it is correct, just
# that this is what is present in practice and clearly "works" to a large
# degree by virtue of having fixed the few reported issues. If you need to
# modify these dictionaries based on new information, that's fine, just make
# sure that it is an understood action opposed to just capturing what
# happened!
wheel_expected = {"linux": {"x86_64": set(["pthread",
"z",
"dl",
"m",
"gcc_s",
"c",
"rt",
"stdc++",
"ld-linux-x86-64",]),
"aarch64": set(["pthread",
"z",
"dl",
"m",
"gcc_s",
"c",
"rt",
"stdc++",]),
}, # end linux
# NOTE: on windows, this includes a "capture what is
# present and known to work and make sure it doesn"t
# change" approach.
"windows": {"amd64": set(["advapi32",
"kernel32",
"ntdll",
"msvcp140",
"vcruntime140",
"vcruntime140_1",
"api-ms-win-crt-convert-l1-1-0",
"api-ms-win-crt-environment-l1-1-0", # noqa: E501
"api-ms-win-crt-heap-l1-1-0",
"api-ms-win-crt-locale-l1-1-0",
"api-ms-win-crt-math-l1-1-0",
"api-ms-win-crt-runtime-l1-1-0",
"api-ms-win-crt-stdio-l1-1-0",
"api-ms-win-crt-string-l1-1-0",
"api-ms-win-crt-time-l1-1-0",
"api-ms-win-crt-utility-l1-1-0",
"shell32", # this is delayed
"ole32",]), # also delayed
}, # end windows
"darwin": {"x86_64": set(["llvmlite",
"system",
"z",
"corefoundation",
"c++",]),
"arm64": set(["llvmlite",
"system",
"z",
"c++",]),
},# end darwin
} # end wheel_expected
conda_expected = {"linux": {"x86_64": set(["pthread",
"z",
"zstd",
"dl",
"m",
"gcc_s",
"c",
# "stdc++", conda has static c++
"ld-linux-x86-64",]),
"aarch64": set(["pthread",
"z",
"zstd",
"dl",
"m",
"gcc_s",
"c",
# "stdc++", conda has static c++ # noqa: E501
"ld-linux-aarch64",]),
}, # end linux
# NOTE: on windows, this includes a "capture what is
# present and known to work and make sure it doesn"t
# change" approach.
"windows": {"amd64": set(["z",
"zstd",
"advapi32",
"kernel32",
"ntdll",
"msvcp140",
"vcruntime140",
"vcruntime140_1",
"api-ms-win-crt-convert-l1-1-0",
"api-ms-win-crt-environment-l1-1-0", # noqa: E501
"api-ms-win-crt-heap-l1-1-0",
"api-ms-win-crt-locale-l1-1-0",
"api-ms-win-crt-math-l1-1-0",
"api-ms-win-crt-runtime-l1-1-0",
"api-ms-win-crt-stdio-l1-1-0",
"api-ms-win-crt-string-l1-1-0",
"api-ms-win-crt-time-l1-1-0",
"api-ms-win-crt-utility-l1-1-0",
"shell32", # this is delayed
"ole32",]), # also delayed
}, # end windows
"darwin": {"x86_64": set(["llvmlite",
"system",
"z",
"zstd",
"corefoundation",
"c++",]),
"arm64": set(["llvmlite",
"system",
"z",
"zstd",
"c++",]),
},# end darwin
} # end wheel_expected
def check_linkage(self, info, package_type):
machine = platform.machine().lower()
os_name = platform.system().lower()
if package_type == "wheel":
expected = self.wheel_expected[os_name][machine]
elif package_type == "conda":
expected = self.conda_expected[os_name][machine]
else:
raise ValueError(f"Unexpected package type: {package_type}")
got = set(info["canonicalised_linked_libraries"])
# Normalize delvewheel-bundled MSVCP hashed name (e.g. msvcp140-<hash>)
got = {
("msvcp140" if lib.startswith("msvcp140") else lib)
for lib in got
}
try:
self.assertEqual(expected, got)
except AssertionError as e:
msg = ("Unexpected linkage encountered for libllvmlite:\n"
f"Expected: {sorted(expected)}\n"
f" Got: {sorted(got)}\n\n"
f"Difference: {set.symmetric_difference(expected, got)}\n"
f"Only in Expected: {set.difference(expected, got)}\n"
f"Only in Got: {set.difference(got, expected)}\n")
raise AssertionError(msg) from e
@is_wheel_package
def test_wheel_build(self):
info = llvm.config.get_sysinfo()
self.assertEqual(info['llvm_linkage_type'], "static")
self.assertEqual(info['llvm_assertions_state'], "on")
self.check_linkage(info, "wheel")
@is_conda_package
def test_conda_build(self):
info = llvm.config.get_sysinfo()
self.assertEqual(info['llvm_linkage_type'], "static")
self.assertEqual(info['llvm_assertions_state'], "on")
self.check_linkage(info, "conda")
if platform.system().lower() == "linux":
self.assertEqual(info['libstdcxx_linkage_type'], "static")
if __name__ == "__main__":
unittest.main()
| TestBuild |
python | sympy__sympy | sympy/matrices/expressions/determinant.py | {
"start": 252,
"end": 1706
} | class ____(Expr):
"""Matrix Determinant
Represents the determinant of a matrix expression.
Examples
========
>>> from sympy import MatrixSymbol, Determinant, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Determinant(A)
Determinant(A)
>>> Determinant(eye(3)).doit()
1
"""
is_commutative = True
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Determinant, %s, not a matrix" % str(mat))
if mat.is_square is False:
raise NonSquareMatrixError("Det of a non-square matrix")
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
@property
def kind(self):
return self.arg.kind.element_kind
def doit(self, **hints):
arg = self.arg
if hints.get('deep', True):
arg = arg.doit(**hints)
result = arg._eval_determinant()
if result is not None:
return result
return self
def _eval_derivative(self, x):
# Derivative currently implements `hasattr(..., "_eval_derivative")` to proceed:
return None
def det(matexpr):
""" Matrix Determinant
Examples
========
>>> from sympy import MatrixSymbol, det, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> det(A)
Determinant(A)
>>> det(eye(3))
1
"""
return Determinant(matexpr).doit()
| Determinant |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/default_name_resolver.py | {
"start": 63,
"end": 434
} | class ____(scrapy.Spider):
"""
Raises a twisted.internet.error.DNSLookupError:
the default name resolver does not handle IPv6 addresses.
"""
name = "ipv6_spider"
start_urls = ["http://[::1]"]
if __name__ == "__main__":
process = AsyncCrawlerProcess(settings={"RETRY_ENABLED": False})
process.crawl(IPv6Spider)
process.start()
| IPv6Spider |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_json.py | {
"start": 5607,
"end": 6017
} | class ____(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
| TestReadWriteJSON |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 2912,
"end": 3071
} | class ____(SphinxLogRecord):
"""Info log record class supporting location"""
prefix = '' # do not show any prefix for INFO messages
| SphinxInfoLogRecord |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-awadb/llama_index/readers/awadb/base.py | {
"start": 170,
"end": 1891
} | class ____(BaseReader):
"""
Awadb reader.
Retrieves documents through an existing awadb client.
These documents can then be used in a downstream LlamaIndex data structure.
Args:
client (awadb.client): An awadb client.
"""
def __init__(self, client: Any):
"""Initialize with parameters."""
import_err_msg = "`awadb` package not found, please run `pip install awadb`"
try:
pass
except ImportError:
raise ImportError(import_err_msg)
self.awadb_client = client
def load_data(
self,
query: np.ndarray,
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""
Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
results = self.awadb_client.Search(
query,
k,
text_in_page_content=None,
meta_filter=None,
not_include_fields=None,
)
documents = []
for item_detail in results[0]["ResultItems"]:
documents.append(Document(text=item_detail["embedding_text"]))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| AwadbReader |
python | getsentry__sentry | tests/sentry/issues/test_issue_search.py | {
"start": 18751,
"end": 19240
} | class ____(TestCase):
def test(self) -> None:
assert convert_first_release_value(["123"], [self.project], self.user, None) == ["123"]
def test_latest(self) -> None:
release = self.create_release(self.project)
assert convert_first_release_value(["latest"], [self.project], self.user, None) == [
release.version
]
assert convert_first_release_value(["14.*"], [self.project], self.user, None) == ["14.*"]
| ConvertFirstReleaseValueTest |
python | getsentry__sentry | src/sentry/api/endpoints/project_tagkey_values.py | {
"start": 731,
"end": 3508
} | class ____(ProjectEndpoint):
owner = ApiOwner.UNOWNED
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=10, window=1, concurrent_limit=10),
RateLimitCategory.USER: RateLimit(limit=10, window=1, concurrent_limit=10),
RateLimitCategory.ORGANIZATION: RateLimit(limit=20, window=1, concurrent_limit=5),
}
}
)
def get(self, request: Request, project, key) -> Response:
"""
List a Tag's Values
```````````````````
Return a list of values associated with this key. The `query`
parameter can be used to to perform a "contains" match on
values.
When paginated can return at most 1000 values.
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string project_id_or_slug: the id or slug of the project.
:pparam string key: the tag key to look up.
:auth: required
"""
lookup_key = tagstore.backend.prefix_reserved_key(key)
tenant_ids = {"organization_id": project.organization_id}
try:
environment_id = get_environment_id(request, project.organization_id)
except Environment.DoesNotExist:
# if the environment doesn't exist then the tag can't possibly exist
raise ResourceDoesNotExist
# Flags are stored on the same table as tags but on a different column. Ideally both
# could be queried in a single request. But at present we're not sure if we want to
# treat tags and flags as the same or different and in which context.
if request.GET.get("useFlagsBackend") == "1":
backend = tagstore.flag_backend
else:
backend = tagstore.backend
try:
tagkey = backend.get_tag_key(
project.id,
environment_id,
lookup_key,
tenant_ids=tenant_ids,
)
except tagstore.TagKeyNotFound:
raise ResourceDoesNotExist
start, end = get_date_range_from_params(request.GET)
paginator = backend.get_tag_value_paginator(
project.id,
environment_id,
tagkey.key,
start=start,
end=end,
query=request.GET.get("query"),
order_by="-last_seen",
tenant_ids=tenant_ids,
)
return self.paginate(
request=request,
paginator=paginator,
on_results=lambda results: serialize(results, request.user),
)
| ProjectTagKeyValuesEndpoint |
python | streamlit__streamlit | lib/tests/streamlit/elements/exception_test.py | {
"start": 1467,
"end": 9212
} | class ____(unittest.TestCase):
def test_format_syntax_error_message(self):
"""Tests that format_syntax_error_message produces expected output"""
err = SyntaxError(
"invalid syntax", ("syntax_hilite.py", 84, 23, "st.header(header_text))\n")
)
expected = """
File "syntax_hilite.py", line 84
st.header(header_text))
^
SyntaxError: invalid syntax
"""
assert expected.strip() == _format_syntax_error_message(err)
@parameterized.expand([(True,), (False,)])
def test_markdown_flag(self, is_uncaught_app_exception):
"""Test that ExceptionProtos for StreamlitAPIExceptions (and
subclasses) have the "message_is_markdown" flag set.
"""
proto = ExceptionProto()
exception.marshall(
proto,
RuntimeError("oh no!"),
is_uncaught_app_exception=is_uncaught_app_exception,
)
assert not proto.message_is_markdown
proto = ExceptionProto()
exception.marshall(
proto,
StreamlitAPIException("oh no!"),
is_uncaught_app_exception=is_uncaught_app_exception,
)
assert proto.message_is_markdown
proto = ExceptionProto()
exception.marshall(
proto,
errors.DuplicateWidgetID("oh no!"),
is_uncaught_app_exception=is_uncaught_app_exception,
)
assert proto.message_is_markdown
@parameterized.expand(
[
(user_module.st_call_with_arguments_missing, 2),
(user_module.st_call_with_bad_arguments, 7),
(user_module.pandas_call_with_bad_arguments, 2),
(user_module.internal_python_call_with_bad_arguments, 2),
]
)
@patch("streamlit.elements.exception.get_script_run_ctx")
def test_external_error_stack_starts_with_user_module(
self, user_func, stack_len, patched_get_script_run_ctx
):
"""Test stack traces for exceptions thrown by user code start from the first
line of user code.
"""
ctx = MagicMock()
user_module_path = Path(user_module.__file__).parent
ctx.main_script_parent = user_module_path
patched_get_script_run_ctx.return_value = ctx
err = None
try:
user_func()
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(
proto, cast("Exception", err), is_uncaught_app_exception=True
)
user_module_path = os.path.join(os.path.realpath(user_module_path), "")
assert user_module_path in proto.stack_trace[0], "Stack not stripped"
assert len(proto.stack_trace) == stack_len, (
f"Stack does not have length {stack_len}: {proto.stack_trace}"
)
@patch("streamlit.elements.exception.get_script_run_ctx")
def test_internal_error_stack_doesnt_start_with_user_module(
self, patched_get_script_run_ctx
):
"""Test stack traces for exceptions thrown by Streamlit code *not* called by the
user.
"""
ctx = MagicMock()
user_module_path = Path(user_module.__file__).parent
ctx.main_script_parent = user_module_path
patched_get_script_run_ctx.return_value = ctx
err = None
def func_with_error():
raise RuntimeError("This function throws on purpose")
try:
func_with_error()
except Exception as e:
err = e
assert err is not None
original_stack_len = len(traceback.extract_tb(err.__traceback__))
# Marshall it.
proto = ExceptionProto()
exception.marshall(
proto, cast("Exception", err), is_uncaught_app_exception=False
)
user_module_path = os.path.join(os.path.realpath(user_module_path), "")
assert not any(user_module_path in t for t in proto.stack_trace)
assert len(proto.stack_trace) == original_stack_len, (
f"Stack does not have length {original_stack_len}: {proto.stack_trace}"
)
@parameterized.expand([(True,), ("true",), ("True",), ("full",)])
def test_uncaught_app_exception_show_everything(
self, show_error_details_config_value
):
with testutil.patch_config_options(
{"client.showErrorDetails": show_error_details_config_value}
):
err = None
try:
st.format("http://not_an_image.png", width=-1)
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(proto, err, is_uncaught_app_exception=True)
assert proto.message == "module 'streamlit' has no attribute 'format'"
assert len(proto.stack_trace) > 0
assert proto.type == "AttributeError"
@parameterized.expand([(False,), ("false",), ("False",), ("stacktrace",)])
def test_uncaught_app_exception_hide_message(self, show_error_details_config_value):
with testutil.patch_config_options(
{"client.showErrorDetails": show_error_details_config_value}
):
err = None
try:
st.format("http://not_an_image.png", width=-1)
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(proto, err, is_uncaught_app_exception=True)
assert proto.message == _GENERIC_UNCAUGHT_EXCEPTION_TEXT
assert len(proto.stack_trace) > 0
assert proto.type == "AttributeError"
def test_uncaught_app_exception_show_type_and_stacktrace_only(self):
with testutil.patch_config_options({"client.showErrorDetails": "stacktrace"}):
err = None
try:
st.format("http://not_an_image.png", width=-1)
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(proto, err, is_uncaught_app_exception=True)
assert proto.message == _GENERIC_UNCAUGHT_EXCEPTION_TEXT
assert len(proto.stack_trace) > 0
assert proto.type == "AttributeError"
def test_uncaught_app_exception_show_only_type(self):
with testutil.patch_config_options({"client.showErrorDetails": "type"}):
err = None
try:
st.format("http://not_an_image.png", width=-1)
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(proto, err, is_uncaught_app_exception=True)
assert proto.message == _GENERIC_UNCAUGHT_EXCEPTION_TEXT
assert len(proto.stack_trace) == 0
assert proto.type == "AttributeError"
def test_uncaught_app_exception_hide_everything(self):
with testutil.patch_config_options({"client.showErrorDetails": "none"}):
err = None
try:
st.format("http://not_an_image.png", width=-1)
except Exception as e:
err = e
assert err is not None
# Marshall it.
proto = ExceptionProto()
exception.marshall(proto, err, is_uncaught_app_exception=True)
assert proto.message == _GENERIC_UNCAUGHT_EXCEPTION_TEXT
assert len(proto.stack_trace) == 0
assert proto.type == ""
| ExceptionProtoTest |
python | getsentry__sentry | tests/flagpole/test_conditions.py | {
"start": 4088,
"end": 6672
} | class ____:
def test_does_contain(self) -> None:
condition = ContainsCondition(property="foo", value="bar")
assert condition.match(
context=EvaluationContext({"foo": ["foo", "bar"]}), segment_name="test"
)
not_condition = NotContainsCondition(property="foo", value="bar")
assert not not_condition.match(
context=EvaluationContext({"foo": ["foo", "bar"]}), segment_name="test"
)
condition = ContainsCondition(property="foo", value=1)
assert condition.match(context=EvaluationContext({"foo": [1, 2]}), segment_name="test")
assert not condition.match(context=EvaluationContext({"foo": [3, 4]}), segment_name="test")
def test_does_not_contain(self) -> None:
values = "baz"
condition = ContainsCondition(property="foo", value=values, operator="contains")
assert not condition.match(
context=EvaluationContext({"foo": ["foo", "bar"]}), segment_name="test"
)
not_condition = NotContainsCondition(property="foo", value=values)
assert not_condition.match(
context=EvaluationContext({"foo": ["foo", "bar"]}), segment_name="test"
)
def test_invalid_property_provided(self) -> None:
values = "baz"
bad_context = ("oops", "1", 1, 3.14, None, False, True)
for attr_val in bad_context:
with pytest.raises(ConditionTypeMismatchException):
condition = ContainsCondition(property="foo", value=values)
assert not condition.match(
context=EvaluationContext({"foo": attr_val}), segment_name="test"
)
for attr_val in bad_context:
with pytest.raises(ConditionTypeMismatchException):
not_condition = NotContainsCondition(property="foo", value=values)
assert not_condition.match(
context=EvaluationContext({"foo": attr_val}), segment_name="test"
)
def test_missing_context_property(self) -> None:
condition = ContainsCondition(property="foo", value="bar")
with pytest.raises(ConditionTypeMismatchException):
condition.match(context=EvaluationContext({"bar": ["foo", "bar"]}), segment_name="test")
not_condition = NotContainsCondition(property="foo", value="bar")
with pytest.raises(ConditionTypeMismatchException):
not_condition.match(
context=EvaluationContext({"bar": ["foo", "bar"]}), segment_name="test"
)
| TestContainsConditions |
python | django-guardian__django-guardian | guardian/forms.py | {
"start": 3829,
"end": 5830
} | class ____(BaseObjectPermissionsForm):
"""Object level permissions management form for usage with `User` instances.
Attributes:
user (User): The user instance for which the permissions are being managed.
Example:
```python
from django.shortcuts import get_object_or_404
from myapp.models import Post
from guardian.forms import UserObjectPermissionsForm
from django.contrib.auth.models import User
def my_view(request, post_slug, user_id):
user = get_object_or_404(User, id=user_id)
post = get_object_or_404(Post, slug=post_slug)
form = UserObjectPermissionsForm(user, post, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save_obj_perms()
...
```
"""
def __init__(self, user: Any, *args, **kwargs) -> None:
self.user = user
super().__init__(*args, **kwargs)
def get_obj_perms_field_initial(self) -> QuerySet[Permission]:
"""Returns initial object permissions management field choices.
Returns:
List of permissions assigned to the user for the object.
"""
return get_user_perms(self.user, self.obj)
def save_obj_perms(self) -> None:
"""Saves selected object permissions.
Saves selected object permissions by creating new ones and removing
those which were not selected but already exists.
Should be called *after* form is validated.
"""
perms = set(self.cleaned_data[self.get_obj_perms_field_name()])
model_perms = {c[0] for c in self.get_obj_perms_field_choices()}
init_perms = set(self.get_obj_perms_field_initial())
to_remove = (model_perms - perms) & init_perms
for perm in to_remove:
remove_perm(perm, self.user, self.obj)
for perm in perms - init_perms:
assign_perm(perm, self.user, self.obj)
| UserObjectPermissionsForm |
python | getsentry__sentry-python | tests/integrations/ray/test_ray.py | {
"start": 540,
"end": 8224
} | class ____(TestTransport):
def capture_envelope(self, envelope: Envelope) -> None:
print(envelope.serialize().decode("utf-8", "replace"))
def setup_sentry_with_logging_transport():
setup_sentry(transport=RayLoggingTransport())
def setup_sentry(transport=None):
sentry_sdk.init(
integrations=[RayIntegration()],
transport=RayTestTransport() if transport is None else transport,
traces_sample_rate=1.0,
)
def read_error_from_log(job_id, ray_temp_dir):
# Find the actual session directory that Ray created
session_dirs = [d for d in os.listdir(ray_temp_dir) if d.startswith("session_")]
if not session_dirs:
raise FileNotFoundError(f"No session directory found in {ray_temp_dir}")
session_dir = os.path.join(ray_temp_dir, session_dirs[0])
log_dir = os.path.join(session_dir, "logs")
if not os.path.exists(log_dir):
raise FileNotFoundError(f"No logs directory found at {log_dir}")
log_file = [
f
for f in os.listdir(log_dir)
if "worker" in f and job_id in f and f.endswith(".out")
][0]
with open(os.path.join(log_dir, log_file), "r") as file:
lines = file.readlines()
try:
# parse error object from log line
error = json.loads(lines[4][:-1])
except IndexError:
error = None
return error
@pytest.mark.parametrize(
"task_options", [{}, {"num_cpus": 0, "memory": 1024 * 1024 * 10}]
)
def test_tracing_in_ray_tasks(task_options):
setup_sentry()
ray.init(
runtime_env={
"worker_process_setup_hook": setup_sentry,
"working_dir": "./",
}
)
def example_task():
with sentry_sdk.start_span(op="task", name="example task step"):
...
return sentry_sdk.get_client().transport.envelopes
# Setup ray task, calling decorator directly instead of @,
# to accommodate for test parametrization
if task_options:
example_task = ray.remote(**task_options)(example_task)
else:
example_task = ray.remote(example_task)
# Function name shouldn't be overwritten by Sentry wrapper
assert example_task._function_name == "tests.integrations.ray.test_ray.example_task"
with sentry_sdk.start_transaction(op="task", name="ray test transaction"):
worker_envelopes = ray.get(example_task.remote())
client_envelope = sentry_sdk.get_client().transport.envelopes[0]
client_transaction = client_envelope.get_transaction_event()
assert client_transaction["transaction"] == "ray test transaction"
assert client_transaction["transaction_info"] == {"source": "custom"}
worker_envelope = worker_envelopes[0]
worker_transaction = worker_envelope.get_transaction_event()
assert (
worker_transaction["transaction"]
== "tests.integrations.ray.test_ray.test_tracing_in_ray_tasks.<locals>.example_task"
)
assert worker_transaction["transaction_info"] == {"source": "task"}
(span,) = client_transaction["spans"]
assert span["op"] == "queue.submit.ray"
assert span["origin"] == "auto.queue.ray"
assert (
span["description"]
== "tests.integrations.ray.test_ray.test_tracing_in_ray_tasks.<locals>.example_task"
)
assert span["parent_span_id"] == client_transaction["contexts"]["trace"]["span_id"]
assert span["trace_id"] == client_transaction["contexts"]["trace"]["trace_id"]
(span,) = worker_transaction["spans"]
assert span["op"] == "task"
assert span["origin"] == "manual"
assert span["description"] == "example task step"
assert span["parent_span_id"] == worker_transaction["contexts"]["trace"]["span_id"]
assert span["trace_id"] == worker_transaction["contexts"]["trace"]["trace_id"]
assert (
client_transaction["contexts"]["trace"]["trace_id"]
== worker_transaction["contexts"]["trace"]["trace_id"]
)
def test_errors_in_ray_tasks():
setup_sentry_with_logging_transport()
ray_temp_dir = os.path.join("/tmp", f"ray_test_{uuid.uuid4().hex[:8]}")
os.makedirs(ray_temp_dir, exist_ok=True)
try:
ray.init(
runtime_env={
"worker_process_setup_hook": setup_sentry_with_logging_transport,
"working_dir": "./",
},
_temp_dir=ray_temp_dir,
)
# Setup ray task
@ray.remote
def example_task():
1 / 0
with sentry_sdk.start_transaction(op="task", name="ray test transaction"):
with pytest.raises(ZeroDivisionError):
future = example_task.remote()
ray.get(future)
job_id = future.job_id().hex()
error = read_error_from_log(job_id, ray_temp_dir)
assert error["level"] == "error"
assert (
error["transaction"]
== "tests.integrations.ray.test_ray.test_errors_in_ray_tasks.<locals>.example_task"
)
assert error["exception"]["values"][0]["mechanism"]["type"] == "ray"
assert not error["exception"]["values"][0]["mechanism"]["handled"]
finally:
if os.path.exists(ray_temp_dir):
shutil.rmtree(ray_temp_dir, ignore_errors=True)
def test_tracing_in_ray_actors():
setup_sentry()
ray.init(
runtime_env={
"worker_process_setup_hook": setup_sentry,
"working_dir": "./",
}
)
# Setup ray actor
@ray.remote
class Counter:
def __init__(self):
self.n = 0
def increment(self):
with sentry_sdk.start_span(op="task", name="example actor execution"):
self.n += 1
return sentry_sdk.get_client().transport.envelopes
with sentry_sdk.start_transaction(op="task", name="ray test transaction"):
counter = Counter.remote()
worker_envelopes = ray.get(counter.increment.remote())
client_envelope = sentry_sdk.get_client().transport.envelopes[0]
client_transaction = client_envelope.get_transaction_event()
# Spans for submitting the actor task are not created (actors are not supported yet)
assert client_transaction["spans"] == []
# Transaction are not yet created when executing ray actors (actors are not supported yet)
assert worker_envelopes == []
def test_errors_in_ray_actors():
setup_sentry_with_logging_transport()
ray_temp_dir = os.path.join("/tmp", f"ray_test_{uuid.uuid4().hex[:8]}")
os.makedirs(ray_temp_dir, exist_ok=True)
try:
ray.init(
runtime_env={
"worker_process_setup_hook": setup_sentry_with_logging_transport,
"working_dir": "./",
},
_temp_dir=ray_temp_dir,
)
# Setup ray actor
@ray.remote
class Counter:
def __init__(self):
self.n = 0
def increment(self):
with sentry_sdk.start_span(op="task", name="example actor execution"):
1 / 0
return sentry_sdk.get_client().transport.envelopes
with sentry_sdk.start_transaction(op="task", name="ray test transaction"):
with pytest.raises(ZeroDivisionError):
counter = Counter.remote()
future = counter.increment.remote()
ray.get(future)
job_id = future.job_id().hex()
error = read_error_from_log(job_id, ray_temp_dir)
# We do not capture errors in ray actors yet
assert error is None
finally:
if os.path.exists(ray_temp_dir):
shutil.rmtree(ray_temp_dir, ignore_errors=True)
| RayLoggingTransport |
python | wandb__wandb | wandb/automations/events.py | {
"start": 2219,
"end": 2342
} | class ____(GQLBase): # from: TriggeringFilterEvent
filter: JsonEncoded[MongoLikeFilter] = And()
| _WrappedSavedEventFilter |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/reduce_benchmark_test.py | {
"start": 1243,
"end": 3433
} | class ____(test.Benchmark):
"""Benchmarks for reductions."""
def _run(self, func, num_iters):
# call func to maybe warm up the GPU
func()
start = time.time()
for _ in range(num_iters):
func()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_reduce_sum_grad_eager(self):
with context.eager_mode():
tensor = array_ops.zeros([100, 1000])
def fn():
backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_eager_cpu(self):
with context.eager_mode(), ops.device("/cpu:0"):
tensor = array_ops.zeros([100, 1000])
def fn():
backprop.gradients_function(math_ops.reduce_sum, [0])(tensor)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_graph(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)))
with ops.Graph().as_default(), session.Session(config=config) as sess:
tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32))
reduction = math_ops.reduce_sum(tensor)
grad, = gradients_impl.gradients(reduction, tensor)
def fn():
self.evaluate(grad.op)
self._run(fn, 10000)
def benchmark_reduce_sum_grad_graph_cpu(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)))
with ops.Graph().as_default(), session.Session(config=config) as sess:
with ops.device("/cpu:0"):
tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32))
reduction = math_ops.reduce_sum(tensor)
grad, = gradients_impl.gradients(reduction, tensor)
def fn():
self.evaluate(grad.op)
self._run(fn, 10000)
if __name__ == "__main__":
test.main()
| ReduceBenchmarks |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 50549,
"end": 53296
} | class ____(ClvpPreTrainedModel):
def __init__(self, config: ClvpDecoderConfig):
super().__init__(config)
self.config = config
self.decoder = ClvpDecoder(self.config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.input_embeds_layer
def set_input_embeddings(self, value):
self.decoder.input_embeds_layer = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
The CLVP decoder model with a language modelling head on top.
"""
)
| ClvpModel |
python | takluyver__flit | flit/log.py | {
"start": 1045,
"end": 3967
} | class ____(logging.Formatter):
"""Log formatter with colour support
"""
DEFAULT_COLORS = {
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
logging.CRITICAL: 1,
}
def __init__(self, color=True, datefmt=None):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = str(fg_color, "ascii")
for levelno, code in self.DEFAULT_COLORS.items():
self._colors[levelno] = str(curses.tparm(fg_color, code), "ascii")
self._normal = str(curses.tigetstr("sgr0"), "ascii")
scr = curses.initscr()
self.termwidth = scr.getmaxyx()[1]
curses.endwin()
else:
self._normal = ''
# Default width is usually 80, but too wide is worse than too narrow
self.termwidth = 70
def formatMessage(self, record):
l = len(record.message)
right_text = f'{record.levelname[0]}-{record.name}'
if l + len(right_text) < self.termwidth:
space = ' ' * (self.termwidth - (l + len(right_text)))
else:
space = ' '
if record.levelno in self._colors:
start_color = self._colors[record.levelno]
end_color = self._normal
else:
start_color = end_color = ''
return record.message + space + start_color + right_text + end_color
def enable_colourful_output(level=logging.INFO):
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter())
logging.root.addHandler(handler)
logging.root.setLevel(level)
| LogFormatter |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/pod_manager.py | {
"start": 11808,
"end": 37897
} | class ____(LoggingMixin):
"""Create, monitor, and otherwise interact with Kubernetes pods for use with the KubernetesPodOperator."""
def __init__(
self,
kube_client: client.CoreV1Api,
callbacks: list[type[KubernetesPodOperatorCallback]] | None = None,
):
"""
Create the launcher.
:param kube_client: kubernetes client
:param callbacks:
"""
super().__init__()
self._client = kube_client
self._watch = watch.Watch()
self._callbacks = callbacks or []
self.stop_watching_events = False
def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
"""Run POD asynchronously."""
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug("Pod Creation Request: \n%s", json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug("Pod Creation Response: %s", resp)
except Exception as e:
self.log.exception(
"Exception when attempting to create Namespaced Pod: %s", str(json_pod).replace("\n", " ")
)
raise e
return resp
@generic_api_retry
def delete_pod(self, pod: V1Pod) -> None:
"""Delete POD."""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if str(e.status) != "404":
raise
@generic_api_retry
def create_pod(self, pod: V1Pod) -> V1Pod:
"""Launch the pod asynchronously."""
return self.run_pod_async(pod)
async def watch_pod_events(self, pod: V1Pod, check_interval: int = 1) -> None:
"""Read pod events and writes into log."""
await watch_pod_events(pod_manager=self, pod=pod, check_interval=check_interval)
async def await_pod_start(
self, pod: V1Pod, schedule_timeout: int = 120, startup_timeout: int = 120, check_interval: int = 1
) -> None:
"""
Wait for the pod to reach phase other than ``Pending``.
:param pod:
:param schedule_timeout: Timeout (in seconds) for pod stay in schedule state
(if pod is taking to long in schedule state, fails task)
:param startup_timeout: Timeout (in seconds) for startup of the pod
(if pod is pending for too long after being scheduled, fails task)
:param check_interval: Interval (in seconds) between checks
:return:
"""
await await_pod_start(
pod_manager=self,
pod=pod,
schedule_timeout=schedule_timeout,
startup_timeout=startup_timeout,
check_interval=check_interval,
)
def _log_message(
self,
message: str,
container_name: str,
container_name_log_prefix_enabled: bool,
log_formatter: Callable[[str, str], str] | None,
) -> None:
"""Log a message with appropriate formatting."""
if is_log_group_marker(message):
print(message)
else:
if log_formatter:
formatted_message = log_formatter(container_name, message)
self.log.info("%s", formatted_message)
else:
log_message = (
f"[{container_name}] {message}" if container_name_log_prefix_enabled else message
)
self.log.info("%s", log_message)
def fetch_container_logs(
self,
pod: V1Pod,
container_name: str,
*,
follow=False,
since_time: DateTime | None = None,
post_termination_timeout: int = 120,
container_name_log_prefix_enabled: bool = True,
log_formatter: Callable[[str, str], str] | None = None,
) -> PodLoggingStatus:
"""
Follow the logs of container and stream to airflow logging.
Returns when container exits.
Between when the pod starts and logs being available, there might be a delay due to CSR not approved
and signed yet. In such situation, ApiException is thrown. This is why we are retrying on this
specific exception.
:meta private:
"""
def consume_logs(*, since_time: DateTime | None = None) -> tuple[DateTime | None, Exception | None]:
"""
Try to follow container logs until container completes.
For a long-running container, sometimes the log read may be interrupted
Such errors of this kind are suppressed.
Returns the last timestamp observed in logs.
"""
exception = None
last_captured_timestamp = None
# We timeout connections after 30 minutes because otherwise they can get
# stuck forever. The 30 is somewhat arbitrary.
# As a consequence, a TimeoutError will be raised no more than 30 minutes
# after starting read.
connection_timeout = 60 * 30
# We set a shorter read timeout because that helps reduce *connection* timeouts
# (since the connection will be restarted periodically). And with read timeout,
# we don't need to worry about either duplicate messages or losing messages; we
# can safely resume from a few seconds later
read_timeout = 60 * 5
try:
since_seconds = None
if since_time:
try:
since_seconds = math.ceil((pendulum.now() - since_time).total_seconds())
except TypeError:
self.log.warning(
"Error calculating since_seconds with since_time %s. Using None instead.",
since_time,
)
logs = self.read_pod_logs(
pod=pod,
container_name=container_name,
timestamps=True,
since_seconds=since_seconds,
follow=follow,
post_termination_timeout=post_termination_timeout,
_request_timeout=(connection_timeout, read_timeout),
)
message_to_log = None
message_timestamp = None
progress_callback_lines = []
try:
for raw_line in logs:
line = raw_line.decode("utf-8", errors="backslashreplace")
line_timestamp, message = parse_log_line(line)
if line_timestamp: # detect new log line
if message_to_log is None: # first line in the log
message_to_log = message
message_timestamp = line_timestamp
progress_callback_lines.append(line)
else: # previous log line is complete
for line in progress_callback_lines:
for callback in self._callbacks:
callback.progress_callback(
line=line, client=self._client, mode=ExecutionMode.SYNC
)
if message_to_log is not None:
self._log_message(
message_to_log,
container_name,
container_name_log_prefix_enabled,
log_formatter,
)
last_captured_timestamp = message_timestamp
message_to_log = message
message_timestamp = line_timestamp
progress_callback_lines = [line]
else: # continuation of the previous log line
message_to_log = f"{message_to_log}\n{message}"
progress_callback_lines.append(line)
finally:
# log the last line and update the last_captured_timestamp
for line in progress_callback_lines:
for callback in self._callbacks:
callback.progress_callback(
line=line, client=self._client, mode=ExecutionMode.SYNC
)
if message_to_log is not None:
self._log_message(
message_to_log, container_name, container_name_log_prefix_enabled, log_formatter
)
last_captured_timestamp = message_timestamp
except TimeoutError as e:
# in case of timeout, increment return time by 2 seconds to avoid
# duplicate log entries
if val := (last_captured_timestamp or since_time):
return val.add(seconds=2), e
except HTTPError as e:
exception = e
self._http_error_timestamps = getattr(self, "_http_error_timestamps", [])
self._http_error_timestamps = [
t for t in self._http_error_timestamps if t > utcnow() - timedelta(seconds=60)
]
self._http_error_timestamps.append(utcnow())
# Log only if more than 2 errors occurred in the last 60 seconds
if len(self._http_error_timestamps) > 2:
self.log.exception(
"Reading of logs interrupted for container %r; will retry.",
container_name,
)
return last_captured_timestamp or since_time, exception
# note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
# loop as we do here. But in a long-running process we might temporarily lose connectivity.
# So the looping logic is there to let us resume following the logs.
last_log_time = since_time
while True:
last_log_time, exc = consume_logs(since_time=last_log_time)
if not self.container_is_running(pod, container_name=container_name):
return PodLoggingStatus(running=False, last_log_time=last_log_time)
if not follow:
return PodLoggingStatus(running=True, last_log_time=last_log_time)
# a timeout is a normal thing and we ignore it and resume following logs
if not isinstance(exc, TimeoutError):
self.log.warning(
"Pod %s log read interrupted but container %s still running. Logs generated in the last one second might get duplicated.",
pod.metadata.name,
container_name,
)
time.sleep(1)
def _reconcile_requested_log_containers(
self, requested: Iterable[str] | str | bool | None, actual: list[str], pod_name
) -> list[str]:
"""Return actual containers based on requested."""
containers_to_log = []
if actual:
if isinstance(requested, str):
# fetch logs only for requested container if only one container is provided
if requested in actual:
containers_to_log.append(requested)
else:
self.log.error(
"container %s whose logs were requested not found in the pod %s",
requested,
pod_name,
)
elif isinstance(requested, bool):
# if True is provided, get logs for all the containers
if requested is True:
containers_to_log.extend(actual)
else:
self.log.error(
"False is not a valid value for container_logs",
)
else:
# if a sequence of containers are provided, iterate for every container in the pod
if isinstance(requested, Iterable):
for container in requested:
if container in actual:
containers_to_log.append(container)
else:
self.log.error(
"Container %s whose logs were requests not found in the pod %s",
container,
pod_name,
)
else:
self.log.error(
"Invalid type %s specified for container names input parameter", type(requested)
)
else:
self.log.error("Could not retrieve containers for the pod: %s", pod_name)
return containers_to_log
def fetch_requested_init_container_logs(
self,
pod: V1Pod,
init_containers: Iterable[str] | str | Literal[True] | None,
follow_logs=False,
container_name_log_prefix_enabled: bool = True,
log_formatter: Callable[[str, str], str] | None = None,
) -> list[PodLoggingStatus]:
"""
Follow the logs of containers in the specified pod and publish it to airflow logging.
Returns when all the containers exit.
:meta private:
"""
pod_logging_statuses = []
all_containers = self.get_init_container_names(pod)
containers_to_log = self._reconcile_requested_log_containers(
requested=init_containers,
actual=all_containers,
pod_name=pod.metadata.name,
)
# sort by spec.initContainers because containers runs sequentially
containers_to_log = sorted(containers_to_log, key=lambda cn: all_containers.index(cn))
for c in containers_to_log:
self._await_init_container_start(pod=pod, container_name=c)
status = self.fetch_container_logs(
pod=pod,
container_name=c,
follow=follow_logs,
container_name_log_prefix_enabled=container_name_log_prefix_enabled,
log_formatter=log_formatter,
)
pod_logging_statuses.append(status)
return pod_logging_statuses
def fetch_requested_container_logs(
self,
pod: V1Pod,
containers: Iterable[str] | str | Literal[True],
follow_logs=False,
container_name_log_prefix_enabled: bool = True,
log_formatter: Callable[[str, str], str] | None = None,
) -> list[PodLoggingStatus]:
"""
Follow the logs of containers in the specified pod and publish it to airflow logging.
Returns when all the containers exit.
:meta private:
"""
pod_logging_statuses = []
all_containers = self.get_container_names(pod)
containers_to_log = self._reconcile_requested_log_containers(
requested=containers,
actual=all_containers,
pod_name=pod.metadata.name,
)
for c in containers_to_log:
status = self.fetch_container_logs(
pod=pod,
container_name=c,
follow=follow_logs,
container_name_log_prefix_enabled=container_name_log_prefix_enabled,
log_formatter=log_formatter,
)
pod_logging_statuses.append(status)
return pod_logging_statuses
def await_container_completion(self, pod: V1Pod, container_name: str, polling_time: float = 1) -> None:
    """
    Wait for the given container in the given pod to be completed.

    :param pod: pod spec that will be monitored
    :param container_name: name of the container within the pod to monitor
    :param polling_time: polling time between two container status checks.
        Defaults to 1s.
    """
    # Re-read the live pod each iteration until the container reports completion.
    while not container_is_completed(self.read_pod(pod), container_name):
        self.log.info("Waiting for container '%s' state to be completed", container_name)
        time.sleep(polling_time)
def await_pod_completion(
    self, pod: V1Pod, istio_enabled: bool = False, container_name: str = "base"
) -> V1Pod:
    """
    Monitor a pod and return the final state.

    :param istio_enabled: whether istio is enabled in the namespace; when true,
        completion of ``container_name`` alone is accepted as pod completion
    :param pod: pod spec that will be monitored
    :param container_name: name of the container within the pod
    :return: the remote pod object in its final observed state
    """
    while True:
        remote_pod = self.read_pod(pod)
        if remote_pod.status.phase in PodPhase.terminal_states:
            break
        # With istio the pod phase may not terminate even though the main
        # container has finished, so check the container directly.
        if istio_enabled and container_is_completed(remote_pod, container_name):
            break
        self.log.info("Pod %s has phase %s", pod.metadata.name, remote_pod.status.phase)
        time.sleep(2)
    return remote_pod
def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
    """Re-read the pod and report whether the named container is currently running."""
    latest = self.read_pod(pod)
    return container_is_running(pod=latest, container_name=container_name)
def container_is_terminated(self, pod: V1Pod, container_name: str) -> bool:
    """Re-read the pod and report whether the named container has terminated."""
    latest = self.read_pod(pod)
    return container_is_terminated(pod=latest, container_name=container_name)
@generic_api_retry
def read_pod_logs(
    self,
    pod: V1Pod,
    container_name: str,
    tail_lines: int | None = None,
    timestamps: bool = False,
    since_seconds: int | None = None,
    follow=True,
    post_termination_timeout: int = 120,
    **kwargs,
) -> PodLogsConsumer:
    """
    Read log from the POD.

    :param pod: pod to read logs from
    :param container_name: container whose log stream is requested
    :param tail_lines: if set, only return this many lines from the end of the log
    :param timestamps: include timestamps on each log line
    :param since_seconds: if set, only return logs newer than this many seconds
    :param follow: whether to keep the stream open and follow new output
    :param post_termination_timeout: seconds the consumer keeps reading after
        the container terminates
    :param kwargs: forwarded to ``read_namespaced_pod_log``
    """
    additional_kwargs = {}
    # Only forward these options when truthy; passing None/0 would override
    # the API server defaults.
    if since_seconds:
        additional_kwargs["since_seconds"] = since_seconds
    if tail_lines:
        additional_kwargs["tail_lines"] = tail_lines
    additional_kwargs.update(**kwargs)
    try:
        # _preload_content=False keeps the response as a stream rather than
        # reading the whole log into memory up front.
        logs = self._client.read_namespaced_pod_log(
            name=pod.metadata.name,
            namespace=pod.metadata.namespace,
            container=container_name,
            follow=follow,
            timestamps=timestamps,
            _preload_content=False,
            **additional_kwargs,
        )
    except HTTPError:
        self.log.exception("There was an error reading the kubernetes API.")
        raise
    return PodLogsConsumer(
        response=logs,
        pod=pod,
        pod_manager=self,
        container_name=container_name,
        post_termination_timeout=post_termination_timeout,
    )
def get_init_container_names(self, pod: V1Pod) -> list[str]:
    """
    Return the names of all init containers declared in the POD spec.

    Returns an empty list when the pod declares no init containers (the
    Kubernetes API omits ``spec.initContainers`` in that case, so the client
    reports it as ``None``).

    :meta private:
    """
    # Guard against init_containers being None rather than an empty list.
    return [container_spec.name for container_spec in pod.spec.init_containers or []]
def get_container_names(self, pod: V1Pod) -> list[str]:
    """
    Return container names from the POD except for the airflow-xcom-sidecar container.

    The pod is re-read from the API so the names reflect the live object.

    :meta private:
    """
    pod_info = self.read_pod(pod)
    return [
        container_spec.name
        for container_spec in pod_info.spec.containers
        if container_spec.name != PodDefaults.SIDECAR_CONTAINER_NAME
    ]
@generic_api_retry
def read_pod_events(self, pod: V1Pod) -> CoreV1EventList:
    """Read events from the POD."""
    # Restrict the event listing to events referencing this pod.
    selector = f"involvedObject.name={pod.metadata.name}"
    try:
        return self._client.list_namespaced_event(
            namespace=pod.metadata.namespace, field_selector=selector
        )
    except HTTPError as e:
        raise KubernetesApiException(f"There was an error reading the kubernetes API: {e}")
@generic_api_retry
def read_pod(self, pod: V1Pod) -> V1Pod:
    """Fetch the latest POD information from the Kubernetes API."""
    name, namespace = pod.metadata.name, pod.metadata.namespace
    try:
        return self._client.read_namespaced_pod(name, namespace)
    except HTTPError as e:
        raise KubernetesApiException(f"There was an error reading the kubernetes API: {e}")
def await_xcom_sidecar_container_start(
    self, pod: V1Pod, timeout: int = 900, log_interval: int = 30
) -> None:
    """
    Check if the sidecar container has reached the 'Running' state before performing do_xcom_push.

    :param pod: pod whose xcom sidecar container is awaited
    :param timeout: seconds to wait before giving up
    :param log_interval: seconds between "still waiting" log messages
    :raises AirflowException: if the sidecar has already terminated, or it
        does not start within ``timeout`` seconds
    """
    self.log.info("Checking if xcom sidecar container is started.")
    start_time = time.time()
    last_log_time = start_time
    while True:
        elapsed_time = time.time() - start_time
        if self.container_is_running(pod, PodDefaults.SIDECAR_CONTAINER_NAME):
            self.log.info("The xcom sidecar container has started.")
            break
        # A terminated sidecar can never serve the xcom value -- fail fast.
        if self.container_is_terminated(pod, PodDefaults.SIDECAR_CONTAINER_NAME):
            raise AirflowException(
                "Xcom sidecar container is already terminated! Not possible to read xcom output of task."
            )
        # Periodically surface progress so long waits are visible in task logs.
        if (time.time() - last_log_time) >= log_interval:
            self.log.warning(
                "Still waiting for the xcom sidecar container to start. Elapsed time: %d seconds.",
                int(elapsed_time),
            )
            last_log_time = time.time()
        if elapsed_time > timeout:
            raise AirflowException(
                f"Xcom sidecar container did not start within {timeout // 60} minutes."
            )
        time.sleep(1)
def extract_xcom(self, pod: V1Pod) -> str:
    """Retrieve XCom value and kill xcom sidecar container."""
    try:
        return self.extract_xcom_json(pod)
    finally:
        # Always tear down the sidecar, even if extraction failed.
        self.extract_xcom_kill(pod)
@generic_api_retry
def extract_xcom_json(self, pod: V1Pod) -> str:
    """
    Retrieve XCom value and also check if xcom json is valid.

    Runs a shell snippet inside the xcom sidecar that cats ``return.json``
    when it is non-empty, or echoes a sentinel value otherwise.

    :raises AirflowException: if nothing at all could be read from the sidecar
    :raises json.JSONDecodeError: if the retrieved content is not valid JSON
    """
    command = (
        f"if [ -s {PodDefaults.XCOM_MOUNT_PATH}/return.json ]; "
        f"then cat {PodDefaults.XCOM_MOUNT_PATH}/return.json; "
        f"else echo {EMPTY_XCOM_RESULT}; fi"
    )
    with closing(
        kubernetes_stream(
            self._client.connect_get_namespaced_pod_exec,
            pod.metadata.name,
            pod.metadata.namespace,
            container=PodDefaults.SIDECAR_CONTAINER_NAME,
            command=[
                "/bin/sh",
                "-c",
                command,
            ],
            stdin=False,
            stdout=True,
            stderr=True,
            tty=False,
            _preload_content=False,
        )
    ) as client:
        self.log.info("Running command... %s", command)
        # Block until the exec websocket closes, then drain the buffers.
        client.run_forever()
        if client.peek_stderr():
            stderr = client.read_stderr()
            self.log.error("stderr from command: %s", stderr)
        result = client.read_all()
    if result and result.rstrip() != EMPTY_XCOM_RESULT:
        # Note: result string is parsed to check if its valid json.
        # This function still returns a string which is converted into json in the calling method.
        json.loads(result)
    if result is None:
        raise AirflowException(f"Failed to extract xcom from pod: {pod.metadata.name}")
    return result
@generic_api_retry
def extract_xcom_kill(self, pod: V1Pod):
    """Kill xcom sidecar container."""
    exec_stream = kubernetes_stream(
        self._client.connect_get_namespaced_pod_exec,
        pod.metadata.name,
        pod.metadata.namespace,
        container=PodDefaults.SIDECAR_CONTAINER_NAME,
        command=["/bin/sh"],
        stdin=True,
        stdout=True,
        stderr=True,
        tty=False,
        _preload_content=False,
    )
    with closing(exec_stream) as resp:
        # SIGINT the sidecar's shell so the container exits.
        self._exec_pod_command(resp, "kill -2 $(pgrep -u $(id -u) -f 'sh')")
def _exec_pod_command(self, resp, command: str) -> str | None:
    """
    Write *command* to an open exec stream and return its stdout, or ``None``.

    :param resp: an open kubernetes exec websocket stream
    :param command: shell command to write to the stream's stdin
    :return: accumulated stdout, or ``None`` if the stream was closed or
        no output was produced
    """
    res = ""
    if not resp.is_open():
        return None
    self.log.info("Running command... %s", command)
    resp.write_stdin(f"{command}\n")
    while resp.is_open():
        # Pump the websocket, then drain stdout/stderr buffers.
        resp.update(timeout=1)
        while resp.peek_stdout():
            res += resp.read_stdout()
        error_res = ""
        while resp.peek_stderr():
            error_res += resp.read_stderr()
        # Any stderr output aborts the read loop (stdout gathered so far is
        # discarded by falling through to the final return).
        if error_res:
            self.log.info("stderr from command: %s", error_res)
            break
        if res:
            return res
    return None
def _await_init_container_start(self, pod: V1Pod, container_name: str):
    """
    Poll until the given init container has actually started.

    Returns once the pod has left the PENDING phase and the container has a
    reported status that is no longer in the "waiting" state.
    """
    while True:
        remote_pod = self.read_pod(pod)
        # Check order matters: status (and the per-container status) may be
        # absent while the pod is still being scheduled, so each condition
        # guards the next via short-circuiting.
        if (
            remote_pod.status is not None
            and remote_pod.status.phase != PodPhase.PENDING
            and get_container_status(remote_pod, container_name) is not None
            and not container_is_wait(remote_pod, container_name)
        ):
            return
        time.sleep(1)
| PodManager |
python | celery__celery | celery/contrib/migrate.py | {
"start": 824,
"end": 8427
class State:
    """Migration progress state."""

    count = 0
    filtered = 0
    total_apx = 0

    @property
    def strtotal(self):
        # An unknown (zero) approximate total renders as '?'.
        return str(self.total_apx) if self.total_apx else '?'

    def __repr__(self):
        # Filtered counts take precedence in the display.
        if self.filtered:
            return f'^{self.filtered}'
        return f'{self.count}/{self.strtotal}'
def republish(producer, message, exchange=None, routing_key=None,
              remove_props=None):
    """Republish message.

    :param producer: producer used to publish the forwarded message
    :param message: the consumed message to forward (its raw body is reused)
    :param exchange: destination exchange; defaults to the message's
        original exchange
    :param routing_key: destination routing key; defaults to the message's
        original routing key
    :param remove_props: property names stripped before publishing; defaults
        to properties that ``publish()`` regenerates itself
    """
    if not remove_props:
        # These properties are set again by publish(); forwarding the old
        # values would conflict with the new message.
        remove_props = ['application_headers', 'content_type',
                        'content_encoding', 'headers']
    body = ensure_bytes(message.body)  # use raw message body.
    info, headers, props = (message.delivery_info,
                            message.headers, message.properties)
    exchange = info['exchange'] if exchange is None else exchange
    routing_key = info['routing_key'] if routing_key is None else routing_key
    ctype, enc = message.content_type, message.content_encoding
    # remove compression header, as this will be inserted again
    # when the message is recompressed.
    compression = headers.pop('compression', None)
    expiration = props.pop('expiration', None)
    # ensure expiration is a float
    expiration = float(expiration) if expiration is not None else None
    for key in remove_props:
        props.pop(key, None)
    producer.publish(ensure_bytes(body), exchange=exchange,
                     routing_key=routing_key, compression=compression,
                     headers=headers, content_type=ctype,
                     content_encoding=enc, expiration=expiration,
                     **props)
def migrate_task(producer, body_, message, queues=None):
    """Republish a single task message, remapping exchange and routing key via *queues*."""
    if queues is None:
        queues = {}
    delivery = message.delivery_info
    republish(
        producer,
        message,
        exchange=queues.get(delivery['exchange']),
        routing_key=queues.get(delivery['routing_key']),
    )
def filter_callback(callback, tasks):
    """Wrap *callback* so it only fires for task names contained in *tasks*.

    A falsy *tasks* (e.g. ``None`` or an empty set) disables filtering, so
    every message is passed through.
    """
    def filtered(body, message):
        selected = not tasks or body['task'] in tasks
        if selected:
            return callback(body, message)
        return None
    return filtered
def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                  queues=None, **kwargs):
    """Migrate tasks from one broker to another.

    :param source: source broker connection
    :param dest: destination broker connection
    :param migrate: callable invoked for every consumed message;
        defaults to :func:`migrate_task`
    :param app: optional app instance (defaults to the current app)
    :param queues: queue name mapping (any form accepted by
        :func:`prepare_queues`)

    Remaining keyword arguments are passed on to :func:`start_filter`.
    """
    app = app_or_default(app)
    queues = prepare_queues(queues)
    # auto_declare=False: destination entities are declared explicitly in
    # on_declare_queue below, with their names remapped.
    producer = app.amqp.Producer(dest, auto_declare=False)
    migrate = partial(migrate, producer, queues=queues)

    def on_declare_queue(queue):
        # Declare the destination counterpart of a source queue, renaming the
        # queue -- and its routing key/exchange when they share its name.
        new_queue = queue(producer.channel)
        new_queue.name = queues.get(queue.name, queue.name)
        if new_queue.routing_key == queue.name:
            new_queue.routing_key = queues.get(queue.name,
                                               new_queue.routing_key)
        if new_queue.exchange.name == queue.name:
            new_queue.exchange.name = queues.get(queue.name, queue.name)
        new_queue.declare()

    return start_filter(app, source, migrate, queues=queues,
                        on_declare_queue=on_declare_queue, **kwargs)
def _maybe_queue(app, q):
if isinstance(q, str):
return app.amqp.queues[q]
return q
def move(predicate, connection=None, exchange=None, routing_key=None,
         source=None, app=None, callback=None, limit=None, transform=None,
         **kwargs):
    """Find tasks by filtering them and move the tasks to a new queue.

    Arguments:
        predicate (Callable): Filter function used to decide the messages
            to move.  Must accept the standard signature of ``(body, message)``
            used by Kombu consumer callbacks.  If the predicate wants the
            message to be moved it must return either:

                1) a tuple of ``(exchange, routing_key)``, or

                2) a :class:`~kombu.entity.Queue` instance, or

                3) any other true value means the specified
                   ``exchange`` and ``routing_key`` arguments will be used.

        connection (kombu.Connection): Custom connection to use.
        source: List[Union[str, kombu.Queue]]: Optional list of source
            queues to use instead of the default (queues
            in :setting:`task_queues`).  This list can also contain
            :class:`~kombu.entity.Queue` instances.
        exchange (str, kombu.Exchange): Default destination exchange.
        routing_key (str): Default destination routing key.
        limit (int): Limit number of messages to filter.
        callback (Callable): Callback called after message moved,
            with signature ``(state, body, message)``.
        transform (Callable): Optional function to transform the return
            value (destination) of the filter function.

    Also supports the same keyword arguments as :func:`start_filter`.

    To demonstrate, the :func:`move_task_by_id` operation can be implemented
    like this:

    .. code-block:: python

        def is_wanted_task(body, message):
            if body['id'] == wanted_id:
                return Queue('foo', exchange=Exchange('foo'),
                             routing_key='foo')

        move(is_wanted_task)

    or with a transform:

    .. code-block:: python

        def transform(value):
            if isinstance(value, str):
                return Queue(value, Exchange(value), value)
            return value

        move(is_wanted_task, transform=transform)

    Note:
        The predicate may also return a tuple of ``(exchange, routing_key)``
        to specify the destination to where the task should be moved,
        or a :class:`~kombu.entity.Queue` instance.
        Any other true value means that the task will be moved to the
        default exchange/routing_key.
    """
    app = app_or_default(app)
    queues = [_maybe_queue(app, queue) for queue in source or []] or None
    with app.connection_or_acquire(connection, pool=False) as conn:
        producer = app.amqp.Producer(conn)
        state = State()

        def on_task(body, message):
            ret = predicate(body, message)
            if ret:
                if transform:
                    ret = transform(ret)
                # A Queue destination carries its own exchange/routing key and
                # must be declared before publishing to it.
                if isinstance(ret, Queue):
                    maybe_declare(ret, conn.default_channel)
                    ex, rk = ret.exchange.name, ret.routing_key
                else:
                    ex, rk = expand_dest(ret, exchange, routing_key)
                republish(producer, message,
                          exchange=ex, routing_key=rk)
                # Ack (remove from the source queue) only after republishing
                # succeeded, so the message is never lost.
                message.ack()

                state.filtered += 1
                if callback:
                    callback(state, body, message)
                if limit and state.filtered >= limit:
                    raise StopFiltering()

        return start_filter(app, conn, on_task, consume_from=queues, **kwargs)
def expand_dest(ret, exchange, routing_key):
    """Unpack a predicate result into ``(exchange, routing_key)``.

    Falls back to the supplied defaults when *ret* is not a two-item
    iterable.
    """
    try:
        dest_exchange, dest_routing_key = ret
    except (TypeError, ValueError):
        dest_exchange, dest_routing_key = exchange, routing_key
    return dest_exchange, dest_routing_key
def task_id_eq(task_id, body, message):
    """Return True when the message body's task id equals *task_id*."""
    return task_id == body['id']
def task_id_in(ids, body, message):
    """Return True when the message body's task id is a member of *ids*."""
    wanted = body['id']
    return wanted in ids
def prepare_queues(queues):
    """Normalize a queue specification into a source->destination mapping.

    Accepts ``None`` (empty mapping), a comma-separated string of
    ``src[:dst]`` specs, a list of such specs, or an already-built mapping
    (returned unchanged).  A spec without an explicit destination maps the
    queue to itself.
    """
    if isinstance(queues, str):
        queues = queues.split(',')
    if isinstance(queues, list):
        mapping = {}
        for spec in queues:
            parts = spec.split(':')
            # 'src' alone maps to itself; 'src:dst[:...]' takes the first two.
            src = parts[0]
            dst = parts[1] if len(parts) > 1 else parts[0]
            mapping[src] = dst
        queues = mapping
    if queues is None:
        queues = {}
    return queues
| State |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/state.py | {
"start": 2286,
"end": 2377
class ____(Protocol):
    # Zero-argument callable that resolves to the owning IdentityMap, or
    # None when it is no longer available (presumably a weakref-style
    # accessor -- confirm against callers).
    def __call__(self) -> Optional[IdentityMap]: ...
| _InstanceDictProto |
python | tensorflow__tensorflow | tensorflow/python/framework/errors_impl.py | {
"start": 16618,
"end": 17059
class InternalError(OpError):
  """Raised when the system experiences an internal error.

  This exception is raised when some invariant expected by the runtime
  has been broken. Catching this exception is not recommended.
  """

  def __init__(self, node_def, op, message, *args):
    """Creates an `InternalError`."""
    super().__init__(node_def, op, message, INTERNAL, *args)
@tf_export("errors.UnavailableError")
| InternalError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.