language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | facebookresearch__faiss | tests/test_contrib.py | {
"start": 23376,
"end": 25611
} | class ____(unittest.TestCase):
@contextmanager
def temp_directory(self):
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def do_test_ondisk_merge(self, shift_ids=False):
with self.temp_directory() as tmpdir:
# only train and add index to disk without adding elements.
# this will create empty inverted lists.
ds = datasets.SyntheticDataset(32, 2000, 200, 20)
index = faiss.index_factory(ds.d, "IVF32,Flat")
index.train(ds.get_train())
faiss.write_index(index, tmpdir + "/trained.index")
# create 4 shards and add elements to them
ns = 4 # number of shards
for bno in range(ns):
index = faiss.read_index(tmpdir + "/trained.index")
i0, i1 = int(bno * ds.nb / ns), int((bno + 1) * ds.nb / ns)
if shift_ids:
index.add_with_ids(ds.xb[i0:i1], np.arange(0, ds.nb / ns))
else:
index.add_with_ids(ds.xb[i0:i1], np.arange(i0, i1))
faiss.write_index(index, tmpdir + "/block_%d.index" % bno)
# construct the output index and merge them on disk
index = faiss.read_index(tmpdir + "/trained.index")
block_fnames = [tmpdir + "/block_%d.index" % bno for bno in range(4)]
merge_ondisk(
index, block_fnames, tmpdir + "/merged_index.ivfdata", shift_ids
)
faiss.write_index(index, tmpdir + "/populated.index")
# perform a search from index on disk
index = faiss.read_index(tmpdir + "/populated.index")
index.nprobe = 5
D, I = index.search(ds.xq, 5)
# ground-truth
gtI = ds.get_groundtruth(5)
recall_at_1 = (I[:, :1] == gtI[:, :1]).sum() / float(ds.xq.shape[0])
self.assertGreaterEqual(recall_at_1, 0.5)
def test_ondisk_merge(self):
self.do_test_ondisk_merge()
def test_ondisk_merge_with_shift_ids(self):
# verified that recall is same for test_ondisk_merge and
self.do_test_ondisk_merge(True)
| TestMerge |
python | huggingface__transformers | src/transformers/models/vitmatte/modeling_vitmatte.py | {
"start": 7560,
"end": 10713
} | class ____(VitMattePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.backbone = load_backbone(config)
self.decoder = VitMatteDetailCaptureModule(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth image matting for computing the loss.
Examples:
```python
>>> from transformers import VitMatteImageProcessor, VitMatteForImageMatting
>>> import torch
>>> from PIL import Image
>>> from huggingface_hub import hf_hub_download
>>> processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k")
>>> model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k")
>>> filepath = hf_hub_download(
... repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset"
... )
>>> image = Image.open(filepath).convert("RGB")
>>> filepath = hf_hub_download(
... repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset"
... )
>>> trimap = Image.open(filepath).convert("L")
>>> # prepare image + trimap for the model
>>> inputs = processor(images=image, trimaps=trimap, return_tensors="pt")
>>> with torch.no_grad():
... alphas = model(**inputs).alphas
>>> print(alphas.shape)
torch.Size([1, 1, 640, 960])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
loss = None
if labels is not None:
raise NotImplementedError("Training is not yet supported")
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
features = outputs.feature_maps[-1]
alphas = self.decoder(features, pixel_values)
if not return_dict:
output = (alphas,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return ImageMattingOutput(
loss=loss,
alphas=alphas,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["VitMattePreTrainedModel", "VitMatteForImageMatting"]
| VitMatteForImageMatting |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 18328,
"end": 18767
} | class ____(Transform):
r"""
Transform via the mapping :math:`y = \exp(x)`.
"""
domain = constraints.real
codomain = constraints.positive
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, ExpTransform)
def _call(self, x):
return x.exp()
def _inverse(self, y):
return y.log()
def log_abs_det_jacobian(self, x, y):
return x
| ExpTransform |
python | google__pytype | pytype/imports/typeshed.py | {
"start": 5167,
"end": 15506
} | class ____:
"""A typeshed installation.
The location is either retrieved from the environment variable
"TYPESHED_HOME" (if set) or otherwise assumed to be directly under
pytype (i.e., /{some_path}/pytype/typeshed).
"""
# Text file of typeshed entries that will not be loaded.
# The path is relative to typeshed's root directory, e.g. if you set this to
# "missing.txt" you need to create $TYPESHED_HOME/missing.txt or
# pytype/typeshed/missing.txt
# For testing, this file must contain the entry 'stdlib/pytypecanary'.
MISSING_FILE = None
def __init__(self, missing_modules: Collection[str] = ()):
"""Initializer.
Args:
missing_modules: A collection of modules in the format
'stdlib/module_name', which will be combined with the contents of
MISSING_FILE to form a set of missing modules for which pytype will not
report errors.
"""
if os.getenv("TYPESHED_HOME"):
self._store = ExternalTypeshedFs(missing_file=self.MISSING_FILE)
else:
self._store = InternalTypeshedFs(missing_file=self.MISSING_FILE)
self._missing = self._load_missing().union(missing_modules)
self._stdlib_versions = self._load_stdlib_versions()
self._third_party_packages = self._load_third_party_packages()
def _load_missing(self):
lines = self._store.load_missing()
return frozenset(line.strip() for line in lines if line)
def _load_stdlib_versions(self):
"""Loads the contents of typeshed/stdlib/VERSIONS.
VERSIONS lists the stdlib modules with the Python version in which they were
first added, in the format `{module}: {min_major}.{min_minor}-` or
`{module}: {min_major}.{min_minor}-{max_major}.{max_minor}`.
Returns:
A mapping from module name to version range in the format
{name: ((min_major, min_minor), (max_major, max_minor))}
The max tuple can be `None`.
"""
lines = self._store.load_stdlib_versions()
versions = {}
for line in lines:
line2 = line.split("#")[0].strip()
if not line2:
continue
match = re.fullmatch(r"(.+): (\d)\.(\d+)(?:-(?:(\d)\.(\d+))?)?", line2)
assert match
module, min_major, min_minor, max_major, max_minor = match.groups()
minimum = (int(min_major), int(min_minor))
maximum = (
(int(max_major), int(max_minor))
if max_major is not None and max_minor is not None
else None
)
versions[module] = minimum, maximum
return versions
def _load_third_party_packages(self):
"""Loads package and Python version information for typeshed/stubs/.
stubs/ contains type information for third-party packages. Each top-level
directory corresponds to one PyPI package and contains one or more modules,
plus a metadata file (METADATA.toml). The top-level directory may contain a
@tests subdirectory for typeshed testing.
Returns:
A mapping from module name to a set of package names.
"""
modules = collections.defaultdict(set)
stubs = set()
for third_party_file in self._store.list_files("stubs"):
parts = third_party_file.split(path_utils.sep)
filename = parts[-1]
if filename == "METADATA.toml" or parts[1] == "@tests":
continue
if filename.endswith(".pyi"):
stubs.add(parts[0])
name, _ = path_utils.splitext(parts[1])
modules[parts[0]].add(name)
packages = collections.defaultdict(set)
for package, names in modules.items():
for name in names:
if package in stubs:
packages[name].add(package)
return packages
@property
def missing(self):
"""Set of known-missing typeshed modules, as strings of paths."""
return self._missing
def get_module_file(self, namespace, module, version):
"""Get the contents of a typeshed .pyi file.
Arguments:
namespace: selects a top-level directory within typeshed/ Allowed values
are "stdlib" and "third_party". "third_party" corresponds to the the
typeshed/stubs/ directory.
module: module name (e.g., "sys" or "__builtins__"). Can contain dots, if
it's a submodule. Package names should omit the "__init__" suffix (e.g.,
pass in "os", not "os.__init__").
version: The Python version. (major, minor)
Returns:
A tuple with the filename and contents of the file
Raises:
IOError: if file not found
"""
module_parts = module.split(".")
module_path = path_utils.join(*module_parts)
paths = []
if namespace == "stdlib":
# Stubs for the stdlib 'foo' module are located in stdlib/foo.
# The VERSIONS file tells us whether stdlib/foo exists and what versions
# it targets.
path = path_utils.join(namespace, module_path)
if (
self._is_module_in_typeshed(module_parts, version)
or path in self.missing
):
paths.append(path)
elif namespace == "third_party":
# For third-party modules, we grab the alphabetically first package that
# provides a module with the specified name in the right version.
# TODO(rechen): It would be more correct to check what packages are
# currently installed and only consider those.
for package in sorted(self._third_party_packages[module_parts[0]]):
paths.append(path_utils.join("stubs", package, module_path))
for path_rel in paths:
# Give precedence to MISSING_FILE
if path_rel in self.missing:
relpath = path_utils.join("nonexistent", path_rel + ".pyi")
return relpath, builtin_stubs.DEFAULT_SRC
for path in [
path_utils.join(path_rel, "__init__.pyi"),
path_rel + ".pyi",
]:
try:
name, src = self._store.load_file(path)
return name, src
except OSError:
pass
raise OSError(f"Couldn't find {module}")
def _lookup_stdlib_version(self, module_parts: Sequence[str]):
"""Looks up the prefix chain until we find the module in stdlib/VERSIONS."""
index = len(module_parts)
while index > 0:
name = ".".join(module_parts[:index])
if name in self._stdlib_versions:
return self._stdlib_versions[name]
index -= 1
return None
def _is_module_in_typeshed(self, module_parts, version):
assert module_parts[-1] != "__init__", module_parts
version_info = self._lookup_stdlib_version(module_parts)
if version_info is None:
return False
min_version, max_version = version_info
return min_version <= version and (
max_version is None or max_version >= version
)
def get_typeshed_paths(self):
"""Gets the paths to typeshed's version-specific pyi files."""
typeshed_subdirs = ["stdlib"]
for packages in self._third_party_packages.values():
for package in packages:
typeshed_subdirs.append(path_utils.join("stubs", package))
return [self._store.filepath(d) for d in typeshed_subdirs]
def get_pytd_paths(self):
"""Gets the paths to pytype's version-specific pytd files."""
return [
pytype_source_utils.get_full_path(d)
for d in (f"stubs{os.path.sep}builtins", f"stubs{os.path.sep}stdlib")
]
def _list_modules(self, path, python_version):
"""Lists modules for _get_module_names_in_path."""
for filename in self._store.list_files(path):
if filename in ("VERSIONS", "METADATA.toml"):
# stdlib/VERSIONS, stubs/{package}/METADATA.toml are metadata files.
continue
parts = path.split(os.path.sep)
if "stdlib" in parts:
# Check supported versions for stubs directly in stdlib/.
module_parts = module_utils.strip_init_suffix(
path_utils.splitext(filename)[0].split(os.path.sep)
)
if not self._is_module_in_typeshed(module_parts, python_version):
continue
yield filename
def _get_missing_modules(self):
"""Gets module names from the `missing` list."""
module_names = set()
for f in self.missing:
parts = f.split(os.path.sep)
if parts[0] == "stdlib":
start_index = 1 # remove stdlib/ prefix
else:
assert parts[0] == "stubs"
start_index = 2 # remove stubs/{package}/ prefix
filename = os.path.sep.join(parts[start_index:])
module_names.add(filename.replace(os.path.sep, "."))
return module_names
def get_all_module_names(self, python_version):
"""Get the names of all modules in typeshed or bundled with pytype."""
module_names = set()
for abspath in self.get_typeshed_paths():
relpath = abspath.rpartition(f"typeshed{os.path.sep}")[-1]
module_names |= _get_module_names_in_path(
self._list_modules, relpath, python_version
)
for abspath in self.get_pytd_paths():
relpath = abspath.rpartition(f"pytype{os.path.sep}")[-1]
module_names |= _get_module_names_in_path(
lambda path, _: pytype_source_utils.list_pytype_files(path),
relpath,
python_version,
)
# Also load modules not in typeshed, so that we have a dummy entry for them.
module_names |= self._get_missing_modules()
assert "ctypes" in module_names # sanity check
return module_names
def read_blacklist(self):
"""Read the typeshed blacklist."""
lines = self._store.load_pytype_blocklist()
for line in lines:
if "#" in line:
line = line[: line.index("#")]
line = line.strip()
if line:
yield line
def blacklisted_modules(self):
"""Return the blacklist, as a list of module names. E.g. ["x", "y.z"]."""
for path in self.read_blacklist():
# E.g. ["stdlib", "html", "parser.pyi"]
parts = path.split(path_utils.sep)
if parts[0] == "stdlib":
filename = path_utils.sep.join(parts[1:])
else:
filename = path_utils.sep.join(parts[2:])
mod = module_utils.path_to_module_name(filename)
if mod:
yield mod
def _get_typeshed(missing_modules):
"""Get a Typeshed instance."""
try:
return Typeshed(missing_modules)
except OSError as e:
# This happens if typeshed is not available. Which is a setup error
# and should be propagated to the user. The IOError is caught further up
# in the stack.
raise utils.UsageError(f"Couldn't initialize typeshed:\n {str(e)}")
| Typeshed |
python | celery__celery | celery/fixups/django.py | {
"start": 1833,
"end": 3664
} | class ____:
"""Fixup installed when using Django."""
def __init__(self, app: "Celery"):
self.app = app
if _state.default_app is None:
self.app.set_default()
self._worker_fixup: Optional["DjangoWorkerFixup"] = None
def install(self) -> "DjangoFixup":
# Need to add project directory to path.
# The project directory has precedence over system modules,
# so we prepend it to the path.
sys.path.insert(0, os.getcwd())
self._settings = symbol_by_name('django.conf:settings')
self.app.loader.now = self.now
if not self.app._custom_task_cls_used:
self.app.task_cls = 'celery.contrib.django.task:DjangoTask'
signals.import_modules.connect(self.on_import_modules)
signals.worker_init.connect(self.on_worker_init)
return self
@property
def worker_fixup(self) -> "DjangoWorkerFixup":
if self._worker_fixup is None:
self._worker_fixup = DjangoWorkerFixup(self.app)
return self._worker_fixup
@worker_fixup.setter
def worker_fixup(self, value: "DjangoWorkerFixup") -> None:
self._worker_fixup = value
def on_import_modules(self, **kwargs: Any) -> None:
# call django.setup() before task modules are imported
self.worker_fixup.validate_models()
def on_worker_init(self, **kwargs: Any) -> None:
self.worker_fixup.install()
def now(self, utc: bool = False) -> datetime:
return datetime.now(timezone.utc) if utc else self._now()
def autodiscover_tasks(self) -> List[str]:
from django.apps import apps
return [config.name for config in apps.get_app_configs()]
@cached_property
def _now(self) -> datetime:
return symbol_by_name('django.utils.timezone:now')
| DjangoFixup |
python | scipy__scipy | scipy/sparse/linalg/_interface.py | {
"start": 27501,
"end": 29583
} | class ____(LinearOperator):
def __init__(self, shape, dtype=None):
super().__init__(dtype, shape)
def _matvec(self, x):
return x
def _rmatvec(self, x):
return x
def _rmatmat(self, x):
return x
def _matmat(self, x):
return x
def _adjoint(self):
return self
def aslinearoperator(A):
"""Return A as a LinearOperator.
'A' may be any of the following types:
- ndarray
- matrix
- sparse array (e.g. csr_array, lil_array, etc.)
- LinearOperator
- An object with .shape and .matvec attributes
See the LinearOperator documentation for additional information.
Notes
-----
If 'A' has no .dtype attribute, the data type is determined by calling
:func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
call upon the linear operator creation.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import aslinearoperator
>>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
>>> aslinearoperator(M)
<2x3 MatrixLinearOperator with dtype=int32>
"""
if isinstance(A, LinearOperator):
return A
elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
if A.ndim > 2:
raise ValueError('array must have ndim <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif issparse(A) or is_pydata_spmatrix(A):
return MatrixLinearOperator(A)
else:
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
rmatvec = None
rmatmat = None
dtype = None
if hasattr(A, 'rmatvec'):
rmatvec = A.rmatvec
if hasattr(A, 'rmatmat'):
rmatmat = A.rmatmat
if hasattr(A, 'dtype'):
dtype = A.dtype
return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
rmatmat=rmatmat, dtype=dtype)
else:
raise TypeError('type not understood')
| IdentityOperator |
python | pytorch__pytorch | torch/distributed/tensor/parallel/style.py | {
"start": 13153,
"end": 18353
} | class ____(ParallelStyle):
"""
SequenceParallel replicates a compatible ``nn.Module`` parameters and runs the sharded computation with
input sharded on the sequence dimension. This currently supports ``nn.LayerNorm``, ``nn.Dropout``, and the
`RMSNorm python implementation <https://github.com/facebookresearch/llama/blob/main/llama/model.py#L34>`__
This style implements the operation that is described in the paper
`Reducing Activation Recomputation in Large Transformer Models <https://arxiv.org/abs/2205.05198>`__
If the input passed in to this ``nn.Module`` is a :class:`torch.Tensor`, it assumes that the input is already sharded
on the sequence dimension and converts the input to a :class:`DTensor` sharded on the sequence dimension. If the input
passed in to this ``nn.Module`` is already a :class:`DTensor` but is not sharded on the sequence dimension, it would
redistribute the input to be sharded on the sequence dimension.
The output of the ``nn.Module`` will be sharded on the sequence dimension.
Keyword Args:
sequence_dim (int, optional):
The sequence dimension of the input tensor for the ``nn.Module``, this is used to annotate the input tensor to
become a DTensor that is sharded on the sequence dimension, default: 1.
use_local_output (bool, optional):
Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: False.
Returns:
A :class:`ParallelStyle` object that represents Sequence Parallel of the ``nn.Module``.
Example::
>>> # xdoctest: +SKIP(failing)
>>> from torch.distributed.tensor.parallel import parallelize_module, SequenceParallel
>>> from torch.distributed.device_mesh import init_device_mesh
>>> ...
>>> m = Model(...) # m is a nn.Module that contains a "norm" nn.LayerNorm submodule
>>> tp_mesh = init_device_mesh("cuda", (8,))
>>>
>>> # By default, the input of the "norm" will be converted to DTensor that shards on the sequence dim
>>> # and the output of "norm" will return a sharded on sequence dimension :class:`DTensor`.
>>>
>>> sharded_mod = parallelize_module(m, tp_mesh, {"norm": SequenceParallel()}),
>>> ...
.. note:: SequenceParallel style assumes ones initialization if there are weights in the nn.Module (i.e.
``nn.LayerNorm`` or ``RMSNorm``, and they by default have ones initialization). If you have custom
inits for the weights on those modules, you need to broadcast the weights before/after parallelizing
to ensure that they are replicated.
"""
def __init__(self, *, sequence_dim: int = 1, use_local_output: bool = False):
super().__init__()
self.sequence_sharding = (Shard(sequence_dim),)
self.use_local_output = use_local_output
def _replicate_module_fn(
self, name: str, module: nn.Module, device_mesh: DeviceMesh
):
for p_name, param in module.named_parameters():
# simple replication with fixed ones_ init from LayerNorm/RMSNorm, which allow
# us to simply just use from_local
replicated_param = torch.nn.Parameter(
DTensor.from_local(param, device_mesh, [Replicate()], run_check=False)
)
module.register_parameter(p_name, replicated_param)
@staticmethod
def _prepare_input_fn(sequence_sharding, mod, inputs, device_mesh):
input_tensor = inputs[0]
if isinstance(input_tensor, DTensor):
# if the passed in input DTensor is not sharded on the sequence dim, we need to redistribute it
if input_tensor.placements != sequence_sharding:
input_tensor = input_tensor.redistribute(
placements=sequence_sharding, async_op=True
)
return input_tensor
elif isinstance(input_tensor, torch.Tensor):
# assume the input passed in already sharded on the sequence dim and create the DTensor
return DTensor.from_local(
input_tensor, device_mesh, sequence_sharding, run_check=False
)
else:
raise ValueError(
f"expecting input of {mod} to be a torch.Tensor or DTensor, but got {input_tensor}"
)
@staticmethod
def _prepare_output_fn(use_local_output, mod, outputs, device_mesh):
return outputs.to_local() if use_local_output else outputs
def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
return distribute_module(
module,
device_mesh,
self._replicate_module_fn,
partial(self._prepare_input_fn, self.sequence_sharding),
partial(self._prepare_output_fn, self.use_local_output),
)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
if len(self.sequence_sharding) == 1:
tmpstr += f"sequence_dim={self.sequence_sharding[0].dim}, "
tmpstr += f"use_local_output={self.use_local_output}"
tmpstr += ")"
return tmpstr
| SequenceParallel |
python | Pylons__pyramid | src/pyramid/scripts/ptweens.py | {
"start": 331,
"end": 3997
} | class ____:
description = """\
Print all implicit and explicit tween objects used by a Pyramid
application. The handler output includes whether the system is using an
explicit tweens ordering (will be true when the "pyramid.tweens"
deployment setting is used) or an implicit tweens ordering (will be true
when the "pyramid.tweens" deployment setting is *not* used).
This command accepts one positional argument named "config_uri" which
specifies the PasteDeploy config file to use for the interactive
shell. The format is "inifile#name". If the name is left off, "main"
will be assumed. Example: "ptweens myapp.ini#main".
"""
script_name = 'ptweens'
parser = argparse.ArgumentParser(
description=textwrap.dedent(description),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'config_uri',
nargs='?',
default=None,
help='The URI to the configuration file.',
)
parser.add_argument(
'config_vars',
nargs='*',
default=(),
help="Variables required by the config file. For example, "
"`http_port=%%(http_port)s` would expect `http_port=8080` to be "
"passed here.",
)
stdout = sys.stdout
bootstrap = staticmethod(bootstrap) # testing
setup_logging = staticmethod(setup_logging) # testing
def __init__(self, argv, quiet=False):
self.quiet = quiet
self.args = self.parser.parse_args(argv[1:])
def _get_tweens(self, registry):
from pyramid.config import Configurator
config = Configurator(registry=registry)
return config.registry.queryUtility(ITweens)
def out(self, msg): # pragma: no cover
if not self.quiet:
print(msg)
def show_chain(self, chain):
fmt = '%-10s %-65s'
self.out(fmt % ('Position', 'Name'))
self.out(fmt % ('-' * len('Position'), '-' * len('Name')))
self.out(fmt % ('-', INGRESS))
for pos, (name, _) in enumerate(chain):
self.out(fmt % (pos, name))
self.out(fmt % ('-', MAIN))
def run(self):
if not self.args.config_uri:
self.out('Requires a config file argument')
return 2
config_uri = self.args.config_uri
config_vars = parse_vars(self.args.config_vars)
config_vars.setdefault('__script__', self.script_name)
self.setup_logging(config_uri, global_conf=config_vars)
env = self.bootstrap(config_uri, options=config_vars)
registry = env['registry']
tweens = self._get_tweens(registry)
if tweens is not None:
explicit = tweens.explicit
if explicit:
self.out(
'"pyramid.tweens" config value set '
'(explicitly ordered tweens used)'
)
self.out('')
self.out('Explicit Tween Chain (used)')
self.out('')
self.show_chain(tweens.explicit)
self.out('')
self.out('Implicit Tween Chain (not used)')
self.out('')
self.show_chain(tweens.implicit())
else:
self.out(
'"pyramid.tweens" config value NOT set '
'(implicitly ordered tweens used)'
)
self.out('')
self.out('Implicit Tween Chain')
self.out('')
self.show_chain(tweens.implicit())
return 0
if __name__ == '__main__': # pragma: no cover
sys.exit(main() or 0)
| PTweensCommand |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 395,
"end": 622
} | class ____(Mixin1):
pass
A1().do_stuff()
# This should generate an error because B1 doesn't
# match the protocol.
B1().do_stuff()
# This should generate an error because C1 doesn't
# match the protocol.
C1().do_stuff()
| C1 |
python | python-poetry__poetry | src/poetry/console/commands/new.py | {
"start": 334,
"end": 2864
} | class ____(InitCommand):
name = "new"
description = "Creates a new Python project at <path>."
arguments: ClassVar[list[Argument]] = [
argument("path", "The path to create the project at.")
]
options: ClassVar[list[Option]] = [
option(
"interactive",
"i",
"Allow interactive specification of project configuration.",
flag=True,
),
option("name", None, "Set the resulting package name.", flag=False),
option(
"src",
None,
"Use the src layout for the project. "
"<warning>Deprecated</>: This is the default option now.",
),
option("flat", None, "Use the flat layout for the project."),
option(
"readme",
None,
"Specify the readme file format. Default is md.",
flag=False,
),
*[
o
for o in InitCommand.options
if o.name
in {
"description",
"author",
"python",
"dependency",
"dev-dependency",
"license",
}
],
]
def handle(self) -> int:
from pathlib import Path
if self.io.input.option("project"):
self.line_error(
"<warning>--project only makes sense with existing projects, and will"
" be ignored. You should consider the option --path instead.</warning>"
)
path = Path(self.argument("path"))
if not path.is_absolute():
# we do not use resolve here due to compatibility issues
# for path.resolve(strict=False)
path = Path.cwd().joinpath(path)
if path.exists() and list(path.glob("*")):
# Directory is not empty. Aborting.
raise RuntimeError(
f"Destination <fg=yellow>{path}</> exists and is not empty. Did you mean `poetry init`?"
)
if self.option("src"):
self.line_error(
"The <c1>--src</> option is now the default and will be removed in a future version."
)
return self._init_pyproject(
project_path=path,
allow_interactive=self.option("interactive"),
layout_name="standard" if self.option("flat") else "src",
readme_format=self.option("readme") or "md",
allow_layout_creation_on_empty=True,
)
| NewCommand |
python | huggingface__transformers | src/transformers/models/focalnet/modeling_focalnet.py | {
"start": 23414,
"end": 24187
} | class ____(PreTrainedModel):
config: FocalNetConfig
base_model_prefix = "focalnet"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["FocalNetStage"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, FocalNetEmbeddings):
if module.mask_token is not None:
init.zeros_(module.mask_token)
elif isinstance(module, FocalNetLayer):
if self.config.use_layerscale:
init.constant_(module.gamma_1, self.config.layerscale_value)
init.constant_(module.gamma_2, self.config.layerscale_value)
@auto_docstring
| FocalNetPreTrainedModel |
python | wandb__wandb | wandb/vendor/pygments/styles/xcode.py | {
"start": 384,
"end": 1501
} | class ____(Style):
"""
Style similar to the Xcode default colouring theme.
"""
default_style = ''
styles = {
Comment: '#177500',
Comment.Preproc: '#633820',
String: '#C41A16',
String.Char: '#2300CE',
Operator: '#000000',
Keyword: '#A90D91',
Name: '#000000',
Name.Attribute: '#836C28',
Name.Class: '#3F6E75',
Name.Function: '#000000',
Name.Builtin: '#A90D91',
# In Obj-C code this token is used to colour Cocoa types
Name.Builtin.Pseudo: '#5B269A',
Name.Variable: '#000000',
Name.Tag: '#000000',
Name.Decorator: '#000000',
# Workaround for a BUG here: lexer treats multiline method signatres as labels
Name.Label: '#000000',
Literal: '#1C01CE',
Number: '#1C01CE',
Error: '#000000',
}
| XcodeStyle |
python | neetcode-gh__leetcode | python/0452-minimum-number-of-arrows-to-burst-balloons.py | {
"start": 0,
"end": 418
} | class ____:
def findMinArrowShots(self, points: List[List[int]]) -> int:
points.sort()
res = len(points)
prev = points[0]
for i in range(1, len(points)):
curr = points[i]
if curr[0] <= prev[1]:
res -= 1
prev = [curr[0], min(curr[1], prev[1])]
else:
prev = curr
return res
| Solution |
python | doocs__leetcode | lcof/面试题57 - II. 和为s的连续正数序列/Solution.py | {
"start": 0,
"end": 400
} | class ____:
def findContinuousSequence(self, target: int) -> List[List[int]]:
l, r = 1, 2
ans = []
while l < r:
s = (l + r) * (r - l + 1) // 2
if s == target:
ans.append(list(range(l, r + 1)))
l += 1
elif s < target:
r += 1
else:
l += 1
return ans
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001_py39.py | {
"start": 257,
"end": 419
} | class ____(TeamBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
heroes: list["Hero"] = Relationship(back_populates="team")
| Team |
python | python__mypy | mypyc/ir/ops.py | {
"start": 36327,
"end": 37555
} | class ____(RegisterOp):
"""Raise built-in exception with an optional error string.
We have a separate opcode for this for convenience and to
generate smaller, more idiomatic C code.
"""
# TODO: Make it more explicit at IR level that this always raises
error_kind = ERR_FALSE
VALUE_ERROR: Final = "ValueError"
ASSERTION_ERROR: Final = "AssertionError"
STOP_ITERATION: Final = "StopIteration"
UNBOUND_LOCAL_ERROR: Final = "UnboundLocalError"
RUNTIME_ERROR: Final = "RuntimeError"
NAME_ERROR: Final = "NameError"
ZERO_DIVISION_ERROR: Final = "ZeroDivisionError"
def __init__(self, class_name: str, value: str | Value | None, line: int) -> None:
super().__init__(line)
self.class_name = class_name
self.value = value
self.type = bool_rprimitive
def sources(self) -> list[Value]:
return []
def set_sources(self, new: list[Value]) -> None:
assert not new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_raise_standard_error(self)
# True steals all arguments, False steals none, a list steals those in matching positions
StealsDescription = bool | list[bool]
@final
| RaiseStandardError |
python | openai__openai-python | tests/api_resources/conversations/test_items.py | {
"start": 531,
"end": 9575
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
item = client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.create(
conversation_id="",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
item = client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
client.conversations.items.with_raw_response.retrieve(
item_id="",
conversation_id="conv_123",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
item = client.conversations.items.list(
conversation_id="conv_123",
)
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.list(
conversation_id="conv_123",
after="after",
include=["file_search_call.results"],
limit=0,
order="asc",
)
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.list(
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.list(
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
item = client.conversations.items.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(Conversation, item, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
client.conversations.items.with_raw_response.delete(
item_id="",
conversation_id="conv_123",
)
| TestItems |
python | python__mypy | mypy/types.py | {
"start": 130823,
"end": 134770
} | class ____(ProperType):
"""Temporary, yet-unknown type during semantic analysis.
This is needed when there's a reference to a type before the real symbol
table entry of the target type is available (specifically, we use a
temporary PlaceholderNode symbol node). Consider this example:
class str(Sequence[str]): ...
We use a PlaceholderType for the 'str' in 'Sequence[str]' since we can't create
a TypeInfo for 'str' until all base classes have been resolved. We'll soon
perform another analysis iteration which replaces the base class with a complete
type without any placeholders. After semantic analysis, no placeholder types must
exist.
"""
__slots__ = ("fullname", "args")
def __init__(self, fullname: str | None, args: list[Type], line: int) -> None:
super().__init__(line)
self.fullname = fullname # Must be a valid full name of an actual node (or None).
self.args = args
def accept(self, visitor: TypeVisitor[T]) -> T:
assert isinstance(visitor, SyntheticTypeVisitor)
ret: T = visitor.visit_placeholder_type(self)
return ret
def __hash__(self) -> int:
return hash((self.fullname, tuple(self.args)))
def __eq__(self, other: object) -> bool:
if not isinstance(other, PlaceholderType):
return NotImplemented
return self.fullname == other.fullname and self.args == other.args
def serialize(self) -> str:
# We should never get here since all placeholders should be replaced
# during semantic analysis.
assert False, f"Internal error: unresolved placeholder type {self.fullname}"
@overload
def get_proper_type(typ: None) -> None: ...
@overload
def get_proper_type(typ: Type) -> ProperType: ...
def get_proper_type(typ: Type | None) -> ProperType | None:
"""Get the expansion of a type alias type.
If the type is already a proper type, this is a no-op. Use this function
wherever a decision is made on a call like e.g. 'if isinstance(typ, UnionType): ...',
because 'typ' in this case may be an alias to union. Note: if after making the decision
on the isinstance() call you pass on the original type (and not one of its components)
it is recommended to *always* pass on the unexpanded alias.
"""
if typ is None:
return None
# TODO: this is an ugly hack, remove.
if isinstance(typ, TypeGuardedType):
typ = typ.type_guard
while isinstance(typ, TypeAliasType):
typ = typ._expand_once()
# TODO: store the name of original type alias on this type, so we can show it in errors.
return cast(ProperType, typ)
@overload
def get_proper_types(types: list[Type] | tuple[Type, ...]) -> list[ProperType]: ...
@overload
def get_proper_types(
types: list[Type | None] | tuple[Type | None, ...],
) -> list[ProperType | None]: ...
def get_proper_types(
types: list[Type] | list[Type | None] | tuple[Type | None, ...],
) -> list[ProperType] | list[ProperType | None]:
if isinstance(types, list):
typelist = types
# Optimize for the common case so that we don't need to allocate anything
if not any(isinstance(t, (TypeAliasType, TypeGuardedType)) for t in typelist):
return cast("list[ProperType]", typelist)
return [get_proper_type(t) for t in typelist]
else:
return [get_proper_type(t) for t in types]
# We split off the type visitor base classes to another module
# to make it easier to gradually get modules working with mypyc.
# Import them here, after the types are defined.
# This is intended as a re-export also.
from mypy.type_visitor import (
ALL_STRATEGY as ALL_STRATEGY,
ANY_STRATEGY as ANY_STRATEGY,
BoolTypeQuery as BoolTypeQuery,
SyntheticTypeVisitor as SyntheticTypeVisitor,
TypeQuery as TypeQuery,
TypeTranslator as TypeTranslator,
TypeVisitor as TypeVisitor,
)
| PlaceholderType |
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 9093,
"end": 9284
} | class ____(IntegerFieldTests):
model = BigIntegerModel
documented_range = (-9223372036854775808, 9223372036854775807)
rel_db_type_class = models.BigIntegerField
| BigIntegerFieldTests |
python | huggingface__transformers | src/transformers/trainer_utils.py | {
"start": 29465,
"end": 29723
} | class ____(ExplicitEnum):
FULL_SHARD = "full_shard"
SHARD_GRAD_OP = "shard_grad_op"
NO_SHARD = "no_shard"
HYBRID_SHARD = "hybrid_shard"
HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2"
OFFLOAD = "offload"
AUTO_WRAP = "auto_wrap"
| FSDPOption |
python | pytorch__pytorch | benchmarks/tensorexpr/reduction.py | {
"start": 2325,
"end": 2587
} | class ____(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K, skip_input_transform):
super().__init__(mode, device, dtype, "row", M, N, K, skip_input_transform)
@staticmethod
def module():
return "reduce_row"
| ReduceRowBench |
python | ray-project__ray | rllib/examples/_old_api_stack/models/parametric_actions_model.py | {
"start": 4315,
"end": 7332
} | class ____(DistributionalQTFModel):
"""Same as the above ParametricActionsModel.
However, this version also learns the action embeddings.
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4,),
action_embed_size=2,
**kw
):
super(ParametricActionsModelThatLearnsEmbeddings, self).__init__(
obs_space, action_space, num_outputs, model_config, name, **kw
)
action_ids_shifted = tf.constant(
list(range(1, num_outputs + 1)), dtype=tf.float32
)
obs_cart = tf.keras.layers.Input(shape=true_obs_shape, name="obs_cart")
valid_avail_actions_mask = tf.keras.layers.Input(
shape=(num_outputs,), name="valid_avail_actions_mask"
)
self.pred_action_embed_model = FullyConnectedNetwork(
Box(-1, 1, shape=true_obs_shape),
action_space,
action_embed_size,
model_config,
name + "_pred_action_embed",
)
# Compute the predicted action embedding
pred_action_embed, _ = self.pred_action_embed_model({"obs": obs_cart})
_value_out = self.pred_action_embed_model.value_function()
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = tf.expand_dims(pred_action_embed, 1)
valid_avail_actions = action_ids_shifted * valid_avail_actions_mask
# Embedding for valid available actions which will be learned.
# Embedding vector for 0 is an invalid embedding (a "dummy embedding").
valid_avail_actions_embed = tf.keras.layers.Embedding(
input_dim=num_outputs + 1,
output_dim=action_embed_size,
name="action_embed_matrix",
)(valid_avail_actions)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = tf.reduce_sum(valid_avail_actions_embed * intent_vector, axis=2)
# Mask out invalid actions (use tf.float32.min for stability)
inf_mask = tf.maximum(tf.math.log(valid_avail_actions_mask), tf.float32.min)
action_logits = action_logits + inf_mask
self.param_actions_model = tf.keras.Model(
inputs=[obs_cart, valid_avail_actions_mask],
outputs=[action_logits, _value_out],
)
self.param_actions_model.summary()
def forward(self, input_dict, state, seq_lens):
# Extract the available actions mask tensor from the observation.
valid_avail_actions_mask = input_dict["obs"]["valid_avail_actions_mask"]
action_logits, self._value_out = self.param_actions_model(
[input_dict["obs"]["cart"], valid_avail_actions_mask]
)
return action_logits, state
def value_function(self):
return self._value_out
| ParametricActionsModelThatLearnsEmbeddings |
python | streamlit__streamlit | lib/tests/streamlit/runtime/state/widgets_test.py | {
"start": 2002,
"end": 12236
} | class ____(unittest.TestCase):
def test_get(self):
states = WidgetStates()
_create_widget("trigger", states).trigger_value = True
_create_widget("bool", states).bool_value = True
_create_widget("float", states).double_value = 0.5
_create_widget("int", states).int_value = 123
_create_widget("string", states).string_value = "howdy!"
session_state = SessionState()
session_state.set_widgets_from_proto(states)
session_state._set_widget_metadata(create_metadata("trigger", "trigger_value"))
session_state._set_widget_metadata(create_metadata("bool", "bool_value"))
session_state._set_widget_metadata(create_metadata("float", "double_value"))
session_state._set_widget_metadata(create_metadata("int", "int_value"))
session_state._set_widget_metadata(create_metadata("string", "string_value"))
assert session_state["trigger"]
assert session_state["bool"]
assert session_state["float"] == pytest.approx(0.5)
assert session_state["int"] == 123
assert session_state["string"] == "howdy!"
def test_get_nonexistent(self):
session_state = SessionState()
with pytest.raises(KeyError):
session_state["fake_widget_id"]
def test_get_prev_widget_value_nonexistent(self):
session_state = SessionState()
with pytest.raises(KeyError):
session_state["fake_widget_id"]
def test_set_widget_attrs_nonexistent(self):
session_state = SessionState()
session_state._set_widget_metadata(create_metadata("fake_widget_id", ""))
assert isinstance(
session_state._new_widget_state.widget_metadata["fake_widget_id"],
WidgetMetadata,
)
def test_call_callbacks(self):
"""Test the call_callbacks method in 6 possible cases:
1. A widget does not have a callback
2. A widget's old and new values are equal, so the callback is not
called.
3. A widget's callback has no args provided.
4. A widget's callback has just args provided.
5. A widget's callback has just kwargs provided.
6. A widget's callback has both args and kwargs provided.
"""
prev_states = WidgetStates()
_create_widget("trigger", prev_states).trigger_value = True
_create_widget("bool", prev_states).bool_value = True
_create_widget("bool2", prev_states).bool_value = True
_create_widget("float", prev_states).double_value = 0.5
_create_widget("int", prev_states).int_value = 123
_create_widget("string", prev_states).string_value = "howdy!"
session_state = SessionState()
session_state.set_widgets_from_proto(prev_states)
mock_callback = MagicMock()
def deserializer(x):
return x
callback_cases = [
("trigger", "trigger_value", None, None, None),
("bool", "bool_value", mock_callback, None, None),
("bool2", "bool_value", mock_callback, None, None),
("float", "double_value", mock_callback, (1,), None),
("int", "int_value", mock_callback, None, {"x": 2}),
("string", "string_value", mock_callback, (1,), {"x": 2}),
]
for widget_id, value_type, callback, args, kwargs in callback_cases:
session_state._set_widget_metadata(
WidgetMetadata(
widget_id,
deserializer,
lambda x: x,
value_type=value_type,
callback=callback,
callback_args=args,
callback_kwargs=kwargs,
)
)
states = WidgetStates()
_create_widget("trigger", states).trigger_value = True
_create_widget("bool", states).bool_value = True
_create_widget("bool2", states).bool_value = False
_create_widget("float", states).double_value = 1.5
_create_widget("int", states).int_value = 321
_create_widget("string", states).string_value = "!ydwoh"
session_state.on_script_will_rerun(states)
mock_callback.assert_has_calls([call(), call(1), call(x=2), call(1, x=2)])
def test_marshall_excludes_widgets_without_state(self):
widget_states = WidgetStates()
_create_widget("trigger", widget_states).trigger_value = True
session_state = SessionState()
session_state.set_widgets_from_proto(widget_states)
session_state._set_widget_metadata(
WidgetMetadata("other_widget", lambda x: x, None, "trigger_value", True)
)
widgets = session_state.get_widget_states()
assert len(widgets) == 1
assert widgets[0].id == "trigger"
def test_reset_triggers(self):
states = WidgetStates()
session_state = SessionState()
_create_widget("trigger", states).trigger_value = True
_create_widget("int", states).int_value = 123
session_state.set_widgets_from_proto(states)
session_state._set_widget_metadata(
WidgetMetadata("trigger", lambda x: x, None, "trigger_value")
)
session_state._set_widget_metadata(
WidgetMetadata("int", lambda x: x, None, "int_value")
)
assert session_state["trigger"]
assert session_state["int"] == 123
session_state._reset_triggers()
assert not session_state["trigger"]
assert session_state["int"] == 123
def test_reset_chat_input_triggers(self):
states = WidgetStates()
session_state = SessionState()
_create_widget("chat_input", states).chat_input_value.CopyFrom(
ChatInputValueProto(
data="Some Value",
)
)
_create_widget("int", states).int_value = 123
session_state.set_widgets_from_proto(states)
session_state._set_widget_metadata(
WidgetMetadata("chat_input", lambda x: x, None, "chat_input_value")
)
session_state._set_widget_metadata(
WidgetMetadata("int", lambda x: x, None, "int_value")
)
assert session_state["chat_input"].data == "Some Value"
assert session_state["int"] == 123
session_state._reset_triggers()
assert session_state["chat_input"] is None
assert session_state["int"] == 123
def test_coalesce_widget_states(self):
session_state = SessionState()
old_states = WidgetStates()
_create_widget("old_set_trigger", old_states).trigger_value = True
_create_widget("old_unset_trigger", old_states).trigger_value = False
_create_widget("old_set_chat_input", old_states).chat_input_value.CopyFrom(
ChatInputValueProto(data="Some String")
)
_create_widget(
"old_set_empty_chat_input", old_states
).chat_input_value.CopyFrom(ChatInputValueProto(data=""))
_create_widget("old_unset_chat_input", old_states).chat_input_value.CopyFrom(
ChatInputValueProto(data=None)
)
_create_widget("missing_in_new", old_states).int_value = 123
_create_widget("shape_changing_trigger", old_states).trigger_value = True
_create_widget("overwritten_chat_input", old_states).chat_input_value.CopyFrom(
ChatInputValueProto(data="old string")
)
session_state._set_widget_metadata(
create_metadata("old_set_trigger", "trigger_value")
)
session_state._set_widget_metadata(
create_metadata("old_unset_trigger", "trigger_value")
)
session_state._set_widget_metadata(
create_metadata("missing_in_new", "int_value")
)
session_state._set_widget_metadata(
create_metadata("shape changing trigger", "trigger_value")
)
new_states = WidgetStates()
_create_widget("old_set_trigger", new_states).trigger_value = False
_create_widget("new_set_trigger", new_states).trigger_value = True
_create_widget(
"old_set_empty_chat_input", new_states
).chat_input_value.CopyFrom(ChatInputValueProto(data=None))
_create_widget("new_set_chat_input", new_states).chat_input_value.CopyFrom(
ChatInputValueProto(data="Some other string")
)
_create_widget("added_in_new", new_states).int_value = 456
_create_widget("shape_changing_trigger", new_states).int_value = 3
_create_widget("overwritten_chat_input", new_states).chat_input_value.CopyFrom(
ChatInputValueProto(data="Overwritten string")
)
session_state._set_widget_metadata(
create_metadata("new_set_trigger", "trigger_value")
)
session_state._set_widget_metadata(create_metadata("added_in_new", "int_value"))
session_state._set_widget_metadata(
create_metadata("shape_changing_trigger", "int_value")
)
session_state.set_widgets_from_proto(
_coalesce_widget_states(old_states, new_states)
)
with pytest.raises(KeyError):
session_state["old_unset_trigger"]
with pytest.raises(KeyError):
session_state["missing_in_new"]
with pytest.raises(KeyError):
session_state["old_unset_string_trigger"]
assert session_state["old_set_trigger"]
assert session_state["new_set_trigger"]
assert session_state["added_in_new"] == 456
# Widgets that were triggers before, but no longer are, will *not*
# be coalesced
assert session_state["shape_changing_trigger"] == 3
def coalesce_widget_states_returns_None_if_both_inputs_None(self):
assert _coalesce_widget_states(None, None) is None
def coalesce_widget_states_returns_old_states_if_new_states_None(self):
old_states = WidgetStates()
assert _coalesce_widget_states(old_states, None) is old_states
def coalesce_widget_states_returns_new_states_if_old_states_None(self):
new_states = WidgetStates()
assert _coalesce_widget_states(None, new_states) is new_states
| WidgetManagerTests |
python | pytorch__pytorch | test/onnx/torchlib/ops_test_data.py | {
"start": 2306,
"end": 24119
} | class ____:
"""A dataclass to store the information to test an torchlib op."""
# The name of the op_info, e.g. "add"
op_info_name: str
# The torchlib ONNX Function to test
op: Callable[..., Any]
# The input wrangler function to adjust the input to fit the aten signature
input_wrangler: Optional[
Callable[[list[Any], dict[str, Any]], tuple[list[Any], dict[str, Any]]]
] = None
# Whether the op is non-deterministic
nondeterministic: bool = False
# Whether to compare the shape only for the output[index]
# For example: (1,2) means compare value for output[0] and shape for output[1] and [2]
# We may be able to combine this with the nondeterministic option
compare_shape_only_for_output: tuple[int, ...] = ()
# Whether the function is designed for complex inputs
complex: bool = False
# The ONNX opset version in which the function was introduced.
# Its specifies the minimum ONNX opset version required to use the function.
# It ensures that the function is only used when the target ONNX opset version
# is compatible. For example, if `opset_introduced=20`, the function will only
# be used when exporting to ONNX models targeting opset version 20 or higher.
opset_introduced: int = 18
# The acceptable tolerance of the inference result difference between PyTorch and ORT.
# Format: {dtype: (rtol, atol)}.
# For example: {torch.float16: (1e-3, 1e-3)}
tolerance: dict[torch.dtype, tuple[float, float]] = dataclasses.field(
default_factory=dict
)
# Expected skips or fails for the test and/or subtests
skips_or_fails: list[ops_test_common.DecorateMeta] = dataclasses.field(
default_factory=list
)
def get_tolerance(self, dtype: torch.dtype) -> tuple[float | None, float | None]:
"""Returns the (rtol, atol) tolerance for the given dtype."""
if (tolerance := self.tolerance.get(dtype)) is not None:
return tolerance
# Use the PyTorch default if not specified
# https://pytorch.org/docs/stable/testing.html
return (None, None)
def skip(
self,
variant_name: str = "",
*,
reason: str,
dtypes: Optional[Collection[torch.dtype]] = None,
device_type: Optional[str] = None,
matcher: Optional[Callable[[Any], Any]] = None,
enabled_if: bool = True,
test_class_name: Optional[str] = None,
) -> Self:
"""Skips an OpInfo test.
Args:
variant_name: Optional OpInfo variant_test_name.
reason: The reason for skipping.
dtypes: The dtypes to skip.
device_type: Device type. E.g. "cpu", "cuda".
matcher: A function that matches the test sample input. It is used only when
the skip is in the SKIP_XFAIL_SUBTESTS list.
enabled_if: Whether the skip is enabled.
test_class_name: The test class name to apply the skip to. If None, the skip
is applied to all test classes.
"""
self.skips_or_fails.append(
ops_test_common.skip(
self.op_info_name,
variant_name,
reason=reason,
dtypes=dtypes,
device_type=device_type,
matcher=matcher,
enabled_if=enabled_if,
test_class_name=test_class_name,
)
)
return self
def xfail(
self,
variant_name: str = "",
*,
reason: str,
dtypes: Optional[Collection[torch.dtype]] = None,
device_type: Optional[str] = None,
matcher: Optional[Callable[[Any], Any]] = None,
enabled_if: bool = True,
test_class_name: Optional[str] = None,
) -> Self:
"""Expects an OpInfo test to fail.
Args:
variant_name: Optional OpInfo variant_test_name.
reason: The reason for the failure.
dtypes: The dtypes to expect the failure
device_type: Device type. E.g. "cpu", "cuda"..
matcher: A function that matches the test sample input. It is used only when
the xfail is in the SKIP_XFAIL_SUBTESTS list.
enabled_if: Whether the xfail is enabled.
test_class_name: The test class name to apply the xfail to. If None, the
xfail is applied to all test classes.
"""
self.skips_or_fails.append(
ops_test_common.xfail(
self.op_info_name,
variant_name,
reason=reason,
dtypes=dtypes,
device_type=device_type,
matcher=matcher,
enabled_if=enabled_if,
test_class_name=test_class_name,
)
)
return self
# Modify this section ##########################################################
def _amin_amax_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "dim" not in kwargs:
# Supply an empty dim to match the aten signature
kwargs["dim"] = np.array([], dtype=np.int64)
else:
# Convert dim to a numpy array
kwargs["dim"] = np.array(kwargs["dim"], dtype=np.int64).reshape((-1,))
return args, kwargs
def _avg_pool_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "dim" not in kwargs:
if len(args) > 6:
kwargs["divisor_override"] = args.pop(6)
if len(args) > 5:
kwargs["count_include_pad"] = args.pop(5)
if len(args) > 4:
kwargs["ceil_mode"] = args.pop(4)
if len(args) > 3:
padding = args.pop(3)
if isinstance(padding, np.ndarray):
# Cannot using list(padding) here, because the element will be numpy.int64 instead of int
padding = padding.tolist()
kwargs["padding"] = padding
if len(args) > 2:
stride = args.pop(2)
if isinstance(stride, np.ndarray):
stride = stride.tolist()
kwargs["stride"] = stride
kernel_size = args.pop(1)
if isinstance(kernel_size, np.ndarray):
kernel_size = kernel_size.tolist()
kwargs["kernel_size"] = kernel_size
return args, kwargs
def _cross_entropy_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "reduction" in kwargs:
reduction_vals = ["none", "mean", "sum"]
value = kwargs["reduction"]
idx = reduction_vals.index(value)
kwargs["reduction"] = idx
return args, kwargs
def _dropout_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "training" in kwargs:
kwargs["train"] = kwargs["training"]
kwargs.pop("training")
return args, kwargs
def _einsum_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Swap the equation and tensors to revert the special handling in the OpInfo
return [args[1], args[0]], kwargs
def _embedding_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
"""Remove arguments not present in the aten op signature."""
kwargs.pop("max_norm", None)
kwargs.pop("norm_type", None)
return args, kwargs
def _empty_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
"""Remove arguments not present in the aten op signature."""
kwargs.pop("requires_grad", None)
return args, kwargs
def _grid_sample_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Convert string attribute to int as input
inter_mode_options = {"bilinear": 0, "nearest": 1, "bicubic": 2}
padding_mode_options = {"zeros": 0, "border": 1, "reflection": 2}
args.append(inter_mode_options[kwargs["mode"]])
args.append(padding_mode_options[kwargs["padding_mode"]])
args.append(kwargs["align_corners"])
kwargs.clear()
return args, kwargs
def _im2col_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Move kernel_size, dilation, padding and stride from args to kwargs
if len(args) == 5:
# Handle stride
stride = args.pop()
if isinstance(stride, np.ndarray): # convert stride to list[int]
stride = stride.tolist()
kwargs["stride"] = stride
# Handle padding
padding = args.pop()
if isinstance(padding, np.ndarray): # convert padding to list[int]
padding = padding.tolist()
kwargs["padding"] = padding
# Handle dilation
dilation = args.pop()
if isinstance(dilation, np.ndarray): # convert dilation to list[int]
dilation = dilation.tolist()
kwargs["dilation"] = dilation
# Handle kernel_size
kernel_size = args.pop()
if isinstance(kernel_size, np.ndarray): # convert kernel_size to list[int]
kernel_size = kernel_size.tolist()
kwargs["kernel_size"] = kernel_size
return args, kwargs
def _index_put_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
args[1] = [np.array(elem) for elem in args[1]]
return args, kwargs
def _max_pool_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Remove return_indices argument because this op doesn't accept it
kwargs.pop("return_indices", None)
return args, kwargs
def _mean_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Make the dims as tensor
if "dim" in kwargs:
kwargs["dim"] = np.array(kwargs["dim"], dtype=np.int64)
return args, kwargs
def _mse_loss_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "reduction" in kwargs:
reduction_vals = ["none", "mean", "sum"] # [0,1,2], default=1
value = kwargs["reduction"]
idx = reduction_vals.index(value)
kwargs["reduction"] = idx
return args, kwargs
def _nll_loss_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if "reduction" in kwargs:
# aten_nll_loss can only accept integer argument instead of string
reduction_vals = ["none", "mean", "sum"]
value = kwargs["reduction"]
kwargs["reduction"] = reduction_vals.index(value)
return args, kwargs
def _nonzero_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
kwargs.pop("as_tuple", None)
return args, kwargs
def _reflection_pad2d_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
args.pop(2) # remove 'reflect' arg
return args, kwargs
def _replication_pad2d_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
args.pop(2) # remove 'replicate' arg
return args, kwargs
def _replication_pad3d_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
args.pop(2) # remove 'replicate' arg
return args, kwargs
def _roll_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if len(args) >= 3:
if isinstance(args[2], np.ndarray): # convert dims to list[int]
# Change dims from args to kwargs to keep tuple/list type
dims = args.pop(2)
kwargs["dims"] = dims.tolist()
elif isinstance(args[2], int): # convert dims to list[int]
dims = args.pop(2)
kwargs["dims"] = []
kwargs["dims"].append(dims)
if len(args) >= 2:
if isinstance(args[1], np.ndarray): # convert shift to list[int]
shifts = args.pop(1)
kwargs["shifts"] = shifts.tolist()
elif isinstance(args[1], int):
shifts = args.pop(1)
kwargs["shifts"] = []
kwargs["shifts"].append(shifts)
return args, kwargs
def _scalar_tensor_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
kwargs.pop("requires_grad", None)
return args, kwargs
def _scatter_reduce_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# Put the string into kwargs, otherwise FullGraph mode could not find get 'reduce' argument
kwargs["reduce"] = args.pop(4)
return args, kwargs
def _sum_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
if kwargs.get("dim") is not None:
kwargs["dim"] = np.array(kwargs["dim"], dtype=np.int64)
return args, kwargs
def _unflatten_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
args[1] = np.array(args[1], dtype=np.int64)
return args, kwargs
def _where_input_wrangler(
args: list[Any], kwargs: dict[str, Any]
) -> tuple[list[Any], dict[str, Any]]:
# The aten::where op takes condition, x, y as inputs
# Swap the first two inputs
args[0], args[1] = args[1], args[0]
return args, kwargs
# Ops to be tested for numerical consistency between onnx and pytorch
# Find the names of the OpInfos in torch/testing/_internal/common_methods_invocations.py
# Each entry pairs an OpInfo name with the torchlib implementation under test;
# entries may declare tolerances, a minimum opset, complex support, and
# per-sample skips (via .skip(matcher=..., reason=...)).
TESTED_TORCHLIB_OPS: tuple[TorchLibOpInfo, ...] = (
    TorchLibOpInfo("abs", core_ops.aten_abs),
    TorchLibOpInfo("abs", core_ops.aten_abs_complex, complex=True),
    TorchLibOpInfo("add", core_ops.aten_add, tolerance={torch.float16: (1e-3, 1e-3)}),
    TorchLibOpInfo("add", core_ops.aten_add_complex, complex=True),
    TorchLibOpInfo("gelu_op20", nn_ops.aten_gelu_opset20, opset_introduced=20),
    TorchLibOpInfo(
        "nn.functional.group_norm", nn_ops.aten_group_norm, opset_introduced=21
    ).skip(
        reason="ONNX Runtime does not support zero sized inputs for GroupNorm",
        matcher=lambda sample: sample.input.numel() == 0,
    ),
    TorchLibOpInfo(
        "nn.functional.rms_norm", nn_ops.aten_rms_norm, opset_introduced=23
    ).skip(
        reason="ONNX Runtime does not support <1d inputs or zero sized inputs for RMSNorm",
        matcher=lambda sample: len(sample.input.shape) < 2 or sample.input.numel() == 0,
    ),
)
# Register renamed copies of OpInfos so a single torch op can be tested
# against several torchlib overloads (e.g. "all" vs "all_dim"); each alias
# name must match an op_info_name used in TESTED_TORCHLIB_OPS.
ops_test_common.duplicate_opinfo(OPS_DB, "all", ("all_dim", "all_dims"))
ops_test_common.duplicate_opinfo(OPS_DB, "any", ("any_dim", "any_dims"))
ops_test_common.duplicate_opinfo(
    OPS_DB, "arange", ("arange_start", "arange_start_step")
)
ops_test_common.duplicate_opinfo(OPS_DB, "atleast_1d", ("atleast_1d_Sequence",))
ops_test_common.duplicate_opinfo(OPS_DB, "atleast_2d", ("atleast_2d_Sequence",))
ops_test_common.duplicate_opinfo(OPS_DB, "atleast_3d", ("atleast_3d_Sequence",))
ops_test_common.duplicate_opinfo(
    OPS_DB,
    "bitwise_left_shift",
    (
        "bitwise_left_shift_int8",
        "bitwise_left_shift_int16",
        "bitwise_left_shift_int32",
        "bitwise_left_shift_int64",
    ),
)
ops_test_common.duplicate_opinfo(
    OPS_DB,
    "bitwise_right_shift",
    (
        "bitwise_right_shift_int8",
        "bitwise_right_shift_int16",
        "bitwise_right_shift_int32",
        "bitwise_right_shift_int64",
    ),
)
ops_test_common.duplicate_opinfo(OPS_DB, "cat", ("concat", "concatenate"))
ops_test_common.duplicate_opinfo(OPS_DB, "clone", ("lift_fresh_copy",))
ops_test_common.duplicate_opinfo(OPS_DB, "diagonal", ("diagonal_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "div", ("div_mode", "div_mode_int"))
ops_test_common.duplicate_opinfo(OPS_DB, "ge", ("ge_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "gt", ("gt_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "index_put", ("index_put_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "le", ("le_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "lt", ("lt_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "max", ("max_dim",))
ops_test_common.duplicate_opinfo(OPS_DB, "maximum", ("maximum_bool",))
ops_test_common.duplicate_opinfo(OPS_DB, "mean", ("mean_dim",))
ops_test_common.duplicate_opinfo(OPS_DB, "min", ("min_dim",))
ops_test_common.duplicate_opinfo(OPS_DB, "minimum", ("minimum_bool",))
ops_test_common.duplicate_opinfo(
    OPS_DB,
    "nn.functional.pad",
    (
        "nn.functional.reflection_pad2d",
        "nn.functional.replication_pad2d",
        "nn.functional.replication_pad3d",
    ),
)
ops_test_common.duplicate_opinfo(OPS_DB, "nn.functional.gelu", ("gelu_op20",))
ops_test_common.duplicate_opinfo(
    OPS_DB,
    "nn.functional.scaled_dot_product_attention",
    ("nn.functional.scaled_dot_product_attention_bool_mask",),
)
ops_test_common.duplicate_opinfo(
    OPS_DB,
    "nn.functional.celu",
    ("nn.functional.celu_type_promoted",),
)
ops_test_common.duplicate_opinfo(
    OPS_DB, "ops.aten._log_softmax", ("ops.aten._log_softmax_half",)
)
ops_test_common.duplicate_opinfo(
    OPS_DB, "ops.aten._softmax", ("ops.aten._softmax_half",)
)
ops_test_common.duplicate_opinfo(OPS_DB, "prod", ("prod_dim_int",))
ops_test_common.duplicate_opinfo(OPS_DB, "round", ("round_decimals",))
ops_test_common.duplicate_opinfo(OPS_DB, "squeeze", ("squeeze_dim",))
ops_test_common.duplicate_opinfo(OPS_DB, "view_as_complex", ("view_as_complex_copy",))
ops_test_common.duplicate_opinfo(OPS_DB, "view_as_real", ("view_as_real_copy",))
# MARK: End edits here
# These ops are not deterministic, so we check shape and dtype only
NONDETERMINISTIC_OPS: frozenset[str] = frozenset(
    info.op_info_name for info in TESTED_TORCHLIB_OPS if info.nondeterministic
)
# Maps op name -> the set of output indices whose values are not compared,
# only their shapes (as declared on each TorchLibOpInfo).
COMPARE_SHAPE_ONLY_OPS: dict[
    str,
    set,
] = {
    info.op_info_name: set(info.compare_shape_only_for_output)
    for info in TESTED_TORCHLIB_OPS
}
# Maps op name -> TorchLibOpInfo for real-valued (non-complex) overloads;
# complex overloads live in COMPLEX_FUNCTION_MAPPING.
TORCHLIB_OPINFO_MAPPING: dict[
    str,
    TorchLibOpInfo,
] = {info.op_info_name: info for info in TESTED_TORCHLIB_OPS if not info.complex}
TESTED_OPS = frozenset(TORCHLIB_OPINFO_MAPPING)
# Skips/xfails that apply unconditionally (no per-sample matcher); these are
# attached as test decorators. Matcher-based entries are collected separately
# in SKIP_XFAIL_SUBTESTS. A flat generator expression replaces the previous
# functools.reduce flatten, which was quadratic and raised TypeError when
# TESTED_TORCHLIB_OPS was empty (reduce with no initializer).
EXPECTED_SKIPS_OR_FAILS: tuple[ops_test_common.DecorateMeta, ...] = tuple(
    meta
    for info in TESTED_TORCHLIB_OPS
    for meta in info.skips_or_fails
    if meta.matcher is None
)
# Skips/xfails that carry a per-sample matcher; applied sample-by-sample at
# run time rather than as decorators. A flat generator expression replaces
# the previous functools.reduce flatten, which was quadratic and raised
# TypeError when TESTED_TORCHLIB_OPS was empty (reduce with no initializer).
SKIP_XFAIL_SUBTESTS: tuple[ops_test_common.DecorateMeta, ...] = tuple(
    meta
    for info in TESTED_TORCHLIB_OPS
    for meta in info.skips_or_fails
    if meta.matcher is not None
)
# MARK: Complex supported functions
# Maps op name -> TorchLibOpInfo for overloads that operate on complex dtypes
# (the complement of TORCHLIB_OPINFO_MAPPING).
COMPLEX_FUNCTION_MAPPING: dict[
    str,
    TorchLibOpInfo,
] = {info.op_info_name: info for info in TESTED_TORCHLIB_OPS if info.complex}
# Call dir(torch.ops.prims) and compare with entries in OPS_DB to create OpInfo for newly added prims ops
# Names listed here share semantics with the identically-named torch op and
# get a "prims_"-prefixed OpInfo duplicate below.
PRIMS_OPS_WITH_OP_INFO = (
    "abs",
    "acos",
    "acosh",
    "add",
    "amax",
    "amin",
    "as_strided",
    "as_strided_scatter",
    "asin",
    "asinh",
    "atan",
    "atan2",
    "atanh",
    "bitwise_and",
    "bitwise_not",
    "bitwise_or",
    "bitwise_xor",
    "cat",
    "ceil",
    "clone",
    "conj",
    "conj_physical",
    "cos",
    "cosh",
    "digamma",
    "div",
    "empty",
    "eq",
    "erf",
    "erfc",
    "exp",
    "exp2",
    "expm1",
    "fill",
    "floor",
    "fmax",
    "fmin",
    "fmod",
    "full",
    "full_like",
    "gcd",
    "ge",
    "gt",
    "hypot",
    "igamma",
    "igammac",
    "imag",
    "isfinite",
    "le",
    "lgamma",
    "log",
    "log10",
    "log1p",
    "log2",
    "lt",
    "maximum",
    "minimum",
    "mul",
    "ne",
    "neg",
    "nextafter",
    "normal",
    "pow",
    "prod",
    "real",
    "reciprocal",
    "remainder",
    "reshape",
    "round",
    "rsqrt",
    "scalar_tensor",
    "sign",
    "signbit",
    "sin",
    "sinh",
    "sqrt",
    "squeeze",
    "sub",
    "sum",
    "svd",
    "tan",
    "tanh",
    "transpose",
    "trunc",
    "uniform",
    "where",
)
for op in PRIMS_OPS_WITH_OP_INFO:
    # Duplicate opinfo for prim ops. The new names all start with "prims_". E.g. "abs" -> "prims_abs".
    ops_test_common.duplicate_opinfo_for_prims(OPS_DB, op)
# Duplicate cases where the prims op name is different from the torch op name
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "i0", "bessel_i0")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.bessel_j0", "bessel_j0")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.bessel_j1", "bessel_j1")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.erfcx", "erfcx")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.i0e", "bessel_i0e")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.i1", "bessel_i1")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.i1e", "bessel_i1e")
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.ndtri", "ndtri")
ops_test_common.duplicate_opinfo_for_prims(
    OPS_DB, "special.spherical_bessel_j0", "spherical_bessel_j0"
)
ops_test_common.duplicate_opinfo_for_prims(OPS_DB, "special.zeta", "zeta")
# Op names that carry at least one matcher-based (per-sample) skip/xfail.
OP_WITH_SKIPPED_XFAIL_SUBTESTS = frozenset(meta.op_name for meta in SKIP_XFAIL_SUBTESTS)
ALL_OPS_IN_DB = frozenset(op_info.name for op_info in OPS_DB)
# Assert all ops in OPINFO_FUNCTION_MAPPING are in the OPS_DB
# (import-time sanity checks that fail fast on a typo'd op name).
assert TESTED_OPS.issubset(ALL_OPS_IN_DB), f"{TESTED_OPS - ALL_OPS_IN_DB} not in OPS_DB"
assert NONDETERMINISTIC_OPS.issubset(TESTED_OPS), (
    f"{NONDETERMINISTIC_OPS - TESTED_OPS} not in TESTED_OPS"
)
| TorchLibOpInfo |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 10004,
"end": 10535
} | class ____(ModelEvent):
''' Announce a value being submitted on a text input widget.
'''
event_name = 'value_submit'
value: str
def __init__(self, model: TextInput | None, value: str) -> None:
from .models.widgets import TextInput
if model is not None and not isinstance(model, TextInput):
clsname = self.__class__.__name__
raise ValueError(f"{clsname} event only applies to text input models")
super().__init__(model=model)
self.value = value
| ValueSubmit |
python | huggingface__transformers | src/transformers/models/glm46v/modeling_glm46v.py | {
"start": 2247,
"end": 3199
} | class ____(ModelOutput):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
rope_deltas: Optional[torch.LongTensor] = None
@auto_docstring
| Glm46VModelOutputWithPast |
python | django-guardian__django-guardian | guardian/testapp/tests/test_admin.py | {
"start": 1433,
"end": 13146
} | class ____(TestCase):
def setUp(self):
self.admin = User.objects.create_superuser("admin", "admin@example.com", "admin")
self.user = User.objects.create_user("joe", "joe@example.com", "joe")
self.group = Group.objects.create(name="group")
self.client = Client()
self.obj = ContentType.objects.create(model="bar", app_label="fake-for-guardian-tests")
self.obj_info = self.obj._meta.app_label, self.obj._meta.model_name
def tearDown(self):
self.client.logout()
def _login_superuser(self):
self.client.login(username="admin", password="admin")
def test_view_manage_wrong_obj(self):
self._login_superuser()
url = reverse(
"admin:%s_%s_permissions_manage_user" % self.obj_info, kwargs={"object_pk": -10, "user_id": self.user.pk}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["object"], self.obj)
def test_view_manage_wrong_user(self):
self._login_superuser()
url = reverse(
"admin:%s_%s_permissions_manage_user" % self.obj_info, kwargs={"object_pk": self.obj.pk, "user_id": -10}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_manage_user_form(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"user": self.user.username, "submit_manage_user": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse(
"admin:%s_%s_permissions_manage_user" % self.obj_info,
kwargs={"object_pk": self.obj.pk, "user_id": self.user.pk},
)
self.assertEqual(response.request["PATH_INFO"], redirect_url)
@unittest.skipIf(
DJANGO_VERSION >= (3, 0) and "mysql" in os.environ.get("DATABASE_URL", ""),
"Negative ids no longer work in Django 3.0+ with MySQL.",
)
def test_view_manage_negative_user_form(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
self.user = User.objects.create(username="negative_id_user", pk=-2010)
data = {"user": self.user.username, "submit_manage_user": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse("admin:%s_%s_permissions_manage_user" % self.obj_info, args=[self.obj.pk, self.user.pk])
self.assertEqual(response.request["PATH_INFO"], redirect_url)
def test_view_manage_user_form_wrong_user(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"user": "wrong-user", "submit_manage_user": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("user" in response.context["user_form"].errors)
def test_view_manage_user_form_wrong_field(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"user": "<xss>", "submit_manage_user": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("user" in response.context["user_form"].errors)
def test_view_manage_user_form_empty_user(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"user": "", "submit_manage_user": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("user" in response.context["user_form"].errors)
def test_view_manage_user_wrong_perms(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions_manage_user" % self.obj_info, args=[self.obj.pk, self.user.pk])
perms = ["change_user"] # This is not self.obj related permission
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue("permissions" in response.context["form"].errors)
def test_view_manage_user(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions_manage_user" % self.obj_info, args=[self.obj.pk, self.user.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
choices = {c[0] for c in response.context["form"].fields["permissions"].choices}
self.assertEqual(
{p.codename for p in get_perms_for_model(self.obj)},
choices,
)
# Add some perms and check if changes were persisted
perms = ["change_%s" % self.obj_info[1], "delete_%s" % self.obj_info[1]]
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertIn("selected", str(response.context["form"]))
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
# Remove perm and check if change was persisted
perms = ["change_%s" % self.obj_info[1]]
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
def test_view_manage_group_form(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"group": self.group.name, "submit_manage_group": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse(
"admin:%s_%s_permissions_manage_group" % self.obj_info, args=[self.obj.pk, self.group.id]
)
self.assertEqual(response.request["PATH_INFO"], redirect_url)
@unittest.skipIf(
DJANGO_VERSION >= (3, 0) and "mysql" in os.environ.get("DATABASE_URL", ""),
"Negative ids no longer work in Django 3.0+ with MySQL.",
)
def test_view_manage_negative_group_form(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
self.group = Group.objects.create(name="neagive_id_group", id=-2010)
data = {"group": self.group.name, "submit_manage_group": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse(
"admin:%s_%s_permissions_manage_group" % self.obj_info, args=[self.obj.pk, self.group.id]
)
self.assertEqual(response.request["PATH_INFO"], redirect_url)
def test_view_manage_group_form_wrong_group(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"group": "wrong-group", "submit_manage_group": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("group" in response.context["group_form"].errors)
def test_view_manage_group_form_wrong_field(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"group": "<xss>", "submit_manage_group": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("group" in response.context["group_form"].errors)
def test_view_manage_group_form_empty_group(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions" % self.obj_info, args=[self.obj.pk])
data = {"group": "", "submit_manage_group": "submit"}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue("group" in response.context["group_form"].errors)
def test_view_manage_group_wrong_perms(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions_manage_group" % self.obj_info, args=[self.obj.pk, self.group.id])
perms = ["change_user"] # This is not self.obj related permission
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue("permissions" in response.context["form"].errors)
def test_view_manage_group(self):
self._login_superuser()
url = reverse("admin:%s_%s_permissions_manage_group" % self.obj_info, args=[self.obj.pk, self.group.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
choices = {c[0] for c in response.context["form"].fields["permissions"].choices}
self.assertEqual(
{p.codename for p in get_perms_for_model(self.obj)},
choices,
)
# Add some perms and check if changes were persisted
perms = ["change_%s" % self.obj_info[1], "delete_%s" % self.obj_info[1]]
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.group, self.obj)),
set(perms),
)
# Remove perm and check if change was persisted
perms = ["delete_%s" % self.obj_info[1]]
data = {"permissions": perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.group, self.obj)),
set(perms),
)
if "django.contrib.admin" not in settings.INSTALLED_APPS:
# Skip admin tests if admin app is not registered
# we simpy clean up AdminTests class ...
# TODO: use @unittest.skipUnless('django.contrib.admin' in settings.INSTALLED_APPS)
# if possible (requires Python 2.7, though)
AdminTests = type("AdminTests", (TestCase,), {})
@skipUnlessTestApp
| AdminTests |
python | scrapy__scrapy | scrapy/spiders/sitemap.py | {
"start": 772,
"end": 5741
} | class ____(Spider):
sitemap_urls: Sequence[str] = ()
sitemap_rules: Sequence[tuple[re.Pattern[str] | str, str | CallbackT]] = [
("", "parse")
]
sitemap_follow: Sequence[re.Pattern[str] | str] = [""]
sitemap_alternate_links: bool = False
_max_size: int
_warn_size: int
@classmethod
def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
spider = super().from_crawler(crawler, *args, **kwargs)
spider._max_size = getattr(
spider, "download_maxsize", spider.settings.getint("DOWNLOAD_MAXSIZE")
)
spider._warn_size = getattr(
spider, "download_warnsize", spider.settings.getint("DOWNLOAD_WARNSIZE")
)
return spider
def __init__(self, *a: Any, **kw: Any):
super().__init__(*a, **kw)
self._cbs: list[tuple[re.Pattern[str], CallbackT]] = []
for r, c in self.sitemap_rules:
if isinstance(c, str):
c = cast("CallbackT", getattr(self, c))
self._cbs.append((regex(r), c))
self._follow: list[re.Pattern[str]] = [regex(x) for x in self.sitemap_follow]
async def start(self) -> AsyncIterator[Any]:
for item_or_request in self.start_requests():
yield item_or_request
def start_requests(self) -> Iterable[Request]:
for url in self.sitemap_urls:
yield Request(url, self._parse_sitemap)
def sitemap_filter(
self, entries: Iterable[dict[str, Any]]
) -> Iterable[dict[str, Any]]:
"""This method can be used to filter sitemap entries by their
attributes, for example, you can filter locs with lastmod greater
than a given date (see docs).
"""
yield from entries
def _parse_sitemap(self, response: Response) -> Iterable[Request]:
if response.url.endswith("/robots.txt"):
for url in sitemap_urls_from_robots(response.text, base_url=response.url):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning(
"Ignoring invalid sitemap: %(response)s",
{"response": response},
extra={"spider": self},
)
return
s = Sitemap(body)
it = self.sitemap_filter(s)
if s.type == "sitemapindex":
for loc in iterloc(it, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == "urlset":
for loc in iterloc(it, self.sitemap_alternate_links):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
def _get_sitemap_body(self, response: Response) -> bytes | None:
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
if gzip_magic_number(response):
uncompressed_size = len(response.body)
max_size = response.meta.get("download_maxsize", self._max_size)
warn_size = response.meta.get("download_warnsize", self._warn_size)
try:
body = gunzip(response.body, max_size=max_size)
except _DecompressionMaxSizeExceeded:
return None
if uncompressed_size < warn_size <= len(body):
logger.warning(
f"{response} body size after decompression ({len(body)} B) "
f"is larger than the download warning size ({warn_size} B)."
)
return body
# actual gzipped sitemap files are decompressed above ;
# if we are here (response body is not gzipped)
# and have a response for .xml.gz,
# it usually means that it was already gunzipped
# by HttpCompression middleware,
# the HTTP response being sent with "Content-Encoding: gzip"
# without actually being a .xml.gz file in the first place,
# merely XML gzip-compressed on the fly,
# in other word, here, we have plain XML
if response.url.endswith(".xml") or response.url.endswith(".xml.gz"):
return response.body
return None
def regex(x: re.Pattern[str] | str) -> re.Pattern[str]:
if isinstance(x, str):
return re.compile(x)
return x
def iterloc(it: Iterable[dict[str, Any]], alt: bool = False) -> Iterable[str]:
for d in it:
yield d["loc"]
# Also consider alternate URLs (xhtml:link rel="alternate")
if alt and "alternate" in d:
yield from d["alternate"]
| SitemapSpider |
python | huggingface__transformers | src/transformers/models/ernie/modeling_ernie.py | {
"start": 46085,
"end": 46521
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
@auto_docstring(
custom_intro="""
Ernie Model with a `next sentence prediction (classification)` head on top.
"""
)
| ErnieOnlyNSPHead |
python | RaRe-Technologies__gensim | gensim/topic_coherence/text_analysis.py | {
"start": 6930,
"end": 8211
} | class ____(BaseAnalyzer):
"""Analyzer that builds up an inverted index to accumulate stats."""
def __init__(self, *args):
"""
Parameters
----------
args : dict
Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`
Examples
--------
.. sourcecode:: pycon
>>> from gensim.topic_coherence import text_analysis
>>>
>>> ids = {1: 'fake', 4: 'cats'}
>>> ininb = text_analysis.InvertedIndexBased(ids)
>>>
>>> print(ininb._inverted_index)
[set([]) set([])]
"""
super(InvertedIndexBased, self).__init__(*args)
self._inverted_index = np.array([set() for _ in range(self._vocab_size)])
def _get_occurrences(self, word_id):
return len(self._inverted_index[word_id])
def _get_co_occurrences(self, word_id1, word_id2):
s1 = self._inverted_index[word_id1]
s2 = self._inverted_index[word_id2]
return len(s1.intersection(s2))
def index_to_dict(self):
contiguous2id = {n: word_id for word_id, n in self.id2contiguous.items()}
return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)}
| InvertedIndexBased |
python | euske__pdfminer | pdfminer/pdfdocument.py | {
"start": 4174,
"end": 5969
} | class ____(PDFXRef):
def __repr__(self):
return '<PDFXRefFallback: offsets=%r>' % (self.offsets.keys())
PDFOBJ_CUE = re.compile(br'^(\d+)\s+(\d+)\s+obj\b')
def load(self, parser):
parser.seek(0)
while 1:
try:
(pos, line) = parser.nextline()
except PSEOF:
break
if line.startswith(b'trailer'):
parser.seek(pos)
self.load_trailer(parser)
if self.debug: logging.info('trailer: %r' % self.get_trailer())
break
m = self.PDFOBJ_CUE.match(line)
if not m:
continue
(objid, genno) = m.groups()
objid = int(objid)
genno = int(genno)
self.offsets[objid] = (None, pos, genno)
# expand ObjStm.
parser.seek(pos)
(_, obj) = parser.nextobject()
if isinstance(obj, PDFStream) and obj.get('Type') is LITERAL_OBJSTM:
stream = stream_value(obj)
try:
n = stream['N']
except KeyError:
if STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
parser1 = PDFStreamParser(stream.get_data())
objs = []
try:
while 1:
(_, obj) = parser1.nextobject()
objs.append(obj)
except PSEOF:
pass
n = min(n, len(objs)//2)
for index in range(n):
objid1 = objs[index*2]
self.offsets[objid1] = (objid, index, 0)
return
## PDFXRefStream
##
| PDFXRefFallback |
python | numba__numba | numba/cuda/simulator/cudadrv/devices.py | {
"start": 324,
"end": 1691
} | class ____:
'''
This stub implements functionality only for simulating a single GPU
at the moment.
'''
def __init__(self, device_id):
self._device_id = device_id
self._device = FakeCUDADevice()
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __str__(self):
return "<Managed Device {self.id}>".format(self=self)
@property
def id(self):
return self._device_id
@property
def device(self):
return self._device
@property
def compute_capability(self):
return _SIMULATOR_CC
def reset(self):
pass
def get_memory_info(self):
"""
Cross-platform free / total host memory is hard without external
dependencies, e.g. `psutil` - so return infinite memory to maintain API
type compatibility
"""
return _MemoryInfo(float('inf'), float('inf'))
def memalloc(self, sz):
"""
Allocates memory on the simulated device
At present, there is no division between simulated
host memory and simulated device memory.
"""
return np.ndarray(sz, dtype='u1')
def memhostalloc(self, sz, mapped=False, portable=False, wc=False):
'''Allocates memory on the host'''
return self.memalloc(sz)
| FakeCUDAContext |
python | PyCQA__pylint | tests/functional/a/access/access_to_protected_members.py | {
"start": 746,
"end": 1138
} | class ____(MyClass):
"""Subclass with protected members."""
def __init__(self):
MyClass._protected = 5
super()._private_method()
INST = Subclass()
INST.attr = 1
print(INST.attr)
INST._protected = 2 # [protected-access]
print(INST._protected) # [protected-access]
INST._cls_protected = 3 # [protected-access]
print(INST._cls_protected) # [protected-access]
| Subclass |
python | huggingface__transformers | src/transformers/models/roformer/modeling_roformer.py | {
"start": 27581,
"end": 27922
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RoFormerLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@auto_docstring
| RoFormerOnlyMLMHead |
python | graphql-python__graphene | graphene/tests/issues/test_313.py | {
"start": 366,
"end": 634
} | class ____(graphene.Mutation):
class Arguments:
text = graphene.String(required=True)
result = graphene.Field(CreatePostResult)
def mutate(self, info, text):
result = Success(yeah="yeah")
return CreatePost(result=result)
| CreatePost |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py | {
"start": 62559,
"end": 63401
} | class ____(RendezvousBackend):
def __init__(self) -> None:
self._lock = threading.Lock()
self._state = None
self._token = None
@property
def name(self):
return "_in_memory_backend"
def get_state(self):
with self._lock:
if self._state is None:
return None
return (self._state, self._token)
return self._state
def set_state(self, state, token):
if state is None:
raise ValueError("State cannot be None.")
with self._lock:
if token is None and self._token is not None:
return None
if self._token != token:
return None
self._state = state
self._token = self._token + 1 if self._token is not None else 0
| _InMemoryRendezvousBackend |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_artifacts/client.py | {
"start": 9725,
"end": 11056
} | class ____(BaseAsyncClient):
async def read_latest_artifacts(
self, **kwargs: Unpack["ArtifactCollectionReadParams"]
) -> list["ArtifactCollection"]:
response = await self.request(
"POST",
"/artifacts/latest/filter",
json={
"artifacts": (
artifact_filter.model_dump(mode="json", exclude_unset=True)
if (artifact_filter := kwargs.get("artifact_filter"))
else None
),
"flow_runs": (
flow_run_filter.model_dump(mode="json", exclude_unset=True)
if (flow_run_filter := kwargs.get("flow_run_filter"))
else None
),
"task_runs": (
task_run_filter.model_dump(mode="json", exclude_unset=True)
if (task_run_filter := kwargs.get("task_run_filter"))
else None
),
"limit": kwargs.get("limit", None),
"offset": kwargs.get("offset", 0),
"sort": kwargs.get("sort", None),
},
)
from prefect.client.schemas.objects import ArtifactCollection
return ArtifactCollection.model_validate_list(response.json())
| ArtifactCollectionAsyncClient |
python | PyCQA__bandit | bandit/core/node_visitor.py | {
"start": 299,
"end": 10830
} | class ____:
def __init__(
self, fname, fdata, metaast, testset, debug, nosec_lines, metrics
):
self.debug = debug
self.nosec_lines = nosec_lines
self.scores = {
"SEVERITY": [0] * len(constants.RANKING),
"CONFIDENCE": [0] * len(constants.RANKING),
}
self.depth = 0
self.fname = fname
self.fdata = fdata
self.metaast = metaast
self.testset = testset
self.imports = set()
self.import_aliases = {}
self.tester = b_tester.BanditTester(
self.testset, self.debug, nosec_lines, metrics
)
# in some cases we can't determine a qualified name
try:
self.namespace = b_utils.get_module_qualname_from_path(fname)
except b_utils.InvalidModulePath:
LOG.warning(
"Unable to find qualified name for module: %s", self.fname
)
self.namespace = ""
LOG.debug("Module qualified name: %s", self.namespace)
self.metrics = metrics
def visit_ClassDef(self, node):
"""Visitor for AST ClassDef node
Add class name to current namespace for all descendants.
:param node: Node being inspected
:return: -
"""
# For all child nodes, add this class name to current namespace
self.namespace = b_utils.namespace_path_join(self.namespace, node.name)
def visit_FunctionDef(self, node):
"""Visitor for AST FunctionDef nodes
add relevant information about the node to
the context for use in tests which inspect function definitions.
Add the function name to the current namespace for all descendants.
:param node: The node that is being inspected
:return: -
"""
self.context["function"] = node
qualname = self.namespace + "." + b_utils.get_func_name(node)
name = qualname.split(".")[-1]
self.context["qualname"] = qualname
self.context["name"] = name
# For all child nodes and any tests run, add this function name to
# current namespace
self.namespace = b_utils.namespace_path_join(self.namespace, name)
self.update_scores(self.tester.run_tests(self.context, "FunctionDef"))
def visit_Call(self, node):
"""Visitor for AST Call nodes
add relevant information about the node to
the context for use in tests which inspect function calls.
:param node: The node that is being inspected
:return: -
"""
self.context["call"] = node
qualname = b_utils.get_call_name(node, self.import_aliases)
name = qualname.split(".")[-1]
self.context["qualname"] = qualname
self.context["name"] = name
self.update_scores(self.tester.run_tests(self.context, "Call"))
def visit_Import(self, node):
"""Visitor for AST Import nodes
add relevant information about node to
the context for use in tests which inspect imports.
:param node: The node that is being inspected
:return: -
"""
for nodename in node.names:
if nodename.asname:
self.import_aliases[nodename.asname] = nodename.name
self.imports.add(nodename.name)
self.context["module"] = nodename.name
self.update_scores(self.tester.run_tests(self.context, "Import"))
def visit_ImportFrom(self, node):
"""Visitor for AST ImportFrom nodes
add relevant information about node to
the context for use in tests which inspect imports.
:param node: The node that is being inspected
:return: -
"""
module = node.module
if module is None:
return self.visit_Import(node)
for nodename in node.names:
# TODO(ljfisher) Names in import_aliases could be overridden
# by local definitions. If this occurs bandit will see the
# name in import_aliases instead of the local definition.
# We need better tracking of names.
if nodename.asname:
self.import_aliases[nodename.asname] = (
module + "." + nodename.name
)
else:
# Even if import is not aliased we need an entry that maps
# name to module.name. For example, with 'from a import b'
# b should be aliased to the qualified name a.b
self.import_aliases[nodename.name] = (
module + "." + nodename.name
)
self.imports.add(module + "." + nodename.name)
self.context["module"] = module
self.context["name"] = nodename.name
self.update_scores(self.tester.run_tests(self.context, "ImportFrom"))
def visit_Constant(self, node):
"""Visitor for AST Constant nodes
call the appropriate method for the node type.
this maintains compatibility with <3.6 and 3.8+
This code is heavily influenced by Anthony Sottile (@asottile) here:
https://bugs.python.org/msg342486
:param node: The node that is being inspected
:return: -
"""
if isinstance(node.value, str):
self.visit_Str(node)
elif isinstance(node.value, bytes):
self.visit_Bytes(node)
def visit_Str(self, node):
"""Visitor for AST String nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["str"] = node.value
if not isinstance(node._bandit_parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange(node._bandit_parent)
self.update_scores(self.tester.run_tests(self.context, "Str"))
def visit_Bytes(self, node):
"""Visitor for AST Bytes nodes
add relevant information about node to
the context for use in tests which inspect strings.
:param node: The node that is being inspected
:return: -
"""
self.context["bytes"] = node.value
if not isinstance(node._bandit_parent, ast.Expr): # docstring
self.context["linerange"] = b_utils.linerange(node._bandit_parent)
self.update_scores(self.tester.run_tests(self.context, "Bytes"))
def pre_visit(self, node):
self.context = {}
self.context["imports"] = self.imports
self.context["import_aliases"] = self.import_aliases
if self.debug:
LOG.debug(ast.dump(node))
self.metaast.add_node(node, "", self.depth)
if hasattr(node, "lineno"):
self.context["lineno"] = node.lineno
if hasattr(node, "col_offset"):
self.context["col_offset"] = node.col_offset
if hasattr(node, "end_col_offset"):
self.context["end_col_offset"] = node.end_col_offset
self.context["node"] = node
self.context["linerange"] = b_utils.linerange(node)
self.context["filename"] = self.fname
self.context["file_data"] = self.fdata
LOG.debug(
"entering: %s %s [%s]", hex(id(node)), type(node), self.depth
)
self.depth += 1
LOG.debug(self.context)
return True
def visit(self, node):
name = node.__class__.__name__
method = "visit_" + name
visitor = getattr(self, method, None)
if visitor is not None:
if self.debug:
LOG.debug("%s called (%s)", method, ast.dump(node))
visitor(node)
else:
self.update_scores(self.tester.run_tests(self.context, name))
def post_visit(self, node):
self.depth -= 1
LOG.debug("%s\texiting : %s", self.depth, hex(id(node)))
# HACK(tkelsey): this is needed to clean up post-recursion stuff that
# gets setup in the visit methods for these node types.
if isinstance(node, (ast.FunctionDef, ast.ClassDef)):
self.namespace = b_utils.namespace_path_split(self.namespace)[0]
def generic_visit(self, node):
"""Drive the visitor."""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
max_idx = len(value) - 1
for idx, item in enumerate(value):
if isinstance(item, ast.AST):
if idx < max_idx:
item._bandit_sibling = value[idx + 1]
else:
item._bandit_sibling = None
item._bandit_parent = node
if self.pre_visit(item):
self.visit(item)
self.generic_visit(item)
self.post_visit(item)
elif isinstance(value, ast.AST):
value._bandit_sibling = None
value._bandit_parent = node
if self.pre_visit(value):
self.visit(value)
self.generic_visit(value)
self.post_visit(value)
def update_scores(self, scores):
"""Score updater
Since we moved from a single score value to a map of scores per
severity, this is needed to update the stored list.
:param score: The score list to update our scores with
"""
# we'll end up with something like:
# SEVERITY: {0, 0, 0, 10} where 10 is weighted by finding and level
for score_type in self.scores:
self.scores[score_type] = list(
map(operator.add, self.scores[score_type], scores[score_type])
)
def process(self, data):
"""Main process loop
Build and process the AST
:param lines: lines code to process
:return score: the aggregated score for the current file
"""
f_ast = ast.parse(data)
self.generic_visit(f_ast)
# Run tests that do not require access to the AST,
# but only to the whole file source:
self.context = {
"file_data": self.fdata,
"filename": self.fname,
"lineno": 0,
"linerange": [0, 1],
"col_offset": 0,
}
self.update_scores(self.tester.run_tests(self.context, "File"))
return self.scores
| BanditNodeVisitor |
python | kamyu104__LeetCode-Solutions | Python/maximum-elegance-of-a-k-length-subsequence.py | {
"start": 116,
"end": 1220
} | class ____(object):
def findMaximumElegance(self, items, k):
"""
:type items: List[List[int]]
:type k: int
:rtype: int
"""
curr = 0
lookup = set()
stk = []
for p, c in heapq.nlargest(k, items):
if c in lookup:
stk.append(p)
curr += p
lookup.add(c)
sl = SortedList()
lookup2 = {}
for p, c in items:
if c in lookup:
continue
if c in lookup2:
if lookup2[c] >= p:
continue
sl.remove((lookup2[c], c))
sl.add((p, c))
lookup2[c] = p
if len(sl) > len(stk):
del lookup2[sl[0][1]]
del sl[0]
result = curr+len(lookup)**2
for p, c in reversed(sl):
curr += p-stk.pop()
lookup.add(c)
result = max(result, curr+len(lookup)**2)
return result
# Time: O(n + klogk)
# Space: O(n)
import random
import collections
# quick select, sort, greedy
| Solution |
python | getsentry__sentry | src/sentry/snuba/utils.py | {
"start": 3214,
"end": 4818
} | class ____:
query_string: str
query_extra: str
query: str
def build_query_strings(
subscription: QuerySubscription | None, snuba_query: SnubaQuery
) -> QueryStrings:
"""
Constructs a QueryStrings dataclass given a QuerySubscription and SnubaQuery.
query_string value is derived from the snuba_query.query and the subscription.query_extra.
TODO: determine whether this is necessary in all places where `snuba_query.query` is used.
"""
query_extra = ""
if subscription and subscription.query_extra:
if snuba_query.query:
query_extra = " and "
query_extra += subscription.query_extra
return QueryStrings(
query=snuba_query.query,
query_extra=query_extra,
query_string=f"{snuba_query.query}{query_extra}",
)
def dataset_split_decision_inferred_from_query(columns, query):
"""
Infers split decision based on fields we know exclusively belong to one
dataset or the other. Biases towards Errors dataset.
"""
for field in ERROR_ONLY_FIELDS:
if field in query:
return DiscoverSavedQueryTypes.ERROR_EVENTS
for field in TRANSACTION_ONLY_FIELDS:
if field in query:
return DiscoverSavedQueryTypes.TRANSACTION_LIKE
for column in columns:
for field in ERROR_ONLY_FIELDS:
if field in column:
return DiscoverSavedQueryTypes.ERROR_EVENTS
for field in TRANSACTION_ONLY_FIELDS:
if field in column:
return DiscoverSavedQueryTypes.TRANSACTION_LIKE
return None
| QueryStrings |
python | django__django | tests/postgres_tests/models.py | {
"start": 1842,
"end": 2406
} | class ____(PostgreSQLModel):
ips = ArrayField(models.GenericIPAddressField(), default=list)
uuids = ArrayField(models.UUIDField(), default=list)
decimals = ArrayField(
models.DecimalField(max_digits=5, decimal_places=2), default=list
)
tags = ArrayField(TagField(), blank=True, null=True)
json = ArrayField(models.JSONField(default=dict), default=list, null=True)
int_ranges = ArrayField(IntegerRangeField(), blank=True, null=True)
bigint_ranges = ArrayField(BigIntegerRangeField(), blank=True, null=True)
| OtherTypesArrayModel |
python | huggingface__transformers | src/transformers/models/vivit/configuration_vivit.py | {
"start": 782,
"end": 5142
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ViViT
[google/vivit-b-16x2-kinetics400](https://huggingface.co/google/vivit-b-16x2-kinetics400) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
num_frames (`int`, *optional*, defaults to 32):
The number of frames in each video.
tubelet_size (`list[int]`, *optional*, defaults to `[2, 16, 16]`):
The size (resolution) of each tubelet.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_fast"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"`, `"gelu_fast"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
Example:
```python
>>> from transformers import VivitConfig, VivitModel
>>> # Initializing a ViViT google/vivit-b-16x2-kinetics400 style configuration
>>> configuration = VivitConfig()
>>> # Initializing a model (with random weights) from the google/vivit-b-16x2-kinetics400 style configuration
>>> model = VivitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vivit"
def __init__(
self,
image_size=224,
num_frames=32,
tubelet_size=[2, 16, 16],
num_channels=3,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu_fast",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-06,
qkv_bias=True,
**kwargs,
):
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
super().__init__(**kwargs)
__all__ = ["VivitConfig"]
| VivitConfig |
python | pytorch__pytorch | test/inductor/test_aot_inductor.py | {
"start": 288075,
"end": 288569
} | class ____(TestCase):
device = "cpu"
device_type = "cpu"
check_model = check_model
check_model_with_multiple_inputs = check_model_with_multiple_inputs
code_check_count = code_check_count
allow_stack_allocation = False
use_minimal_arrayref_interface = False
copy_tests(
AOTInductorTestsTemplate,
AOTInductorTestABICompatibleCpu,
"cpu",
CPU_TEST_FAILURES,
)
@unittest.skipIf(sys.platform == "darwin", "No CUDA on MacOS")
| AOTInductorTestABICompatibleCpu |
python | tensorflow__tensorflow | tensorflow/python/util/object_identity.py | {
"start": 6429,
"end": 6992
} | class ____(ObjectIdentitySet):
"""Like weakref.WeakSet, but compares objects with "is"."""
__slots__ = ()
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
self.discard(key)
else:
yield unwrapped
# LINT.ThenChange(//tensorflow/python/keras/utils/object_identity.py)
| ObjectIdentityWeakSet |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py | {
"start": 343,
"end": 435
} | class ____:
if ...:
...
else:
def __eq__(self, other): ...
| MaybeEqElse |
python | keras-team__keras | guides/making_new_layers_and_models_via_subclassing.py | {
"start": 5430,
"end": 6901
} | class ____(keras.layers.Layer):
def __init__(self):
super().__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = keras.activations.relu(x)
x = self.linear_2(x)
x = keras.activations.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(
ops.ones(shape=(3, 64))
) # The first call to the `mlp` will create the weights
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
"""
## Backend-agnostic layers and backend-specific layers
As long as a layer only uses APIs from the `keras.ops` namespace
(or other Keras namespaces such as `keras.activations`, `keras.random`, or `keras.layers`),
then it can be used with any backend -- TensorFlow, JAX, or PyTorch.
All layers you've seen so far in this guide work with all Keras backends.
The `keras.ops` namespace gives you access to:
- The NumPy API, e.g. `ops.matmul`, `ops.sum`, `ops.reshape`, `ops.stack`, etc.
- Neural networks-specific APIs such as `ops.softmax`, `ops.conv`, `ops.binary_crossentropy`, `ops.relu`, etc.
You can also use backend-native APIs in your layers (such as `tf.nn` functions),
but if you do this, then your layer will only be usable with the backend in question.
For instance, you could write the following JAX-specific layer using `jax.numpy`:
```python
import jax
| MLPBlock |
python | sphinx-doc__sphinx | tests/test_ext_napoleon/test_ext_napoleon_docstring.py | {
"start": 3517,
"end": 33308
} | class ____:
docstrings = [
(
"""Single line summary""",
"""Single line summary""",
),
(
"""
Single line summary
Extended description
""",
"""
Single line summary
Extended description
""",
),
(
"""
Single line summary
Args:
arg1(str):Extended
description of arg1
""",
"""
Single line summary
:Parameters: **arg1** (*str*) -- Extended
description of arg1
""",
),
(
"""
Single line summary
Args:
arg1(str):Extended
description of arg1
arg2 ( int ) : Extended
description of arg2
Keyword Args:
kwarg1(str):Extended
description of kwarg1
kwarg2 ( int ) : Extended
description of kwarg2""",
"""
Single line summary
:Parameters: * **arg1** (*str*) -- Extended
description of arg1
* **arg2** (*int*) -- Extended
description of arg2
:Keyword Arguments: * **kwarg1** (*str*) -- Extended
description of kwarg1
* **kwarg2** (*int*) -- Extended
description of kwarg2
""",
),
(
"""
Single line summary
Arguments:
arg1(str):Extended
description of arg1
arg2 ( int ) : Extended
description of arg2
Keyword Arguments:
kwarg1(str):Extended
description of kwarg1
kwarg2 ( int ) : Extended
description of kwarg2""",
"""
Single line summary
:Parameters: * **arg1** (*str*) -- Extended
description of arg1
* **arg2** (*int*) -- Extended
description of arg2
:Keyword Arguments: * **kwarg1** (*str*) -- Extended
description of kwarg1
* **kwarg2** (*int*) -- Extended
description of kwarg2
""",
),
(
"""
Single line summary
Return:
str:Extended
description of return value
""",
"""
Single line summary
:returns: *str* -- Extended
description of return value
""",
),
(
"""
Single line summary
Returns:
str:Extended
description of return value
""",
"""
Single line summary
:returns: *str* -- Extended
description of return value
""",
),
(
"""
Single line summary
Returns:
Extended
description of return value
""",
"""
Single line summary
:returns: Extended
description of return value
""",
),
(
"""
Single line summary
Returns:
Extended
""",
"""
Single line summary
:returns: Extended
""",
),
(
"""
Single line summary
Args:
arg1(str):Extended
description of arg1
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
""",
"""
Single line summary
:Parameters: * **arg1** (*str*) -- Extended
description of arg1
* **\\*args** -- Variable length argument list.
* **\\*\\*kwargs** -- Arbitrary keyword arguments.
""",
),
(
"""
Single line summary
Args:
arg1 (list(int)): Description
arg2 (list[int]): Description
arg3 (dict(str, int)): Description
arg4 (dict[str, int]): Description
""",
"""
Single line summary
:Parameters: * **arg1** (*list(int)*) -- Description
* **arg2** (*list[int]*) -- Description
* **arg3** (*dict(str, int)*) -- Description
* **arg4** (*dict[str, int]*) -- Description
""",
),
(
"""
Single line summary
Receive:
arg1 (list(int)): Description
arg2 (list[int]): Description
""",
"""
Single line summary
:Receives: * **arg1** (*list(int)*) -- Description
* **arg2** (*list[int]*) -- Description
""",
),
(
"""
Single line summary
Receives:
arg1 (list(int)): Description
arg2 (list[int]): Description
""",
"""
Single line summary
:Receives: * **arg1** (*list(int)*) -- Description
* **arg2** (*list[int]*) -- Description
""",
),
(
"""
Single line summary
Yield:
str:Extended
description of yielded value
""",
"""
Single line summary
:Yields: *str* -- Extended
description of yielded value
""",
),
(
"""
Single line summary
Yields:
Extended
description of yielded value
""",
"""
Single line summary
:Yields: Extended
description of yielded value
""",
),
(
"""
Single line summary
Args:
arg1 (list of str): Extended
description of arg1.
arg2 (tuple of int): Extended
description of arg2.
arg3 (tuple of list of float): Extended
description of arg3.
arg4 (int, float, or list of bool): Extended
description of arg4.
arg5 (list of int, float, or bool): Extended
description of arg5.
arg6 (list of int or float): Extended
description of arg6.
""",
"""
Single line summary
:Parameters: * **arg1** (*list of str*) -- Extended
description of arg1.
* **arg2** (*tuple of int*) -- Extended
description of arg2.
* **arg3** (*tuple of list of float*) -- Extended
description of arg3.
* **arg4** (*int, float, or list of bool*) -- Extended
description of arg4.
* **arg5** (*list of int, float, or bool*) -- Extended
description of arg5.
* **arg6** (*list of int or float*) -- Extended
description of arg6.
""",
),
]
def test_sphinx_admonitions(self):
admonition_map = {
'Attention': 'attention',
'Caution': 'caution',
'Danger': 'danger',
'Error': 'error',
'Hint': 'hint',
'Important': 'important',
'Note': 'note',
'Tip': 'tip',
'Todo': 'todo',
'Warning': 'warning',
'Warnings': 'warning',
}
config = Config()
for section, admonition in admonition_map.items():
# Multiline
actual = GoogleDocstring(
f'{section}:\n'
' this is the first line\n'
'\n'
' and this is the second line\n',
config,
)
expect = (
f'.. {admonition}::\n'
'\n'
' this is the first line\n'
' \n'
' and this is the second line\n'
)
assert str(actual) == expect
# Single line
actual = GoogleDocstring(f'{section}:\n this is a single line\n', config)
expect = f'.. {admonition}:: this is a single line\n'
assert str(actual) == expect
def test_docstrings(self):
config = Config(
napoleon_use_param=False,
napoleon_use_rtype=False,
napoleon_use_keyword=False,
)
for docstring, expected in self.docstrings:
actual = GoogleDocstring(dedent(docstring), config)
expected = dedent(expected)
assert str(actual) == expected
def test_parameters_with_class_reference(self):
docstring = """\
Construct a new XBlock.
This class should only be used by runtimes.
Arguments:
runtime (:py:class:`~typing.Dict`\\[:py:class:`int`,:py:class:`str`\\]): Use it to
access the environment. It is available in XBlock code
as ``self.runtime``.
field_data (:py:class:`FieldData`): Interface used by the XBlock
fields to access their data from wherever it is persisted.
scope_ids (:py:class:`ScopeIds`): Identifiers needed to resolve scopes.
"""
actual = GoogleDocstring(docstring)
expected = """\
Construct a new XBlock.
This class should only be used by runtimes.
:param runtime: Use it to
access the environment. It is available in XBlock code
as ``self.runtime``.
:type runtime: :py:class:`~typing.Dict`\\[:py:class:`int`,:py:class:`str`\\]
:param field_data: Interface used by the XBlock
fields to access their data from wherever it is persisted.
:type field_data: :py:class:`FieldData`
:param scope_ids: Identifiers needed to resolve scopes.
:type scope_ids: :py:class:`ScopeIds`
"""
assert str(actual) == expected
def test_attributes_with_class_reference(self):
docstring = """\
Attributes:
in_attr(:py:class:`numpy.ndarray`): super-dooper attribute
"""
actual = GoogleDocstring(docstring)
expected = """\
.. attribute:: in_attr
super-dooper attribute
:type: :py:class:`numpy.ndarray`
"""
assert str(actual) == expected
docstring = """\
Attributes:
in_attr(numpy.ndarray): super-dooper attribute
"""
actual = GoogleDocstring(docstring)
expected = """\
.. attribute:: in_attr
super-dooper attribute
:type: numpy.ndarray
"""
def test_attributes_with_use_ivar(self):
docstring = """\
Attributes:
foo (int): blah blah
bar (str): blah blah
"""
config = Config(napoleon_use_ivar=True)
actual = GoogleDocstring(docstring, config, obj=self.__class__)
expected = """\
:ivar foo: blah blah
:vartype foo: int
:ivar bar: blah blah
:vartype bar: str
"""
assert str(actual) == expected
def test_code_block_in_returns_section(self):
docstring = """
Returns:
foobar: foo::
codecode
codecode
"""
expected = """
:returns:
foo::
codecode
codecode
:rtype: foobar
"""
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_colon_in_return_type(self):
docstring = """Example property.
Returns:
:py:class:`~.module.submodule.SomeClass`: an example instance
if available, None if not available.
"""
expected = """Example property.
:returns: an example instance
if available, None if not available.
:rtype: :py:class:`~.module.submodule.SomeClass`
"""
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_xrefs_in_return_type(self):
docstring = """Example Function
Returns:
:py:class:`numpy.ndarray`: A :math:`n \\times 2` array containing
a bunch of math items
"""
expected = """Example Function
:returns: A :math:`n \\times 2` array containing
a bunch of math items
:rtype: :py:class:`numpy.ndarray`
"""
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_raises_types(self):
docstrings = [
(
"""
Example Function
Raises:
RuntimeError:
A setting wasn't specified, or was invalid.
ValueError:
Something something value error.
:py:class:`AttributeError`
errors for missing attributes.
~InvalidDimensionsError
If the dimensions couldn't be parsed.
`InvalidArgumentsError`
If the arguments are invalid.
:py:exc:`~ValueError`
If the arguments are wrong.
""",
"""
Example Function
:raises RuntimeError: A setting wasn't specified, or was invalid.
:raises ValueError: Something something value error.
:raises AttributeError: errors for missing attributes.
:raises ~InvalidDimensionsError: If the dimensions couldn't be parsed.
:raises InvalidArgumentsError: If the arguments are invalid.
:raises ~ValueError: If the arguments are wrong.
""",
),
################################
(
"""
Example Function
Raises:
InvalidDimensionsError
""",
"""
Example Function
:raises InvalidDimensionsError:
""",
),
################################
(
"""
Example Function
Raises:
Invalid Dimensions Error
""",
"""
Example Function
:raises Invalid Dimensions Error:
""",
),
################################
(
"""
Example Function
Raises:
Invalid Dimensions Error: With description
""",
"""
Example Function
:raises Invalid Dimensions Error: With description
""",
),
################################
(
"""
Example Function
Raises:
InvalidDimensionsError: If the dimensions couldn't be parsed.
""",
"""
Example Function
:raises InvalidDimensionsError: If the dimensions couldn't be parsed.
""",
),
################################
(
"""
Example Function
Raises:
Invalid Dimensions Error: If the dimensions couldn't be parsed.
""",
"""
Example Function
:raises Invalid Dimensions Error: If the dimensions couldn't be parsed.
""",
),
################################
(
"""
Example Function
Raises:
If the dimensions couldn't be parsed.
""",
"""
Example Function
:raises If the dimensions couldn't be parsed.:
""",
),
################################
(
"""
Example Function
Raises:
:py:class:`exc.InvalidDimensionsError`
""",
"""
Example Function
:raises exc.InvalidDimensionsError:
""",
),
################################
(
"""
Example Function
Raises:
:py:class:`exc.InvalidDimensionsError`: If the dimensions couldn't be parsed.
""",
"""
Example Function
:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
""",
),
################################
(
"""
Example Function
Raises:
:py:class:`exc.InvalidDimensionsError`: If the dimensions couldn't be parsed,
then a :py:class:`exc.InvalidDimensionsError` will be raised.
""",
"""
Example Function
:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed,
then a :py:class:`exc.InvalidDimensionsError` will be raised.
""",
),
################################
(
"""
Example Function
Raises:
:py:class:`exc.InvalidDimensionsError`: If the dimensions couldn't be parsed.
:py:class:`exc.InvalidArgumentsError`: If the arguments are invalid.
""",
"""
Example Function
:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
:raises exc.InvalidArgumentsError: If the arguments are invalid.
""",
),
################################
(
"""
Example Function
Raises:
:py:class:`exc.InvalidDimensionsError`
:py:class:`exc.InvalidArgumentsError`
""",
"""
Example Function
:raises exc.InvalidDimensionsError:
:raises exc.InvalidArgumentsError:
""",
),
]
for docstring, expected in docstrings:
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_kwargs_in_arguments(self):
docstring = """Allows to create attributes binded to this device.
Some other paragraph.
Code sample for usage::
dev.bind(loopback=Loopback)
dev.loopback.configure()
Arguments:
**kwargs: name/class pairs that will create resource-managers
bound as instance attributes to this instance. See code
example above.
"""
expected = """Allows to create attributes binded to this device.
Some other paragraph.
Code sample for usage::
dev.bind(loopback=Loopback)
dev.loopback.configure()
:param \\*\\*kwargs: name/class pairs that will create resource-managers
bound as instance attributes to this instance. See code
example above.
"""
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_section_header_formatting(self):
docstrings = [
(
"""
Summary line
Example:
Multiline reStructuredText
literal code block
""",
"""
Summary line
.. rubric:: Example
Multiline reStructuredText
literal code block
""",
),
################################
(
"""
Summary line
Example::
Multiline reStructuredText
literal code block
""",
"""
Summary line
Example::
Multiline reStructuredText
literal code block
""",
),
################################
(
"""
Summary line
:Example:
Multiline reStructuredText
literal code block
""",
"""
Summary line
:Example:
Multiline reStructuredText
literal code block
""",
),
]
for docstring, expected in docstrings:
actual = GoogleDocstring(docstring)
assert str(actual) == expected
def test_list_in_parameter_description(self):
docstring = """One line summary.
Parameters:
no_list (int):
one_bullet_empty (int):
*
one_bullet_single_line (int):
- first line
one_bullet_two_lines (int):
+ first line
continued
two_bullets_single_line (int):
- first line
- second line
two_bullets_two_lines (int):
* first line
continued
* second line
continued
one_enumeration_single_line (int):
1. first line
one_enumeration_two_lines (int):
1) first line
continued
two_enumerations_one_line (int):
(iii) first line
(iv) second line
two_enumerations_two_lines (int):
a. first line
continued
b. second line
continued
one_definition_one_line (int):
item 1
first line
one_definition_two_lines (int):
item 1
first line
continued
two_definitions_one_line (int):
item 1
first line
item 2
second line
two_definitions_two_lines (int):
item 1
first line
continued
item 2
second line
continued
one_definition_blank_line (int):
item 1
first line
extra first line
two_definitions_blank_lines (int):
item 1
first line
extra first line
item 2
second line
extra second line
definition_after_inline_text (int): text line
item 1
first line
definition_after_normal_text (int):
text line
item 1
first line
"""
expected = """One line summary.
:param no_list:
:type no_list: int
:param one_bullet_empty:
*
:type one_bullet_empty: int
:param one_bullet_single_line:
- first line
:type one_bullet_single_line: int
:param one_bullet_two_lines:
+ first line
continued
:type one_bullet_two_lines: int
:param two_bullets_single_line:
- first line
- second line
:type two_bullets_single_line: int
:param two_bullets_two_lines:
* first line
continued
* second line
continued
:type two_bullets_two_lines: int
:param one_enumeration_single_line:
1. first line
:type one_enumeration_single_line: int
:param one_enumeration_two_lines:
1) first line
continued
:type one_enumeration_two_lines: int
:param two_enumerations_one_line:
(iii) first line
(iv) second line
:type two_enumerations_one_line: int
:param two_enumerations_two_lines:
a. first line
continued
b. second line
continued
:type two_enumerations_two_lines: int
:param one_definition_one_line:
item 1
first line
:type one_definition_one_line: int
:param one_definition_two_lines:
item 1
first line
continued
:type one_definition_two_lines: int
:param two_definitions_one_line:
item 1
first line
item 2
second line
:type two_definitions_one_line: int
:param two_definitions_two_lines:
item 1
first line
continued
item 2
second line
continued
:type two_definitions_two_lines: int
:param one_definition_blank_line:
item 1
first line
extra first line
:type one_definition_blank_line: int
:param two_definitions_blank_lines:
item 1
first line
extra first line
item 2
second line
extra second line
:type two_definitions_blank_lines: int
:param definition_after_inline_text: text line
item 1
first line
:type definition_after_inline_text: int
:param definition_after_normal_text: text line
item 1
first line
:type definition_after_normal_text: int
"""
config = Config(napoleon_use_param=True)
actual = GoogleDocstring(docstring, config)
assert str(actual) == expected
expected = """One line summary.
:Parameters: * **no_list** (*int*)
* **one_bullet_empty** (*int*) --
*
* **one_bullet_single_line** (*int*) --
- first line
* **one_bullet_two_lines** (*int*) --
+ first line
continued
* **two_bullets_single_line** (*int*) --
- first line
- second line
* **two_bullets_two_lines** (*int*) --
* first line
continued
* second line
continued
* **one_enumeration_single_line** (*int*) --
1. first line
* **one_enumeration_two_lines** (*int*) --
1) first line
continued
* **two_enumerations_one_line** (*int*) --
(iii) first line
(iv) second line
* **two_enumerations_two_lines** (*int*) --
a. first line
continued
b. second line
continued
* **one_definition_one_line** (*int*) --
item 1
first line
* **one_definition_two_lines** (*int*) --
item 1
first line
continued
* **two_definitions_one_line** (*int*) --
item 1
first line
item 2
second line
* **two_definitions_two_lines** (*int*) --
item 1
first line
continued
item 2
second line
continued
* **one_definition_blank_line** (*int*) --
item 1
first line
extra first line
* **two_definitions_blank_lines** (*int*) --
item 1
first line
extra first line
item 2
second line
extra second line
* **definition_after_inline_text** (*int*) -- text line
item 1
first line
* **definition_after_normal_text** (*int*) -- text line
item 1
first line
"""
config = Config(napoleon_use_param=False)
actual = GoogleDocstring(docstring, config)
assert str(actual) == expected
def test_custom_generic_sections(self):
docstrings = (
(
"""\
Really Important Details:
You should listen to me!
""",
""".. rubric:: Really Important Details
You should listen to me!
""",
),
(
"""\
Sooper Warning:
Stop hitting yourself!
""",
""":Warns: **Stop hitting yourself!**
""",
),
(
"""\
Params Style:
arg1 (int): Description of arg1
arg2 (str): Description of arg2
""",
"""\
:Params Style: * **arg1** (*int*) -- Description of arg1
* **arg2** (*str*) -- Description of arg2
""",
),
(
"""\
Returns Style:
description of custom section
""",
""":Returns Style: description of custom section
""",
),
)
test_config = Config(
napoleon_custom_sections=[
'Really Important Details',
('Sooper Warning', 'warns'),
('Params Style', 'params_style'),
('Returns Style', 'returns_style'),
]
)
for docstring, expected in docstrings:
actual = GoogleDocstring(docstring, test_config)
assert str(actual) == expected
def test_noindex(self):
docstring = """
Attributes:
arg
description
Methods:
func(i, j)
description
"""
expected = """
.. attribute:: arg
:no-index:
description
.. method:: func(i, j)
:no-index:
description
""" # NoQA: W293
config = Config()
actual = GoogleDocstring(
docstring,
config=config,
app=None,
what='module',
options={'no-index': True},
)
assert str(actual) == expected
def test_keywords_with_types(self):
docstring = """\
Do as you please
Keyword Args:
gotham_is_yours (None): shall interfere.
"""
actual = GoogleDocstring(docstring)
expected = """\
Do as you please
:keyword gotham_is_yours: shall interfere.
:kwtype gotham_is_yours: None
"""
assert str(actual) == expected
def test_pep526_annotations(self):
# Test class attributes annotations
config = Config(
napoleon_attr_annotations=True,
)
actual = GoogleDocstring(
cleandoc(PEP526GoogleClass.__doc__),
config,
app=None,
what='class',
obj=PEP526GoogleClass,
)
expected = """\
Sample class with PEP 526 annotations and google docstring.
.. attribute:: attr1
Attr1 description.
:type: int
.. attribute:: attr2
Attr2 description.
:type: str
"""
assert str(actual) == expected
def test_preprocess_types(self):
docstring = """\
Do as you please
Yield:
str:Extended
"""
actual = GoogleDocstring(docstring)
expected = """\
Do as you please
:Yields: *str* -- Extended
"""
assert str(actual) == expected
config = Config(napoleon_preprocess_types=True)
actual = GoogleDocstring(docstring, config)
expected = """\
Do as you please
:Yields: :py:class:`str` -- Extended
"""
assert str(actual) == expected
| TestGoogleDocstring |
python | django__django | tests/m2m_signals/models.py | {
"start": 31,
"end": 148
} | class ____(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ("name",)
| Part |
python | getsentry__sentry | src/sentry/migrations/0918_sentry_release_arrayfield.py | {
"start": 193,
"end": 1600
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0917_convert_org_saved_searches_to_views"),
]
operations = [
migrations.AlterField(
model_name="release",
name="authors",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(), default=list, null=True, size=None
),
),
]
| Migration |
python | Textualize__textual | tests/option_list/test_option_list_movement.py | {
"start": 226,
"end": 4734
} | class ____(App[None]):
"""Test option list application."""
def compose(self) -> ComposeResult:
yield OptionList("1", "2", "3", None, "4", "5", "6")
async def test_initial_highlight() -> None:
"""The highlight should start on the first item."""
async with OptionListApp().run_test() as pilot:
assert pilot.app.query_one(OptionList).highlighted == 0
async def test_cleared_highlight_is_none() -> None:
"""The highlight should be `None` if the list is cleared."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.clear_options()
assert option_list.highlighted is None
async def test_cleared_movement_does_nothing() -> None:
"""The highlight should remain `None` if the list is cleared."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.clear_options()
assert option_list.highlighted is None
await pilot.press("tab", "down", "up", "pagedown", "pageup", "home", "end")
assert option_list.highlighted is None
async def test_move_down() -> None:
"""The highlight should move down when asked to."""
async with OptionListApp().run_test() as pilot:
await pilot.press("tab", "down")
assert pilot.app.query_one(OptionList).highlighted == 1
async def test_move_down_from_end() -> None:
"""The highlight should wrap around when moving down from the end."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.highlighted = 5
await pilot.press("tab", "down")
assert option_list.highlighted == 0
async def test_move_up() -> None:
"""The highlight should move up when asked to."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.highlighted = 1
await pilot.press("tab", "up")
assert option_list.highlighted == 0
async def test_move_up_from_nowhere() -> None:
"""The highlight should settle on the last item when moving up from `None`."""
async with OptionListApp().run_test() as pilot:
await pilot.press("tab", "up")
assert pilot.app.query_one(OptionList).highlighted == 5
async def test_move_end() -> None:
"""The end key should go to the end of the list."""
async with OptionListApp().run_test() as pilot:
await pilot.press("tab", "end")
assert pilot.app.query_one(OptionList).highlighted == 5
async def test_move_home() -> None:
"""The home key should go to the start of the list."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.highlighted == 0
option_list.highlighted = 5
assert option_list.highlighted == 5
await pilot.press("tab", "home")
assert option_list.highlighted == 0
async def test_page_down_from_start_short_list() -> None:
"""Doing a page down from the start of a short list should move to the end."""
async with OptionListApp().run_test() as pilot:
await pilot.press("tab", "pagedown")
assert pilot.app.query_one(OptionList).highlighted == 5
async def test_page_up_from_end_short_list() -> None:
"""Doing a page up from the end of a short list should move to the start."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.highlighted == 0
option_list.highlighted = 5
assert option_list.highlighted == 5
await pilot.press("tab", "pageup")
assert option_list.highlighted == 0
async def test_page_down_from_end_short_list() -> None:
"""Doing a page down from the end of a short list should go nowhere."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.highlighted == 0
option_list.highlighted = 5
assert option_list.highlighted == 5
await pilot.press("tab", "pagedown")
assert option_list.highlighted == 5
async def test_page_up_from_start_short_list() -> None:
"""Doing a page up from the start of a short list go nowhere."""
async with OptionListApp().run_test() as pilot:
await pilot.press("tab", "pageup")
assert pilot.app.query_one(OptionList).highlighted == 0
| OptionListApp |
python | wandb__wandb | wandb/apis/public/registries/registries_search.py | {
"start": 8054,
"end": 11514
} | class ____(RelayPaginator["ArtifactMembershipFragment", "Artifact"]):
"""An lazy iterator of `Artifact` objects in a Registry."""
QUERY: Document # Must be set per-instance
last_response: ArtifactMembershipConnection | None
def __init__(
self,
client: RetryingClient,
organization: str,
registry_filter: dict[str, Any] | None = None,
collection_filter: dict[str, Any] | None = None,
artifact_filter: dict[str, Any] | None = None,
per_page: PositiveInt = 100,
):
from wandb.sdk.artifacts._generated import REGISTRY_VERSIONS_GQL
omit_fields = omit_artifact_fields(client)
self.QUERY = gql_compat(REGISTRY_VERSIONS_GQL, omit_fields=omit_fields)
self.client = client
self.organization = organization
self.registry_filter = registry_filter
self.collection_filter = collection_filter
self.artifact_filter = artifact_filter or {}
variables = {
"registryFilter": json.dumps(f) if (f := registry_filter) else None,
"collectionFilter": json.dumps(f) if (f := collection_filter) else None,
"artifactFilter": json.dumps(f) if (f := artifact_filter) else None,
"organization": organization,
}
super().__init__(client, variables=variables, per_page=per_page)
@override
def __next__(self):
# Implement custom next since its possible to load empty pages because of auth
self.index += 1
while len(self.objects) <= self.index:
if not self._load_page():
raise StopIteration
return self.objects[self.index]
@property
def length(self) -> int | None:
if self.last_response is None:
return None
return len(self.last_response.edges)
@override
def _update_response(self) -> None:
from wandb.sdk.artifacts._generated import RegistryVersions
from wandb.sdk.artifacts._models.pagination import ArtifactMembershipConnection
data = self.client.execute(self.QUERY, variable_values=self.variables)
result = RegistryVersions.model_validate(data)
if not ((org := result.organization) and (org_entity := org.org_entity)):
raise ValueError(
f"Organization {self.organization!r} not found. Please verify the organization name is correct."
)
try:
conn = org_entity.artifact_memberships
self.last_response = ArtifactMembershipConnection.model_validate(conn)
except (LookupError, AttributeError, ValidationError) as e:
raise ValueError("Unexpected response data") from e
def _convert(self, node: ArtifactMembershipFragment) -> Artifact | None:
from wandb.sdk.artifacts._validators import FullArtifactPath
from wandb.sdk.artifacts.artifact import Artifact
if not (
(collection := node.artifact_collection)
and (project := collection.project)
and node.artifact
and (version_idx := node.version_index) is not None
):
return None
return Artifact._from_membership(
membership=node,
target=FullArtifactPath(
prefix=project.entity.name,
project=project.name,
name=f"{collection.name}:v{version_idx}",
),
client=self.client,
)
| Versions |
python | doocs__leetcode | solution/1100-1199/1114.Print in Order/Solution.py | {
"start": 0,
"end": 519
} | class ____:
def __init__(self):
self.l2 = threading.Lock()
self.l3 = threading.Lock()
self.l2.acquire()
self.l3.acquire()
def first(self, printFirst: 'Callable[[], None]') -> None:
printFirst()
self.l2.release()
def second(self, printSecond: 'Callable[[], None]') -> None:
self.l2.acquire()
printSecond()
self.l3.release()
def third(self, printThird: 'Callable[[], None]') -> None:
self.l3.acquire()
printThird()
| Foo |
python | mlflow__mlflow | mlflow/tensorflow/autologging.py | {
"start": 193,
"end": 6716
} | class ____(TensorBoard, metaclass=ExceptionSafeClass):
pass
def _extract_input_example_from_tensor_or_ndarray(
input_features: tensorflow.Tensor | np.ndarray,
) -> np.ndarray:
"""
Extracts first `INPUT_EXAMPLE_SAMPLE_ROWS` from the next_input, which can either be of
numpy array or tensor type.
Args:
input_features: an input of type `np.ndarray` or `tensorflow.Tensor`
Returns:
A slice (of limit `INPUT_EXAMPLE_SAMPLE_ROWS`) of the input of type `np.ndarray`.
Returns `None` if the type of `input_features` is unsupported.
Examples
--------
when next_input is nd.array:
>>> input_data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
>>> _extract_input_example_from_tensor_or_ndarray(input_data)
array([1, 2, 3, 4, 5])
when next_input is tensorflow.Tensor:
>>> input_data = tensorflow.convert_to_tensor([1, 2, 3, 4, 5, 6])
>>> _extract_input_example_from_tensor_or_ndarray(input_data)
array([1, 2, 3, 4, 5])
"""
input_feature_slice = None
if isinstance(input_features, tensorflow.Tensor):
input_feature_slice = input_features.numpy()[0:INPUT_EXAMPLE_SAMPLE_ROWS]
elif isinstance(input_features, np.ndarray):
input_feature_slice = input_features[0:INPUT_EXAMPLE_SAMPLE_ROWS]
return input_feature_slice
def _extract_sample_numpy_dict(
input_numpy_features_dict: dict[str, np.ndarray],
) -> dict[str, np.ndarray] | np.ndarray:
"""
Extracts `INPUT_EXAMPLE_SAMPLE_ROWS` sample from next_input
as numpy array of dict(str -> ndarray) type.
Args:
input_numpy_features_dict: A tensor or numpy array
Returns:
A slice (limit `INPUT_EXAMPLE_SAMPLE_ROWS`) of the input of same type as next_input.
Returns `None` if the type of `input_numpy_features_dict` is unsupported.
Examples
--------
when next_input is dict:
>>> input_data = {"a": np.array([1, 2, 3, 4, 5, 6, 7, 8])}
>>> _extract_sample_numpy_dict(input_data)
{'a': array([1, 2, 3, 4, 5])}
"""
sliced_data_as_numpy = None
if isinstance(input_numpy_features_dict, dict):
sliced_data_as_numpy = {
k: _extract_input_example_from_tensor_or_ndarray(v)
for k, v in input_numpy_features_dict.items()
}
return sliced_data_as_numpy
def _extract_input_example_from_batched_tf_dataset(
dataset: tensorflow.data.Dataset,
) -> np.ndarray | dict[str, np.ndarray]:
"""
Extracts sample feature tensors from the input dataset as numpy array.
Input Dataset's tensors must contain tuple of (features, labels) that are
used for tensorflow/keras train or fit methods
Args:
dataset: a tensorflow batched/unbatched dataset representing tuple of (features, labels)
Returns:
a numpy array of length `INPUT_EXAMPLE_SAMPLE_ROWS`
Returns `None` if the type of `dataset` slices are unsupported.
Examples
--------
>>> input_dataset = tensorflow.data.Dataset.from_tensor_slices(
... (
... {
... "SepalLength": np.array(list(range(0, 20))),
... "SepalWidth": np.array(list(range(0, 20))),
... "PetalLength": np.array(list(range(0, 20))),
... "PetalWidth": np.array(list(range(0, 20))),
... },
... np.array(list(range(0, 20))),
... )
... ).batch(10)
>>> _extract_input_example_from_batched_tf_dataset(input_dataset)
{'SepalLength': array([0, 1, 2, 3, 4]),
'SepalWidth': array([0, 1, 2, 3, 4]),
'PetalLength': array([0, 1, 2, 3, 4]),
'PetalWidth': array([0, 1, 2, 3, 4])}
"""
limited_df_iter = list(dataset.take(INPUT_EXAMPLE_SAMPLE_ROWS))
first_batch = limited_df_iter[0]
input_example_slice = None
if isinstance(first_batch, tuple):
features = first_batch[0]
if isinstance(features, dict):
input_example_slice = _extract_sample_numpy_dict(features)
elif isinstance(features, (np.ndarray, tensorflow.Tensor)):
input_example_slice = _extract_input_example_from_tensor_or_ndarray(features)
return input_example_slice
def extract_input_example_from_tf_input_fn(input_fn):
"""
Extracts sample data from dict (str -> ndarray),
``tensorflow.Tensor`` or ``tensorflow.data.Dataset`` type.
Args:
input_fn: Tensorflow's input function used for train method
Returns:
A slice (of limit ``mlflow.utils.autologging_utils.INPUT_EXAMPLE_SAMPLE_ROWS``)
of the input of type `np.ndarray`.
Returns `None` if the return type of ``input_fn`` is unsupported.
"""
input_training_data = input_fn()
input_features = None
if isinstance(input_training_data, tuple):
features = input_training_data[0]
if isinstance(features, dict):
input_features = _extract_sample_numpy_dict(features)
elif isinstance(features, (np.ndarray, tensorflow.Tensor)):
input_features = _extract_input_example_from_tensor_or_ndarray(features)
elif isinstance(input_training_data, tensorflow.data.Dataset):
input_features = _extract_input_example_from_batched_tf_dataset(input_training_data)
return input_features
def extract_tf_keras_input_example(input_training_data):
"""
Generates a sample ndarray or dict (str -> ndarray)
from the input type 'x' for keras ``fit`` or ``fit_generator``
Args:
input_training_data: Keras input function used for ``fit`` or ``fit_generator`` methods.
Returns:
a slice of type ndarray or
dict (str -> ndarray) limited to
``mlflow.utils.autologging_utils.INPUT_EXAMPLE_SAMPLE_ROWS``.
Throws ``MlflowException`` exception, if input_training_data is unsupported.
Returns `None` if the type of input_training_data is unsupported.
"""
input_data_slice = None
if isinstance(input_training_data, tensorflow.keras.utils.Sequence):
input_training_data = input_training_data[:][0]
if isinstance(input_training_data, (np.ndarray, tensorflow.Tensor)):
input_data_slice = _extract_input_example_from_tensor_or_ndarray(input_training_data)
elif isinstance(input_training_data, dict):
input_data_slice = _extract_sample_numpy_dict(input_training_data)
elif isinstance(input_training_data, tensorflow.data.Dataset):
input_data_slice = _extract_input_example_from_batched_tf_dataset(input_training_data)
return input_data_slice
| _TensorBoard |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/test_leads.py | {
"start": 344,
"end": 5390
} | class ____(HubspotCRMSearchStream):
SCOPES = ["crm.objects.leads.read"]
CURSOR_FIELD = "updatedAt"
STREAM_NAME = "leads"
OBJECT_TYPE = "leads"
ASSOCIATIONS = ["companies", "contacts"]
OBJECT_ID = "12345"
@HttpMocker()
def test_given_records_when_read_extract_desired_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_oauth=True, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.oauth_config(), self.STREAM_NAME, SyncMode.full_refresh)
assert len(output.records) == 1
@HttpMocker()
def test_given_one_page_when_read_stream_private_token_then_return_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh)
assert len(output.records) == 1
@HttpMocker()
def test_given_error_response_when_read_analytics_then_get_trace_message(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), HttpResponse(status_code=500, body="{}"), method="post")
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh)
assert len(output.records) == 0
assert len(output.trace_messages) > 0
assert len(output.errors) > 0
@HttpMocker()
def test_given_500_then_200_when_read_then_return_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), [HttpResponse(status_code=500, body="{}"), self.response()], method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh)
assert len(output.records) == 1
assert len(output.trace_messages) > 0
assert len(output.errors) == 0
@HttpMocker()
def test_given_missing_scopes_error_when_read_then_stop_sync(self, http_mocker: HttpMocker):
self.mock_oauth(http_mocker, self.ACCESS_TOKEN)
self.mock_custom_objects_streams(http_mocker)
self.read_from_stream(self.oauth_config(), self.STREAM_NAME, SyncMode.full_refresh, expecting_exception=True)
@HttpMocker()
def test_given_unauthorized_error_when_read_then_stop_sync(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), HttpResponse(status_code=http.HTTPStatus.UNAUTHORIZED, body="{}"), method="post")
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh)
assert len(output.records) == 0
assert len(output.trace_messages) > 0
assert len(output.errors) > 0
@HttpMocker()
def test_given_one_page_when_read_then_get_records_with_flattened_properties(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.full_refresh)
record = output.records[0].record.data
assert "properties" in record # legacy struct remains to not introduce breaking changes
prop_fields = len([f for f in record if f.startswith("properties_")])
assert prop_fields > 0
@HttpMocker()
def test_given_incremental_sync_when_read_then_state_message_produced_and_state_match_latest_record(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.state_messages) == 2
| TestLeadsStream |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 7502,
"end": 8724
} | class ____(object):
"""Checker used for string fields.
Always returns a unicode value, even if the input is of type str.
"""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (bytes, str)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (bytes, str)))
raise TypeError(message)
# If the value is of type 'bytes' make sure that it is valid UTF-8 data.
if isinstance(proposed_value, bytes):
try:
proposed_value = proposed_value.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
'encoding. Non-UTF-8 strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
else:
try:
proposed_value.encode('utf8')
except UnicodeEncodeError:
raise ValueError('%.1024r isn\'t a valid unicode string and '
'can\'t be encoded in UTF-8.'%
(proposed_value))
return proposed_value
def DefaultValue(self):
return u""
| UnicodeValueChecker |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1565867,
"end": 1567405
} | class ____(
MarkPropDefnumberArray, NumericArrayMarkPropDef
):
"""
ValueDefWithConditionMarkPropFieldOrDatumDefnumberArray schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : dict, Sequence[float], :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {
"$ref": "#/definitions/ValueDefWithCondition<MarkPropFieldOrDatumDef,number[]>"
}
def __init__(
self,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
value: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
**kwds,
):
super().__init__(condition=condition, value=value, **kwds)
| ValueDefWithConditionMarkPropFieldOrDatumDefnumberArray |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 52967,
"end": 54173
} | class ____(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
# scheme = splittype(uri) # deprecated as of Python 3.8
scheme = urlparse(uri)[0]
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
| ServerProxy |
python | jazzband__django-model-utils | tests/models.py | {
"start": 10991,
"end": 11142
} | class ____(SoftDeletableQuerySet[ModelT]):
def only_read(self) -> QuerySet[ModelT]:
return self.filter(is_read=True)
| CustomSoftDeleteQuerySet |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N804.py | {
"start": 1163,
"end": 1348
} | class ____(ABCMeta):
def bad_method(this):
this = this
this
def bad_method(this):
self = this
def func(x):
return x
foo = {}
| RenamingInMethodBodyClass |
python | doocs__leetcode | solution/0800-0899/0872.Leaf-Similar Trees/Solution.py | {
"start": 192,
"end": 689
} | class ____:
def leafSimilar(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:
def dfs(root: Optional[TreeNode], nums: List[int]) -> None:
if root.left == root.right:
nums.append(root.val)
return
if root.left:
dfs(root.left, nums)
if root.right:
dfs(root.right, nums)
l1, l2 = [], []
dfs(root1, l1)
dfs(root2, l2)
return l1 == l2
| Solution |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 205,
"end": 908
} | class ____:
"""Something inconsequential for the test."""
def __init__(self):
self.aaa = 2
def regular_method(self):
"""this method is a real method since it access to self"""
self.function_method()
def function_method(self): # [no-self-use]
"""this method isn' a real method since it doesn't need self"""
print('hello')
async def async_regular_method(self):
"""this async method is a real method since it accesses self"""
await self.async_function_method()
async def async_function_method(self): # [no-self-use]
"""this async method isn't a real method since it doesn't need self"""
print('hello')
| Toto |
python | Pylons__pyramid | tests/pkgs/eventonly/__init__.py | {
"start": 587,
"end": 667
} | class ____:
def __init__(self, response):
self.response = response
| Foo |
python | simonw__sqlite-utils | sqlite_utils/utils.py | {
"start": 5667,
"end": 5724
} | class ____(RowsFromFileError):
pass
| RowsFromFileBadJSON |
python | allegroai__clearml | clearml/binding/gradio_bind.py | {
"start": 269,
"end": 4543
} | class ____:
_current_task = None
__patched = False
_default_gradio_address = "0.0.0.0"
_default_gradio_port = 7860
_root_path_format = "/service/{}/"
__server_config_warning = set()
@classmethod
def update_current_task(cls, task: Optional[Any] = None) -> None:
cls._current_task = task
if cls.__patched:
return
if "gradio" in sys.modules:
cls.patch_gradio()
else:
PostImportHookPatching.add_on_import("gradio", cls.patch_gradio)
@classmethod
def patch_gradio(cls) -> None:
if cls.__patched:
return
# noinspection PyBroadException
try:
import gradio
gradio.routes.App.get_blocks = _patched_call(gradio.routes.App.get_blocks, PatchGradio._patched_get_blocks)
gradio.blocks.Blocks.launch = _patched_call(gradio.blocks.Blocks.launch, PatchGradio._patched_launch)
except Exception:
pass
cls.__patched = True
@staticmethod
def _patched_get_blocks(original_fn: Callable, *args: Any, **kwargs: Any) -> Any:
blocks = original_fn(*args, **kwargs)
if not PatchGradio._current_task or not running_remotely():
return blocks
blocks.config["root"] = PatchGradio._root_path_format.format(PatchGradio._current_task.id)
blocks.root = blocks.config["root"]
return blocks
@staticmethod
def _patched_launch(original_fn: Callable, *args: Any, **kwargs: Any) -> Any:
if not PatchGradio._current_task:
return original_fn(*args, **kwargs)
PatchGradio.__warn_on_server_config(
kwargs.get("server_name"),
kwargs.get("server_port"),
kwargs.get("root_path"),
)
if not running_remotely():
return original_fn(*args, **kwargs)
# noinspection PyProtectedMember
PatchGradio._current_task._set_runtime_properties(
{
"_SERVICE": "EXTERNAL",
"_ADDRESS": get_private_ip(),
"_PORT": PatchGradio._default_gradio_port,
}
)
PatchGradio._current_task.set_system_tags(["external_service"])
kwargs["server_name"] = PatchGradio._default_gradio_address
kwargs["server_port"] = PatchGradio._default_gradio_port
kwargs["root_path"] = PatchGradio._root_path_format.format(PatchGradio._current_task.id)
# noinspection PyBroadException
try:
return original_fn(*args, **kwargs)
except Exception:
del kwargs["root_path"]
return original_fn(*args, **kwargs)
@classmethod
def __warn_on_server_config(
cls,
server_name: Optional[str],
server_port: Optional[int],
root_path: Optional[str],
) -> None:
if (server_name is None or server_name == PatchGradio._default_gradio_address) and (
server_port is None and server_port == PatchGradio._default_gradio_port
):
return
if (server_name, server_port, root_path) in cls.__server_config_warning:
return
cls.__server_config_warning.add((server_name, server_port, root_path))
if server_name is not None and server_port is not None:
server_config = "{}:{}".format(server_name, server_port)
what_to_ignore = "name and port"
elif server_name is not None:
server_config = str(server_name)
what_to_ignore = "name"
else:
server_config = str(server_port)
what_to_ignore = "port"
getLogger().warning(
"ClearML only supports '{}:{}' as the Gradio server. Ignoring {} '{}' in remote execution".format(
PatchGradio._default_gradio_address,
PatchGradio._default_gradio_port,
what_to_ignore,
server_config,
)
)
if root_path is not None:
getLogger().warning(
"ClearML will override root_path '{}' to '{}' in remote execution".format(
root_path,
PatchGradio._root_path_format.format(PatchGradio._current_task.id),
)
)
| PatchGradio |
python | apache__airflow | airflow-core/src/airflow/models/callback.py | {
"start": 6873,
"end": 8606
} | class ____(Callback):
"""Callbacks that run on the Triggerer (must be async)."""
__mapper_args__ = {"polymorphic_identity": CallbackType.TRIGGERER}
def __init__(self, callback_def: ImportPathCallbackDefProtocol, **kwargs):
"""
Initialize a TriggererCallback from a callback definition.
:param callback_def: Callback definition with path and kwargs
:param kwargs: Passed to parent Callback.__init__ (see base class for details)
"""
super().__init__(**kwargs)
self.fetch_method = CallbackFetchMethod.IMPORT_PATH
self.data |= callback_def.serialize()
def __repr__(self):
return f"{self.data['path']}({self.data['kwargs'] or ''}) on a triggerer"
def queue(self):
from airflow.models.trigger import Trigger
from airflow.triggers.callback import CallbackTrigger
self.trigger = Trigger.from_object(
CallbackTrigger(
callback_path=self.data["path"],
callback_kwargs=self.data["kwargs"],
)
)
super().queue()
def handle_event(self, event: TriggerEvent, session: Session):
from airflow.triggers.callback import PAYLOAD_BODY_KEY, PAYLOAD_STATUS_KEY
if (status := event.payload.get(PAYLOAD_STATUS_KEY)) and status in (ACTIVE_STATES | TERMINAL_STATES):
self.state = status
if status in TERMINAL_STATES:
self.trigger = None
self.output = event.payload.get(PAYLOAD_BODY_KEY)
Stats.incr(**self.get_metric_info(status, self.output))
session.add(self)
else:
log.error("Unexpected event received: %s", event.payload)
| TriggererCallback |
python | celery__celery | t/unit/contrib/test_worker.py | {
"start": 289,
"end": 1972
} | class ____:
def setup_method(self):
self.app = Celery('celerytest', backend='cache+memory://', broker='memory://', )
@self.app.task
def add(x, y):
return x + y
self.add = add
@self.app.task
def error_task():
raise NotImplementedError()
self.error_task = error_task
self.app.config_from_object({
'worker_hijack_root_logger': False,
})
# to avoid changing the root logger level to ERROR,
# we have to set both app.log.loglevel start_worker arg to 0
# (see celery.app.log.setup_logging_subsystem)
self.app.log.loglevel = 0
def test_start_worker(self):
with start_worker(app=self.app, loglevel=0):
result = self.add.s(1, 2).apply_async()
val = result.get(timeout=5)
assert val == 3
def test_start_worker_with_exception(self):
"""Make sure that start_worker does not hang on exception"""
with pytest.raises(NotImplementedError):
with start_worker(app=self.app, loglevel=0):
result = self.error_task.apply_async()
result.get(timeout=5)
def test_start_worker_with_hostname_config(self):
"""Make sure a custom hostname can be supplied to the TestWorkController"""
test_hostname = 'test_name@test_host'
with start_worker(app=self.app, loglevel=0, hostname=test_hostname) as w:
assert isinstance(w, TestWorkController)
assert w.hostname == test_hostname
result = self.add.s(1, 2).apply_async()
val = result.get(timeout=5)
assert val == 3
| test_worker |
python | huggingface__transformers | src/transformers/models/olmo/modeling_olmo.py | {
"start": 13271,
"end": 15000
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: OlmoConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = OlmoAttention(config=config, layer_idx=layer_idx)
self.mlp = OlmoMLP(config)
self.input_layernorm = OlmoLayerNorm(config.hidden_size)
self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| OlmoDecoderLayer |
python | realpython__materials | hashtable/01_hashtable_prototype/09_report_the_hash_tables_length/hashtable.py | {
"start": 107,
"end": 1447
} | class ____:
def __init__(self, capacity):
if capacity < 1:
raise ValueError("Capacity must be a positive number")
self._slots = capacity * [None]
def __len__(self):
return len(self.pairs)
def __delitem__(self, key):
if key in self:
self._slots[self._index(key)] = None
else:
raise KeyError(key)
def __setitem__(self, key, value):
self._slots[self._index(key)] = Pair(key, value)
def __getitem__(self, key):
pair = self._slots[self._index(key)]
if pair is None:
raise KeyError(key)
return pair.value
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
@property
def pairs(self):
return {pair for pair in self._slots if pair}
@property
def values(self):
return [pair.value for pair in self.pairs]
@property
def keys(self):
return {pair.key for pair in self.pairs}
@property
def capacity(self):
return len(self._slots)
def _index(self, key):
return hash(key) % self.capacity
| HashTable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1234639,
"end": 1235767
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable):
"""Represents a 'merged' event on a given pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "commit", "created_at", "merge_ref", "merge_ref_name", "pull_request")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
commit = sgqlc.types.Field(Commit, graphql_name="commit")
"""Identifies the commit associated with the `merge` event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
merge_ref = sgqlc.types.Field("Ref", graphql_name="mergeRef")
"""Identifies the Ref associated with the `merge` event."""
merge_ref_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="mergeRefName")
"""Identifies the name of the Ref associated with the `merge` event."""
pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest")
"""PullRequest referenced by event."""
| MergedEvent |
python | astropy__astropy | astropy/utils/metadata/merge.py | {
"start": 459,
"end": 3633
} | class ____:
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
def __init_subclass__(cls):
members = vars(cls)
# Register merging class (except for base MergeStrategy class)
if (types := members.get("types")) is not None:
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
@classmethod
def _merge(cls, left, right):
try:
return cls.merge(left, right)
except Exception as err:
raise MergeConflictError(err)
| MergeStrategy |
python | huggingface__transformers | src/transformers/utils/deprecation.py | {
"start": 934,
"end": 8031
} | class ____(ExplicitEnum):
NONE = "none"
NOTIFY = "notify"
NOTIFY_ALWAYS = "notify_always"
RAISE = "raise"
def deprecate_kwarg(
old_name: str,
version: str,
new_name: str | None = None,
warn_if_greater_or_equal_version: bool = False,
raise_if_greater_or_equal_version: bool = False,
raise_if_both_names: bool = False,
additional_message: str | None = None,
):
"""
Function or method decorator to notify users about deprecated keyword arguments, replacing them with a new name if specified.
Note that is decorator is `torch.compile`-safe, i.e. it will not cause graph breaks (but no warning will be displayed if compiling).
This decorator allows you to:
- Notify users when a keyword argument is deprecated.
- Automatically replace deprecated keyword arguments with new ones.
- Raise an error if deprecated arguments are used, depending on the specified conditions.
By default, the decorator notifies the user about the deprecated argument while the `transformers.__version__` < specified `version`
in the decorator. To keep notifications with any version `warn_if_greater_or_equal_version=True` can be set.
Parameters:
old_name (`str`):
Name of the deprecated keyword argument.
version (`str`):
The version in which the keyword argument was (or will be) deprecated.
new_name (`Optional[str]`, *optional*):
The new name for the deprecated keyword argument. If specified, the deprecated keyword argument will be replaced with this new name.
warn_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to show warning if current `transformers` version is greater or equal to the deprecated version.
raise_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if current `transformers` version is greater or equal to the deprecated version.
raise_if_both_names (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if both deprecated and new keyword arguments are set.
additional_message (`Optional[str]`, *optional*):
An additional message to append to the default deprecation message.
Raises:
ValueError:
If raise_if_greater_or_equal_version is True and the current version is greater than or equal to the deprecated version, or if raise_if_both_names is True and both old and new keyword arguments are provided.
Returns:
Callable:
A wrapped function that handles the deprecated keyword arguments according to the specified parameters.
Example usage with renaming argument:
```python
@deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="6.0.0")
def my_function(do_reduce_labels):
print(do_reduce_labels)
my_function(reduce_labels=True) # Will show a deprecation warning and use do_reduce_labels=True
```
Example usage without renaming argument:
```python
@deprecate_kwarg("max_size", version="6.0.0")
def my_function(max_size):
print(max_size)
my_function(max_size=1333) # Will show a deprecation warning
```
"""
deprecated_version = packaging.version.parse(version)
current_version = packaging.version.parse(__version__)
is_greater_or_equal_version = current_version >= deprecated_version
if is_greater_or_equal_version:
version_message = f"and removed starting from version {version}"
else:
version_message = f"and will be removed in version {version}"
def wrapper(func):
# Required for better warning message
sig = inspect.signature(func)
function_named_args = set(sig.parameters.keys())
is_instance_method = "self" in function_named_args
is_class_method = "cls" in function_named_args
@wraps(func)
def wrapped_func(*args, **kwargs):
# Get class + function name (just for better warning message)
func_name = func.__name__
if is_instance_method:
func_name = f"{args[0].__class__.__name__}.{func_name}"
elif is_class_method:
func_name = f"{args[0].__name__}.{func_name}"
minimum_action = Action.NONE
message = None
# deprecated kwarg and its new version are set for function call -> replace it with new name
if old_name in kwargs and new_name in kwargs:
minimum_action = Action.RAISE if raise_if_both_names else Action.NOTIFY_ALWAYS
message = f"Both `{old_name}` and `{new_name}` are set for `{func_name}`. Using `{new_name}={kwargs[new_name]}` and ignoring deprecated `{old_name}={kwargs[old_name]}`."
kwargs.pop(old_name)
# only deprecated kwarg is set for function call -> replace it with new name
elif old_name in kwargs and new_name is not None and new_name not in kwargs:
minimum_action = Action.NOTIFY
message = f"`{old_name}` is deprecated {version_message} for `{func_name}`. Use `{new_name}` instead."
kwargs[new_name] = kwargs.pop(old_name)
# deprecated kwarg is not set for function call and new name is not specified -> just notify
elif old_name in kwargs:
minimum_action = Action.NOTIFY
message = f"`{old_name}` is deprecated {version_message} for `{func_name}`."
if message is not None and additional_message is not None:
message = f"{message} {additional_message}"
# update minimum_action if argument is ALREADY deprecated (current version >= deprecated version)
if is_greater_or_equal_version:
# change to (NOTIFY, NOTIFY_ALWAYS) -> RAISE if specified
# in case we want to raise error for already deprecated arguments
if raise_if_greater_or_equal_version and minimum_action != Action.NONE:
minimum_action = Action.RAISE
# change to NOTIFY -> NONE if specified (NOTIFY_ALWAYS can't be changed to NONE)
# in case we want to ignore notifications for already deprecated arguments
elif not warn_if_greater_or_equal_version and minimum_action == Action.NOTIFY:
minimum_action = Action.NONE
# raise error or notify user
if minimum_action == Action.RAISE:
raise ValueError(message)
# If we are compiling, we do not raise the warning as it would break compilation
elif minimum_action in (Action.NOTIFY, Action.NOTIFY_ALWAYS) and not is_torchdynamo_compiling():
# DeprecationWarning is ignored by default, so we use FutureWarning instead
warnings.warn(message, FutureWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapped_func
return wrapper
| Action |
python | google__pytype | pytype/rewrite/convert_test.py | {
"start": 137,
"end": 304
} | class ____(test_utils.PytdTestBase, test_utils.ContextfulTestBase):
def setUp(self):
super().setUp()
self.conv = self.ctx.abstract_converter
| ConverterTestBase |
python | huggingface__transformers | tests/pipelines/test_pipelines_zero_shot.py | {
"start": 1145,
"end": 12626
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
classifier = ZeroShotClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
candidate_labels=["polics", "health"],
)
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def run_pipeline_test(self, classifier, _):
outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
# No kwarg
outputs = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
)
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
)
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
outputs = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
)
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
# https://github.com/huggingface/transformers/issues/13846
outputs = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
for i in range(1)
],
)
outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
for i in range(2)
],
)
with self.assertRaises(ValueError):
classifier("", candidate_labels="politics")
with self.assertRaises(TypeError):
classifier(None, candidate_labels="politics")
with self.assertRaises(ValueError):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(TypeError):
classifier("Who are you voting for in 2020?", candidate_labels=None)
with self.assertRaises(ValueError):
classifier(
"Who are you voting for in 2020?",
candidate_labels="politics",
hypothesis_template="Not formatting template",
)
with self.assertRaises(AttributeError):
classifier(
"Who are you voting for in 2020?",
candidate_labels="politics",
hypothesis_template=None,
)
self.run_entailment_id(classifier)
def run_entailment_id(self, zero_shot_classifier: Pipeline):
config = zero_shot_classifier.model.config
original_label2id = config.label2id
original_entailment = zero_shot_classifier.entailment_id
config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
zero_shot_classifier.model.config.label2id = original_label2id
self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def test_truncation(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
)
@require_torch
def test_small_model_pt(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
},
)
@require_torch
def test_small_model_pt_fp16(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
dtype=torch.float16,
)
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
},
)
@require_torch
def test_small_model_pt_bf16(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
dtype=torch.bfloat16,
)
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
},
)
@slow
@require_torch
def test_large_model_pt(self):
zero_shot_classifier = pipeline("zero-shot-classification", model="FacebookAI/roberta-large-mnli")
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
},
)
outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.",
candidate_labels=["machine learning", "statistics", "translation", "vision"],
multi_label=True,
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
},
)
| ZeroShotClassificationPipelineTests |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/DockArea.py | {
"start": 14234,
"end": 15329
} | class ____(QtWidgets.QWidget):
def __init__(self, area, **kwargs):
QtWidgets.QWidget.__init__(self, **kwargs)
self.layout = QtWidgets.QGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.dockarea = area
self.layout.addWidget(area)
def closeEvent(self, *args):
# restore docks to their original area
docks = self.dockarea.findAll()[1]
for dock in docks.values():
if hasattr(dock, 'orig_area'):
dock.orig_area.addDock(dock, )
# clear dock area, and close remaining docks
self.dockarea.clear()
self.dockarea.apoptose() # This call is needed to remove the temporary dock area when a dock is undocked into a
# temporary window and either the temporary window is closed or the dock dragged back into the main window.
# Otherwise, calling saveState and then restoreState fails with a TypeError trying to restore half-present
# floating windows. See GH issue #3125
super().closeEvent(*args)
| TempAreaWindow |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 669,
"end": 712
} | class ____:
name: str
count: int
| Item |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/decorator_location.py | {
"start": 1196,
"end": 2520
} | class ____:
def __call__(self, f: Callable[[int], None]) -> Callable[[int], None]:
return f
@with_logging
@with_logging2
def decorated_logging_logging2(x: int) -> None:
_test_sink(x)
@skip_this_decorator
def decorated_skip_this_decorator(x: int) -> None:
_test_sink(x)
@with_logging2
@skip_this_decorator
def decorated_logging2_skip_this_decorator(x: int) -> None:
_test_sink(x)
@ignore_this_decorator
def decorated_ignore_this_decorator(x: int) -> None:
_test_sink(x)
@ignore_this_decorator_factory(1)
def decorated_ignore_this_decorator_factory(x: int) -> None:
_test_sink(x)
@ignore_this_decorator_class()
def decorated_ignore_this_decorator_class(x: int) -> None:
_test_sink(x)
@ignore_this_decorator
@skip_this_decorator
def decorated_ignore_then_skip_decorator(x: int) -> None:
_test_sink(x)
@with_logging
@ignore_this_decorator
def decorated_logging_ignore_this_decorator(x: int) -> None:
_test_sink(x)
def pass_local_variable_to_x(f: Callable) -> Callable:
@wraps(f)
def inner(request: str, *args, **kwargs) -> None:
_test_sink(request)
x = 42
f(request, x, *args, **kwargs)
return inner
@pass_local_variable_to_x
def handle_request(request: str, x: int, y: int) -> None:
_test_sink(x)
| ignore_this_decorator_class |
python | kamyu104__LeetCode-Solutions | Python/sender-with-largest-word-count.py | {
"start": 84,
"end": 473
} | class ____(object):
def largestWordCount(self, messages, senders):
"""
:type messages: List[str]
:type senders: List[str]
:rtype: str
"""
cnt = collections.Counter()
for m, s in itertools.izip(messages, senders):
cnt[s] += m.count(' ')+1
return max((k for k in cnt.iterkeys()), key=lambda x: (cnt[x], x))
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py | {
"start": 1223,
"end": 13079
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
# Define short constants for true & false, so the data & mask can be lined
# up in the examples below. This makes it easier to read the examples, to
# see which values should be kept vs. masked.
T = True
F = False
@parameterized.parameters([
#=========================================================================
# Docstring examples
#=========================================================================
dict(
descr='Docstring example 1',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
mask=[[T, F, T], [F, F, F], [T, F, F]],
expected=ragged_factory_ops.constant_value([[1, 3], [], [7]])),
dict(
descr='Docstring example 2',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=ragged_factory_ops.constant_value([[F, F, T], [F], [T, T]]),
expected=ragged_factory_ops.constant_value([[3], [], [5, 6]])),
dict(
descr='Docstring example 3',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=[True, False, True],
expected=ragged_factory_ops.constant_value([[1, 2, 3], [5, 6]])),
#=========================================================================
# Uniform data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[7]; mask.shape=[7]',
data=[1, 2, 3, 4, 5, 6, 7],
mask=[T, F, T, T, F, F, F],
expected=[1, 3, 4]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]],
mask=[True, False, True, True, False],
expected=[[1, 2, 3], [7, 8, 9], [10, 11, 12]]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5, 3]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2], [3, 4, 5]],
mask=[[F, F, F], [T, F, T], [T, T, T], [F, F, F], [T, T, F]],
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [7, 8, 9], [], [3, 4]])),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[T, F], [T, T], [F, F]],
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F], [T, T]]],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2], [6, 8]]])),
dict(
descr='data.shape=mask.shape=[2, 2, 2, 2]',
data=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 3], [5, 7]]]],
mask=[[[[T, T], [F, F]], [[T, F], [F, F]]],
[[[F, F], [F, F]], [[T, T], [T, F]]]],
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5], []]], [[[], []], [[1, 3], [5]]]])),
#=========================================================================
# Ragged data and ragged mask.
#=========================================================================
dict(
descr='data.shape=[5, (D2)]; mask.shape=[5, (D2)]',
data=ragged_factory_ops.constant_value(
[[1, 2], [3, 4, 5, 6], [7, 8, 9], [], [1, 2, 3]]),
mask=ragged_factory_ops.constant_value(
[[F, F], [F, T, F, T], [F, F, F], [], [T, F, T]]),
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [], [], [1, 3]])),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]]),
mask=ragged_factory_ops.constant_value([[T, F], [T, T], [F, F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []])),
dict(
descr='data.shape=[3, (D2), D3]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8], [2, 4]], [[6, 8]]],
ragged_rank=1),
mask=ragged_factory_ops.constant_value([[T, F], [T, T, F], [F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2), (D3)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4]]]),
mask=ragged_factory_ops.constant_value(
[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F]]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2]]])),
dict(
descr=('data.shape=[3, (D2), (D3), (D4)]; '
'mask.shape=[3, (D2), (D3), (D4)]'),
data=ragged_factory_ops.constant_value(
[[[[1, 2], [3, 4]], [[5, 6]]], [[[2, 4], [6, 8]]]]),
mask=ragged_factory_ops.constant_value(
[[[[T, T], [F, F]], [[T, F]]], [[[F, F], [T, T]]]]),
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5]]], [[[], [6, 8]]]])),
#=========================================================================
# Ragged mask and uniform data
#=========================================================================
dict(
descr='data.shape=[2, 3]; mask.shape=[2, (3)]',
data=[[1, 2, 3], [4, 5, 6]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value([[1], [5, 6]])),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3)]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[9, 0], [2, 4]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3), 2]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value(
[[[T, F], [F, F], [T, T]], [[T, F], [F, T], [F, F]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[1], [], [5, 6]], [[7], [0], []]])),
#=========================================================================
# Ragged data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[4, (D2)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [], [5, 6]]),
mask=[T, F, T, F],
expected=ragged_factory_ops.constant_value([[1, 2, 3], []])),
dict(
descr='data.shape=[4, (D2), (D3)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[4], []], [[5, 6]], []]),
mask=[T, F, T, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[5, 6]], []])),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[1, (2)]; mask.shape=[1, 2]',
data=ragged_factory_ops.constant_value([[1, 2]]),
mask=[[T, F]],
expected=ragged_factory_ops.constant_value([[1]])),
dict(
descr='data.shape=[2, (2), (D3)]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1], [2, 3]], [[], [4, 5, 6]]]),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value([[[1]], [[], [4, 5, 6]]])),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2, 3]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[[T, F, F], [T, F, T]], [[T, F, T], [F, F, F]]],
expected=ragged_factory_ops.constant_value(
[[[1], [4, 6]], [[7, 9], []]])),
]) # pyformat: disable
def testBooleanMask(self, descr, data, mask, expected):
actual = ragged_array_ops.boolean_mask(data, mask)
self.assertAllEqual(actual, expected)
def testErrors(self):
if not context.executing_eagerly():
self.assertRaisesRegex(ValueError,
r'mask\.shape\.ndims must be known statically',
ragged_array_ops.boolean_mask, [[1, 2]],
array_ops.placeholder(dtypes.bool))
self.assertRaises(TypeError, ragged_array_ops.boolean_mask, [[1, 2]],
[[0, 1]])
self.assertRaisesRegex(
ValueError, 'Tensor conversion requested dtype bool for '
'RaggedTensor with dtype int32', ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[0, 0]]))
self.assertRaisesRegex(ValueError,
r'Shapes \(1, 2\) and \(1, 3\) are incompatible',
ragged_array_ops.boolean_mask, [[1, 2]],
[[True, False, True]])
self.assertRaisesRegex(errors.InvalidArgumentError,
r'Inputs must have identical ragged splits',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[True, False, True]]))
self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask, [[1, 2]], True)
self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]), True)
if __name__ == '__main__':
googletest.main()
| RaggedBooleanMaskOpTest |
python | graphql-python__graphene | examples/simple_example.py | {
"start": 18,
"end": 132
} | class ____(graphene.ObjectType):
id = graphene.ID()
name = graphene.String()
age = graphene.Int()
| Patron |
python | pypa__installer | tests/test_utils.py | {
"start": 4061,
"end": 5376
} | class ____:
@pytest.mark.parametrize(
("data", "expected"),
[
pytest.param(
b"#!python\ntest",
b"#!/my/python\ntest",
id="python",
),
pytest.param(
b"#!pythonw\ntest",
b"#!/my/python\ntest",
id="pythonw",
),
pytest.param(
b"#!python something\ntest",
b"#!/my/python\ntest",
id="python-with-args",
),
pytest.param(
b"#!python",
b"#!/my/python\n",
id="python-no-content",
),
],
)
def test_replace_shebang(self, data, expected):
with BytesIO(data) as source, fix_shebang(source, "/my/python") as stream:
result = stream.read()
assert result == expected
@pytest.mark.parametrize(
"data",
[
b"#!py\ntest",
b"#!something\ntest",
b"#something\ntest",
b"#something",
b"something",
],
)
def test_keep_data(self, data):
with BytesIO(data) as source, fix_shebang(source, "/my/python") as stream:
result = stream.read()
assert result == data
| TestScript |
python | gevent__gevent | src/gevent/tests/test__server.py | {
"start": 10605,
"end": 15880
} | class ____(TestCase):
def get_spawn(self):
return gevent.spawn
def _test_server_start_stop(self, restartable):
self.report_netstat('before start')
self.start_server()
self.report_netstat('after start')
if restartable and self.Settings.restartable:
self.server.stop_accepting()
self.report_netstat('after stop_accepting')
self.assertNotAccepted()
self.server.start_accepting()
self.report_netstat('after start_accepting')
sleep_to_clear_old_sockets()
self.assertRequestSucceeded()
self.stop_server()
self.report_netstat('after stop')
def test_backlog_is_not_accepted_for_socket(self):
self.switch_expected = False
with self.assertRaises(TypeError):
self.ServerClass(self.get_listener(), backlog=25)
@greentest.skipOnLibuvOnCIOnPyPy("Sometimes times out")
@greentest.skipOnAppVeyor("Sometimes times out.")
def test_backlog_is_accepted_for_address(self):
self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0), backlog=25)
self.assertConnectionRefused()
self._test_server_start_stop(restartable=False)
def test_subclass_just_create(self):
self.server = self.ServerSubClass(self.get_listener())
self.assertNotAccepted()
@greentest.skipOnAppVeyor("Sometimes times out.")
def test_subclass_with_socket(self):
self.server = self.ServerSubClass(self.get_listener())
# the connection won't be refused, because there exists a
# listening socket, but it won't be handled also
self.assertNotAccepted()
self._test_server_start_stop(restartable=True)
def test_subclass_with_address(self):
self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
self.assertConnectionRefused()
self._test_server_start_stop(restartable=True)
def test_invalid_callback(self):
self._test_invalid_callback()
@greentest.reraises_flaky_timeout(socket.timeout)
def _test_serve_forever(self):
g = gevent.spawn(self.server.serve_forever)
try:
sleep_to_clear_old_sockets()
self.assertRequestSucceeded()
self.server.stop()
self.assertFalse(self.server.started)
self.assertConnectionRefused()
finally:
g.kill()
g.get()
self.server.stop()
def test_serve_forever(self):
self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
self.assertFalse(self.server.started)
self.assertConnectionRefused()
self._test_serve_forever()
def test_serve_forever_after_start(self):
self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
self.assertConnectionRefused()
self.assertFalse(self.server.started)
self.server.start()
self.assertTrue(self.server.started)
self._test_serve_forever()
@greentest.skipIf(greentest.EXPECT_POOR_TIMER_RESOLUTION, "Sometimes spuriously fails")
def test_server_closes_client_sockets(self):
self.server = self.ServerClass((greentest.DEFAULT_BIND_ADDR, 0), lambda *args: [])
self.server.start()
sleep_to_clear_old_sockets()
with self.makefile() as conn:
self.send_request_to_fd(conn)
# use assert500 below?
with gevent.Timeout._start_new_or_dummy(1):
try:
result = conn.read()
if result:
assert result.startswith('HTTP/1.0 500 Internal Server Error'), repr(result)
except socket.timeout:
pass
except socket.error as ex:
if ex.args[0] == 10053:
pass # "established connection was aborted by the software in your host machine"
elif ex.args[0] == errno.ECONNRESET:
pass
else:
raise
self.stop_server()
@property
def socket(self):
return self.server.socket
def test_error_in_spawn(self):
self.init_server()
self.assertTrue(self.server.started)
error = ExpectedError('test_error_in_spawn')
def _spawn(*_args):
gevent.getcurrent().throw(error)
self.server._spawn = _spawn
self.expect_one_error()
self.assertAcceptedConnectionError()
self.assert_error(ExpectedError, error)
def test_server_repr_when_handle_is_instancemethod(self):
# PR 501
self.init_server()
assert self.server.started
self.assertIn('Server', repr(self.server))
self.server.set_handle(self.server.handle)
self.assertIn('handle=<bound method', repr(self.server))
self.assertIn('of self>', repr(self.server))
self.server.set_handle(self.test_server_repr_when_handle_is_instancemethod)
self.assertIn('test_server_repr_when_handle_is_instancemethod', repr(self.server))
def handle():
pass
self.server.set_handle(handle)
self.assertIn('handle=<function', repr(self.server))
| TestDefaultSpawn |
python | walkccc__LeetCode | solutions/743. Network Delay Time/743.py | {
"start": 0,
"end": 708
} | class ____:
def networkDelayTime(self, times: list[list[int]], n: int, k: int) -> int:
graph = [[] for _ in range(n)]
for u, v, w in times:
graph[u - 1].append((v - 1, w))
return self._dijkstra(graph, k - 1)
def _dijkstra(self, graph: list[list[tuple[int, int]]], src: int) -> int:
dist = [math.inf] * len(graph)
dist[src] = 0
minHeap = [(dist[src], src)] # (d, u)
while minHeap:
d, u = heapq.heappop(minHeap)
if d > dist[u]:
continue
for v, w in graph[u]:
if d + w < dist[v]:
dist[v] = d + w
heapq.heappush(minHeap, (dist[v], v))
maxDist = max(dist)
return maxDist if maxDist != math.inf else -1
| Solution |
python | getsentry__sentry | src/sentry/integrations/github/integration.py | {
"start": 7534,
"end": 9203
} | class ____(TypedDict):
installation_id: str
github_account: str
avatar_url: str
def build_repository_query(metadata: Mapping[str, Any], name: str, query: str) -> bytes:
"""
Builds a query for the GitHub Search API. Always includes both forks and original repositories.
Test out your query updates here: https://github.com/search/advanced
"""
account_type = "user" if metadata["account_type"] == "User" else "org"
return f"fork:true {account_type}:{name} {query}".encode()
def error(
request,
org: RpcUserOrganizationContext | None,
error_short="Invalid installation request.",
error_long=ERR_INTEGRATION_INVALID_INSTALLATION_REQUEST,
):
if org is None:
org_id = None
else:
org_id = org.organization.id
logger.error(
"github.installation_error",
extra={"org_id": org_id, "error_short": error_short},
)
return render_to_response(
"sentry/integrations/github-integration-failed.html",
context={
"error": error_long,
"payload": {
"success": False,
"data": {"error": _(error_short)},
},
"document_origin": get_document_origin(org),
},
request=request,
)
def get_document_origin(org) -> str:
if org and features.has("system:multi-region"):
return f'"{generate_organization_url(org.organization.slug)}"'
return "document.origin"
# Github App docs and list of available endpoints
# https://docs.github.com/en/rest/apps/installations
# https://docs.github.com/en/rest/overview/endpoints-available-for-github-apps
| GithubInstallationInfo |
python | MongoEngine__mongoengine | tests/fields/test_boolean_field.py | {
"start": 99,
"end": 1719
} | class ____(MongoDBTestCase):
def test_storage(self):
class Person(Document):
admin = BooleanField()
person = Person(admin=True)
person.save()
assert get_as_pymongo(person) == {"_id": person.id, "admin": True}
def test_construction_does_not_fail_uncastable_value(self):
class BoolFail:
def __bool__(self):
return "bogus"
class Person(Document):
admin = BooleanField()
person = Person(admin=BoolFail())
person.admin == "bogus"
def test_validation(self):
"""Ensure that invalid values cannot be assigned to boolean
fields.
"""
class Person(Document):
admin = BooleanField()
person = Person()
person.admin = True
person.validate()
person.admin = 2
with pytest.raises(ValidationError):
person.validate()
person.admin = "Yes"
with pytest.raises(ValidationError):
person.validate()
person.admin = "False"
with pytest.raises(ValidationError):
person.validate()
def test_weirdness_constructor(self):
"""When attribute is set in contructor, it gets cast into a bool
which causes some weird behavior. We dont necessarily want to maintain this behavior
but its a known issue
"""
class Person(Document):
admin = BooleanField()
new_person = Person(admin="False")
assert new_person.admin
new_person = Person(admin="0")
assert new_person.admin
| TestBooleanField |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 57558,
"end": 57844
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
lookup_key: Optional[str] = Field(
None, description="A key that can be used to look up query details."
)
| SqlStatementOutput |
python | doocs__leetcode | solution/1500-1599/1550.Three Consecutive Odds/Solution.py | {
"start": 0,
"end": 285
} | class ____:
def threeConsecutiveOdds(self, arr: List[int]) -> bool:
cnt = 0
for x in arr:
if x & 1:
cnt += 1
if cnt == 3:
return True
else:
cnt = 0
return False
| Solution |
python | pytorch__pytorch | test/mobile/test_bytecode.py | {
"start": 4149,
"end": 14640
} | class ____(TestCase):
def test_get_model_bytecode_version(self):
def check_model_version(model_path, expect_version):
actual_version = _get_model_bytecode_version(model_path)
assert actual_version == expect_version
for version, model_info in SCRIPT_MODULE_BYTECODE_PKL.items():
model_path = pytorch_test_dir / "cpp" / "jit" / model_info["model_name"]
check_model_version(model_path, version)
def test_bytecode_values_for_all_backport_functions(self):
# Find the maximum version of the checked in models, start backporting to the minimum support version,
# and comparing the bytecode pkl content.
# It can't be merged to the test `test_all_backport_functions`, because optimization is dynamic and
# the content might change when optimize function changes. This test focuses
# on bytecode.pkl content validation. For the content validation, it is not byte to byte check, but
# regular expression matching. The wildcard can be used to skip some specific content comparison.
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
current_from_version = maximum_checked_in_model_version
with tempfile.TemporaryDirectory() as tmpdirname:
while current_from_version > MINIMUM_TO_VERSION:
# Load model v5 and run forward method
model_name = SCRIPT_MODULE_BYTECODE_PKL[current_from_version][
"model_name"
]
input_model_path = pytorch_test_dir / "cpp" / "jit" / model_name
# A temporary model file will be export to this path, and run through bytecode.pkl
# content check.
tmp_output_model_path_backport = Path(
tmpdirname, "tmp_script_module_backport.ptl"
)
current_to_version = current_from_version - 1
backport_success = _backport_for_mobile(
input_model_path, tmp_output_model_path_backport, current_to_version
)
assert backport_success
expect_bytecode_pkl = SCRIPT_MODULE_BYTECODE_PKL[current_to_version][
"bytecode_pkl"
]
buf = io.StringIO()
torch.utils.show_pickle.main(
[
"",
tmpdirname
+ "/"
+ tmp_output_model_path_backport.name
+ "@*/bytecode.pkl",
],
output_stream=buf,
)
output = buf.getvalue()
acutal_result_clean = "".join(output.split())
expect_result_clean = "".join(expect_bytecode_pkl.split())
isMatch = fnmatch.fnmatch(acutal_result_clean, expect_result_clean)
assert isMatch
current_from_version -= 1
shutil.rmtree(tmpdirname)
# Please run this test manually when working on backport.
# This test passes in OSS, but fails internally, likely due to missing step in build
# def test_all_backport_functions(self):
# # Backport from the latest bytecode version to the minimum support version
# # Load, run the backport model, and check version
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
# module_input = 1
# expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
# # temporary input model file and output model file will be exported in the temporary folder
# with tempfile.TemporaryDirectory() as tmpdirname:
# tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(str(tmp_input_model_path))
# current_from_version = _get_model_bytecode_version(tmp_input_model_path)
# current_to_version = current_from_version - 1
# tmp_output_model_path = Path(tmpdirname, "tmp_script_module_backport.ptl")
# while current_to_version >= MINIMUM_TO_VERSION:
# # Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, current_to_version)
# assert(backport_success)
# backport_version = _get_model_bytecode_version(tmp_output_model_path)
# assert(backport_version == current_to_version)
# # Load model and run forward method
# mobile_module = _load_for_lite_interpreter(str(tmp_input_model_path))
# mobile_module_result = mobile_module(module_input)
# torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
# current_to_version -= 1
# # Check backport failure case
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, MINIMUM_TO_VERSION - 1)
# assert(not backport_success)
# # need to clean the folder before it closes, otherwise will run into git not clean error
# shutil.rmtree(tmpdirname)
# Check just the test_backport_bytecode_from_file_to_file mechanism but not the function implementations
def test_backport_bytecode_from_file_to_file(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = (
pytorch_test_dir
/ "cpp"
/ "jit"
/ SCRIPT_MODULE_BYTECODE_PKL[maximum_checked_in_model_version]["model_name"]
)
if maximum_checked_in_model_version > MINIMUM_TO_VERSION:
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_backport_model_path = Path(
tmpdirname, "tmp_script_module_v5_backported_to_v4.ptl"
)
# backport from file
success = _backport_for_mobile(
script_module_v5_path,
tmp_backport_model_path,
maximum_checked_in_model_version - 1,
)
assert success
buf = io.StringIO()
torch.utils.show_pickle.main(
[
"",
tmpdirname
+ "/"
+ tmp_backport_model_path.name
+ "@*/bytecode.pkl",
],
output_stream=buf,
)
output = buf.getvalue()
expected_result = SCRIPT_MODULE_V4_BYTECODE_PKL
acutal_result_clean = "".join(output.split())
expect_result_clean = "".join(expected_result.split())
isMatch = fnmatch.fnmatch(acutal_result_clean, expect_result_clean)
assert isMatch
# Load model v4 and run forward method
mobile_module = _load_for_lite_interpreter(str(tmp_backport_model_path))
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones(
[2, 4], dtype=torch.float64
)
torch.testing.assert_close(
mobile_module_result, expected_mobile_module_result
)
shutil.rmtree(tmpdirname)
# Check just the _backport_for_mobile_to_buffer mechanism but not the function implementations
def test_backport_bytecode_from_file_to_buffer(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = (
pytorch_test_dir
/ "cpp"
/ "jit"
/ SCRIPT_MODULE_BYTECODE_PKL[maximum_checked_in_model_version]["model_name"]
)
if maximum_checked_in_model_version > MINIMUM_TO_VERSION:
# Backport model to v4
script_module_v4_buffer = _backport_for_mobile_to_buffer(
script_module_v5_path, maximum_checked_in_model_version - 1
)
# Check version of the model v4 from backport
bytesio = io.BytesIO(script_module_v4_buffer)
backport_version = _get_model_bytecode_version(bytesio)
assert backport_version == maximum_checked_in_model_version - 1
# Load model v4 from backport and run forward method
bytesio = io.BytesIO(script_module_v4_buffer)
mobile_module = _load_for_lite_interpreter(bytesio)
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
torch.testing.assert_close(
mobile_module_result, expected_mobile_module_result
)
def test_get_model_ops_and_info(self):
# TODO update this to be more in the style of the above tests after a backport from 6 -> 5 exists
script_module_v6 = pytorch_test_dir / "cpp" / "jit" / "script_module_v6.ptl"
ops_v6 = _get_model_ops_and_info(script_module_v6)
assert ops_v6["aten::add.int"].num_schema_args == 2
assert ops_v6["aten::add.Scalar"].num_schema_args == 2
def test_get_mobile_model_contained_types(self):
class MyTestModule(torch.nn.Module):
def forward(self, x):
return x + 10
sample_input = torch.tensor([1])
script_module = torch.jit.script(MyTestModule())
script_module(sample_input)
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
type_list = _get_mobile_model_contained_types(buffer)
assert len(type_list) >= 0
if __name__ == "__main__":
run_tests()
| testVariousModelVersions |
python | doocs__leetcode | solution/2300-2399/2392.Build a Matrix With Conditions/Solution.py | {
"start": 0,
"end": 1091
} | class ____:
def buildMatrix(
self, k: int, rowConditions: List[List[int]], colConditions: List[List[int]]
) -> List[List[int]]:
def f(cond):
g = defaultdict(list)
indeg = [0] * (k + 1)
for a, b in cond:
g[a].append(b)
indeg[b] += 1
q = deque([i for i, v in enumerate(indeg[1:], 1) if v == 0])
res = []
while q:
for _ in range(len(q)):
i = q.popleft()
res.append(i)
for j in g[i]:
indeg[j] -= 1
if indeg[j] == 0:
q.append(j)
return None if len(res) != k else res
row = f(rowConditions)
col = f(colConditions)
if row is None or col is None:
return []
ans = [[0] * k for _ in range(k)]
m = [0] * (k + 1)
for i, v in enumerate(col):
m[v] = i
for i, v in enumerate(row):
ans[i][m[v]] = v
return ans
| Solution |
python | django-import-export__django-import-export | import_export/admin.py | {
"start": 32473,
"end": 32619
} | class ____(ImportExportMixin, admin.ModelAdmin):
"""
Subclass of ModelAdmin with import/export functionality.
"""
| ImportExportModelAdmin |
python | pytorch__pytorch | test/jit/fixtures_srcs/fixtures_src.py | {
"start": 1202,
"end": 1310
} | class ____(torch.nn.Module):
def forward(self, x):
return torch._C._nn.gelu(x)
| TestVersionedGeluV9 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_selections.py | {
"start": 778,
"end": 3675
} | class ____(graphene.ObjectType):
assetSelectionString = graphene.String()
assetKeys = non_null_list(GrapheneAssetKey)
assetChecks = non_null_list(GrapheneAssetCheckHandle)
assets = non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneAsset")
assetsOrError = graphene.NonNull("dagster_graphql.schema.roots.assets.GrapheneAssetsOrError")
def __init__(
self,
asset_selection: AssetSelection,
repository_handle: RepositoryHandle,
):
self._asset_selection = asset_selection
self._repository_handle = repository_handle
self._resolved_keys = None
self._resolved_checks = None
def resolve_assetSelectionString(self, _graphene_info) -> str:
return str(
self._asset_selection
& CodeLocationAssetSelection(
selected_code_location=self._repository_handle.code_location_origin.location_name
)
)
def resolve_assetKeys(self, graphene_info: ResolveInfo):
return [
GrapheneAssetKey(path=asset_key.path)
for asset_key in self._get_resolved_and_sorted_keys(graphene_info)
]
def resolve_assetChecks(self, graphene_info: ResolveInfo):
return [
GrapheneAssetCheckHandle(handle)
for handle in self._get_resolved_and_sorted_checks(graphene_info)
]
def _get_assets(self, graphene_info: ResolveInfo):
return [
get_asset(asset_key) for asset_key in self._get_resolved_and_sorted_keys(graphene_info)
]
def resolve_assets(self, graphene_info: ResolveInfo):
return self._get_assets(graphene_info)
def _get_resolved_and_sorted_keys(self, graphene_info: ResolveInfo) -> Sequence[AssetKey]:
"""Use this to maintain stability in ordering."""
if self._resolved_keys is None:
repo = graphene_info.context.get_repository(self._repository_handle)
self._resolved_keys = sorted(self._asset_selection.resolve(repo.asset_graph), key=str)
return self._resolved_keys
def _get_resolved_and_sorted_checks(
self, graphene_info: ResolveInfo
) -> Sequence[AssetCheckKey]:
"""Use this to maintain stability in ordering."""
if self._resolved_checks is None:
repo = graphene_info.context.get_repository(self._repository_handle)
self._resolved_checks = sorted(
self._asset_selection.resolve_checks(repo.asset_graph), key=str
)
return self._resolved_checks
@capture_error
def resolve_assetsOrError(self, graphene_info) -> "GrapheneAssetConnection":
from dagster_graphql.schema.roots.assets import GrapheneAssetConnection
return GrapheneAssetConnection(nodes=self._get_assets(graphene_info), cursor=None)
class Meta:
name = "AssetSelection"
| GrapheneAssetSelection |
python | Netflix__metaflow | metaflow/_vendor/packaging/_manylinux.py | {
"start": 2024,
"end": 8813
} | class ____(NamedTuple):
major: int
minor: int
def _glibc_version_string_confstr() -> Optional[str]:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> Optional[str]:
"""Returns glibc version string, or None if not using glibc."""
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
RuntimeWarning,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
version_str = _glibc_version_string()
if version_str is None:
return (-1, -1)
return _parse_glibc_version(version_str)
# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
sys_glibc = _get_glibc_version()
if sys_glibc < version:
return False
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
except ImportError:
return True
if hasattr(_manylinux, "manylinux_compatible"):
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
if result is not None:
return bool(result)
return True
if version == _GLibCVersion(2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
return bool(_manylinux.manylinux1_compatible)
if version == _GLibCVersion(2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
return bool(_manylinux.manylinux2010_compatible)
if version == _GLibCVersion(2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
return bool(_manylinux.manylinux2014_compatible)
return True
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
}
def platform_tags(linux: str, arch: str) -> Iterator[str]:
if not _have_compatible_abi(sys.executable, arch):
return
# Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16)
if arch in {"x86_64", "i686"}:
# On x86/i686 also oldest glibc to be supported is (2, 5).
too_old_glibc2 = _GLibCVersion(2, 4)
current_glibc = _GLibCVersion(*_get_glibc_version())
glibc_max_list = [current_glibc]
# We can assume compatibility across glibc major versions.
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
#
# Build a list of maximum glibc versions so that we can
# output the canonical list of all glibc from current_glibc
# down to too_old_glibc2, including all intermediary versions.
for glibc_major in range(current_glibc.major - 1, 1, -1):
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
for glibc_max in glibc_max_list:
if glibc_max.major == too_old_glibc2.major:
min_minor = too_old_glibc2.minor
else:
# For other glibc major versions oldest supported is (x, 0).
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
if _is_compatible(tag, arch, glibc_version):
yield linux.replace("linux", tag)
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_compatible(legacy_tag, arch, glibc_version):
yield linux.replace("linux", legacy_tag)
| _GLibCVersion |
python | sympy__sympy | sympy/functions/elementary/trigonometric.py | {
"start": 93459,
"end": 100804
} | class ____(InverseTrigonometricFunction):
r"""
The inverse cotangent function.
Returns the arc cotangent of x (measured in radians).
Explanation
===========
``acot(x)`` will evaluate automatically in the cases
$x \in \{\infty, -\infty, \tilde{\infty}, 0, 1, -1\}$
and for some instances when the result is a rational multiple of $\pi$
(see the eval class method).
A purely imaginary argument will lead to an ``acoth`` expression.
``acot(x)`` has a branch cut along $(-i, i)$, hence it is discontinuous
at 0. Its range for real $x$ is $(-\frac{\pi}{2}, \frac{\pi}{2}]$.
Examples
========
>>> from sympy import acot, sqrt
>>> acot(0)
pi/2
>>> acot(1)
pi/4
>>> acot(sqrt(3) - 2)
-5*pi/12
See Also
========
sympy.functions.elementary.trigonometric.sin
sympy.functions.elementary.trigonometric.csc
sympy.functions.elementary.trigonometric.cos
sympy.functions.elementary.trigonometric.sec
sympy.functions.elementary.trigonometric.tan
sympy.functions.elementary.trigonometric.cot
sympy.functions.elementary.trigonometric.asin
sympy.functions.elementary.trigonometric.acsc
sympy.functions.elementary.trigonometric.acos
sympy.functions.elementary.trigonometric.asec
sympy.functions.elementary.trigonometric.atan
sympy.functions.elementary.trigonometric.atan2
References
==========
.. [1] https://dlmf.nist.gov/4.23
.. [2] https://functions.wolfram.com/ElementaryFunctions/ArcCot
"""
_singularities = (S.ImaginaryUnit, -S.ImaginaryUnit)
def fdiff(self, argindex=1):
if argindex == 1:
return -1/(1 + self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if s.args[0].is_rational:
return False
else:
return s.is_rational
def _eval_is_positive(self):
return self.args[0].is_nonnegative
def _eval_is_negative(self):
return self.args[0].is_negative
def _eval_is_extended_real(self):
return self.args[0].is_extended_real
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity or arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return pi/ 2
elif arg is S.One:
return pi/4
elif arg is S.NegativeOne:
return -pi/4
if arg is S.ComplexInfinity:
return S.Zero
if arg.could_extract_minus_sign():
return -cls(-arg)
if arg.is_number:
atan_table = cls._atan_table()
if arg in atan_table:
ang = pi/2 - atan_table[arg]
if ang > pi/2: # restrict to (-pi/2,pi/2]
ang -= pi
return ang
i_coeff = _imaginary_unit_as_coefficient(arg)
if i_coeff is not None:
from sympy.functions.elementary.hyperbolic import acoth
return -S.ImaginaryUnit*acoth(i_coeff)
if arg.is_zero:
return pi*S.Half
if isinstance(arg, cot):
ang = arg.args[0]
if ang.is_comparable:
ang %= pi # restrict to [0,pi)
if ang > pi/2: # restrict to (-pi/2,pi/2]
ang -= pi
return ang
if isinstance(arg, tan): # atan(x) + acot(x) = pi/2
ang = arg.args[0]
if ang.is_comparable:
ang = pi/2 - atan(arg)
if ang > pi/2: # restrict to (-pi/2,pi/2]
ang -= pi
return ang
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return pi/2 # FIX THIS
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return S.NegativeOne**((n + 1)//2)*x**n/n
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
if x0 is S.NaN:
return self.func(arg.as_leading_term(x))
if x0 is S.ComplexInfinity:
return (1/arg).as_leading_term(x)
# Handling branch points
if x0 in (-S.ImaginaryUnit, S.ImaginaryUnit, S.Zero):
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir).expand()
# Handling points lying on branch cuts [-I, I]
if x0.is_imaginary and (1 + x0**2).is_positive:
ndir = arg.dir(x, cdir if cdir else 1)
if re(ndir).is_positive:
if im(x0).is_positive:
return self.func(x0) + pi
elif re(ndir).is_negative:
if im(x0).is_negative:
return self.func(x0) - pi
else:
return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir).expand()
return self.func(x0)
def _eval_nseries(self, x, n, logx, cdir=0): # acot
arg0 = self.args[0].subs(x, 0)
# Handling branch points
if arg0 in (S.ImaginaryUnit, S.NegativeOne*S.ImaginaryUnit):
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
res = super()._eval_nseries(x, n=n, logx=logx)
if arg0 is S.ComplexInfinity:
return res
ndir = self.args[0].dir(x, cdir if cdir else 1)
if arg0.is_zero:
if re(ndir) < 0:
return res - pi
return res
# Handling points lying on branch cuts [-I, I]
if arg0.is_imaginary and (1 + arg0**2).is_positive:
if re(ndir).is_positive:
if im(arg0).is_positive:
return res + pi
elif re(ndir).is_negative:
if im(arg0).is_negative:
return res - pi
else:
return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
return res
def _eval_aseries(self, n, args0, x, logx):
if args0[0] in [S.Infinity, S.NegativeInfinity]:
return atan(1/self.args[0])._eval_nseries(x, n, logx)
else:
return super()._eval_aseries(n, args0, x, logx)
def _eval_rewrite_as_log(self, x, **kwargs):
return S.ImaginaryUnit/2*(log(1 - S.ImaginaryUnit/x)
- log(1 + S.ImaginaryUnit/x))
_eval_rewrite_as_tractable = _eval_rewrite_as_log
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return cot
def _eval_rewrite_as_asin(self, arg, **kwargs):
return (arg*sqrt(1/arg**2)*
(pi/2 - asin(sqrt(-arg**2)/sqrt(-arg**2 - 1))))
def _eval_rewrite_as_acos(self, arg, **kwargs):
return arg*sqrt(1/arg**2)*acos(sqrt(-arg**2)/sqrt(-arg**2 - 1))
def _eval_rewrite_as_atan(self, arg, **kwargs):
return atan(1/arg)
def _eval_rewrite_as_asec(self, arg, **kwargs):
return arg*sqrt(1/arg**2)*asec(sqrt((1 + arg**2)/arg**2))
def _eval_rewrite_as_acsc(self, arg, **kwargs):
return arg*sqrt(1/arg**2)*(pi/2 - acsc(sqrt((1 + arg**2)/arg**2)))
| acot |
python | pytorch__pytorch | torch/_inductor/comm_analysis.py | {
"start": 3390,
"end": 3444
} | class ____(IntEnum):
TREE = 0
RING = 1
| NCCL_ALGO |
python | scrapy__scrapy | scrapy/extensions/httpcache.py | {
"start": 12093,
"end": 16577
} | class ____:
def __init__(self, settings: BaseSettings):
self.cachedir: str = data_path(settings["HTTPCACHE_DIR"])
self.expiration_secs: int = settings.getint("HTTPCACHE_EXPIRATION_SECS")
self.use_gzip: bool = settings.getbool("HTTPCACHE_GZIP")
# https://github.com/python/mypy/issues/10740
self._open: Callable[Concatenate[str | os.PathLike, str, ...], IO[bytes]] = (
gzip.open if self.use_gzip else open # type: ignore[assignment]
)
def open_spider(self, spider: Spider) -> None:
logger.debug(
"Using filesystem cache storage in %(cachedir)s",
{"cachedir": self.cachedir},
extra={"spider": spider},
)
assert spider.crawler.request_fingerprinter
self._fingerprinter = spider.crawler.request_fingerprinter
def close_spider(self, spider: Spider) -> None:
pass
def retrieve_response(self, spider: Spider, request: Request) -> Response | None:
"""Return response if present in cache, or None otherwise."""
metadata = self._read_meta(spider, request)
if metadata is None:
return None # not cached
rpath = Path(self._get_request_path(spider, request))
with self._open(rpath / "response_body", "rb") as f:
body = f.read()
with self._open(rpath / "response_headers", "rb") as f:
rawheaders = f.read()
url = metadata["response_url"]
status = metadata["status"]
headers = Headers(headers_raw_to_dict(rawheaders))
respcls = responsetypes.from_args(headers=headers, url=url, body=body)
return respcls(url=url, headers=headers, status=status, body=body)
def store_response(
self, spider: Spider, request: Request, response: Response
) -> None:
"""Store the given response in the cache."""
rpath = Path(self._get_request_path(spider, request))
if not rpath.exists():
rpath.mkdir(parents=True)
metadata = {
"url": request.url,
"method": request.method,
"status": response.status,
"response_url": response.url,
"timestamp": time(),
}
with self._open(rpath / "meta", "wb") as f:
f.write(to_bytes(repr(metadata)))
with self._open(rpath / "pickled_meta", "wb") as f:
pickle.dump(metadata, f, protocol=4)
with self._open(rpath / "response_headers", "wb") as f:
f.write(headers_dict_to_raw(response.headers))
with self._open(rpath / "response_body", "wb") as f:
f.write(response.body)
with self._open(rpath / "request_headers", "wb") as f:
f.write(headers_dict_to_raw(request.headers))
with self._open(rpath / "request_body", "wb") as f:
f.write(request.body)
def _get_request_path(self, spider: Spider, request: Request) -> str:
key = self._fingerprinter.fingerprint(request).hex()
return str(Path(self.cachedir, spider.name, key[0:2], key))
def _read_meta(self, spider: Spider, request: Request) -> dict[str, Any] | None:
rpath = Path(self._get_request_path(spider, request))
metapath = rpath / "pickled_meta"
if not metapath.exists():
return None # not found
mtime = metapath.stat().st_mtime
if 0 < self.expiration_secs < time() - mtime:
return None # expired
with self._open(metapath, "rb") as f:
return cast("dict[str, Any]", pickle.load(f)) # noqa: S301
def parse_cachecontrol(header: bytes) -> dict[bytes, bytes | None]:
"""Parse Cache-Control header
https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
>>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
... b'max-age': b'3600'}
True
>>> parse_cachecontrol(b'') == {}
True
"""
directives = {}
for directive in header.split(b","):
key, sep, val = directive.strip().partition(b"=")
if key:
directives[key.lower()] = val if sep else None
return directives
def rfc1123_to_epoch(date_str: str | bytes | None) -> int | None:
try:
date_str = to_unicode(date_str, encoding="ascii") # type: ignore[arg-type]
return mktime_tz(parsedate_tz(date_str)) # type: ignore[arg-type]
except Exception:
return None
| FilesystemCacheStorage |
python | realpython__materials | python-313/typing/deprecations.py | {
"start": 1706,
"end": 2036
} | class ____:
def __init__(self, major: int, minor: int = 0, patch: int = 0) -> None:
self.major = major
self.minor = minor
self.patch = patch
concatenate("three", "thirteen")
add(3, 13)
VersionType(3, 13)
version = Version(3, 13)
version.increase("patch")
print(version)
print(version.bugfix)
| VersionType |
python | realpython__materials | django-diary/source_code_final/entries/views.py | {
"start": 1094,
"end": 1439
} | class ____(LockedView, SuccessMessageMixin, DeleteView):
model = Entry
success_url = reverse_lazy("entry-list")
success_message = "Your entry was deleted!"
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super().delete(request, *args, **kwargs)
| EntryDeleteView |
python | pandas-dev__pandas | pandas/tests/indexing/test_iloc.py | {
"start": 49022,
"end": 51669
} | class ____:
def test_frame_iloc_getitem_callable(self):
# GH#11485
df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD"))
# return location
res = df.iloc[lambda x: [1, 3]]
tm.assert_frame_equal(res, df.iloc[[1, 3]])
res = df.iloc[lambda x: [1, 3], :]
tm.assert_frame_equal(res, df.iloc[[1, 3], :])
res = df.iloc[lambda x: [1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
# mixture
res = df.iloc[[1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[[1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
res = df.iloc[lambda x: [1, 3], 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
def test_frame_iloc_setitem_callable(self):
# GH#11485
df = DataFrame(
{"X": [1, 2, 3, 4], "Y": Series(list("aabb"), dtype=object)},
index=list("ABCD"),
)
# return location
res = df.copy()
res.iloc[lambda x: [1, 3]] = 0
exp = df.copy()
exp.iloc[[1, 3]] = 0
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], :] = -1
exp = df.copy()
exp.iloc[[1, 3], :] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: 0] = 5
exp = df.copy()
exp.iloc[[1, 3], 0] = 5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
exp = df.copy()
exp.iloc[[1, 3], [0]] = 25
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.iloc[[1, 3], lambda x: 0] = -3
exp = df.copy()
exp.iloc[[1, 3], 0] = -3
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[[1, 3], lambda x: [0]] = -5
exp = df.copy()
exp.iloc[[1, 3], [0]] = -5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], 0] = 10
exp = df.copy()
exp.iloc[[1, 3], 0] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
exp = df.copy()
exp.iloc[[1, 3], [0]] = [-5, -5]
tm.assert_frame_equal(res, exp)
| TestILocCallable |
python | kamyu104__LeetCode-Solutions | Python/powerful-integers.py | {
"start": 95,
"end": 759
} | class ____(object):
def powerfulIntegers(self, x, y, bound):
"""
:type x: int
:type y: int
:type bound: int
:rtype: List[int]
"""
result = set()
log_x = int(math.floor(math.log(bound) / math.log(x)))+1 if x != 1 else 1
log_y = int(math.floor(math.log(bound) / math.log(y)))+1 if y != 1 else 1
pow_x = 1
for i in xrange(log_x):
pow_y = 1
for j in xrange(log_y):
val = pow_x + pow_y
if val <= bound:
result.add(val)
pow_y *= y
pow_x *= x
return list(result)
| Solution |
python | pytorch__pytorch | test/dynamo/test_model_output.py | {
"start": 1871,
"end": 8767
} | class ____(torch._dynamo.test_case.TestCase):
@maybe_skip
def test_mo_create(self):
def fn(a, b):
tmp = BaseModelOutput(a + 1, attentions=b + 3)
return tmp
torch._dynamo.testing.standard_test(self, fn=fn, nargs=2, expected_ops=2)
@maybe_skip
def test_mo_assign(self):
def fn(a, b):
tmp = BaseModelOutput(last_hidden_state=b + 3)
tmp.hidden_states = a + 7
tmp["attentions"] = a + b + 6
return tmp
args = [torch.randn(10), torch.randn(10)]
obj1 = fn(*args)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
obj2 = opt_fn(*args)
self.assertTrue(same(obj1.last_hidden_state, obj2.last_hidden_state))
self.assertTrue(same(obj1.hidden_states, obj2.hidden_states))
self.assertTrue(same(obj1.attentions, obj2.attentions))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 4)
def _common(self, fn, op_count):
args = [
BaseModelOutput(
last_hidden_state=torch.randn(10), attentions=torch.randn(10)
)
]
obj1 = fn(*args)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch._dynamo.optimize_assert(cnts)(fn)
obj2 = opt_fn(*args)
self.assertTrue(same(obj1, obj2))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, op_count)
@maybe_skip
def test_mo_getattr(self):
def fn(obj: BaseModelOutput):
x = obj.last_hidden_state * 10
if obj.hidden_states is not None:
x += obj.hidden_states
if obj.attentions is not None:
x += obj.attentions
return x
self._common(fn, 2)
@maybe_skip
def test_mo_getattr_missing(self):
def fn(obj: BaseModelOutput):
if getattr(obj, "asdf", None) is not None:
obj.asdf += 1
return obj.attentions + 1
self._common(fn, 1)
@maybe_skip
def test_mo_getitem(self):
def fn(obj: BaseModelOutput):
x = obj["last_hidden_state"] * 10
if "hidden_stats" in obj:
x += obj["hidden_states"]
if "attentions" in obj:
x += obj["attentions"]
return x
self._common(fn, 2)
@maybe_skip
def test_mo_tuple(self):
def fn(obj: BaseModelOutput):
a, b = obj.to_tuple()
return a + b * 10
self._common(fn, 2)
@maybe_skip
def test_mo_index(self):
def fn(obj: BaseModelOutput):
return obj[0] * 10 + obj[1]
self._common(fn, 2)
@maybe_skip
def test_mo_init(self):
@dataclasses.dataclass
class MyDataClass(ModelOutput):
a: torch.Tensor
b: torch.Tensor = None
c: torch.Tensor = None
d: torch.Tensor = None
e: torch.Tensor = None
def fn(obj):
class_fields = dataclasses.fields(obj)
assert len(class_fields)
assert all(field.default is None for field in class_fields[1:])
other_fields_are_none = all(
getattr(obj, field.name) is None for field in class_fields[1:]
)
assert not other_fields_are_none
total = getattr(obj, class_fields[0].name)
for field in class_fields[1:]:
v = getattr(obj, field.name)
if v is not None:
total += v
return total
tensors = [torch.randn(10), torch.randn(10), torch.randn(10)]
obj1 = MyDataClass(*tensors)
correct1 = fn(obj1)
obj2 = MyDataClass(*tensors)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
self.assertTrue(same(opt_fn(obj2), correct1))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 2)
@maybe_skip
def test_mo_init2(self):
# this ModelOutput subclass runs a different __post_init__ codepath
@dataclasses.dataclass
class MyDataClass(ModelOutput):
x: torch.FloatTensor = None
def fn(x):
obj = MyDataClass(x=x * 3)
return obj
inp = torch.randn(3, 3)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(inp).x, opt_fn(inp).x)
@maybe_skip
def test_mo_init_with_disable(self):
# Can result in "non-function or method super: <slot wrapper '__setattr__' of 'object' objects>"
# graph breaks (although it may not be the first)
# Minimal repro for https://github.com/pytorch/pytorch/issues/126028
@dataclasses.dataclass
class MyDataClass(ModelOutput):
x: torch.FloatTensor = None
@torch._dynamo.disable(recursive=False)
def fn(x):
return MyDataClass(x=x)
inp = torch.randn(3, 3)
opt_fn = torch.compile(fn, backend="eager")
self.assertEqual(fn(inp).x, opt_fn(inp).x)
@maybe_skip
def test_mo_newkey(self):
obj = BaseModelOutput()
def fn(obj):
return obj["wwww"] + 1
inp = torch.randn(3, 3)
obj["wwww"] = inp
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(obj), opt_fn(obj))
@maybe_skip
def test_mo_from_outside(self):
def fn(obj):
return obj.attentions + 1
obj = BaseModelOutput(attentions=torch.randn(3, 3))
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
self.assertEqual(fn(obj), opt_fn(obj))
@maybe_skip
def test_mo_reconstruct_bytecode(self):
def fn(inp):
return BaseModelOutput(attentions=inp + 1)
inp = torch.randn(3, 3)
opt_fn = torch.compile(fn, backend="eager")
self.assertEqual(fn(inp).attentions, opt_fn(inp).attentions)
@maybe_skip
def test_none(self):
class Model(torch.nn.Module):
def forward(self, x):
x = x + 1
return CausalLMOutputWithPast(loss=None, logits=x)[0]
model = Model()
opt_model = torch.compile(model, backend="eager", fullgraph=True)
x = torch.randn(1, 1, 1, 1)
self.assertTrue(same(model(x), opt_model(x)))
@maybe_skip
def test_reconstruction(self):
class Model(torch.nn.Module):
def forward(self, x):
x = x + 1
return CausalLMOutputWithPast(loss=x, logits=None)
model = Model()
x = torch.randn(1, 1, 1, 1)
eo = torch._dynamo.export(Model(), aten_graph=True)(x)
self.assertTrue(same(model(x), eo.graph_module(x)))
| TestModelOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.