| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/cohere/modular_cohere.py | {
"start": 8659,
"end": 11669
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: CohereConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
self.mlp = CohereMLP(config)
self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
past_key_values (`Cache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states_attention, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states_mlp = self.mlp(hidden_states)
hidden_states = residual + hidden_states_attention + hidden_states_mlp
return hidden_states
| CohereDecoderLayer |
python | astropy__astropy | astropy/utils/masked/core.py | {
"start": 14672,
"end": 17015
} | class ____(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
out["data"] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = masked_array.mask
elif method == "null_value":
out["data"] = np.ma.MaskedArray(
masked_array.unmasked, mask=masked_array.mask
)
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault("mask", getattr(map["data"], "mask", False))
return self._parent_cls.from_unmasked(**map)
| MaskedNDArrayInfo |
python | ipython__ipython | IPython/core/display.py | {
"start": 26743,
"end": 36323
} | class ____(DisplayObject):
_read_flags = "rb"
_FMT_JPEG = "jpeg"
_FMT_PNG = "png"
_FMT_GIF = "gif"
_FMT_WEBP = "webp"
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG, _FMT_GIF, _FMT_WEBP]
_MIMETYPES = {
_FMT_PNG: "image/png",
_FMT_JPEG: "image/jpeg",
_FMT_GIF: "image/gif",
_FMT_WEBP: "image/webp",
}
def __init__(
self,
data=None,
url=None,
filename=None,
format=None,
embed=None,
width=None,
height=None,
retina=False,
unconfined=False,
metadata=None,
alt=None,
):
"""Create a PNG/JPEG/GIF/WEBP image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
The format of the image data (png/jpeg/jpg/gif/webp). If a filename or URL is given,
the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width in pixels to which to constrain the image in html
height : int
Height in pixels to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined : bool
Set unconfined=True to disable max-width confinement of the image.
metadata : dict
Specify extra metadata to attach to the image.
alt : unicode
Alternative text for the image, for use by screen readers.
Examples
--------
embedded image data, works in qtconsole and notebook
when passed positionally, the first arg can be any of raw image data,
a URL, or a filename from which to load image data.
The result is always embedding image data for inline images.
>>> Image('https://www.google.fr/images/srpr/logo3w.png') # doctest: +SKIP
<IPython.core.display.Image object>
>>> Image('/path/to/image.jpg')
<IPython.core.display.Image object>
>>> Image(b'RAW_PNG_DATA...')
<IPython.core.display.Image object>
Specifying Image(url=...) does not embed the image data,
it only generates ``<img>`` tag with a link to the source.
This will not work in the qtconsole or offline.
>>> Image(url='https://www.google.fr/images/srpr/logo3w.png')
<IPython.core.display.Image object>
"""
if isinstance(data, (Path, PurePath)):
data = str(data)
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, str) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if format is None:
if ext is not None:
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
elif ext == u'png':
format = self._FMT_PNG
elif ext == u'gif':
format = self._FMT_GIF
elif ext == "webp":
format = self._FMT_WEBP
else:
format = ext.lower()
elif isinstance(data, bytes):
# infer image type from image data header,
# only if format has not been specified.
if data[:2] == _JPEG:
format = self._FMT_JPEG
elif data[:8] == _PNG:
format = self._FMT_PNG
elif data[8:12] == _WEBP:
format = self._FMT_WEBP
elif data[:6] == _GIF1 or data[:6] == _GIF2:
format = self._FMT_GIF
# failed to detect format, default png
if format is None:
format = self._FMT_PNG
if format.lower() == 'jpg':
# jpg->jpeg
format = self._FMT_JPEG
self.format = format.lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
if self.embed:
self._mimetype = self._MIMETYPES.get(self.format)
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.alt = alt
super(Image, self).__init__(data=data, url=url, filename=filename,
metadata=metadata)
if self.width is None and self.metadata.get('width', {}):
self.width = metadata['width']
if self.height is None and self.metadata.get('height', {}):
self.height = metadata['height']
if self.alt is None and self.metadata.get("alt", {}):
self.alt = metadata["alt"]
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == self._FMT_PNG:
w, h = _pngxy(self.data)
elif self.format == self._FMT_JPEG:
w, h = _jpegxy(self.data)
elif self.format == self._FMT_GIF:
w, h = _gifxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = alt = ""
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
if self.alt:
alt = ' alt="%s"' % html.escape(self.alt)
return '<img src="{url}"{width}{height}{klass}{alt}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
alt=alt,
)
def _repr_mimebundle_(self, include=None, exclude=None):
"""Return the image as a mimebundle
Any new mimetype support should be implemented here.
"""
if self.embed:
mimetype = self._mimetype
data, metadata = self._data_and_metadata(always_both=True)
if metadata:
metadata = {mimetype: metadata}
return {mimetype: data}, metadata
else:
return {'text/html': self._repr_html_()}
def _data_and_metadata(self, always_both=False):
"""shortcut for returning metadata with shape information, if defined"""
try:
b64_data = b2a_base64(self.data, newline=False).decode("ascii")
except TypeError as e:
raise FileNotFoundError(
"No such file or directory: '%s'" % (self.data)) from e
md = {}
if self.metadata:
md.update(self.metadata)
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.alt:
md["alt"] = self.alt
if md or always_both:
return b64_data, md
else:
return b64_data
def _repr_png_(self):
if self.embed and self.format == self._FMT_PNG:
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and self.format == self._FMT_JPEG:
return self._data_and_metadata()
def _find_ext(self, s):
base, ext = splitext(s)
if not ext:
return base
# `splitext` includes leading period, so we skip it
return ext[1:].lower()
| Image |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_chartarea05.py | {
"start": 315,
"end": 1501
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_chartarea05.xlsx")
def test_create_file(self):
"""Test XlsxWriter chartarea properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "pie"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_chartarea(
{
"border": {"color": "#FFFF00", "dash_type": "long_dash"},
"fill": {"color": "#92D050"},
}
)
chart.set_plotarea(
{"border": {"dash_type": "square_dot"}, "fill": {"color": "#FF0000"}}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | doocs__leetcode | solution/2400-2499/2449.Minimum Number of Operations to Make Arrays Similar/Solution.py | {
"start": 0,
"end": 242
} | class ____:
def makeSimilar(self, nums: List[int], target: List[int]) -> int:
nums.sort(key=lambda x: (x & 1, x))
target.sort(key=lambda x: (x & 1, x))
return sum(abs(a - b) for a, b in zip(nums, target)) // 4
| Solution |
python | Pylons__pyramid | tests/test_static.py | {
"start": 8888,
"end": 15671
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.static import static_view
return static_view
def _makeOne(self, *arg, **kw):
kw['use_subpath'] = True
return self._getTargetClass()(*arg, **kw)
def _makeRequest(self, kw=None):
from pyramid.request import Request
environ = {
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
'SERVER_NAME': 'example.com',
'SERVER_PORT': '6543',
'PATH_INFO': '/',
'SCRIPT_NAME': '',
'REQUEST_METHOD': 'GET',
}
if kw is not None:
environ.update(kw)
return Request(environ=environ)
def test_ctor_defaultargs(self):
inst = self._makeOne('package:resource_name')
self.assertEqual(inst.package_name, 'package')
self.assertEqual(inst.docroot, 'resource_name')
self.assertEqual(inst.cache_max_age, 3600)
self.assertEqual(inst.index, 'index.html')
self.assertEqual(inst.reload, False)
self.assertEqual(inst.content_encodings, {})
def test_call_adds_slash_path_info_empty(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest({'PATH_INFO': ''})
request.subpath = ()
context = DummyContext()
from pyramid.httpexceptions import HTTPMovedPermanently
self.assertRaises(HTTPMovedPermanently, inst, context, request)
def test_path_info_slash_means_index_html(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ()
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
def test_oob_singledot(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('.', 'index.html')
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_emptyelement(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('', 'index.html')
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_dotdotslash(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('subdir', '..', '..', 'minimal.pt')
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_dotdotslash_encoded(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('subdir', '%2E%2E', '%2E%2E', 'minimal.pt')
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_os_sep(self):
import os
inst = self._makeOne('tests:fixtures/static')
dds = '..' + os.sep
request = self._makeRequest()
request.subpath = ('subdir', dds, dds, 'minimal.pt')
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_resource_doesnt_exist(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = 'notthere,'
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_resource_isdir(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('subdir',)
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>subdir</html>' in response.body)
def test_resource_is_file(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('index.html',)
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
def test_resource_is_file_with_cache_max_age(self):
inst = self._makeOne('tests:fixtures/static', cache_max_age=600)
request = self._makeRequest()
request.subpath = ('index.html',)
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
self.assertEqual(len(response.headerlist), 5)
header_names = [x[0] for x in response.headerlist]
header_names.sort()
self.assertEqual(
header_names,
[
'Cache-Control',
'Content-Length',
'Content-Type',
'Expires',
'Last-Modified',
],
)
def test_resource_is_file_with_no_cache_max_age(self):
inst = self._makeOne('tests:fixtures/static', cache_max_age=None)
request = self._makeRequest()
request.subpath = ('index.html',)
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
self.assertEqual(len(response.headerlist), 3)
header_names = [x[0] for x in response.headerlist]
header_names.sort()
self.assertEqual(
header_names, ['Content-Length', 'Content-Type', 'Last-Modified']
)
def test_resource_notmodified(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.if_modified_since = fiveyrsfuture
request.subpath = ('index.html',)
context = DummyContext()
response = inst(context, request)
start_response = DummyStartResponse()
app_iter = response(request.environ, start_response)
try:
self.assertEqual(start_response.status, '304 Not Modified')
self.assertEqual(list(app_iter), [])
finally:
app_iter.close()
def test_not_found(self):
inst = self._makeOne('tests:fixtures/static')
request = self._makeRequest()
request.subpath = ('notthere.html',)
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
| Test_static_view_use_subpath_True |
python | great-expectations__great_expectations | versioneer.py | {
"start": 15899,
"end": 18888
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
| NotThisMethod |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py | {
"start": 3623,
"end": 4336
} | class ____:
"""Test AllEcsConfigKeys class."""
def test_all_config_keys_values(self):
"""Test that all config keys have correct values."""
# Test inherited keys
assert AllEcsConfigKeys.ASSIGN_PUBLIC_IP == "assign_public_ip"
assert AllEcsConfigKeys.CLUSTER == "cluster"
# Test additional keys
assert AllEcsConfigKeys.AWS_CONN_ID == "conn_id"
assert AllEcsConfigKeys.CHECK_HEALTH_ON_STARTUP == "check_health_on_startup"
assert AllEcsConfigKeys.MAX_RUN_TASK_ATTEMPTS == "max_run_task_attempts"
assert AllEcsConfigKeys.REGION_NAME == "region_name"
assert AllEcsConfigKeys.RUN_TASK_KWARGS == "run_task_kwargs"
| TestAllEcsConfigKeys |
python | sphinx-doc__sphinx | tests/test_util/test_util_typing.py | {
"start": 1790,
"end": 1865
} | class ____:
__args__ = int
@dataclasses.dataclass(frozen=True)
| BrokenType |
python | getsentry__sentry | src/sentry/identity/services/identity/model.py | {
"start": 487,
"end": 1065
} | class ____(RpcModel):
id: int
idp_id: int # IdentityProvider id
user_id: int
external_id: str
data: dict[str, Any]
def get_identity(self) -> "Provider":
from sentry.identity import get
from sentry.identity.services.identity import identity_service
from sentry.users.models.identity import IdentityProvider
identity_provider = identity_service.get_provider(provider_id=self.idp_id)
if identity_provider is None:
raise IdentityProvider.DoesNotExist
return get(identity_provider.type)
| RpcIdentity |
python | getsentry__sentry | src/sentry/users/services/usersocialauth/model.py | {
"start": 722,
"end": 838
} | class ____(TypedDict, total=False):
id: int
user_id: int
provider: str
uid: str
| UserSocialAuthFilterArgs |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/cloud_storage_transfer_service.py | {
"start": 2348,
"end": 4306
} | class ____:
"""Google Cloud Transfer operation status."""
IN_PROGRESS = "IN_PROGRESS"
PAUSED = "PAUSED"
SUCCESS = "SUCCESS"
FAILED = "FAILED"
ABORTED = "ABORTED"
# A list of keywords used to build a request or response
ACCESS_KEY_ID = "accessKeyId"
ALREADY_EXISTING_IN_SINK = "overwriteObjectsAlreadyExistingInSink"
AWS_ACCESS_KEY = "awsAccessKey"
AWS_SECRET_ACCESS_KEY = "secretAccessKey"
AWS_S3_DATA_SOURCE = "awsS3DataSource"
AWS_ROLE_ARN = "roleArn"
BODY = "body"
BUCKET_NAME = "bucketName"
COUNTERS = "counters"
DAY = "day"
DESCRIPTION = "description"
FILTER = "filter"
FILTER_JOB_NAMES = "job_names"
FILTER_PROJECT_ID = "project_id"
GCS_DATA_SINK = "gcsDataSink"
GCS_DATA_SOURCE = "gcsDataSource"
HOURS = "hours"
HTTP_DATA_SOURCE = "httpDataSource"
INCLUDE_PREFIXES = "includePrefixes"
JOB_NAME = "name"
LIST_URL = "list_url"
METADATA = "metadata"
MINUTES = "minutes"
MONTH = "month"
NAME = "name"
OBJECT_CONDITIONS = "object_conditions"
OPERATIONS = "operations"
OVERWRITE_OBJECTS_ALREADY_EXISTING_IN_SINK = "overwriteObjectsAlreadyExistingInSink"
PATH = "path"
PROJECT_ID = "projectId"
SCHEDULE = "schedule"
SCHEDULE_END_DATE = "scheduleEndDate"
SCHEDULE_START_DATE = "scheduleStartDate"
SECONDS = "seconds"
SECRET_ACCESS_KEY = "secretAccessKey"
START_TIME_OF_DAY = "startTimeOfDay"
STATUS = "status"
STATUS1 = "status"
TRANSFER_JOB = "transfer_job"
TRANSFER_JOBS = "transferJobs"
TRANSFER_JOB_FIELD_MASK = "update_transfer_job_field_mask"
TRANSFER_OPERATIONS = "transferOperations"
TRANSFER_OPTIONS = "transfer_options"
TRANSFER_SPEC = "transferSpec"
YEAR = "year"
ALREADY_EXIST_CODE = 409
NEGATIVE_STATUSES = {GcpTransferOperationStatus.FAILED, GcpTransferOperationStatus.ABORTED}
def gen_job_name(job_name: str) -> str:
"""
Add a unique suffix to the job name.
:param job_name:
:return: job_name with suffix
"""
uniq = int(time.time())
return f"{job_name}_{uniq}"
| GcpTransferOperationStatus |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 18044,
"end": 18143
} | class ____(BaseModel):
message: str = Field(..., description="Warning message")
| CollectionWarning |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 18495,
"end": 19438
} | class ____(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
| ThreadedSocketTestMixin |
python | davidhalter__parso | parso/tree.py | {
"start": 15021,
"end": 15333
} | class ____(BaseNode):
"""Concrete implementation for interior nodes."""
__slots__ = ('type',)
def __init__(self, type, children):
super().__init__(children)
self.type = type
def __repr__(self):
return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
| Node |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/output/win32.py | {
"start": 2799,
"end": 17362
} | class ____(Output):
"""
I/O abstraction for rendering to Windows consoles.
(cmd.exe and similar.)
"""
def __init__(
self,
stdout: TextIO,
use_complete_width: bool = False,
default_color_depth: ColorDepth | None = None,
) -> None:
self.use_complete_width = use_complete_width
self.default_color_depth = default_color_depth
self._buffer: list[str] = []
self.stdout: TextIO = stdout
self.hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
self._in_alternate_screen = False
self._hidden = False
self.color_lookup_table = ColorLookupTable()
# Remember the default console colors.
info = self.get_win32_screen_buffer_info()
self.default_attrs = info.wAttributes if info else 15
if _DEBUG_RENDER_OUTPUT:
self.LOG = open(_DEBUG_RENDER_OUTPUT_FILENAME, "ab")
def fileno(self) -> int:
"Return file descriptor."
return self.stdout.fileno()
def encoding(self) -> str:
"Return encoding used for stdout."
return self.stdout.encoding
def write(self, data: str) -> None:
if self._hidden:
data = " " * get_cwidth(data)
self._buffer.append(data)
def write_raw(self, data: str) -> None:
"For win32, there is no difference between write and write_raw."
self.write(data)
def get_size(self) -> Size:
info = self.get_win32_screen_buffer_info()
# We take the width of the *visible* region as the size. Not the width
# of the complete screen buffer. (Unless use_complete_width has been
# set.)
if self.use_complete_width:
width = info.dwSize.X
else:
width = info.srWindow.Right - info.srWindow.Left
height = info.srWindow.Bottom - info.srWindow.Top + 1
# We avoid the right margin, windows will wrap otherwise.
maxwidth = info.dwSize.X - 1
width = min(maxwidth, width)
# Create `Size` object.
return Size(rows=height, columns=width)
def _winapi(self, func: Callable[..., _T], *a: object, **kw: object) -> _T:
"""
Flush and call win API function.
"""
self.flush()
if _DEBUG_RENDER_OUTPUT:
self.LOG.write((f"{func.__name__!r}").encode() + b"\n")
self.LOG.write(
b" " + ", ".join([f"{i!r}" for i in a]).encode("utf-8") + b"\n"
)
self.LOG.write(
b" "
+ ", ".join([f"{type(i)!r}" for i in a]).encode("utf-8")
+ b"\n"
)
self.LOG.flush()
try:
return func(*a, **kw)
except ArgumentError as e:
if _DEBUG_RENDER_OUTPUT:
self.LOG.write((f" Error in {func.__name__!r} {e!r} {e}\n").encode())
raise
def get_win32_screen_buffer_info(self) -> CONSOLE_SCREEN_BUFFER_INFO:
"""
Return Screen buffer info.
"""
# NOTE: We don't call the `GetConsoleScreenBufferInfo` API through
# `self._winapi`. Doing so causes Python to crash on certain 64bit
# Python versions. (Reproduced with 64bit Python 2.7.6, on Windows
# 10). It is not clear why. Possibly, it has to do with passing
# these objects as an argument, or through *args.
# The Python documentation contains the following - possibly related - warning:
# ctypes does not support passing unions or structures with
# bit-fields to functions by value. While this may work on 32-bit
# x86, it's not guaranteed by the library to work in the general
# case. Unions and structures with bit-fields should always be
# passed to functions by pointer.
# Also see:
# - https://github.com/ipython/ipython/issues/10070
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86
self.flush()
sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
self.hconsole, byref(sbinfo)
)
# success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo,
# self.hconsole, byref(sbinfo))
if success:
return sbinfo
else:
raise NoConsoleScreenBufferError
def set_title(self, title: str) -> None:
"""
Set terminal title.
"""
self._winapi(windll.kernel32.SetConsoleTitleW, title)
def clear_title(self) -> None:
self._winapi(windll.kernel32.SetConsoleTitleW, "")
def erase_screen(self) -> None:
start = COORD(0, 0)
sbinfo = self.get_win32_screen_buffer_info()
length = sbinfo.dwSize.X * sbinfo.dwSize.Y
self.cursor_goto(row=0, column=0)
self._erase(start, length)
def erase_down(self) -> None:
sbinfo = self.get_win32_screen_buffer_info()
size = sbinfo.dwSize
start = sbinfo.dwCursorPosition
length = (size.X - size.X) + size.X * (size.Y - sbinfo.dwCursorPosition.Y)
self._erase(start, length)
def erase_end_of_line(self) -> None:
""""""
sbinfo = self.get_win32_screen_buffer_info()
start = sbinfo.dwCursorPosition
length = sbinfo.dwSize.X - sbinfo.dwCursorPosition.X
self._erase(start, length)
def _erase(self, start: COORD, length: int) -> None:
chars_written = c_ulong()
self._winapi(
windll.kernel32.FillConsoleOutputCharacterA,
self.hconsole,
c_char(b" "),
DWORD(length),
_coord_byval(start),
byref(chars_written),
)
# Reset attributes.
sbinfo = self.get_win32_screen_buffer_info()
self._winapi(
windll.kernel32.FillConsoleOutputAttribute,
self.hconsole,
sbinfo.wAttributes,
length,
_coord_byval(start),
byref(chars_written),
)
def reset_attributes(self) -> None:
"Reset the console foreground/background color."
self._winapi(
windll.kernel32.SetConsoleTextAttribute, self.hconsole, self.default_attrs
)
self._hidden = False
def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
(
fgcolor,
bgcolor,
bold,
underline,
strike,
italic,
blink,
reverse,
hidden,
dim,
) = attrs
self._hidden = bool(hidden)
# Start from the default attributes.
win_attrs: int = self.default_attrs
if color_depth != ColorDepth.DEPTH_1_BIT:
# Override the last four bits: foreground color.
if fgcolor:
win_attrs = win_attrs & ~0xF
win_attrs |= self.color_lookup_table.lookup_fg_color(fgcolor)
# Override the next four bits: background color.
if bgcolor:
win_attrs = win_attrs & ~0xF0
win_attrs |= self.color_lookup_table.lookup_bg_color(bgcolor)
# Reverse: swap these four bits groups.
if reverse:
win_attrs = (
(win_attrs & ~0xFF)
| ((win_attrs & 0xF) << 4)
| ((win_attrs & 0xF0) >> 4)
)
self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole, win_attrs)
def disable_autowrap(self) -> None:
# Not supported by Windows.
pass
def enable_autowrap(self) -> None:
# Not supported by Windows.
pass
def cursor_goto(self, row: int = 0, column: int = 0) -> None:
pos = COORD(X=column, Y=row)
self._winapi(
windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos)
)
def cursor_up(self, amount: int) -> None:
sr = self.get_win32_screen_buffer_info().dwCursorPosition
pos = COORD(X=sr.X, Y=sr.Y - amount)
self._winapi(
windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos)
)
def cursor_down(self, amount: int) -> None:
self.cursor_up(-amount)
def cursor_forward(self, amount: int) -> None:
sr = self.get_win32_screen_buffer_info().dwCursorPosition
# assert sr.X + amount >= 0, 'Negative cursor position: x=%r amount=%r' % (sr.X, amount)
pos = COORD(X=max(0, sr.X + amount), Y=sr.Y)
self._winapi(
windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos)
)
def cursor_backward(self, amount: int) -> None:
self.cursor_forward(-amount)
def flush(self) -> None:
"""
Write to output stream and flush.
"""
if not self._buffer:
# Only flush stdout buffer. (It could be that Python still has
# something in its buffer. -- We want to be sure to print that in
# the correct color.)
self.stdout.flush()
return
data = "".join(self._buffer)
if _DEBUG_RENDER_OUTPUT:
self.LOG.write((f"{data!r}").encode() + b"\n")
self.LOG.flush()
# Print characters one by one. This appears to be the best solution
# in order to avoid traces of vertical lines when the completion
# menu disappears.
for b in data:
written = DWORD()
retval = windll.kernel32.WriteConsoleW(
self.hconsole, b, 1, byref(written), None
)
assert retval != 0
self._buffer = []
def get_rows_below_cursor_position(self) -> int:
info = self.get_win32_screen_buffer_info()
return info.srWindow.Bottom - info.dwCursorPosition.Y + 1
def scroll_buffer_to_prompt(self) -> None:
"""
To be called before drawing the prompt. This should scroll the console
to left, with the cursor at the bottom (if possible).
"""
# Get current window size
info = self.get_win32_screen_buffer_info()
sr = info.srWindow
cursor_pos = info.dwCursorPosition
result = SMALL_RECT()
# Scroll to the left.
result.Left = 0
result.Right = sr.Right - sr.Left
# Scroll vertical
win_height = sr.Bottom - sr.Top
if 0 < sr.Bottom - cursor_pos.Y < win_height - 1:
# no vertical scroll if cursor already on the screen
result.Bottom = sr.Bottom
else:
result.Bottom = max(win_height, cursor_pos.Y)
result.Top = result.Bottom - win_height
# Scroll API
self._winapi(
windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result)
)
def enter_alternate_screen(self) -> None:
"""
Go to alternate screen buffer.
"""
if not self._in_alternate_screen:
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
# Create a new console buffer and activate that one.
handle = HANDLE(
self._winapi(
windll.kernel32.CreateConsoleScreenBuffer,
GENERIC_READ | GENERIC_WRITE,
DWORD(0),
None,
DWORD(1),
None,
)
)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)
self.hconsole = handle
self._in_alternate_screen = True
def quit_alternate_screen(self) -> None:
"""
Make stdout again the active buffer.
"""
if self._in_alternate_screen:
stdout = HANDLE(
self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE)
)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout)
self._winapi(windll.kernel32.CloseHandle, self.hconsole)
self.hconsole = stdout
self._in_alternate_screen = False
def enable_mouse_support(self) -> None:
ENABLE_MOUSE_INPUT = 0x10
# This `ENABLE_QUICK_EDIT_MODE` flag needs to be cleared for mouse
# support to work, but it's possible that it was already cleared
# before.
ENABLE_QUICK_EDIT_MODE = 0x0040
handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE))
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(
windll.kernel32.SetConsoleMode,
handle,
(original_mode.value | ENABLE_MOUSE_INPUT) & ~ENABLE_QUICK_EDIT_MODE,
)
def disable_mouse_support(self) -> None:
ENABLE_MOUSE_INPUT = 0x10
handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE))
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(
windll.kernel32.SetConsoleMode,
handle,
original_mode.value & ~ENABLE_MOUSE_INPUT,
)
def hide_cursor(self) -> None:
pass
def show_cursor(self) -> None:
pass
def set_cursor_shape(self, cursor_shape: CursorShape) -> None:
pass
def reset_cursor_shape(self) -> None:
pass
@classmethod
def win32_refresh_window(cls) -> None:
"""
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
"""
# Get console handle
handle = HANDLE(windll.kernel32.GetConsoleWindow())
RDW_INVALIDATE = 0x0001
windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE))
def get_default_color_depth(self) -> ColorDepth:
"""
Return the default color depth for a windows terminal.
Contrary to the Vt100 implementation, this doesn't depend on a $TERM
variable.
"""
if self.default_color_depth is not None:
return self.default_color_depth
return ColorDepth.DEPTH_4_BIT
| Win32Output |
python | walkccc__LeetCode | solutions/129. Sum Root to Leaf Numbers/129.py | {
"start": 0,
"end": 406
} | class ____:
def sumNumbers(self, root: TreeNode | None) -> int:
ans = 0
def dfs(root: TreeNode | None, path: int) -> None:
nonlocal ans
if not root:
return
if not root.left and not root.right:
ans += path * 10 + root.val
return
dfs(root.left, path * 10 + root.val)
dfs(root.right, path * 10 + root.val)
dfs(root, 0)
return ans
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 6083,
"end": 6139
} | class ____(forms.BooleanField):
pass
| BroadBooleanField |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/scheduler.py | {
"start": 548,
"end": 778
} | class ____(
BaseModel,
extra="forbid",
json_schema_extra={
"allOf": create_json_schema_conditionals({SchedulerType.CUSTOM: "customScheduler"})
},
):
type: SchedulerType
config: SchedulerConfig
| Scheduler |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 4735,
"end": 5378
} | class ____(graphene.InputObjectType):
pipelineName = graphene.String()
jobName = graphene.String()
repositoryName = graphene.NonNull(graphene.String)
repositoryLocationName = graphene.NonNull(graphene.String)
solidSelection = graphene.List(graphene.NonNull(graphene.String))
assetSelection = graphene.List(graphene.NonNull(GrapheneAssetKeyInput))
assetCheckSelection = graphene.List(graphene.NonNull(GrapheneAssetCheckHandleInput))
class Meta:
description = """This type represents the fields necessary to identify a job or pipeline"""
name = "JobOrPipelineSelector"
| GrapheneJobOrPipelineSelector |
python | pytorch__pytorch | test/test_indexing.py | {
"start": 919,
"end": 87998
} | class ____(TestCase):
def test_index(self, device):
def consec(size, start=1):
sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0)
sequence.add_(start - 1)
return sequence.view(*size)
reference = consec((3, 3, 3)).to(device)
# empty tensor indexing
self.assertEqual(
reference[torch.LongTensor().to(device)], reference.new(0, 3, 3)
)
self.assertEqual(reference[0], consec((3, 3)), atol=0, rtol=0)
self.assertEqual(reference[1], consec((3, 3), 10), atol=0, rtol=0)
self.assertEqual(reference[2], consec((3, 3), 19), atol=0, rtol=0)
self.assertEqual(reference[0, 1], consec((3,), 4), atol=0, rtol=0)
self.assertEqual(reference[0:2], consec((2, 3, 3)), atol=0, rtol=0)
self.assertEqual(reference[2, 2, 2], 27, atol=0, rtol=0)
self.assertEqual(reference[:], consec((3, 3, 3)), atol=0, rtol=0)
# indexing with Ellipsis
self.assertEqual(
reference[..., 2],
torch.tensor([[3.0, 6.0, 9.0], [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]]),
atol=0,
rtol=0,
)
self.assertEqual(
reference[0, ..., 2], torch.tensor([3.0, 6.0, 9.0]), atol=0, rtol=0
)
self.assertEqual(reference[..., 2], reference[:, :, 2], atol=0, rtol=0)
self.assertEqual(reference[0, ..., 2], reference[0, :, 2], atol=0, rtol=0)
self.assertEqual(reference[0, 2, ...], reference[0, 2], atol=0, rtol=0)
self.assertEqual(reference[..., 2, 2, 2], 27, atol=0, rtol=0)
self.assertEqual(reference[2, ..., 2, 2], 27, atol=0, rtol=0)
self.assertEqual(reference[2, 2, ..., 2], 27, atol=0, rtol=0)
self.assertEqual(reference[2, 2, 2, ...], 27, atol=0, rtol=0)
self.assertEqual(reference[...], reference, atol=0, rtol=0)
reference_5d = consec((3, 3, 3, 3, 3)).to(device)
self.assertEqual(
reference_5d[..., 1, 0], reference_5d[:, :, :, 1, 0], atol=0, rtol=0
)
self.assertEqual(
reference_5d[2, ..., 1, 0], reference_5d[2, :, :, 1, 0], atol=0, rtol=0
)
self.assertEqual(
reference_5d[2, 1, 0, ..., 1], reference_5d[2, 1, 0, :, 1], atol=0, rtol=0
)
self.assertEqual(reference_5d[...], reference_5d, atol=0, rtol=0)
# LongTensor indexing
reference = consec((5, 5, 5)).to(device)
idx = torch.LongTensor([2, 4]).to(device)
self.assertEqual(reference[idx], torch.stack([reference[2], reference[4]]))
# TODO: enable one indexing is implemented like in numpy
# self.assertEqual(reference[2, idx], torch.stack([reference[2, 2], reference[2, 4]]))
# self.assertEqual(reference[3, idx, 1], torch.stack([reference[3, 2], reference[3, 4]])[:, 1])
# None indexing
self.assertEqual(reference[2, None], reference[2].unsqueeze(0))
self.assertEqual(
reference[2, None, None], reference[2].unsqueeze(0).unsqueeze(0)
)
self.assertEqual(reference[2:4, None], reference[2:4].unsqueeze(1))
self.assertEqual(
reference[None, 2, None, None],
reference.unsqueeze(0)[:, 2].unsqueeze(0).unsqueeze(0),
)
self.assertEqual(
reference[None, 2:5, None, None],
reference.unsqueeze(0)[:, 2:5].unsqueeze(2).unsqueeze(2),
)
# indexing 0-length slice
self.assertEqual(torch.empty(0, 5, 5), reference[slice(0)])
self.assertEqual(torch.empty(0, 5), reference[slice(0), 2])
self.assertEqual(torch.empty(0, 5), reference[2, slice(0)])
self.assertEqual(torch.tensor([]), reference[2, 1:1, 2])
# indexing with step
reference = consec((10, 10, 10)).to(device)
self.assertEqual(reference[1:5:2], torch.stack([reference[1], reference[3]], 0))
self.assertEqual(
reference[1:6:2], torch.stack([reference[1], reference[3], reference[5]], 0)
)
self.assertEqual(reference[1:9:4], torch.stack([reference[1], reference[5]], 0))
self.assertEqual(
reference[2:4, 1:5:2],
torch.stack([reference[2:4, 1], reference[2:4, 3]], 1),
)
self.assertEqual(
reference[3, 1:6:2],
torch.stack([reference[3, 1], reference[3, 3], reference[3, 5]], 0),
)
self.assertEqual(
reference[None, 2, 1:9:4],
torch.stack([reference[2, 1], reference[2, 5]], 0).unsqueeze(0),
)
self.assertEqual(
reference[:, 2, 1:6:2],
torch.stack(
[reference[:, 2, 1], reference[:, 2, 3], reference[:, 2, 5]], 1
),
)
lst = [list(range(i, i + 10)) for i in range(0, 100, 10)]
_make_tensor = (
torch.DoubleTensor if not device.startswith("mps") else torch.FloatTensor
)
tensor = _make_tensor(lst).to(device)
for _ in range(100):
idx1_start = random.randrange(10)
idx1_end = idx1_start + random.randrange(1, 10 - idx1_start + 1)
idx1_step = random.randrange(1, 8)
idx1 = slice(idx1_start, idx1_end, idx1_step)
if random.randrange(2) == 0:
idx2_start = random.randrange(10)
idx2_end = idx2_start + random.randrange(1, 10 - idx2_start + 1)
idx2_step = random.randrange(1, 8)
idx2 = slice(idx2_start, idx2_end, idx2_step)
lst_indexed = [l[idx2] for l in lst[idx1]]
tensor_indexed = tensor[idx1, idx2]
else:
lst_indexed = lst[idx1]
tensor_indexed = tensor[idx1]
self.assertEqual(_make_tensor(lst_indexed), tensor_indexed)
self.assertRaises(ValueError, lambda: reference[1:9:0])
self.assertRaises(ValueError, lambda: reference[1:9:-1])
self.assertRaises(IndexError, lambda: reference[1, 1, 1, 1])
self.assertRaises(IndexError, lambda: reference[1, 1, 1, 1:1])
self.assertRaises(IndexError, lambda: reference[3, 3, 3, 3, 3, 3, 3, 3])
self.assertRaises(IndexError, lambda: reference[0.0])
self.assertRaises(TypeError, lambda: reference[0.0:2.0])
self.assertRaises(IndexError, lambda: reference[0.0, 0.0:2.0])
self.assertRaises(IndexError, lambda: reference[0.0, :, 0.0:2.0])
self.assertRaises(IndexError, lambda: reference[0.0, ..., 0.0:2.0])
self.assertRaises(IndexError, lambda: reference[0.0, :, 0.0])
def delitem():
del reference[0]
self.assertRaises(TypeError, delitem)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.double)
@dtypesIfMPS(torch.half) # TODO: add bf16 there?
def test_advancedindex(self, device, dtype):
# Tests for Integer Array Indexing, Part I - Purely integer array
# indexing
def consec(size, start=1):
# Creates the sequence in float since CPU half doesn't support the
# needed operations. Converts to dtype before returning.
numel = reduce(operator.mul, size, 1)
sequence = torch.ones(numel, dtype=torch.float, device=device).cumsum(0)
sequence.add_(start - 1)
return sequence.view(*size).to(dtype=dtype)
# pick a random valid indexer type
def ri(indices):
choice = random.randint(0, 2)
if choice == 0:
return torch.LongTensor(indices).to(device)
elif choice == 1:
return list(indices)
else:
return tuple(indices)
def validate_indexing(x):
self.assertEqual(x[[0]], consec((1,)))
self.assertEqual(x[ri([0]),], consec((1,)))
self.assertEqual(x[ri([3]),], consec((1,), 4))
self.assertEqual(x[[2, 3, 4]], consec((3,), 3))
self.assertEqual(x[ri([2, 3, 4]),], consec((3,), 3))
self.assertEqual(
x[ri([0, 2, 4]),], torch.tensor([1, 3, 5], dtype=dtype, device=device)
)
def validate_setting(x):
x[[0]] = -2
self.assertEqual(x[[0]], torch.tensor([-2], dtype=dtype, device=device))
x[[0]] = -1
self.assertEqual(
x[ri([0]),], torch.tensor([-1], dtype=dtype, device=device)
)
x[[2, 3, 4]] = 4
self.assertEqual(
x[[2, 3, 4]], torch.tensor([4, 4, 4], dtype=dtype, device=device)
)
x[ri([2, 3, 4]),] = 3
self.assertEqual(
x[ri([2, 3, 4]),], torch.tensor([3, 3, 3], dtype=dtype, device=device)
)
x[ri([0, 2, 4]),] = torch.tensor([5, 4, 3], dtype=dtype, device=device)
self.assertEqual(
x[ri([0, 2, 4]),], torch.tensor([5, 4, 3], dtype=dtype, device=device)
)
# Only validates indexing and setting for Halves
if dtype == torch.half:
reference = consec((10,))
validate_indexing(reference)
validate_setting(reference)
return
# Case 1: Purely Integer Array Indexing
reference = consec((10,))
validate_indexing(reference)
# setting values
validate_setting(reference)
# Tensor with stride != 1
# strided is [1, 3, 5, 7]
reference = consec((10,))
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(),
storage_offset=0,
size=torch.Size([4]),
stride=[2],
)
self.assertEqual(strided[[0]], torch.tensor([1], dtype=dtype, device=device))
self.assertEqual(
strided[ri([0]),], torch.tensor([1], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([3]),], torch.tensor([7], dtype=dtype, device=device)
)
self.assertEqual(
strided[[1, 2]], torch.tensor([3, 5], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([1, 2]),], torch.tensor([3, 5], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([[2, 1], [0, 3]]),],
torch.tensor([[5, 3], [1, 7]], dtype=dtype, device=device),
)
# stride is [4, 8]
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(),
storage_offset=4,
size=torch.Size([2]),
stride=[4],
)
self.assertEqual(strided[[0]], torch.tensor([5], dtype=dtype, device=device))
self.assertEqual(
strided[ri([0]),], torch.tensor([5], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([1]),], torch.tensor([9], dtype=dtype, device=device)
)
self.assertEqual(
strided[[0, 1]], torch.tensor([5, 9], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([0, 1]),], torch.tensor([5, 9], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([[0, 1], [1, 0]]),],
torch.tensor([[5, 9], [9, 5]], dtype=dtype, device=device),
)
# reference is 1 2
# 3 4
# 5 6
reference = consec((3, 2))
self.assertEqual(
reference[ri([0, 1, 2]), ri([0])],
torch.tensor([1, 3, 5], dtype=dtype, device=device),
)
self.assertEqual(
reference[ri([0, 1, 2]), ri([1])],
torch.tensor([2, 4, 6], dtype=dtype, device=device),
)
self.assertEqual(reference[ri([0]), ri([0])], consec((1,)))
self.assertEqual(reference[ri([2]), ri([1])], consec((1,), 6))
self.assertEqual(
reference[(ri([0, 0]), ri([0, 1]))],
torch.tensor([1, 2], dtype=dtype, device=device),
)
self.assertEqual(
reference[(ri([0, 1, 1, 0, 2]), ri([1]))],
torch.tensor([2, 4, 4, 2, 6], dtype=dtype, device=device),
)
self.assertEqual(
reference[(ri([0, 0, 1, 1]), ri([0, 1, 0, 0]))],
torch.tensor([1, 2, 3, 3], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 2]])
columns = ([0],)
self.assertEqual(
reference[rows, columns],
torch.tensor([[1, 1], [3, 5]], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 2]])
columns = ri([1, 0])
self.assertEqual(
reference[rows, columns],
torch.tensor([[2, 1], [4, 5]], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 2]])
columns = ri([[0, 1], [1, 0]])
self.assertEqual(
reference[rows, columns],
torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device),
)
# setting values
reference[ri([0]), ri([1])] = -1
self.assertEqual(
reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
)
reference[ri([0, 1, 2]), ri([0])] = torch.tensor(
[-1, 2, -4], dtype=dtype, device=device
)
self.assertEqual(
reference[ri([0, 1, 2]), ri([0])],
torch.tensor([-1, 2, -4], dtype=dtype, device=device),
)
reference[rows, columns] = torch.tensor(
[[4, 6], [2, 3]], dtype=dtype, device=device
)
self.assertEqual(
reference[rows, columns],
torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
)
# Verify still works with Transposed (i.e. non-contiguous) Tensors
reference = torch.tensor(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype, device=device
).t_()
# Transposed: [[0, 4, 8],
# [1, 5, 9],
# [2, 6, 10],
# [3, 7, 11]]
self.assertEqual(
reference[ri([0, 1, 2]), ri([0])],
torch.tensor([0, 1, 2], dtype=dtype, device=device),
)
self.assertEqual(
reference[ri([0, 1, 2]), ri([1])],
torch.tensor([4, 5, 6], dtype=dtype, device=device),
)
self.assertEqual(
reference[ri([0]), ri([0])], torch.tensor([0], dtype=dtype, device=device)
)
self.assertEqual(
reference[ri([2]), ri([1])], torch.tensor([6], dtype=dtype, device=device)
)
self.assertEqual(
reference[(ri([0, 0]), ri([0, 1]))],
torch.tensor([0, 4], dtype=dtype, device=device),
)
self.assertEqual(
reference[(ri([0, 1, 1, 0, 3]), ri([1]))],
torch.tensor([4, 5, 5, 4, 7], dtype=dtype, device=device),
)
self.assertEqual(
reference[(ri([0, 0, 1, 1]), ri([0, 1, 0, 0]))],
torch.tensor([0, 4, 1, 1], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 2]])
columns = ([0],)
self.assertEqual(
reference[rows, columns],
torch.tensor([[0, 0], [1, 2]], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 2]])
columns = ri([1, 0])
self.assertEqual(
reference[rows, columns],
torch.tensor([[4, 0], [5, 2]], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 3]])
columns = ri([[0, 1], [1, 2]])
self.assertEqual(
reference[rows, columns],
torch.tensor([[0, 4], [5, 11]], dtype=dtype, device=device),
)
# setting values
reference[ri([0]), ri([1])] = -1
self.assertEqual(
reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
)
reference[ri([0, 1, 2]), ri([0])] = torch.tensor(
[-1, 2, -4], dtype=dtype, device=device
)
self.assertEqual(
reference[ri([0, 1, 2]), ri([0])],
torch.tensor([-1, 2, -4], dtype=dtype, device=device),
)
reference[rows, columns] = torch.tensor(
[[4, 6], [2, 3]], dtype=dtype, device=device
)
self.assertEqual(
reference[rows, columns],
torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
)
# stride != 1
# strided is [[1 3 5 7],
# [9 11 13 15]]
reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(), 1, size=torch.Size([2, 4]), stride=[8, 2]
)
self.assertEqual(
strided[ri([0, 1]), ri([0])],
torch.tensor([1, 9], dtype=dtype, device=device),
)
self.assertEqual(
strided[ri([0, 1]), ri([1])],
torch.tensor([3, 11], dtype=dtype, device=device),
)
self.assertEqual(
strided[ri([0]), ri([0])], torch.tensor([1], dtype=dtype, device=device)
)
self.assertEqual(
strided[ri([1]), ri([3])], torch.tensor([15], dtype=dtype, device=device)
)
self.assertEqual(
strided[(ri([0, 0]), ri([0, 3]))],
torch.tensor([1, 7], dtype=dtype, device=device),
)
self.assertEqual(
strided[(ri([1]), ri([0, 1, 1, 0, 3]))],
torch.tensor([9, 11, 11, 9, 15], dtype=dtype, device=device),
)
self.assertEqual(
strided[(ri([0, 0, 1, 1]), ri([0, 1, 0, 0]))],
torch.tensor([1, 3, 9, 9], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 1]])
columns = ([0],)
self.assertEqual(
strided[rows, columns],
torch.tensor([[1, 1], [9, 9]], dtype=dtype, device=device),
)
rows = ri([[0, 1], [1, 0]])
columns = ri([1, 2])
self.assertEqual(
strided[rows, columns],
torch.tensor([[3, 13], [11, 5]], dtype=dtype, device=device),
)
rows = ri([[0, 0], [1, 1]])
columns = ri([[0, 1], [1, 2]])
self.assertEqual(
strided[rows, columns],
torch.tensor([[1, 3], [11, 13]], dtype=dtype, device=device),
)
# setting values
# strided is [[10, 11],
# [17, 18]]
reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(), 10, size=torch.Size([2, 2]), stride=[7, 1]
)
self.assertEqual(
strided[ri([0]), ri([1])], torch.tensor([11], dtype=dtype, device=device)
)
strided[ri([0]), ri([1])] = -1
self.assertEqual(
strided[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
)
reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(), 10, size=torch.Size([2, 2]), stride=[7, 1]
)
self.assertEqual(
strided[ri([0, 1]), ri([1, 0])],
torch.tensor([11, 17], dtype=dtype, device=device),
)
strided[ri([0, 1]), ri([1, 0])] = torch.tensor(
[-1, 2], dtype=dtype, device=device
)
self.assertEqual(
strided[ri([0, 1]), ri([1, 0])],
torch.tensor([-1, 2], dtype=dtype, device=device),
)
reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
strided.set_(
reference.untyped_storage(), 10, size=torch.Size([2, 2]), stride=[7, 1]
)
rows = ri([[0], [1]])
columns = ri([[0, 1], [0, 1]])
self.assertEqual(
strided[rows, columns],
torch.tensor([[10, 11], [17, 18]], dtype=dtype, device=device),
)
strided[rows, columns] = torch.tensor(
[[4, 6], [2, 3]], dtype=dtype, device=device
)
self.assertEqual(
strided[rows, columns],
torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
)
# Tests using less than the number of dims, and ellipsis
# reference is 1 2
# 3 4
# 5 6
reference = consec((3, 2))
self.assertEqual(
reference[ri([0, 2]),],
torch.tensor([[1, 2], [5, 6]], dtype=dtype, device=device),
)
self.assertEqual(
reference[ri([1]), ...], torch.tensor([[3, 4]], dtype=dtype, device=device)
)
self.assertEqual(
reference[..., ri([1])],
torch.tensor([[2], [4], [6]], dtype=dtype, device=device),
)
# verify too many indices fails
with self.assertRaises(IndexError):
reference[ri([1]), ri([0, 2]), ri([3])]
# test invalid index fails
reference = torch.empty(10, dtype=dtype, device=device)
# can't test cuda/xpu because it is a device assert
if reference.device.type == "cpu":
for err_idx in (10, -11):
with self.assertRaisesRegex(IndexError, r"out of"):
reference[err_idx]
with self.assertRaisesRegex(IndexError, r"out of"):
reference[torch.LongTensor([err_idx]).to(device)]
with self.assertRaisesRegex(IndexError, r"out of"):
reference[[err_idx]]
def tensor_indices_to_np(tensor, indices):
# convert the Torch Tensor to a numpy array
tensor = tensor.to(device="cpu")
npt = tensor.numpy()
# convert indices
idxs = tuple(
i.tolist() if isinstance(i, torch.LongTensor) else i for i in indices
)
return npt, idxs
def get_numpy(tensor, indices):
npt, idxs = tensor_indices_to_np(tensor, indices)
# index and return as a Torch Tensor
return torch.tensor(npt[idxs], dtype=dtype, device=device)
def set_numpy(tensor, indices, value):
if not isinstance(value, int):
if self.device_type != "cpu":
value = value.cpu()
value = value.numpy()
npt, idxs = tensor_indices_to_np(tensor, indices)
npt[idxs] = value
return npt
def assert_get_eq(tensor, indexer):
self.assertEqual(tensor[indexer], get_numpy(tensor, indexer))
def assert_set_eq(tensor, indexer, val):
pyt = tensor.clone()
numt = tensor.clone()
pyt[indexer] = val
numt = torch.tensor(
set_numpy(numt, indexer, val), dtype=dtype, device=device
)
self.assertEqual(pyt, numt)
def assert_backward_eq(tensor, indexer):
cpu = tensor.float().detach().clone().requires_grad_(True)
outcpu = cpu[indexer]
gOcpu = torch.rand_like(outcpu)
outcpu.backward(gOcpu)
dev = cpu.to(device).detach().requires_grad_(True)
outdev = dev[indexer]
outdev.backward(gOcpu.to(device))
self.assertEqual(cpu.grad, dev.grad)
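        # Build a random permutation shaped like the slice selected by `indexer`,
        # used as the value in the corresponding setitem check.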
def get_set_tensor(indexed, indexer):
set_size = indexed[indexer].size()
set_count = indexed[indexer].numel()
set_tensor = torch.randperm(set_count).view(set_size).double().to(device)
return set_tensor
# Tensor is 0 1 2 3 4
# 5 6 7 8 9
# 10 11 12 13 14
# 15 16 17 18 19
reference = torch.arange(0.0, 20, dtype=dtype, device=device).view(4, 5)
indices_to_test = [
# grab the second, fourth columns
(slice(None), [1, 3]),
# first, third rows,
([0, 2], slice(None)),
# weird shape
(slice(None), [[0, 1], [2, 3]]),
# negatives
([-1], [0]),
([0, 2], [-1]),
(slice(None), [-1]),
]
# only test dupes on gets
get_indices_to_test = indices_to_test + [(slice(None), [0, 1, 1, 2, 2])]
for indexer in get_indices_to_test:
assert_get_eq(reference, indexer)
if self.device_type != "cpu":
assert_backward_eq(reference, indexer)
for indexer in indices_to_test:
assert_set_eq(reference, indexer, 44)
assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
reference = torch.arange(0.0, 160, dtype=dtype, device=device).view(4, 8, 5)
indices_to_test = [
(slice(None), slice(None), (0, 3, 4)),
(slice(None), (2, 4, 5, 7), slice(None)),
((2, 3), slice(None), slice(None)),
(slice(None), (0, 2, 3), (1, 3, 4)),
(slice(None), (0,), (1, 2, 4)),
(slice(None), (0, 1, 3), (4,)),
(slice(None), ((0, 1), (1, 0)), ((2, 3),)),
(slice(None), ((0, 1), (2, 3)), ((0,),)),
(slice(None), ((5, 6),), ((0, 3), (4, 4))),
((0, 2, 3), (1, 3, 4), slice(None)),
((0,), (1, 2, 4), slice(None)),
((0, 1, 3), (4,), slice(None)),
(((0, 1), (1, 0)), ((2, 1), (3, 5)), slice(None)),
(((0, 1), (1, 0)), ((2, 3),), slice(None)),
(((0, 1), (2, 3)), ((0,),), slice(None)),
(((2, 1),), ((0, 3), (4, 4)), slice(None)),
(((2,),), ((0, 3), (4, 1)), slice(None)),
# non-contiguous indexing subspace
((0, 2, 3), slice(None), (1, 3, 4)),
# [...]
# less dim, ellipsis
((0, 2),),
((0, 2), slice(None)),
((0, 2), Ellipsis),
((0, 2), slice(None), Ellipsis),
((0, 2), Ellipsis, slice(None)),
((0, 2), (1, 3)),
((0, 2), (1, 3), Ellipsis),
(Ellipsis, (1, 3), (2, 3)),
(Ellipsis, (2, 3, 4)),
(Ellipsis, slice(None), (2, 3, 4)),
(slice(None), Ellipsis, (2, 3, 4)),
# ellipsis counts for nothing
(Ellipsis, slice(None), slice(None), (0, 3, 4)),
(slice(None), Ellipsis, slice(None), (0, 3, 4)),
(slice(None), slice(None), Ellipsis, (0, 3, 4)),
(slice(None), slice(None), (0, 3, 4), Ellipsis),
(Ellipsis, ((0, 1), (1, 0)), ((2, 1), (3, 5)), slice(None)),
(((0, 1), (1, 0)), ((2, 1), (3, 5)), Ellipsis, slice(None)),
(((0, 1), (1, 0)), ((2, 1), (3, 5)), slice(None), Ellipsis),
]
for indexer in indices_to_test:
assert_get_eq(reference, indexer)
assert_set_eq(reference, indexer, 212)
assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
if torch.accelerator.is_available():
assert_backward_eq(reference, indexer)
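        # Repeat the get/set/backward checks on a 4-D tensor with a wider range of
        # advanced-indexing patterns.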
reference = torch.arange(0.0, 1296, dtype=dtype, device=device).view(3, 9, 8, 6)
indices_to_test = [
(slice(None), slice(None), slice(None), (0, 3, 4)),
(slice(None), slice(None), (2, 4, 5, 7), slice(None)),
(slice(None), (2, 3), slice(None), slice(None)),
((1, 2), slice(None), slice(None), slice(None)),
(slice(None), slice(None), (0, 2, 3), (1, 3, 4)),
(slice(None), slice(None), (0,), (1, 2, 4)),
(slice(None), slice(None), (0, 1, 3), (4,)),
(slice(None), slice(None), ((0, 1), (1, 0)), ((2, 3),)),
(slice(None), slice(None), ((0, 1), (2, 3)), ((0,),)),
(slice(None), slice(None), ((5, 6),), ((0, 3), (4, 4))),
(slice(None), (0, 2, 3), (1, 3, 4), slice(None)),
(slice(None), (0,), (1, 2, 4), slice(None)),
(slice(None), (0, 1, 3), (4,), slice(None)),
(slice(None), ((0, 1), (3, 4)), ((2, 3), (0, 1)), slice(None)),
(slice(None), ((0, 1), (3, 4)), ((2, 3),), slice(None)),
(slice(None), ((0, 1), (3, 2)), ((0,),), slice(None)),
(slice(None), ((2, 1),), ((0, 3), (6, 4)), slice(None)),
(slice(None), ((2,),), ((0, 3), (4, 2)), slice(None)),
((0, 1, 2), (1, 3, 4), slice(None), slice(None)),
((0,), (1, 2, 4), slice(None), slice(None)),
((0, 1, 2), (4,), slice(None), slice(None)),
(((0, 1), (0, 2)), ((2, 4), (1, 5)), slice(None), slice(None)),
(((0, 1), (1, 2)), ((2, 0),), slice(None), slice(None)),
(((2, 2),), ((0, 3), (4, 5)), slice(None), slice(None)),
(((2,),), ((0, 3), (4, 5)), slice(None), slice(None)),
(slice(None), (3, 4, 6), (0, 2, 3), (1, 3, 4)),
(slice(None), (2, 3, 4), (1, 3, 4), (4,)),
(slice(None), (0, 1, 3), (4,), (1, 3, 4)),
(slice(None), (6,), (0, 2, 3), (1, 3, 4)),
(slice(None), (2, 3, 5), (3,), (4,)),
(slice(None), (0,), (4,), (1, 3, 4)),
(slice(None), (6,), (0, 2, 3), (1,)),
(slice(None), ((0, 3), (3, 6)), ((0, 1), (1, 3)), ((5, 3), (1, 2))),
((2, 2, 1), (0, 2, 3), (1, 3, 4), slice(None)),
((2, 0, 1), (1, 2, 3), (4,), slice(None)),
((0, 1, 2), (4,), (1, 3, 4), slice(None)),
((0,), (0, 2, 3), (1, 3, 4), slice(None)),
((0, 2, 1), (3,), (4,), slice(None)),
((0,), (4,), (1, 3, 4), slice(None)),
((1,), (0, 2, 3), (1,), slice(None)),
(((1, 2), (1, 2)), ((0, 1), (2, 3)), ((2, 3), (3, 5)), slice(None)),
# less dim, ellipsis
(Ellipsis, (0, 3, 4)),
(Ellipsis, slice(None), (0, 3, 4)),
(Ellipsis, slice(None), slice(None), (0, 3, 4)),
(slice(None), Ellipsis, (0, 3, 4)),
(slice(None), slice(None), Ellipsis, (0, 3, 4)),
(slice(None), (0, 2, 3), (1, 3, 4)),
(slice(None), (0, 2, 3), (1, 3, 4), Ellipsis),
(Ellipsis, (0, 2, 3), (1, 3, 4), slice(None)),
((0,), (1, 2, 4)),
((0,), (1, 2, 4), slice(None)),
((0,), (1, 2, 4), Ellipsis),
((0,), (1, 2, 4), Ellipsis, slice(None)),
((1,),),
((0, 2, 1), (3,), (4,)),
((0, 2, 1), (3,), (4,), slice(None)),
((0, 2, 1), (3,), (4,), Ellipsis),
(Ellipsis, (0, 2, 1), (3,), (4,)),
]
for indexer in indices_to_test:
assert_get_eq(reference, indexer)
assert_set_eq(reference, indexer, 1333)
assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
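        # Also exercise indexers written as nested Python lists instead of tuples.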
indices_to_test += [
(slice(None), slice(None), [[0, 1], [1, 0]], [[2, 3], [3, 0]]),
(slice(None), slice(None), [[2]], [[0, 3], [4, 4]]),
]
for indexer in indices_to_test:
assert_get_eq(reference, indexer)
assert_set_eq(reference, indexer, 1333)
if self.device_type != "cpu":
assert_backward_eq(reference, indexer)
def test_advancedindex_big(self, device):
reference = torch.arange(0, 123344, dtype=torch.int, device=device)
self.assertEqual(
reference[[0, 123, 44488, 68807, 123343],],
torch.tensor([0, 123, 44488, 68807, 123343], dtype=torch.int),
)
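    # Assigning a 0-dim tensor with requires_grad into a column should propagate
    # gradients; the expected grad equals the number of copied elements.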
def test_set_item_to_scalar_tensor(self, device):
m = random.randint(1, 10)
n = random.randint(1, 10)
z = torch.randn([m, n], device=device)
a = 1.0
w = torch.tensor(a, requires_grad=True, device=device)
z[:, 0] = w
z.sum().backward()
self.assertEqual(w.grad, m * a)
def test_single_int(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[4].shape, (7, 3))
def test_multiple_int(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[4].shape, (7, 3))
self.assertEqual(v[4, :, 1].shape, (7,))
def test_none(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[None].shape, (1, 5, 7, 3))
self.assertEqual(v[:, None].shape, (5, 1, 7, 3))
self.assertEqual(v[:, None, None].shape, (5, 1, 1, 7, 3))
self.assertEqual(v[..., None].shape, (5, 7, 3, 1))
def test_step(self, device):
v = torch.arange(10, device=device)
self.assertEqual(v[::1], v)
self.assertEqual(v[::2].tolist(), [0, 2, 4, 6, 8])
self.assertEqual(v[::3].tolist(), [0, 3, 6, 9])
self.assertEqual(v[::11].tolist(), [0])
self.assertEqual(v[1:6:2].tolist(), [1, 3, 5])
def test_step_assignment(self, device):
v = torch.zeros(4, 4, device=device)
v[0, 1::2] = torch.tensor([3.0, 4.0], device=device)
self.assertEqual(v[0].tolist(), [0, 3, 0, 4])
self.assertEqual(v[1:].sum(), 0)
def test_bool_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
boolIndices = torch.tensor(
[True, False, True, True, False], dtype=torch.bool, device=device
)
self.assertEqual(v[boolIndices].shape, (3, 7, 3))
self.assertEqual(v[boolIndices], torch.stack([v[0], v[2], v[3]]))
v = torch.tensor([True, False, True], dtype=torch.bool, device=device)
boolIndices = torch.tensor(
[True, False, False], dtype=torch.bool, device=device
)
uint8Indices = torch.tensor([1, 0, 0], dtype=torch.uint8, device=device)
with warnings.catch_warnings(record=True) as w:
v1 = v[boolIndices]
v2 = v[uint8Indices]
self.assertEqual(v1.shape, v2.shape)
self.assertEqual(v1, v2)
self.assertEqual(
v[boolIndices], tensor([True], dtype=torch.bool, device=device)
)
self.assertEqual(len(w), 1)
def test_list_indices(self, device):
N = 1000
t = torch.randn(N, device=device)
# Set window size
W = 10
# Generate a list of lists, containing overlapping window indices
indices = [range(i, i + W) for i in range(N - W)]
for i in [len(indices), 100, 32]:
windowed_data = t[indices[:i]]
self.assertEqual(windowed_data.shape, (i, W))
with self.assertRaisesRegex(IndexError, "too many indices"):
windowed_data = t[indices[:31]]
def test_bool_indices_accumulate(self, device):
mask = torch.zeros(size=(10,), dtype=torch.bool, device=device)
y = torch.ones(size=(10, 10), device=device)
y.index_put_((mask,), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
def test_multiple_bool_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
# note: these broadcast together and are transposed to the first dim
mask1 = torch.tensor([1, 0, 1, 1, 0], dtype=torch.bool, device=device)
mask2 = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
self.assertEqual(v[mask1, :, mask2].shape, (3, 7))
def test_multi_dimensional_bool_mask(self, device):
x = torch.randn(2, 2, 3, device=device)
b = ((True, False), (False, False))
m = torch.tensor(b, dtype=torch.bool, device=device)
z = torch.tensor(0)
t = torch.tensor(True)
f = torch.tensor(False)
# Using boolean sequence
self.assertEqual(x[b,].shape, (1, 3))
self.assertEqual(x[b, ::2].shape, (1, 2))
self.assertEqual(x[b, None].shape, (1, 1, 3))
self.assertEqual(x[b, 0].shape, (1,))
self.assertEqual(x[b, z].shape, (1,))
self.assertEqual(x[b, True].shape, (1, 3))
self.assertEqual(x[b, True, True, True, True].shape, (1, 3))
self.assertEqual(x[b, False].shape, (0, 3))
self.assertEqual(x[b, True, True, False, True].shape, (0, 3))
self.assertEqual(x[b, t].shape, (1, 3))
self.assertEqual(x[b, f].shape, (0, 3))
# Using boolean tensor
self.assertEqual(x[m].shape, (1, 3))
self.assertEqual(x[m, ::2].shape, (1, 2))
self.assertEqual(x[m, None].shape, (1, 1, 3))
self.assertEqual(x[m, 0].shape, (1,))
self.assertEqual(x[m, z].shape, (1,))
self.assertEqual(x[m, True].shape, (1, 3))
self.assertEqual(x[m, True, True, True, True].shape, (1, 3))
self.assertEqual(x[m, False].shape, (0, 3))
self.assertEqual(x[m, True, True, False, True].shape, (0, 3))
self.assertEqual(x[m, t].shape, (1, 3))
self.assertEqual(x[m, f].shape, (0, 3))
# Boolean mask in the middle of indices array
x = torch.randn(3, 2, 2, 5, device=device)
self.assertEqual(x[:, m, :].shape, (3, 1, 5))
self.assertEqual(x[0, m, ::2].shape, (1, 3))
self.assertEqual(x[..., m, ::2].shape, (3, 1, 3))
self.assertEqual(x[None, ..., m, ::2].shape, (1, 3, 1, 3))
def test_bool_mask_assignment(self, device):
v = torch.tensor([[1, 2], [3, 4]], device=device)
mask = torch.tensor([1, 0], dtype=torch.bool, device=device)
v[mask, :] = 0
self.assertEqual(v, torch.tensor([[0, 0], [3, 4]], device=device))
v = torch.tensor([[1, 2], [3, 4]], device=device)
v[:, mask] = 0
self.assertEqual(v, torch.tensor([[0, 2], [0, 4]], device=device))
def test_multi_dimensional_bool_mask_assignment(self, device):
v = torch.tensor([[[[1], [2]], [[3], [4]]]], device=device)
mask = torch.tensor([[1, 0], [0, 1]], dtype=torch.bool, device=device)
v[:, mask, :] = 0
self.assertEqual(v, torch.tensor([[[[0], [2]], [[3], [0]]]], device=device))
v = torch.tensor([[[[1], [2]], [[3], [4]]]], device=device)
torch.ops.aten.index_put_(v, [None, mask, None], torch.tensor(0))
self.assertEqual(v, torch.tensor([[[[0], [2]], [[3], [0]]]], device=device))
def test_byte_mask(self, device):
v = torch.randn(5, 7, 3, device=device)
mask = torch.ByteTensor([1, 0, 1, 1, 0]).to(device)
with warnings.catch_warnings(record=True) as w:
res = v[mask]
self.assertEqual(res.shape, (3, 7, 3))
self.assertEqual(res, torch.stack([v[0], v[2], v[3]]))
self.assertEqual(len(w), 1)
v = torch.tensor([1.0], device=device)
self.assertEqual(v[v == 0], torch.tensor([], device=device))
def test_byte_mask_accumulate(self, device):
mask = torch.zeros(size=(10,), dtype=torch.uint8, device=device)
y = torch.ones(size=(10, 10), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y.index_put_((mask,), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
self.assertEqual(len(w), 2)
# MPS: Fails locally, but passes in CI...
@skipIfTorchDynamo(
"This test causes SIGKILL when running with dynamo, https://github.com/pytorch/pytorch/issues/88472"
)
@serialTest(TEST_CUDA or TEST_XPU or TEST_MPS)
def test_index_put_accumulate_large_tensor(self, device):
# This test is for tensors with number of elements >= INT_MAX (2^31 - 1).
N = (1 << 31) + 5
dt = torch.int8
a = torch.ones(N, dtype=dt, device=device)
indices = torch.tensor(
[-2, 0, -2, -1, 0, -1, 1], device=device, dtype=torch.long
)
values = torch.tensor([6, 5, 6, 6, 5, 7, 11], dtype=dt, device=device)
a.index_put_((indices,), values, accumulate=True)
self.assertEqual(a[0], 11)
self.assertEqual(a[1], 12)
self.assertEqual(a[2], 1)
self.assertEqual(a[-3], 1)
self.assertEqual(a[-2], 13)
self.assertEqual(a[-1], 14)
a = torch.ones((2, N), dtype=dt, device=device)
indices0 = torch.tensor([0, -1, 0, 1], device=device, dtype=torch.long)
indices1 = torch.tensor([-2, -1, 0, 1], device=device, dtype=torch.long)
values = torch.tensor([12, 13, 10, 11], dtype=dt, device=device)
a.index_put_((indices0, indices1), values, accumulate=True)
self.assertEqual(a[0, 0], 11)
self.assertEqual(a[0, 1], 1)
self.assertEqual(a[1, 0], 1)
self.assertEqual(a[1, 1], 12)
self.assertEqual(a[:, 2], torch.ones(2, dtype=torch.int8))
self.assertEqual(a[:, -3], torch.ones(2, dtype=torch.int8))
self.assertEqual(a[0, -2], 13)
self.assertEqual(a[1, -2], 1)
self.assertEqual(a[-1, -1], 14)
self.assertEqual(a[0, -1], 1)
@onlyNativeDeviceTypes
def test_index_put_accumulate_expanded_values(self, device):
# checks the issue with cuda: https://github.com/pytorch/pytorch/issues/39227
# and verifies consistency with CPU result
t = torch.zeros((5, 2))
t_dev = t.to(device)
indices = [torch.tensor([0, 1, 2, 3]), torch.tensor([1])]
indices_dev = [i.to(device) for i in indices]
values0d = torch.tensor(1.0)
values1d = torch.tensor([1.0])
out_cuda = t_dev.index_put_(indices_dev, values0d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values0d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
out_cuda = t_dev.index_put_(indices_dev, values1d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values1d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
t = torch.zeros(4, 3, 2)
t_dev = t.to(device)
indices = [
torch.tensor([0]),
torch.arange(3)[:, None],
torch.arange(2)[None, :],
]
indices_dev = [i.to(device) for i in indices]
values1d = torch.tensor([-1.0, -2.0])
values2d = torch.tensor([[-1.0, -2.0]])
out_cuda = t_dev.index_put_(indices_dev, values1d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values1d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
out_cuda = t_dev.index_put_(indices_dev, values2d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values2d, accumulate=True)
self.assertEqual(out_cuda.cpu(), out_cpu)
@onlyOn(["cuda", "xpu"])
def test_index_put_large_indices(self, device):
def generate_indices(num_indices: int, index_range: int):
indices = []
for _ in range(num_indices):
x = random.randint(0, index_range - 1)
indices.append(x)
return torch.tensor(indices)
num_indices = 401988
max_index_range = 2000
target_index_range = [16, 256, 2000]
# BFloat16
for generated_index_range in target_index_range:
# create CPU tensors
a_tensor_size = (max_index_range, 256)
a = torch.randn(a_tensor_size, dtype=torch.bfloat16)
b = generate_indices(
num_indices=num_indices, index_range=generated_index_range
)
c_tensor_size = (num_indices, 256)
c = torch.randn(c_tensor_size, dtype=torch.bfloat16)
# create GPU copies
a_dev = a.to(device)
b_dev = b.to(device)
c_dev = c.to(device)
# run
a.index_put_(indices=[b], values=c, accumulate=True)
a_dev.index_put_(indices=[b_dev], values=c_dev, accumulate=True)
self.assertEqual(a_dev.cpu(), a)
# Float32
for generated_index_range in target_index_range:
# create CPU tensors
a_tensor_size = (max_index_range, 256)
a = torch.randn(a_tensor_size, dtype=torch.float32)
b = generate_indices(
num_indices=num_indices, index_range=generated_index_range
)
c_tensor_size = (num_indices, 256)
c = torch.randn(c_tensor_size, dtype=torch.float32)
# create GPU copies
a_dev = a.to(device)
b_dev = b.to(device)
c_dev = c.to(device)
# run
torch.use_deterministic_algorithms(True)
a.index_put_(indices=[b], values=c, accumulate=True)
torch.use_deterministic_algorithms(False)
a_dev.index_put_(indices=[b_dev], values=c_dev, accumulate=True)
self.assertEqual(a_dev.cpu(), a)
@onlyOn(["cuda", "xpu"])
def test_index_put_accumulate_non_contiguous(self, device):
t = torch.zeros((5, 2, 2))
t_dev = t.to(device)
t1 = t_dev[:, 0, :]
t2 = t[:, 0, :]
self.assertTrue(not t1.is_contiguous())
self.assertTrue(not t2.is_contiguous())
indices = [torch.tensor([0, 1])]
indices_dev = [i.to(device) for i in indices]
value = torch.randn(2, 2)
out_cuda = t1.index_put_(indices_dev, value.to(device), accumulate=True)
out_cpu = t2.index_put_(indices, value, accumulate=True)
self.assertTrue(not t1.is_contiguous())
self.assertTrue(not t2.is_contiguous())
self.assertEqual(out_cuda.cpu(), out_cpu)
@onlyOn(["cuda", "xpu"])
def test_index_put_deterministic_with_optional_tensors(self, device):
def func(x, i, v):
with DeterministicGuard(True):
x[..., i] = v
return x
def func1(x, i, v):
with DeterministicGuard(True):
x[i] = v
return x
n = 4
t = torch.arange(n * 2, dtype=torch.float32).reshape(n, 2)
t_dev = t.to(device)
indices = torch.tensor([1, 0])
indices_dev = indices.to(device)
value0d = torch.tensor(10.0)
value1d = torch.tensor([1.0, 2.0])
values2d = torch.randn(n, 1)
for val in (value0d, value1d, values2d):
out_cuda = func(t_dev, indices_dev, val.to(device))
out_cpu = func(t, indices, val)
self.assertEqual(out_cuda.cpu(), out_cpu)
t = torch.zeros((5, 4))
t_dev = t.to(device)
indices = torch.tensor([1, 4, 3])
indices_dev = indices.to(device)
val = torch.randn(4)
out_cuda = func1(t_dev, indices_dev, val.to(device))
out_cpu = func1(t, indices, val)
self.assertEqual(out_cuda.cpu(), out_cpu)
t = torch.zeros(2, 3, 4)
ind = torch.tensor([0, 1])
val = torch.randn(6, 2)
with self.assertRaisesRegex(RuntimeError, "shape mismatch"):
func(t, ind, val)
with self.assertRaisesRegex(RuntimeError, "must match"):
func(t.to(device), ind.to(device), val.to(device))
val = torch.randn(2, 3, 1)
out_cuda = func1(t.to(device), ind.to(device), val.to(device))
out_cpu = func1(t, ind, val)
self.assertEqual(out_cuda.cpu(), out_cpu)
@onlyNativeDeviceTypes
def test_index_put_accumulate_duplicate_indices(self, device):
dtype = torch.float if device.startswith("mps") else torch.double
for i in range(1, 512):
# generate indices by random walk, this will create indices with
# lots of duplicates interleaved with each other
delta = torch.empty(i, dtype=dtype, device=device).uniform_(-1, 1)
indices = delta.cumsum(0).long()
input = torch.randn(indices.abs().max() + 1, device=device)
values = torch.randn(indices.size(0), device=device)
output = input.index_put((indices,), values, accumulate=True)
input_list = input.tolist()
indices_list = indices.tolist()
values_list = values.tolist()
for i, v in zip(indices_list, values_list):
input_list[i] += v
self.assertEqual(output, input_list)
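    # int32 and int64 index tensors should give identical results for both reads
    # and index_put_ writes.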
@onlyNativeDeviceTypes
def test_index_ind_dtype(self, device):
x = torch.randn(4, 4, device=device)
ind_long = torch.randint(4, (4,), dtype=torch.long, device=device)
ind_int = ind_long.int()
src = torch.randn(4, device=device)
ref = x[ind_long, ind_long]
res = x[ind_int, ind_int]
self.assertEqual(ref, res)
ref = x[ind_long, :]
res = x[ind_int, :]
self.assertEqual(ref, res)
ref = x[:, ind_long]
res = x[:, ind_int]
self.assertEqual(ref, res)
# no repeating indices for index_put
ind_long = torch.arange(4, dtype=torch.long, device=device)
ind_int = ind_long.int()
for accum in (True, False):
inp_ref = x.clone()
inp_res = x.clone()
torch.index_put_(inp_ref, (ind_long, ind_long), src, accum)
torch.index_put_(inp_res, (ind_int, ind_int), src, accum)
self.assertEqual(inp_ref, inp_res)
@skipXLA
def test_index_put_accumulate_empty(self, device):
# Regression test for https://github.com/pytorch/pytorch/issues/94667
input = torch.rand([], dtype=torch.float32, device=device)
with self.assertRaises(RuntimeError):
input.index_put([], torch.tensor([1.0], device=device), True)
def test_multiple_byte_mask(self, device):
v = torch.randn(5, 7, 3, device=device)
# note: these broadcast together and are transposed to the first dim
mask1 = torch.ByteTensor([1, 0, 1, 1, 0]).to(device)
mask2 = torch.ByteTensor([1, 1, 1]).to(device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual(v[mask1, :, mask2].shape, (3, 7))
self.assertEqual(len(w), 2)
def test_byte_mask2d(self, device):
v = torch.randn(5, 7, 3, device=device)
c = torch.randn(5, 7, device=device)
num_ones = (c > 0).sum()
r = v[c > 0]
self.assertEqual(r.shape, (num_ones, 3))
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_jit_indexing(self, device):
def fn1(x):
x[x < 50] = 1.0
return x
def fn2(x):
x[0:50] = 1.0
return x
scripted_fn1 = torch.jit.script(fn1)
scripted_fn2 = torch.jit.script(fn2)
data = torch.arange(100, device=device, dtype=torch.float)
out = scripted_fn1(data.detach().clone())
ref = torch.tensor(
np.concatenate((np.ones(50), np.arange(50, 100))),
device=device,
dtype=torch.float,
)
self.assertEqual(out, ref)
out = scripted_fn2(data.detach().clone())
self.assertEqual(out, ref)
def test_int_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
self.assertEqual(v[[0, 4, 2]].shape, (3, 7, 3))
self.assertEqual(v[:, [0, 4, 2]].shape, (5, 3, 3))
self.assertEqual(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
@dtypes(
torch.cfloat, torch.cdouble, torch.float, torch.bfloat16, torch.long, torch.bool
)
@dtypesIfCPU(
torch.cfloat, torch.cdouble, torch.float, torch.long, torch.bool, torch.bfloat16
)
@dtypesIfCUDA(
torch.cfloat,
torch.cdouble,
torch.half,
torch.long,
torch.bool,
torch.bfloat16,
torch.float8_e5m2,
torch.float8_e4m3fn,
)
@dtypesIfXPU(
torch.cfloat,
torch.cdouble,
torch.half,
torch.long,
torch.bool,
torch.bfloat16,
)
@dtypesIfMPS(torch.float, torch.float16, torch.long, torch.bool)
def test_index_put_src_datatype(self, device, dtype):
src = torch.ones(3, 2, 4, device=device, dtype=dtype)
vals = torch.ones(3, 2, 4, device=device, dtype=dtype)
indices = (torch.tensor([0, 2, 1]),)
res = src.index_put_(indices, vals, accumulate=True)
self.assertEqual(res.shape, src.shape)
@dtypes(torch.float, torch.bfloat16, torch.long, torch.bool)
@dtypesIfCPU(torch.float, torch.long, torch.bfloat16, torch.bool)
@dtypesIfCUDA(torch.half, torch.long, torch.bfloat16, torch.bool)
@dtypesIfXPU(torch.half, torch.long, torch.bfloat16, torch.bool)
def test_index_src_datatype(self, device, dtype):
src = torch.ones(3, 2, 4, device=device, dtype=dtype)
# test index
res = src[[0, 2, 1], :, :]
self.assertEqual(res.shape, src.shape)
# test index_put, no accum
src[[0, 2, 1], :, :] = res
self.assertEqual(res.shape, src.shape)
def test_int_indices2d(self, device):
# From the NumPy indexing example
x = torch.arange(0, 12, device=device).view(4, 3)
rows = torch.tensor([[0, 0], [3, 3]], device=device)
columns = torch.tensor([[0, 2], [0, 2]], device=device)
self.assertEqual(x[rows, columns].tolist(), [[0, 2], [9, 11]])
def test_int_indices_broadcast(self, device):
# From the NumPy indexing example
x = torch.arange(0, 12, device=device).view(4, 3)
rows = torch.tensor([0, 3], device=device)
columns = torch.tensor([0, 2], device=device)
result = x[rows[:, None], columns]
self.assertEqual(result.tolist(), [[0, 2], [9, 11]])
def test_empty_index(self, device):
x = torch.arange(0, 12, device=device).view(4, 3)
idx = torch.tensor([], dtype=torch.long, device=device)
self.assertEqual(x[idx].numel(), 0)
# empty assignment should have no effect but not throw an exception
y = x.clone()
y[idx] = -1
self.assertEqual(x, y)
mask = torch.zeros(4, 3, device=device).bool()
y[mask] = -1
self.assertEqual(x, y)
def test_empty_ndim_index(self, device):
x = torch.randn(5, device=device)
self.assertEqual(
torch.empty(0, 2, device=device),
x[torch.empty(0, 2, dtype=torch.int64, device=device)],
)
x = torch.randn(2, 3, 4, 5, device=device)
self.assertEqual(
torch.empty(2, 0, 6, 4, 5, device=device),
x[:, torch.empty(0, 6, dtype=torch.int64, device=device)],
)
x = torch.empty(10, 0, device=device)
self.assertEqual(x[[1, 2]].shape, (2, 0))
self.assertEqual(x[[], []].shape, (0,))
with self.assertRaisesRegex(IndexError, "for dimension with size 0"):
x[:, [0, 1]]
def test_empty_ndim_index_bool(self, device):
x = torch.randn(5, device=device)
self.assertRaises(
IndexError, lambda: x[torch.empty(0, 2, dtype=torch.uint8, device=device)]
)
def test_empty_slice(self, device):
x = torch.randn(2, 3, 4, 5, device=device)
y = x[:, :, :, 1]
z = y[:, 1:1, :]
self.assertEqual((2, 0, 4), z.shape)
# this isn't technically necessary, but matches NumPy stride calculations.
self.assertEqual((60, 20, 5), z.stride())
self.assertTrue(z.is_contiguous())
def test_index_getitem_copy_bools_slices(self, device):
true = torch.tensor(1, dtype=torch.uint8, device=device)
false = torch.tensor(0, dtype=torch.uint8, device=device)
tensors = [torch.randn(2, 3, device=device), torch.tensor(3.0, device=device)]
for a in tensors:
self.assertNotEqual(a.data_ptr(), a[True].data_ptr())
self.assertEqual(torch.empty(0, *a.shape), a[False])
self.assertNotEqual(a.data_ptr(), a[true].data_ptr())
self.assertEqual(torch.empty(0, *a.shape), a[false])
self.assertEqual(a.data_ptr(), a[None].data_ptr())
self.assertEqual(a.data_ptr(), a[...].data_ptr())
def test_index_setitem_bools_slices(self, device):
true = torch.tensor(1, dtype=torch.uint8, device=device)
false = torch.tensor(0, dtype=torch.uint8, device=device)
tensors = [torch.randn(2, 3, device=device), torch.tensor(3, device=device)]
for a in tensors:
            # prefix the value with two size-1 dims to stay compatible with numpy,
            # which cuts off leading 1s (some of these ops already prepend a 1 to the size)
neg_ones = torch.ones_like(a) * -1
neg_ones_expanded = neg_ones.unsqueeze(0).unsqueeze(0)
a[True] = neg_ones_expanded
self.assertEqual(a, neg_ones)
a[False] = 5
self.assertEqual(a, neg_ones)
a[true] = neg_ones_expanded * 2
self.assertEqual(a, neg_ones * 2)
a[false] = 5
self.assertEqual(a, neg_ones * 2)
a[None] = neg_ones_expanded * 3
self.assertEqual(a, neg_ones * 3)
a[...] = neg_ones_expanded * 4
self.assertEqual(a, neg_ones * 4)
if a.dim() == 0:
with self.assertRaises(IndexError):
a[:] = neg_ones_expanded * 5
def test_index_scalar_with_bool_mask(self, device):
a = torch.tensor(1, device=device)
uintMask = torch.tensor(True, dtype=torch.uint8, device=device)
boolMask = torch.tensor(True, dtype=torch.bool, device=device)
self.assertEqual(a[uintMask], a[boolMask])
self.assertEqual(a[uintMask].dtype, a[boolMask].dtype)
a = torch.tensor(True, dtype=torch.bool, device=device)
self.assertEqual(a[uintMask], a[boolMask])
self.assertEqual(a[uintMask].dtype, a[boolMask].dtype)
def test_setitem_expansion_error(self, device):
true = torch.tensor(True, device=device)
a = torch.randn(2, 3, device=device)
# check prefix with non-1s doesn't work
a_expanded = a.expand(torch.Size([5, 1]) + a.size())
# NumPy: ValueError
with self.assertRaises(RuntimeError):
a[True] = a_expanded
with self.assertRaises(RuntimeError):
a[true] = a_expanded
def test_getitem_scalars(self, device):
zero = torch.tensor(0, dtype=torch.int64, device=device)
one = torch.tensor(1, dtype=torch.int64, device=device)
# non-scalar indexed with scalars
a = torch.randn(2, 3, device=device)
self.assertEqual(a[0], a[zero])
self.assertEqual(a[0][1], a[zero][one])
self.assertEqual(a[0, 1], a[zero, one])
self.assertEqual(a[0, one], a[zero, 1])
# indexing by a scalar should slice (not copy)
self.assertEqual(a[0, 1].data_ptr(), a[zero, one].data_ptr())
self.assertEqual(a[1].data_ptr(), a[one.int()].data_ptr())
self.assertEqual(a[1].data_ptr(), a[one.short()].data_ptr())
# scalar indexed with scalar
r = torch.randn((), device=device)
with self.assertRaises(IndexError):
r[:]
with self.assertRaises(IndexError):
r[zero]
self.assertEqual(r, r[...])
def test_setitem_scalars(self, device):
zero = torch.tensor(0, dtype=torch.int64)
# non-scalar indexed with scalars
a = torch.randn(2, 3, device=device)
a_set_with_number = a.clone()
a_set_with_scalar = a.clone()
b = torch.randn(3, device=device)
a_set_with_number[0] = b
a_set_with_scalar[zero] = b
self.assertEqual(a_set_with_number, a_set_with_scalar)
a[1, zero] = 7.7
self.assertEqual(7.7, a[1, 0])
# scalar indexed with scalars
r = torch.randn((), device=device)
with self.assertRaises(IndexError):
r[:] = 8.8
with self.assertRaises(IndexError):
r[zero] = 8.8
r[...] = 9.9
self.assertEqual(9.9, r)
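    # Mixing basic slicing with advanced (list) indexing: reads return a copy,
    # while writes through the same indexer modify the original tensor.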
def test_basic_advanced_combined(self, device):
# From the NumPy indexing example
x = torch.arange(0, 12, device=device).view(4, 3)
self.assertEqual(x[1:2, 1:3], x[1:2, [1, 2]])
self.assertEqual(x[1:2, 1:3].tolist(), [[4, 5]])
# Check that it is a copy
unmodified = x.clone()
x[1:2, [1, 2]].zero_()
self.assertEqual(x, unmodified)
# But assignment should modify the original
unmodified = x.clone()
x[1:2, [1, 2]] = 0
self.assertNotEqual(x, unmodified)
def test_int_assignment(self, device):
x = torch.arange(0, 4, device=device).view(2, 2)
x[1] = 5
self.assertEqual(x.tolist(), [[0, 1], [5, 5]])
x = torch.arange(0, 4, device=device).view(2, 2)
x[1] = torch.arange(5, 7, device=device)
self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
def test_byte_tensor_assignment(self, device):
x = torch.arange(0.0, 16, device=device).view(4, 4)
b = torch.ByteTensor([True, False, True, False]).to(device)
value = torch.tensor([3.0, 4.0, 5.0, 6.0], device=device)
with warnings.catch_warnings(record=True) as w:
x[b] = value
self.assertEqual(len(w), 1)
self.assertEqual(x[0], value)
self.assertEqual(x[1], torch.arange(4.0, 8, device=device))
self.assertEqual(x[2], value)
self.assertEqual(x[3], torch.arange(12.0, 16, device=device))
def test_variable_slicing(self, device):
x = torch.arange(0, 16, device=device).view(4, 4)
indices = torch.IntTensor([0, 1]).to(device)
i, j = indices
self.assertEqual(x[i:j], x[0:1])
def test_ellipsis_tensor(self, device):
x = torch.arange(0, 9, device=device).view(3, 3)
idx = torch.tensor([0, 2], device=device)
self.assertEqual(x[..., idx].tolist(), [[0, 2], [3, 5], [6, 8]])
self.assertEqual(x[idx, ...].tolist(), [[0, 1, 2], [6, 7, 8]])
def test_unravel_index_errors(self, device):
with self.assertRaisesRegex(TypeError, r"expected 'indices' to be integer"):
torch.unravel_index(torch.tensor(0.5, device=device), (2, 2))
with self.assertRaisesRegex(TypeError, r"expected 'indices' to be integer"):
torch.unravel_index(torch.tensor([], device=device), (10, 3, 5))
with self.assertRaisesRegex(
TypeError, r"expected 'shape' to be int or sequence"
):
torch.unravel_index(
torch.tensor([1], device=device, dtype=torch.int64),
torch.tensor([1, 2, 3]),
)
with self.assertRaisesRegex(
TypeError, r"expected 'shape' sequence to only contain ints"
):
torch.unravel_index(
torch.tensor([1], device=device, dtype=torch.int64), (1, 2, 2.0)
)
with self.assertRaisesRegex(
ValueError, r"'shape' cannot have negative values, but got \(2, -3\)"
):
torch.unravel_index(torch.tensor(0, device=device), (2, -3))
def test_invalid_index(self, device):
x = torch.arange(0, 16, device=device).view(4, 4)
self.assertRaisesRegex(TypeError, "slice indices", lambda: x["0":"1"])
def test_out_of_bound_index(self, device):
x = torch.arange(0, 100, device=device).view(2, 5, 10)
self.assertRaisesRegex(
IndexError,
"index 5 is out of bounds for dimension 1 with size 5",
lambda: x[0, 5],
)
self.assertRaisesRegex(
IndexError,
"index 4 is out of bounds for dimension 0 with size 2",
lambda: x[4, 5],
)
self.assertRaisesRegex(
IndexError,
"index 15 is out of bounds for dimension 2 with size 10",
lambda: x[0, 1, 15],
)
self.assertRaisesRegex(
IndexError,
"index 12 is out of bounds for dimension 2 with size 10",
lambda: x[:, :, 12],
)
def test_zero_dim_index(self, device):
x = torch.tensor(10, device=device)
self.assertEqual(x, x.item())
def runner():
print(x[0])
return x[0]
self.assertRaisesRegex(IndexError, "invalid index", runner)
@onlyOn(["cuda", "xpu"])
def test_invalid_device(self, device):
idx = torch.tensor([0, 1])
b = torch.zeros(5, device=device)
c = torch.tensor([1.0, 2.0], device="cpu")
for accumulate in [True, False]:
self.assertRaises(
RuntimeError,
lambda: torch.index_put_(b, (idx,), c, accumulate=accumulate),
)
@onlyOn(["cuda", "xpu"])
def test_cpu_indices(self, device):
idx = torch.tensor([0, 1])
b = torch.zeros(2, device=device)
x = torch.ones(10, device=device)
x[idx] = b # index_put_
ref = torch.ones(10, device=device)
ref[:2] = 0
self.assertEqual(x, ref, atol=0, rtol=0)
out = x[idx] # index
self.assertEqual(out, torch.zeros(2, device=device), atol=0, rtol=0)
@dtypes(torch.long, torch.float32)
def test_take_along_dim(self, device, dtype):
def _test_against_numpy(t, indices, dim):
actual = torch.take_along_dim(t, indices, dim=dim)
t_np = t.cpu().numpy()
indices_np = indices.cpu().numpy()
expected = np.take_along_axis(t_np, indices_np, axis=dim)
self.assertEqual(actual, expected, atol=0, rtol=0)
for shape in [(3, 2), (2, 3, 5), (2, 4, 0), (2, 3, 1, 4)]:
for noncontiguous in [True, False]:
t = make_tensor(
shape, device=device, dtype=dtype, noncontiguous=noncontiguous
)
for dim in list(range(t.ndim)) + [None]:
if dim is None:
indices = torch.argsort(t.view(-1))
else:
indices = torch.argsort(t, dim=dim)
_test_against_numpy(t, indices, dim)
# test broadcasting
t = torch.ones((3, 4, 1), device=device)
indices = torch.ones((1, 2, 5), dtype=torch.long, device=device)
_test_against_numpy(t, indices, 1)
# test empty indices
t = torch.ones((3, 4, 5), device=device)
indices = torch.ones((3, 0, 5), dtype=torch.long, device=device)
_test_against_numpy(t, indices, 1)
@dtypes(torch.long, torch.float)
def test_take_along_dim_invalid(self, device, dtype):
shape = (2, 3, 1, 4)
dim = 0
t = make_tensor(shape, device=device, dtype=dtype)
indices = torch.argsort(t, dim=dim)
# dim of `t` and `indices` does not match
with self.assertRaisesRegex(
RuntimeError, "input and indices should have the same number of dimensions"
):
torch.take_along_dim(t, indices[0], dim=0)
# invalid `indices` dtype
with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
torch.take_along_dim(t, indices.to(torch.bool), dim=0)
with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
torch.take_along_dim(t, indices.to(torch.float), dim=0)
with self.assertRaisesRegex(RuntimeError, r"dtype of indices should be Long"):
torch.take_along_dim(t, indices.to(torch.int32), dim=0)
# invalid axis
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
torch.take_along_dim(t, indices, dim=-7)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
torch.take_along_dim(t, indices, dim=7)
@onlyOn(["cuda", "xpu"])
@dtypes(torch.float)
def test_gather_take_along_dim_cross_device(self, device, dtype):
shape = (2, 3, 1, 4)
dim = 0
t = make_tensor(shape, device=device, dtype=dtype)
indices = torch.argsort(t, dim=dim)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.gather(t, 0, indices.cpu())
with self.assertRaisesRegex(
RuntimeError,
r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()",
):
torch.take_along_dim(t, indices.cpu(), dim=0)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.gather(t.cpu(), 0, indices)
with self.assertRaisesRegex(
RuntimeError,
r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()",
):
torch.take_along_dim(t.cpu(), indices, dim=0)
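    # Broadcasted index assignment under deterministic algorithms must match the
    # CPU result exactly.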
@onlyOn(["cuda", "xpu"])
def test_cuda_broadcast_index_use_deterministic_algorithms(self, device):
with DeterministicGuard(True):
idx1 = torch.tensor([0])
idx2 = torch.tensor([2, 6])
idx3 = torch.tensor([1, 5, 7])
tensor_a = torch.rand(13, 11, 12, 13, 12).cpu()
tensor_b = tensor_a.to(device=device)
tensor_a[idx1] = 1.0
tensor_a[idx1, :, idx2, idx2, :] = 2.0
tensor_a[:, idx1, idx3, :, idx3] = 3.0
tensor_b[idx1] = 1.0
tensor_b[idx1, :, idx2, idx2, :] = 2.0
tensor_b[:, idx1, idx3, :, idx3] = 3.0
self.assertEqual(tensor_a, tensor_b.cpu(), atol=0, rtol=0)
tensor_a = torch.rand(10, 11).cpu()
tensor_b = tensor_a.to(device=device)
tensor_a[idx3] = 1.0
tensor_a[idx2, :] = 2.0
tensor_a[:, idx2] = 3.0
tensor_a[:, idx1] = 4.0
tensor_b[idx3] = 1.0
tensor_b[idx2, :] = 2.0
tensor_b[:, idx2] = 3.0
tensor_b[:, idx1] = 4.0
self.assertEqual(tensor_a, tensor_b.cpu(), atol=0, rtol=0)
tensor_a = torch.rand(10, 10).cpu()
tensor_b = tensor_a.to(device=device)
tensor_a[[8]] = 1.0
tensor_b[[8]] = 1.0
self.assertEqual(tensor_a, tensor_b.cpu(), atol=0, rtol=0)
tensor_a = torch.rand(10).cpu()
tensor_b = tensor_a.to(device=device)
tensor_a[6] = 1.0
tensor_b[6] = 1.0
self.assertEqual(tensor_a, tensor_b.cpu(), atol=0, rtol=0)
def test_index_limits(self, device):
# Regression test for https://github.com/pytorch/pytorch/issues/115415
t = torch.tensor([], device=device)
idx_min = torch.iinfo(torch.int64).min
idx_max = torch.iinfo(torch.int64).max
self.assertRaises(IndexError, lambda: t[idx_min])
self.assertRaises(IndexError, lambda: t[idx_max])
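    # index_reduce_ is validated against a reference that applies the reduction
    # index by index along the transposed dimension.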
@parametrize("reduce", ["prod", "amin", "amax", "mean"])
@dtypes(*all_types_and(torch.half, torch.bfloat16))
@expectedFailureMPS # Unimplemented for MPS device
def test_index_reduce(self, device, dtype, reduce):
size = (3, 4, 5)
index_dtypes = [torch.int, torch.long]
include_selfs = [True, False]
amin_init = float("inf") if dtype.is_floating_point else torch.iinfo(dtype).max
amax_init = -float("inf") if dtype.is_floating_point else torch.iinfo(dtype).min
reduction_init = {"prod": 1, "mean": 0, "amin": amin_init, "amax": amax_init}
for dest_noncontig, src_noncontig, index_noncontig in product(
[True, False], repeat=3
):
for idx_dtype, include_self in product(index_dtypes, include_selfs):
for dim in range(len(size)):
num_src = np.random.randint(10)
num_dest = size[dim]
dest = make_tensor(
size, device=device, dtype=dtype, noncontiguous=dest_noncontig
)
src_size = size[:dim] + (num_src,) + size[dim + 1 :]
src = make_tensor(
src_size,
device=device,
dtype=dtype,
noncontiguous=src_noncontig,
)
idx = torch.testing.make_tensor(
num_src,
low=0,
high=num_dest,
dtype=idx_dtype,
device=device,
noncontiguous=index_noncontig,
)
expected = dest.clone()
dest.index_reduce_(dim, idx, src, reduce, include_self=include_self)
# fill rows in idx with reduction inits if include_self=False
if not include_self:
expected.index_fill_(dim, idx.long(), reduction_init[reduce])
expected = expected.transpose(0, dim)
src = src.transpose(0, dim)
for i in range(num_src):
if reduce == "prod":
expected[idx[i]] *= src[i]
elif reduce == "amin":
torch.minimum(
expected[idx[i]], src[i], out=expected[idx[i]]
)
elif reduce == "amax":
torch.maximum(
expected[idx[i]], src[i], out=expected[idx[i]]
)
else:
expected[idx[i]] += src[i]
if reduce == "mean":
counts = (
torch.ones_like(expected)
if include_self
else torch.zeros_like(expected)
)
counts.index_add_(0, idx, torch.ones_like(src))
counts.masked_fill_(counts == 0, 1)
if dtype.is_floating_point:
expected.div_(counts)
else:
expected.div_(counts, rounding_mode="floor")
expected = expected.transpose(0, dim)
self.assertEqual(dest, expected)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfMPS(*all_mps_types_and(torch.bool, torch.cfloat))
def test_index_copy(self, device, dtype):
# We just test for num_copy <= num_dest, as otherwise there are repeated indices
# and the behavior is undefined
num_copy, num_dest = 3, 5
def make_arg(batch_sizes, n, dim, contig):
size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
return make_tensor(
size_arg,
dtype=dtype,
device=device,
low=None,
high=None,
noncontiguous=not contig,
)
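        # Reference implementation: copy each source slice into the destination
        # at the corresponding index, one index at a time.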
def ref_index_copy(tgt, dim, idx, src):
for i in range(idx.size(0)):
idx_dest = dim * (slice(None),) + (idx[i],)
idx_src = dim * (slice(None),) + (i,)
tgt[idx_dest] = src[idx_src]
# More thorough testing as in index_add
for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
for other_sizes in ((), (4, 5)):
for dim in range(len(other_sizes)):
dest = make_arg(other_sizes, num_dest, dim, dest_contig)
src = make_arg(other_sizes, num_copy, dim, src_contig)
idx = torch.randperm(num_dest, dtype=torch.int64, device=device)[
:num_copy
]
if not index_contig:
idx = torch.repeat_interleave(idx, 2, dim=-1)
idx = idx[..., ::2]
dest2 = dest.clone()
dest.index_copy_(dim, idx, src)
ref_index_copy(dest2, dim, idx, src)
self.assertEqual(dest, dest2)
# onlyNativeDeviceTypes due to an XLA error:
# https://github.com/pytorch/pytorch/issues/53256
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfMPS(*all_mps_types_and(torch.bool, torch.cfloat))
def test_index_copy_scalars(self, device, dtype):
# Create the 8 possible combinations of scalar sizes for target / index / source
scalars = (
(
make_tensor(size_t, dtype=dtype, device=device, low=None, high=None),
make_tensor(size_i, dtype=torch.int64, device=device, low=0, high=1),
make_tensor(size_s, dtype=dtype, device=device, low=None, high=None),
)
for size_t, size_i, size_s in product([(), (1,)], repeat=3)
)
for target, idx, source in scalars:
target.index_copy_(0, idx, source)
self.assertEqual(target.item(), source.item())
@onlyCPU
def test_errors_index_copy(self, device):
# We do not test the GPU as the CUDA_ASSERT would break the CUDA context
idx_dim = 8
tgt_dim = 5
batch_dim = 3
# Too large of an index
a = torch.randn(batch_dim, tgt_dim, device=device)
idx = torch.full((idx_dim,), tgt_dim, device=device)
c = torch.zeros(batch_dim, idx_dim, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
# Too small (negative indices)
idx = torch.full((idx_dim,), -1, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
# Too small (very negative indices) - they should be unsupported even
# when support for negative indices is implemented for index_copy_
idx = torch.full((idx_dim,), -tgt_dim - 1, device=device)
with self.assertRaises(IndexError):
a.index_copy_(1, idx, c)
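    # Build a zero destination, a large random source, and an index vector with
    # many duplicate entries along `dim`, shared by the deterministic
    # index_copy/index_add tests.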
def _prepare_data_for_index_copy_and_add_deterministic(
self, dim: int, device: torch.device
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
assert dim >= 0 and dim < 3
a = [5, 4, 3]
a[dim] = 2000
x = torch.zeros(a, device=device)
b = a.copy()
elems = a[dim] * 20
b[dim] = elems
src = torch.rand(b, device=device)
index = torch.randint(a[dim], (elems,), device=device)
return (x, index, src)
@onlyNativeDeviceTypes
@skipXPUIf(True, "https://github.com/intel/torch-xpu-ops/issues/1973")
@expectedFailureMPS # See https://github.com/pytorch/pytorch/issues/161029
def test_index_copy_deterministic(self, device: torch.device) -> None:
for dim in range(3):
x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(
dim, device
)
with DeterministicGuard(True):
y0 = torch.index_copy(x, dim, index, src)
x0 = x.detach().clone()
index_list = index.tolist()
for i in range(len(index_list)):
if dim == 0:
x0[index_list[i], :, :] = src[i, :, :]
elif dim == 1:
x0[:, index_list[i], :] = src[:, i, :]
elif dim == 2:
x0[:, :, index_list[i]] = src[:, :, i]
self.assertEqual(x0, y0, atol=0, rtol=0)
@onlyNativeDeviceTypes
@expectedFailureMPS # See https://github.com/pytorch/pytorch/issues/161029
def test_index_add_deterministic(self, device: torch.device) -> None:
for dim in range(3):
x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(
dim, device
)
alpha = random.random() + 1
# on CPU it should be deterministic regardless of the deterministic mode
with DeterministicGuard(True):
y0 = torch.index_add(x, dim, index, src, alpha=alpha)
for _ in range(3):
y = torch.index_add(x, dim, index, src, alpha=alpha)
self.assertEqual(y, y0, atol=0, rtol=0)
with DeterministicGuard(False):
for _ in range(3):
y_nd = torch.index_add(x, dim, index, src, alpha=alpha)
self.assertEqual(y_nd, y0, atol=1e-3, rtol=1e-5)
@onlyNativeDeviceTypes
@skipXPUIf(True, "https://github.com/intel/torch-xpu-ops/issues/1973")
def test_index_put_non_accumulate_deterministic(self, device) -> None:
with DeterministicGuard(True):
for i in range(3):
m = random.randint(10, 20)
elems = random.randint(20000, 30000)
values = torch.rand(elems, device=device)
indices = torch.randint(m, (elems,), device=device)
input = torch.rand(m, device=device)
output = input.index_put((indices,), values, accumulate=False)
input_list = input.tolist()
indices_list = indices.tolist()
values_list = values.tolist()
for i, v in zip(indices_list, values_list):
input_list[i] = v
self.assertEqual(output, input_list)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfMPS(*all_mps_types_and(torch.bool)) # TODO: Add torch.cfloat here
def test_index_fill(self, device, dtype):
x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device)
index = torch.tensor([0], device=device)
x.index_fill_(1, index, 0)
self.assertEqual(x, torch.tensor([[0, 2], [0, 5]], dtype=dtype, device=device))
if not x.is_complex() and device != "meta":
with self.assertRaisesRegex(RuntimeError, r"Scalar"):
x.index_fill_(1, index, 1 + 1j)
# Make sure that the result stays 0-dim while applied to
# a 0-dim input
x = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(0, x.index_fill(0, index, -1).dim())
self.assertEqual(0, x.index_fill_(0, index, -1).dim())
# The test fails for zero-dimensional tensors on XLA
@onlyNativeDeviceTypes
@dtypes(*all_types_complex_float8_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfXPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
@dtypesIfMPS(*all_mps_types_and(torch.bool, torch.cfloat))
def test_index_select(self, device, dtype):
num_src, num_out = 3, 5
def make_arg(batch_sizes, n, dim, contig):
size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
return make_tensor(
size_arg,
dtype=dtype,
device=device,
low=None,
high=None,
noncontiguous=not contig,
)
def ref_index_select(src, dim, idx):
# some types not supported on numpy
not_np_dtypes = (
torch.bfloat16,
torch.float8_e5m2,
torch.float8_e5m2fnuz,
torch.float8_e4m3fn,
torch.float8_e4m3fnuz,
)
if dtype in not_np_dtypes:
src = src.float()
out = torch.from_numpy(
np.take(src.cpu().numpy(), idx.cpu().numpy(), axis=dim)
)
if dtype in not_np_dtypes:
out = out.to(device=device, dtype=dtype)
return out
for src_contig, idx_contig in product([True, False], repeat=2):
for other_sizes in ((), (4, 5)):
for dim in range(len(other_sizes)):
src = make_arg(other_sizes, num_src, dim, src_contig)
idx = make_tensor(
(num_out,),
dtype=torch.int64,
device=device,
low=0,
high=num_src,
noncontiguous=not idx_contig,
)
out = torch.index_select(src, dim, idx)
out2 = ref_index_select(src, dim, idx)
self.assertEqual(out, out2)
for idx_type in (torch.int32, torch.int64):
other_sizes = (3, 2)
dim = 1
src = make_arg(other_sizes, num_src, dim, True)
idx = make_tensor(
(num_out,),
dtype=idx_type,
device=device,
low=0,
high=num_src,
noncontiguous=False,
)
out = torch.index_select(src, dim, idx)
out2 = ref_index_select(src, dim, idx)
self.assertEqual(out, out2)
# Create the 4 possible combinations of scalar sizes for index / source
scalars = (
(
make_tensor(size_s, dtype=dtype, device=device),
torch.zeros(size_i, dtype=torch.int64, device=device),
)
for size_s, size_i in product([(), (1,)], repeat=2)
)
for source, idx in scalars:
out = source.index_select(0, idx)
self.assertEqual(out.item(), source.item())
# The tests below are from NumPy test_indexing.py with some modifications to
    # make them compatible with PyTorch. It's licensed under the BSD license below:
#
# Copyright (c) 2005-2017, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| TestIndexing |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 3513,
"end": 3625
} | class ____(Variadic_TA[T_contra]): ...
Variadic_TA2 = Variadic[Unpack[tuple[int, T]]]
| VariadicChildContra_WithTA |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 1695,
"end": 1854
} | class ____(enum.Enum):
CompromisedPassword = "password compromised"
AccountFrozen = "account frozen"
AdminInitiated = "admin initiated"
| DisableReason |
python | python__mypy | mypyc/ir/class_ir.py | {
"start": 3329,
"end": 22038
} | class ____:
"""Intermediate representation of a class.
This also describes the runtime structure of native instances.
"""
def __init__(
self,
name: str,
module_name: str,
is_trait: bool = False,
is_generated: bool = False,
is_abstract: bool = False,
is_ext_class: bool = True,
is_final_class: bool = False,
) -> None:
self.name = name
self.module_name = module_name
self.is_trait = is_trait
self.is_generated = is_generated
self.is_abstract = is_abstract
self.is_ext_class = is_ext_class
self.is_final_class = is_final_class
# An augmented class has additional methods separate from what mypyc generates.
# Right now the only one is dataclasses.
self.is_augmented = False
# Does this inherit from a Python class?
self.inherits_python = False
# Do instances of this class have __dict__?
self.has_dict = False
# Do we allow interpreted subclasses? Derived from a mypyc_attr.
self.allow_interpreted_subclasses = False
# Does this class need getseters to be generated for its attributes? (getseters are also
# added if is_generated is False)
self.needs_getseters = False
# Is this class declared as serializable (supports copy.copy
# and pickle) using @mypyc_attr(serializable=True)?
#
# Additionally, any class with this attribute False but with
# an __init__ that can be called without any arguments is
# *implicitly serializable*. In this case __init__ will be
# called during deserialization without arguments. If this is
# True, we match Python semantics and __init__ won't be called
# during deserialization.
#
# This impacts also all subclasses. Use is_serializable() to
# also consider base classes.
self._serializable = False
# If this a subclass of some built-in python class, the name
# of the object for that class. We currently only support this
# in a few ad-hoc cases.
self.builtin_base: str | None = None
# Default empty constructor
self.ctor = FuncDecl(name, None, module_name, FuncSignature([], RInstance(self)))
# Declare setup method that allocates and initializes an object. type is the
# type of the class being initialized, which could be another class if there
# is an interpreted subclass.
# TODO: Make it a regular method and generate its body in IR
self.setup = FuncDecl(
"__mypyc__" + name + "_setup",
None,
module_name,
FuncSignature([RuntimeArg("type", object_rprimitive)], RInstance(self)),
)
# Attributes defined in the class (not inherited)
self.attributes: dict[str, RType] = {}
# Deletable attributes
self.deletable: list[str] = []
# We populate method_types with the signatures of every method before
# we generate methods, and we rely on this information being present.
self.method_decls: dict[str, FuncDecl] = {}
# Map of methods that are actually present in an extension class
self.methods: dict[str, FuncIR] = {}
# Glue methods for boxing/unboxing when a class changes the type
# while overriding a method. Maps from (parent class overridden, method)
# to IR of glue method.
self.glue_methods: dict[tuple[ClassIR, str], FuncIR] = {}
# Properties are accessed like attributes, but have behavior like method calls.
# They don't belong in the methods dictionary, since we don't want to expose them to
# Python's method API. But we want to put them into our own vtable as methods, so that
# they are properly handled and overridden. The property dictionary values are a tuple
# containing a property getter and an optional property setter.
self.properties: dict[str, tuple[FuncIR, FuncIR | None]] = {}
# We generate these in prepare_class_def so that we have access to them when generating
# other methods and properties that rely on these types.
self.property_types: dict[str, RType] = {}
self.vtable: dict[str, int] | None = None
self.vtable_entries: VTableEntries = []
self.trait_vtables: dict[ClassIR, VTableEntries] = {}
# N.B: base might not actually quite be the direct base.
# It is the nearest concrete base, but we allow a trait in between.
self.base: ClassIR | None = None
self.traits: list[ClassIR] = []
# Supply a working mro for most generated classes. Real classes will need to
# fix it up.
self.mro: list[ClassIR] = [self]
# base_mro is the chain of concrete (non-trait) ancestors
self.base_mro: list[ClassIR] = [self]
# Direct subclasses of this class (use subclasses() to also include non-direct ones)
# None if separate compilation prevents this from working.
#
# Often it's better to use has_no_subclasses() or subclasses() instead.
self.children: list[ClassIR] | None = []
# Instance attributes that are initialized in the class body.
self.attrs_with_defaults: set[str] = set()
# Attributes that are always initialized in __init__ or class body
# (inferred in mypyc.analysis.attrdefined using interprocedural analysis).
# These can never raise AttributeError when accessed. If an attribute
# is *not* always initialized, we normally use the error value for
# an undefined value. If the attribute byte has an overlapping error value
# (the error_overlap attribute is true for the RType), we use a bitmap
# to track if the attribute is defined instead (see bitmap_attrs).
self._always_initialized_attrs: set[str] = set()
# Attributes that are sometimes initialized in __init__
self._sometimes_initialized_attrs: set[str] = set()
# If True, __init__ can make 'self' visible to unanalyzed/arbitrary code
self.init_self_leak = False
# Definedness of these attributes is backed by a bitmap. Index in the list
# indicates the bit number. Includes inherited attributes. We need the
# bitmap for types such as native ints (i64 etc.) that can't have a dedicated
# error value that doesn't overlap a valid value. The bitmap is used if the
# value of an attribute is the same as the error value.
self.bitmap_attrs: list[str] = []
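# Illustrative sketch (not the actual runtime layout): with a definedness
# bitmap, attribute slot i counts as initialized when bit i is set, e.g.
#
#     def attr_is_defined(bitmap: int, i: int) -> bool:
#         return (bitmap >> i) & 1 == 1
#
# The bitmap is only consulted for attributes whose RType has error_overlap
# set, since for those the error value is also a legal stored value.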
# If this is a generator environment class, the user function it was generated for
self.env_user_function: FuncIR | None = None
# If True, keep one freed, cleared instance available for immediate reuse to
# speed up allocations. This helps if many objects are freed quickly, before
# other instances of the same class are allocated. This is effectively a
# per-type free "list" of up to length 1.
self.reuse_freed_instance = False
# Is this a class inheriting from enum.Enum? Such classes can be special-cased.
self.is_enum = False
# Name of the function if this is a callable class representing a coroutine.
self.coroutine_name: str | None = None
def __repr__(self) -> str:
return (
"ClassIR("
"name={self.name}, module_name={self.module_name}, "
"is_trait={self.is_trait}, is_generated={self.is_generated}, "
"is_abstract={self.is_abstract}, is_ext_class={self.is_ext_class}, "
"is_final_class={self.is_final_class}"
")".format(self=self)
)
@property
def fullname(self) -> str:
return f"{self.module_name}.{self.name}"
def real_base(self) -> ClassIR | None:
"""Return the actual concrete base class, if there is one."""
if len(self.mro) > 1 and not self.mro[1].is_trait:
return self.mro[1]
return None
def vtable_entry(self, name: str) -> int:
assert self.vtable is not None, "vtable not computed yet"
assert name in self.vtable, f"{self.name!r} has no attribute {name!r}"
return self.vtable[name]
def attr_details(self, name: str) -> tuple[RType, ClassIR]:
for ir in self.mro:
if name in ir.attributes:
return ir.attributes[name], ir
if name in ir.property_types:
return ir.property_types[name], ir
raise KeyError(f"{self.name!r} has no attribute {name!r}")
def attr_type(self, name: str) -> RType:
return self.attr_details(name)[0]
def method_decl(self, name: str) -> FuncDecl:
for ir in self.mro:
if name in ir.method_decls:
return ir.method_decls[name]
raise KeyError(f"{self.name!r} has no attribute {name!r}")
def method_sig(self, name: str) -> FuncSignature:
return self.method_decl(name).sig
def has_method(self, name: str) -> bool:
try:
self.method_decl(name)
except KeyError:
return False
return True
def is_method_final(self, name: str) -> bool:
subs = self.subclasses()
if subs is None:
return self.is_final_class
if self.has_method(name):
method_decl = self.method_decl(name)
for subc in subs:
if subc.method_decl(name) != method_decl:
return False
return True
else:
return not any(subc.has_method(name) for subc in subs)
def has_attr(self, name: str) -> bool:
try:
self.attr_type(name)
except KeyError:
return False
return True
def is_deletable(self, name: str) -> bool:
return any(name in ir.deletable for ir in self.mro)
def is_always_defined(self, name: str) -> bool:
if self.is_deletable(name):
return False
return name in self._always_initialized_attrs
def name_prefix(self, names: NameGenerator) -> str:
return names.private_name(self.module_name, self.name)
def struct_name(self, names: NameGenerator) -> str:
return f"{exported_name(self.fullname)}Object"
def get_method_and_class(
self, name: str, *, prefer_method: bool = False
) -> tuple[FuncIR, ClassIR] | None:
for ir in self.mro:
if name in ir.methods:
func_ir = ir.methods[name]
if not prefer_method and func_ir.decl.implicit:
# This is an implicit accessor, so there is also an attribute definition
# which the caller prefers. This happens if an attribute overrides a
# property.
return None
return func_ir, ir
return None
def get_method(self, name: str, *, prefer_method: bool = False) -> FuncIR | None:
res = self.get_method_and_class(name, prefer_method=prefer_method)
return res[0] if res else None
def has_method_decl(self, name: str) -> bool:
return any(name in ir.method_decls for ir in self.mro)
def has_no_subclasses(self) -> bool:
return self.children == [] and not self.allow_interpreted_subclasses
def subclasses(self) -> set[ClassIR] | None:
"""Return all subclasses of this class, both direct and indirect.
Return None if it is impossible to identify all subclasses, for example
because we are performing separate compilation.
"""
if self.children is None or self.allow_interpreted_subclasses:
return None
result = set(self.children)
for child in self.children:
if child.children:
child_subs = child.subclasses()
if child_subs is None:
return None
result.update(child_subs)
return result
def concrete_subclasses(self) -> list[ClassIR] | None:
"""Return all concrete (i.e. non-trait and non-abstract) subclasses.
Include both direct and indirect subclasses. Place classes with no children first.
"""
subs = self.subclasses()
if subs is None:
return None
concrete = {c for c in subs if not (c.is_trait or c.is_abstract)}
# We place classes with no children first because they are more likely
# to appear in various isinstance() checks. We then sort leaves by name
# to get stable order.
return sorted(concrete, key=lambda c: (len(c.children or []), c.name))
def is_serializable(self) -> bool:
return any(ci._serializable for ci in self.mro)
def serialize(self) -> JsonDict:
return {
"name": self.name,
"module_name": self.module_name,
"is_trait": self.is_trait,
"is_ext_class": self.is_ext_class,
"is_abstract": self.is_abstract,
"is_generated": self.is_generated,
"is_augmented": self.is_augmented,
"is_final_class": self.is_final_class,
"inherits_python": self.inherits_python,
"has_dict": self.has_dict,
"allow_interpreted_subclasses": self.allow_interpreted_subclasses,
"needs_getseters": self.needs_getseters,
"_serializable": self._serializable,
"builtin_base": self.builtin_base,
"ctor": self.ctor.serialize(),
# We serialize dicts as lists to ensure order is preserved
"attributes": [(k, t.serialize()) for k, t in self.attributes.items()],
# We try to serialize a name reference, but if the decl isn't in methods
# then we can't be sure that will work so we serialize the whole decl.
"method_decls": [
(k, d.id if k in self.methods else d.serialize())
for k, d in self.method_decls.items()
],
# We serialize method fullnames out and put methods in a separate dict
"methods": [(k, m.id) for k, m in self.methods.items()],
"glue_methods": [
((cir.fullname, k), m.id) for (cir, k), m in self.glue_methods.items()
],
# We serialize properties and property_types separately out of an
# abundance of caution about preserving dict ordering...
"property_types": [(k, t.serialize()) for k, t in self.property_types.items()],
"properties": list(self.properties),
"vtable": self.vtable,
"vtable_entries": serialize_vtable(self.vtable_entries),
"trait_vtables": [
(cir.fullname, serialize_vtable(v)) for cir, v in self.trait_vtables.items()
],
# References to class IRs are all just names
"base": self.base.fullname if self.base else None,
"traits": [cir.fullname for cir in self.traits],
"mro": [cir.fullname for cir in self.mro],
"base_mro": [cir.fullname for cir in self.base_mro],
"children": (
[cir.fullname for cir in self.children] if self.children is not None else None
),
"deletable": self.deletable,
"attrs_with_defaults": sorted(self.attrs_with_defaults),
"_always_initialized_attrs": sorted(self._always_initialized_attrs),
"_sometimes_initialized_attrs": sorted(self._sometimes_initialized_attrs),
"init_self_leak": self.init_self_leak,
"env_user_function": self.env_user_function.id if self.env_user_function else None,
"reuse_freed_instance": self.reuse_freed_instance,
"is_enum": self.is_enum,
"is_coroutine": self.coroutine_name,
}
@classmethod
def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> ClassIR:
fullname = data["module_name"] + "." + data["name"]
assert fullname in ctx.classes, "Class %s not in deser class map" % fullname
ir = ctx.classes[fullname]
ir.is_trait = data["is_trait"]
ir.is_generated = data["is_generated"]
ir.is_abstract = data["is_abstract"]
ir.is_ext_class = data["is_ext_class"]
ir.is_augmented = data["is_augmented"]
ir.is_final_class = data["is_final_class"]
ir.inherits_python = data["inherits_python"]
ir.has_dict = data["has_dict"]
ir.allow_interpreted_subclasses = data["allow_interpreted_subclasses"]
ir.needs_getseters = data["needs_getseters"]
ir._serializable = data["_serializable"]
ir.builtin_base = data["builtin_base"]
ir.ctor = FuncDecl.deserialize(data["ctor"], ctx)
ir.attributes = {k: deserialize_type(t, ctx) for k, t in data["attributes"]}
ir.method_decls = {
k: ctx.functions[v].decl if isinstance(v, str) else FuncDecl.deserialize(v, ctx)
for k, v in data["method_decls"]
}
ir.methods = {k: ctx.functions[v] for k, v in data["methods"]}
ir.glue_methods = {
(ctx.classes[c], k): ctx.functions[v] for (c, k), v in data["glue_methods"]
}
ir.property_types = {k: deserialize_type(t, ctx) for k, t in data["property_types"]}
ir.properties = {
k: (ir.methods[k], ir.methods.get(PROPSET_PREFIX + k)) for k in data["properties"]
}
ir.vtable = data["vtable"]
ir.vtable_entries = deserialize_vtable(data["vtable_entries"], ctx)
ir.trait_vtables = {
ctx.classes[k]: deserialize_vtable(v, ctx) for k, v in data["trait_vtables"]
}
base = data["base"]
ir.base = ctx.classes[base] if base else None
ir.traits = [ctx.classes[s] for s in data["traits"]]
ir.mro = [ctx.classes[s] for s in data["mro"]]
ir.base_mro = [ctx.classes[s] for s in data["base_mro"]]
ir.children = data["children"] and [ctx.classes[s] for s in data["children"]]
ir.deletable = data["deletable"]
ir.attrs_with_defaults = set(data["attrs_with_defaults"])
ir._always_initialized_attrs = set(data["_always_initialized_attrs"])
ir._sometimes_initialized_attrs = set(data["_sometimes_initialized_attrs"])
ir.init_self_leak = data["init_self_leak"]
ir.env_user_function = (
ctx.functions[data["env_user_function"]] if data["env_user_function"] else None
)
ir.reuse_freed_instance = data["reuse_freed_instance"]
ir.is_enum = data["is_enum"]
ir.coroutine_name = data["is_coroutine"]
return ir
| ClassIR |
python | pandas-dev__pandas | pandas/tests/io/test_stata.py | {
"start": 1168,
"end": 100374
} | class ____:
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_empty_dta(self, version, temp_file):
empty_ds = DataFrame(columns=["unit"])
# GH 7369, make sure can read a 0-obs dta file
path = temp_file
empty_ds.to_stata(path, write_index=False, version=version)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_empty_dta_with_dtypes(self, version, temp_file):
# GH 46240
# Fixing above bug revealed that types are not correctly preserved when
# writing empty DataFrames
empty_df_typed = DataFrame(
{
"i8": np.array([0], dtype=np.int8),
"i16": np.array([0], dtype=np.int16),
"i32": np.array([0], dtype=np.int32),
"i64": np.array([0], dtype=np.int64),
"u8": np.array([0], dtype=np.uint8),
"u16": np.array([0], dtype=np.uint16),
"u32": np.array([0], dtype=np.uint32),
"u64": np.array([0], dtype=np.uint64),
"f32": np.array([0], dtype=np.float32),
"f64": np.array([0], dtype=np.float64),
}
)
# GH 7369, make sure can read a 0-obs dta file
path = temp_file
empty_df_typed.to_stata(path, write_index=False, version=version)
empty_reread = read_stata(path)
expected = empty_df_typed
# No uint# support. Downcast since values in range for int#
expected["u8"] = expected["u8"].astype(np.int8)
expected["u16"] = expected["u16"].astype(np.int16)
expected["u32"] = expected["u32"].astype(np.int32)
# No int64 supported at all. Downcast since values in range for int32
expected["u64"] = expected["u64"].astype(np.int32)
expected["i64"] = expected["i64"].astype(np.int32)
tm.assert_frame_equal(expected, empty_reread)
tm.assert_series_equal(expected.dtypes, empty_reread.dtypes)
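# Note (based on the downcasts above, not an exhaustive spec): Stata's numeric
# storage types are byte (int8), int (int16), long (int32), float and double,
# so unsigned and 64-bit integer columns are written as the smallest signed
# type that can hold the observed values.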
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_index_col_none(self, version, temp_file):
df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]})
# GH 7369, make sure can read a 0-obs dta file
path = temp_file
df.to_stata(path, write_index=False, version=version)
read_df = read_stata(path)
assert isinstance(read_df.index, pd.RangeIndex)
expected = df
expected["a"] = expected["a"].astype(np.int32)
tm.assert_frame_equal(read_df, expected, check_index_type=True)
@pytest.mark.parametrize(
"version", [102, 103, 104, 105, 108, 110, 111, 113, 114, 115, 117, 118, 119]
)
def test_read_dta1(self, version, datapath):
file = datapath("io", "data", "stata", f"stata1_{version}.dta")
parsed = self.read_dta(file)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
# This is an oddity: the NaN really should be float64, but the cast does not
# fail, so we need to match Stata here.
expected["float_miss"] = expected["float_miss"].astype(np.float32)
# Column names too long for older Stata formats
if version <= 108:
expected = expected.rename(
columns={
"float_miss": "f_miss",
"double_miss": "d_miss",
"byte_miss": "b_miss",
"int_miss": "i_miss",
"long_miss": "l_miss",
}
)
tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self, datapath):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1),
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1),
),
(pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT),
],
columns=[
"datetime_c",
"datetime_big_c",
"date",
"weekly_date",
"monthly_date",
"quarterly_date",
"half_yearly_date",
"yearly_date",
],
)
# TODO(GH#55564): just pass M8[s] to the constructor
expected["datetime_c"] = expected["datetime_c"].astype("M8[ms]")
expected["date"] = expected["date"].astype("M8[s]")
expected["weekly_date"] = expected["weekly_date"].astype("M8[s]")
expected["monthly_date"] = expected["monthly_date"].astype("M8[s]")
expected["quarterly_date"] = expected["quarterly_date"].astype("M8[s]")
expected["half_yearly_date"] = expected["half_yearly_date"].astype("M8[s]")
expected["yearly_date"] = expected["yearly_date"].astype("M8[s]")
path1 = datapath("io", "data", "stata", "stata2_114.dta")
path2 = datapath("io", "data", "stata", "stata2_115.dta")
path3 = datapath("io", "data", "stata", "stata2_117.dta")
msg = "Leaving in Stata Internal Format"
with tm.assert_produces_warning(UserWarning, match=msg):
parsed_114 = self.read_dta(path1)
with tm.assert_produces_warning(UserWarning, match=msg):
parsed_115 = self.read_dta(path2)
with tm.assert_produces_warning(UserWarning, match=msg):
parsed_117 = self.read_dta(path3)
# FIXME: don't leave commented-out
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(
# datapath("io", "data", "stata", "stata2_113.dta")
# )
# FIXME: don't leave commented-out
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
@pytest.mark.parametrize(
"file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"]
)
def test_read_dta3(self, file, datapath):
file = datapath("io", "data", "stata", f"{file}.dta")
parsed = self.read_dta(file)
# match stata here
expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
expected = expected.astype(np.float32)
expected["year"] = expected["year"].astype(np.int16)
expected["quarter"] = expected["quarter"].astype(np.int8)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("version", [110, 111, 113, 114, 115, 117])
def test_read_dta4(self, version, datapath):
file = datapath("io", "data", "stata", f"stata4_{version}.dta")
parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
],
)
# these are all categoricals
for col in expected:
orig = expected[col].copy()
categories = np.asarray(expected["fully_labeled"][orig.notna()])
if col == "incompletely_labeled":
categories = orig
cat = orig.astype("category")._values
cat = cat.set_categories(categories, ordered=True)
cat.categories.rename(None, inplace=True)
expected[col] = cat
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("version", [102, 103, 104, 105, 108])
def test_readold_dta4(self, version, datapath):
# This test is the same as test_read_dta4 above except that the columns
# had to be renamed to match the restrictions in older file format
file = datapath("io", "data", "stata", f"stata4_{version}.dta")
parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"],
],
columns=[
"fulllab",
"fulllab2",
"incmplab",
"misslab",
"floatlab",
],
)
# these are all categoricals
for col in expected:
orig = expected[col].copy()
categories = np.asarray(expected["fulllab"][orig.notna()])
if col == "incmplab":
categories = orig
cat = orig.astype("category")._values
cat = cat.set_categories(categories, ordered=True)
cat.categories.rename(None, inplace=True)
expected[col] = cat
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed, expected)
# File containing strls
@pytest.mark.parametrize(
"file",
[
"stata12_117",
"stata12_be_117",
"stata12_118",
"stata12_be_118",
"stata12_119",
"stata12_be_119",
],
)
def test_read_dta_strl(self, file, datapath):
parsed = self.read_dta(datapath("io", "data", "stata", f"{file}.dta"))
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=["x", "y", "z"],
)
tm.assert_frame_equal(parsed, expected, check_dtype=False)
# 117 is not included in this list as it uses ASCII strings
@pytest.mark.parametrize(
"file",
[
"stata14_118",
"stata14_be_118",
"stata14_119",
"stata14_be_119",
],
)
def test_read_dta118_119(self, file, datapath):
parsed_118 = self.read_dta(datapath("io", "data", "stata", f"{file}.dta"))
parsed_118["Bytes"] = parsed_118["Bytes"].astype("O")
expected = DataFrame.from_records(
[
["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0],
["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan],
["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0],
["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4], # noqa: RUF001
["", "", "", 0, 0.3332999, "option a", 1 / 3.0],
],
columns=[
"Things",
"Cities",
"Unicode_Cities_Strl",
"Ints",
"Floats",
"Bytes",
"Longs",
],
)
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(datapath("io", "data", "stata", f"{file}.dta")) as rdr:
vl = rdr.variable_labels()
vl_expected = {
"Unicode_Cities_Strl": "Here are some strls with Ünicode chars",
"Longs": "long data",
"Things": "Here are some things",
"Bytes": "byte data",
"Ints": "int data",
"Cities": "Here are some cities",
"Floats": "float data",
}
tm.assert_dict_equal(vl, vl_expected)
assert rdr.data_label == "This is a Ünicode data label"
def test_read_write_dta5(self, temp_file):
original = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
original.index.name = "index"
path = temp_file
original.to_stata(path, convert_dates=None)
written_and_read_again = self.read_dta(path)
expected = original
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_write_dta6(self, datapath, temp_file):
original = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["year"] = original["year"].astype(np.int32)
original["quarter"] = original["quarter"].astype(np.int32)
path = temp_file
original.to_stata(path, convert_dates=None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta10(self, version, temp_file, using_infer_string):
original = DataFrame(
data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
columns=["string", "object", "integer", "floating", "datetime"],
)
original["object"] = Series(original["object"], dtype=object)
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["integer"] = original["integer"].astype(np.int32)
path = temp_file
original.to_stata(path, convert_dates={"datetime": "tc"}, version=version)
written_and_read_again = self.read_dta(path)
expected = original.copy()
# "tc" convert_dates means we store in ms
expected["datetime"] = expected["datetime"].astype("M8[ms]")
if using_infer_string:
expected["object"] = expected["object"].astype("str")
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
expected,
)
def test_stata_doc_examples(self, temp_file):
path = temp_file
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
)
df.to_stata(path)
def test_write_preserves_original(self, temp_file):
# 9795
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd")
)
df.loc[2, "a":"c"] = np.nan
df_copy = df.copy()
path = temp_file
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_encoding(self, version, datapath, temp_file):
# GH 4626, proper encoding handling
raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
result = encoded.kreis1849[0]
expected = raw.kreis1849[0]
assert result == expected
assert isinstance(result, str)
path = temp_file
encoded.to_stata(path, write_index=False, version=version)
reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self, temp_file):
original = DataFrame(
[(1, 2, 3, 4)],
columns=[
"good",
"b\u00e4d",
"8number",
"astringwithmorethan32characters______",
],
)
formatted = DataFrame(
[(1, 2, 3, 4)],
columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
path = temp_file
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
original.to_stata(path, convert_dates=None)
written_and_read_again = self.read_dta(path)
expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_read_write_dta12(self, version, temp_file):
original = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_1",
"astringwithmorethan32characters_2",
"+",
"-",
"short",
"delete",
],
)
formatted = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_",
"_0astringwithmorethan32character",
"_",
"_1_",
"_short",
"_delete",
],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
path = temp_file
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
original.to_stata(path, convert_dates=None, version=version)
# should get a warning for that format.
written_and_read_again = self.read_dta(path)
expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_read_write_dta13(self, temp_file):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
original.index.name = "index"
formatted = original
formatted["int64"] = formatted["int64"].astype(np.float64)
path = temp_file
original.to_stata(path)
written_and_read_again = self.read_dta(path)
expected = formatted
expected.index = expected.index.astype(np.int32)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize(
"file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"]
)
def test_read_write_reread_dta14(
self, file, parsed_114, version, datapath, temp_file
):
file = datapath("io", "data", "stata", f"{file}.dta")
parsed = self.read_dta(file)
parsed.index.name = "index"
tm.assert_frame_equal(parsed_114, parsed)
path = temp_file
parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version)
written_and_read_again = self.read_dta(path)
expected = parsed_114.copy()
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize(
"file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"]
)
def test_read_write_reread_dta15(self, file, datapath):
expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
# TODO(GH#55564): directly cast to M8[s]
arr = expected["date_td"].astype("Period[D]")._values.asfreq("s", how="S")
expected["date_td"] = arr.view("M8[s]")
file = datapath("io", "data", "stata", f"{file}.dta")
parsed = self.read_dta(file)
tm.assert_frame_equal(expected, parsed)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_timestamp_and_label(self, version, temp_file):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = "This is a data file."
path = temp_file
original.to_stata(
path, time_stamp=time_stamp, data_label=data_label, version=version
)
with StataReader(path) as reader:
assert reader.time_stamp == "29 Feb 2000 14:21"
assert reader.data_label == data_label
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_timestamp(self, version, temp_file):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = "01 Jan 2000, 00:00:00"
path = temp_file
msg = "time_stamp should be datetime type"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, time_stamp=time_stamp, version=version)
assert not os.path.isfile(path)
def test_numeric_column_names(self, temp_file):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = "index"
path = temp_file
# should get a warning for that format.
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
expected = original
tm.assert_frame_equal(expected, written_and_read_again)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nan_to_missing_value(self, version, temp_file):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
path = temp_file
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
expected = original
tm.assert_frame_equal(written_and_read_again, expected)
def test_no_index(self, temp_file):
columns = ["x", "y"]
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns)
original.index.name = "index_not_written"
path = temp_file
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
with pytest.raises(KeyError, match=original.index.name):
written_and_read_again["index_not_written"]
def test_string_no_dates(self, temp_file):
s1 = Series(["a", "A longer string"])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
path = temp_file
original.to_stata(path)
written_and_read_again = self.read_dta(path)
expected = original
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_large_value_conversion(self, temp_file):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2**15 - 1], dtype=np.int16)
s3 = Series([1, 2**63 - 1], dtype=np.int64)
original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
original.index.name = "index"
path = temp_file
with tm.assert_produces_warning(PossiblePrecisionLoss, match="from int64 to"):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original
modified["s1"] = Series(modified["s1"], dtype=np.int16)
modified["s2"] = Series(modified["s2"], dtype=np.int32)
modified["s3"] = Series(modified["s3"], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
def test_dates_invalid_column(self, temp_file):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = "index"
path = temp_file
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
original.to_stata(path, convert_dates={0: "tc"})
written_and_read_again = self.read_dta(path)
expected = original.copy()
expected.columns = ["_0"]
expected.index = original.index.astype(np.int32)
expected["_0"] = expected["_0"].astype("M8[ms]")
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_105(self, datapath):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
df = read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0["clustnum"] = df0["clustnum"].astype(np.int16)
df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
df0["psch_num"] = df0["psch_num"].astype(np.int8)
df0["psch_dis"] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_value_labels_old_format(self, datapath):
# GH 19417
#
# Test that value_labels() returns an empty dict if the file format
# predates supporting value labels.
dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
with StataReader(dpath) as reader:
assert reader.value_labels() == {}
def test_date_export_formats(self, temp_file):
columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"]
conversions = {c: c for c in columns}
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = "index"
expected_values = [
datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1),
] # Year
expected = DataFrame(
[expected_values],
index=pd.Index([0], dtype=np.int32, name="index"),
columns=columns,
dtype="M8[s]",
)
expected["tc"] = expected["tc"].astype("M8[ms]")
path = temp_file
original.to_stata(path, convert_dates=conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
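# Illustrative sketch (background assumption, not part of the test): Stata
# stores dates as counts since 1960-01-01, with the unit determined by the
# display format, e.g. %td is whole days:
#
#     import datetime as dt
#     stata_epoch = dt.date(1960, 1, 1)
#     td_value = (dt.date(2006, 11, 20) - stata_epoch).days
#
# to_stata/read_stata perform the analogous conversions for the other %t*
# units exercised above (%tc in milliseconds, %tw in weeks, %tm in months,
# %tq in quarters, %th in half-years; %ty stores the calendar year itself).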
def test_write_missing_strings(self, temp_file):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame(
[["1"], [""]],
index=pd.RangeIndex(2, name="index"),
columns=["foo"],
)
path = temp_file
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("byteorder", [">", "<"])
def test_bool_uint(self, byteorder, version, temp_file):
s0 = Series([0, 1, True], dtype=np.bool_)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32)
original = DataFrame(
{"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
)
original.index.name = "index"
path = temp_file
original.to_stata(path, byteorder=byteorder, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
expected = original
expected_types = (
np.int8,
np.int8,
np.int16,
np.int16,
np.int32,
np.int32,
np.float64,
)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
tm.assert_frame_equal(written_and_read_again, expected)
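# Note (inferred from expected_types above): Stata has no unsigned integer
# types, so bool and uint columns round-trip as the smallest signed Stata type
# that can hold the observed values; uint32 values beyond the int32 range fall
# back to float64.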
def test_variable_labels(self, datapath):
with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr:
sr_117 = rdr.variable_labels()
keys = ("var1", "var2", "var3")
labels = ("label1", "label2", "label3")
for k, v in sr_115.items():
assert k in sr_117
assert v == sr_117[k]
assert k in keys
assert v in labels
def test_minimal_size_col(self, temp_file):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
path = temp_file
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
sr._ensure_open() # The `_*list` variables are initialized here
for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist):
assert int(variable[1:]) == int(fmt[1:-1])
assert int(variable[1:]) == typ
def test_excessively_long_string(self, temp_file):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
msg = (
r"Fixed width strings in Stata \.dta files are limited to 244 "
r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
r"this restriction\. Use the\n'version=117' parameter to write "
r"the newer \(Stata 13 and later\) format\."
)
with pytest.raises(ValueError, match=msg):
path = temp_file
original.to_stata(path)
def test_missing_value_generator(self, temp_file):
types = ("b", "h", "l")
df = DataFrame([[0.0]], columns=["float_"])
path = temp_file
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ["." + chr(97 + i) for i in range(26)]
expected_values.insert(0, ".")
for t in types:
offset = valid_range[t][1]
for i in range(27):
val = StataMissingValue(offset + 1 + i)
assert val.string == expected_values[i]
# Test extremes for floats
val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0])
assert val.string == "."
val = StataMissingValue(struct.unpack("<f", b"\x00\xd0\x00\x7f")[0])
assert val.string == ".z"
# Test extremes for doubles
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
)
assert val.string == "."
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x1a\xe0\x7f")[0]
)
assert val.string == ".z"
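# Note (background, not asserted by the test beyond the values above): each
# integer storage type reserves its 27 largest values for the system missing
# value "." and the extended missing values ".a" through ".z"; for floats the
# missing values are encoded as the special bit patterns unpacked above.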
@pytest.mark.parametrize("version", [113, 115, 117])
def test_missing_value_conversion(self, version, datapath):
columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
smv = StataMissingValue(101)
keys = sorted(smv.MISSING_VALUES.keys())
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(
datapath("io", "data", "stata", f"stata8_{version}.dta"),
convert_missing=True,
)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("version", [104, 105, 108, 110, 111])
def test_missing_value_conversion_compat(self, version, datapath):
columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
smv = StataMissingValue(101)
keys = sorted(smv.MISSING_VALUES.keys())
data = []
row = [StataMissingValue(keys[j * 27]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(
datapath("io", "data", "stata", f"stata8_{version}.dta"),
convert_missing=True,
)
tm.assert_frame_equal(parsed, expected)
# The byte type was not supported prior to the 104 format
@pytest.mark.parametrize("version", [102, 103])
def test_missing_value_conversion_compat_nobyte(self, version, datapath):
columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
smv = StataMissingValue(101)
keys = sorted(smv.MISSING_VALUES.keys())
data = []
row = [StataMissingValue(keys[j * 27]) for j in [1, 1, 2, 3, 4]]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(
datapath("io", "data", "stata", f"stata8_{version}.dta"),
convert_missing=True,
)
tm.assert_frame_equal(parsed, expected)
def test_big_dates(self, datapath, temp_file):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for year, month, day, hour, minute, second in zip(yr, mo, dd, hr, mm, ss):
row = []
for j in range(7):
if j == 0:
row.append(datetime(year, month, day, hour, minute, second))
elif j == 6:
row.append(datetime(year, 1, 1))
else:
row.append(datetime(year, month, day))
expected.append(row)
expected.append([pd.NaT] * 7)
columns = [
"date_tc",
"date_td",
"date_tw",
"date_tm",
"date_tq",
"date_th",
"date_ty",
]
# Fixes for weekly, quarterly, half-yearly, and yearly dates
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=object)
expected["date_tc"] = expected["date_tc"].astype("M8[ms]")
expected["date_td"] = expected["date_td"].astype("M8[s]")
expected["date_tm"] = expected["date_tm"].astype("M8[s]")
expected["date_tw"] = expected["date_tw"].astype("M8[s]")
expected["date_tq"] = expected["date_tq"].astype("M8[s]")
expected["date_th"] = expected["date_th"].astype("M8[s]")
expected["date_ty"] = expected["date_ty"].astype("M8[s]")
parsed_115 = read_stata(datapath("io", "data", "stata", "stata9_115.dta"))
parsed_117 = read_stata(datapath("io", "data", "stata", "stata9_117.dta"))
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = {c: c[-2:] for c in columns}
path = temp_file
expected.index.name = "index"
msg = (
"Converting object-dtype columns of datetimes to datetime64 "
"when writing to stata is deprecated"
)
exp_object = expected.astype(object)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
exp_object.to_stata(path, convert_dates=date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
expected.set_index(expected.index.astype(np.int32)),
)
def test_dtype_conversion(self, datapath):
expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].astype("M8[s]")
no_conversion = read_stata(
datapath("io", "data", "stata", "stata6_117.dta"), convert_dates=True
)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
preserve_dtypes=False,
)
# read_csv types are the same
expected2 = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
expected2["date_td"] = expected["date_td"]
tm.assert_frame_equal(expected2, conversion)
def test_drop_column(self, datapath):
expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
columns = ["byte_", "int_", "long_"]
expected = expected[columns]
dropped = read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
columns=columns,
)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ["int_", "long_", "byte_"]
expected = expected[columns]
reordered = read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
columns=columns,
)
tm.assert_frame_equal(expected, reordered)
msg = "columns contains duplicate entries"
with pytest.raises(ValueError, match=msg):
read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
columns=["byte_", "byte_"],
)
msg = "The following columns were not found in the Stata data set: not_found"
with pytest.raises(ValueError, match=msg):
read_stata(
datapath("io", "data", "stata", "stata6_117.dta"),
convert_dates=True,
columns=["byte_", "int_", "long_", "not_found"],
)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.filterwarnings(
"ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
)
def test_categorical_writing(self, version, temp_file):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
"unlabeled",
],
)
path = temp_file
original.astype("category").to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
expected = original
expected.index = expected.index.set_names("index")
expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
expected["unlabeled"] = expected["unlabeled"].apply(str)
for col in expected:
orig = expected[col]
cat = orig.astype("category")._values
cat = cat.as_ordered()
if col == "unlabeled":
cat = cat.set_categories(orig, ordered=True)
cat.categories.rename(None, inplace=True)
expected[col] = cat
tm.assert_frame_equal(res, expected)
def test_categorical_warnings_and_errors(self, temp_file):
# Warning for non-string labels
original = DataFrame.from_records(
[["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
).astype("category")
msg = "data file created has not lost information due to duplicate labels"
with tm.assert_produces_warning(ValueLabelTypeMismatch, match=msg):
original.to_stata(temp_file)
# should get a warning for mixed content
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_categorical_with_stata_missing_values(self, version, temp_file):
values = [["a" + str(i)] for i in range(120)]
values.append([np.nan])
original = DataFrame.from_records(values, columns=["many_labels"])
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
original.index.name = "index"
path = temp_file
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
expected = original
for col in expected:
cat = expected[col]._values
new_cats = cat.remove_unused_categories().categories
cat = cat.set_categories(new_cats, ordered=True)
expected[col] = cat
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
def test_categorical_order(self, file, datapath):
# Directly construct using expected codes
# Format: is_cat, col_name, labels (in order), underlying data
expected = [
(True, "ordered", ["a", "b", "c", "d", "e"], np.arange(5)),
(True, "reverse", ["a", "b", "c", "d", "e"], np.arange(5)[::-1]),
(True, "noorder", ["a", "b", "c", "d", "e"], np.array([2, 1, 4, 0, 3])),
(True, "floating", ["a", "b", "c", "d", "e"], np.arange(0, 5)),
(True, "float_missing", ["a", "d", "e"], np.array([0, 1, 2, -1, -1])),
(False, "nolabel", [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, "int32_mixed", ["d", 2, "e", "b", "a"], np.arange(5)),
]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append(
(col, pd.Categorical.from_codes(codes, labels, ordered=True))
)
else:
cols.append((col, Series(labels, dtype=np.float32)))
expected = DataFrame.from_dict(dict(cols))
# Read with and without categoricals, ensure order is identical
file = datapath("io", "data", "stata", f"{file}.dta")
parsed = read_stata(file)
tm.assert_frame_equal(expected, parsed)
# Check identity of codes
for col in expected:
if isinstance(expected[col].dtype, CategoricalDtype):
tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes)
tm.assert_index_equal(
expected[col].cat.categories, parsed[col].cat.categories
)
@pytest.mark.parametrize("file", ["stata11_115", "stata11_117"])
def test_categorical_sorting(self, file, datapath):
parsed = read_stata(datapath("io", "data", "stata", f"{file}.dta"))
# Sort based on codes, not strings
parsed = parsed.sort_values("srh", na_position="first")
# Don't sort index
parsed.index = pd.RangeIndex(len(parsed))
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(
codes=codes, categories=categories, ordered=True
)
expected = Series(cat, name="srh")
tm.assert_series_equal(expected, parsed["srh"])
@pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
def test_categorical_ordering(self, file, datapath):
file = datapath("io", "data", "stata", f"{file}.dta")
parsed = read_stata(file)
parsed_unordered = read_stata(file, order_categoricals=False)
for col in parsed:
if not isinstance(parsed[col].dtype, CategoricalDtype):
continue
assert parsed[col].cat.ordered
assert not parsed_unordered[col].cat.ordered
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"file",
[
"stata1_117",
"stata2_117",
"stata3_117",
"stata4_117",
"stata5_117",
"stata6_117",
"stata7_117",
"stata8_117",
"stata9_117",
"stata10_117",
"stata11_117",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_117(
self, file, chunksize, convert_categoricals, convert_dates, datapath
):
fname = datapath("io", "data", "stata", f"{file}.dta")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
with read_stata(
fname,
iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
) as itr:
pos = 0
for j in range(5):
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
from_frame,
chunk,
check_dtype=False,
)
pos += chunksize
@staticmethod
def _convert_categorical(from_frame: DataFrame) -> DataFrame:
"""
Emulate the categorical casting behavior we expect from roundtripping.
"""
for col in from_frame:
ser = from_frame[col]
if isinstance(ser.dtype, CategoricalDtype):
cat = ser._values.remove_unused_categories()
if cat.categories.dtype == object:
categories = pd.Index._with_infer(cat.categories._values)
cat = cat.set_categories(categories)
elif cat.categories.dtype == "string" and len(cat.categories) == 0:
# if the read categories are empty, it comes back as object dtype
categories = cat.categories.astype(object)
cat = cat.set_categories(categories)
from_frame[col] = cat
return from_frame
def test_iterator(self, datapath):
fname = datapath("io", "data", "stata", "stata12_117.dta")
parsed = read_stata(fname)
expected = parsed.iloc[0:5, :]
with read_stata(fname, iterator=True) as itr:
chunk = itr.read(5)
tm.assert_frame_equal(expected, chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = next(itr)
tm.assert_frame_equal(expected, chunk)
with read_stata(fname, iterator=True) as itr:
chunk = itr.get_chunk(5)
tm.assert_frame_equal(expected, chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = itr.get_chunk()
tm.assert_frame_equal(expected, chunk)
# GH12153
with read_stata(fname, chunksize=4) as itr:
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize(
"file",
[
"stata2_115",
"stata3_115",
"stata4_115",
"stata5_115",
"stata6_115",
"stata7_115",
"stata8_115",
"stata9_115",
"stata10_115",
"stata11_115",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_115(
self, file, chunksize, convert_categoricals, convert_dates, datapath
):
fname = datapath("io", "data", "stata", f"{file}.dta")
# Read the whole file
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
# Compare to what we get when reading by chunk
with read_stata(
fname,
iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
) as itr:
pos = 0
for j in range(5):
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
from_frame = self._convert_categorical(from_frame)
tm.assert_frame_equal(
from_frame,
chunk,
check_dtype=False,
)
pos += chunksize
def test_read_chunks_columns(self, datapath):
fname = datapath("io", "data", "stata", "stata3_117.dta")
columns = ["quarter", "cpi", "m1"]
chunksize = 2
parsed = read_stata(fname, columns=columns)
with read_stata(fname, iterator=True) as itr:
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos : pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_write_variable_labels(self, version, mixed_frame, temp_file):
# GH 13631, add support for writing variable labels
mixed_frame.index.name = "index"
variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
path = temp_file
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {
"index": "",
"a": "City Rank",
"b": "City Exponent",
"c": "City",
}
assert read_labels == expected_labels
variable_labels["index"] = "The Index"
path = temp_file
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
assert read_labels == variable_labels
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_variable_labels(self, version, mixed_frame, temp_file):
mixed_frame.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
path = temp_file
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
@pytest.mark.parametrize("version", [114, 117])
def test_invalid_variable_label_encoding(self, version, mixed_frame, temp_file):
mixed_frame.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
variable_labels["a"] = "invalid character Œ"
path = temp_file
with pytest.raises(
ValueError, match="Variable labels must contain only characters"
):
mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
def test_write_variable_label_errors(self, mixed_frame, temp_file):
values = ["\u03a1", "\u0391", "\u039d", "\u0394", "\u0391", "\u03a3"]
variable_labels_utf8 = {
"a": "City Rank",
"b": "City Exponent",
"c": "".join(values),
}
msg = (
"Variable labels must contain only characters that can be "
"encoded in Latin-1"
)
with pytest.raises(ValueError, match=msg):
path = temp_file
mixed_frame.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {
"a": "City Rank",
"b": "City Exponent",
"c": "A very, very, very long variable label "
"that is too long for Stata which means "
"that it has more than 80 characters",
}
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
path = temp_file
mixed_frame.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self, temp_file):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
expected = original[:]
# "tc" for convert_dates below stores with "ms" resolution
expected["dates"] = expected["dates"].astype("M8[ms]")
path = temp_file
original.to_stata(path, write_index=False)
reread = read_stata(path, convert_dates=True)
tm.assert_frame_equal(expected, reread)
original.to_stata(path, write_index=False, convert_dates={"dates": "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
dates_idx = original.columns.tolist().index("dates")
original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
def test_unsupported_type(self, temp_file):
original = DataFrame({"a": [1 + 2j, 2 + 4j]})
msg = "Data type complex128 not supported"
with pytest.raises(NotImplementedError, match=msg):
path = temp_file
original.to_stata(path)
def test_unsupported_datetype(self, temp_file):
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
msg = "Format %tC not implemented"
with pytest.raises(NotImplementedError, match=msg):
path = temp_file
original.to_stata(path, convert_dates={"dates": "tC"})
dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with pytest.raises(NotImplementedError, match="Data type datetime64"):
path = temp_file
original.to_stata(path)
def test_repeated_column_labels(self, datapath):
# GH 13923, 25772
msg = """
Value labels for column ethnicsn are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:\n-+\nwolof
"""
with pytest.raises(ValueError, match=msg):
read_stata(
datapath("io", "data", "stata", "stata15.dta"),
convert_categoricals=True,
)
def test_stata_111(self, datapath):
# 111 is an old version but still used by current versions of
# SAS when exporting to Stata format. We do not know of any
# on-line documentation for this version.
df = read_stata(datapath("io", "data", "stata", "stata7_111.dta"))
original = DataFrame(
{
"y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0],
"x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6],
"w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3],
"z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"],
}
)
original = original[["y", "x", "w", "z"]]
tm.assert_frame_equal(original, df)
def test_out_of_range_double(self, temp_file):
# GH 14618
df = DataFrame(
{
"ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307],
"ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max],
}
)
msg = (
r"Column ColumnTooBig has a maximum value \(.+\) outside the range "
r"supported by Stata \(.+\)"
)
with pytest.raises(ValueError, match=msg):
path = temp_file
df.to_stata(path)
def test_out_of_range_float(self, temp_file):
original = DataFrame(
{
"ColumnOk": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max / 10.0,
],
"ColumnTooBig": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max,
],
}
)
original.index.name = "index"
for col in original:
original[col] = original[col].astype(np.float32)
path = temp_file
original.to_stata(path)
reread = read_stata(path)
original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
expected = original
tm.assert_frame_equal(reread.set_index("index"), expected)
@pytest.mark.parametrize("infval", [np.inf, -np.inf])
def test_inf(self, infval, temp_file):
# GH 45350
df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]})
msg = (
"Column WithInf contains infinity or -infinity"
"which is outside the range supported by Stata."
)
with pytest.raises(ValueError, match=msg):
path = temp_file
df.to_stata(path)
def test_path_pathlib(self):
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_pathlib(df.to_stata, reader)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("write_index", [True, False])
def test_value_labels_iterator(self, write_index, temp_file):
# GH 16923
d = {"A": ["B", "E", "C", "A", "E"]}
df = DataFrame(data=d)
df["A"] = df["A"].astype("category")
path = temp_file
df.to_stata(path, write_index=write_index)
with read_stata(path, iterator=True) as dta_iter:
value_labels = dta_iter.value_labels()
assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}
def test_set_index(self, temp_file):
# GH 17328
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.index.name = "index"
path = temp_file
df.to_stata(path)
reread = read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
@pytest.mark.parametrize(
"column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
)
def test_date_parsing_ignores_format_details(self, column, datapath):
# GH 17797
#
# Test that display formats are ignored when determining if a numeric
# column is a date value.
#
# All date types are stored as numbers and format associated with the
# column denotes both the type of the date and the display format.
#
# STATA supports 9 date types which each have distinct units. We test 7
# of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
# accounts for leap seconds and %tb relies on STATA's business calendar.
df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta"))
unformatted = df.loc[0, column]
formatted = df.loc[0, column + "_fmt"]
assert unformatted == formatted
@pytest.mark.parametrize("byteorder", ["little", "big"])
def test_writer_117(self, byteorder, temp_file, using_infer_string):
original = DataFrame(
data=[
[
"string",
"object",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-25"),
"a",
"a" * 2045,
"a" * 5000,
"a",
],
[
"string-1",
"object-1",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-26"),
"b",
"b" * 2045,
"",
"",
],
],
columns=[
"string",
"object",
"int8",
"int16",
"int32",
"float32",
"float64",
"datetime",
"s1",
"s2045",
"srtl",
"forced_strl",
],
)
original["object"] = Series(original["object"], dtype=object)
original["int8"] = Series(original["int8"], dtype=np.int8)
original["int16"] = Series(original["int16"], dtype=np.int16)
original["int32"] = original["int32"].astype(np.int32)
original["float32"] = Series(original["float32"], dtype=np.float32)
original.index.name = "index"
copy = original.copy()
path = temp_file
original.to_stata(
path,
convert_dates={"datetime": "tc"},
byteorder=byteorder,
convert_strl=["forced_strl"],
version=117,
)
written_and_read_again = self.read_dta(path)
expected = original[:]
# "tc" for convert_dates means we store with "ms" resolution
expected["datetime"] = expected["datetime"].astype("M8[ms]")
if using_infer_string:
# object dtype (with only strings/None) comes back as string dtype
expected["object"] = expected["object"].astype("str")
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
expected,
)
tm.assert_frame_equal(original, copy)
def test_convert_strl_name_swap(self, temp_file):
original = DataFrame(
[["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]],
columns=["long1" * 10, "long", 1],
)
original.index.name = "index"
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
path = temp_file
original.to_stata(path, convert_strl=["long", 1], version=117)
reread = self.read_dta(path)
reread = reread.set_index("index")
reread.columns = original.columns
tm.assert_frame_equal(reread, original, check_index_type=False)
def test_invalid_date_conversion(self, temp_file):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
path = temp_file
msg = "convert_dates key must be a column or an integer"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, convert_dates={"wrong_name": "tc"})
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_nonfile_writing(self, version, temp_file):
# GH 21041
bio = io.BytesIO()
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.index.name = "index"
path = temp_file
df.to_stata(bio, version=version)
bio.seek(0)
with open(path, "wb") as dta:
dta.write(bio.read())
reread = read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
def test_gzip_writing(self, temp_file):
# writing version 117 requires seek and cannot be used with gzip
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.index.name = "index"
path = temp_file
with gzip.GzipFile(path, "wb") as gz:
df.to_stata(gz, version=114)
with gzip.GzipFile(path, "rb") as gz:
reread = read_stata(gz, index_col="index")
tm.assert_frame_equal(df, reread)
# 117 is not included in this list as it uses ASCII strings
@pytest.mark.parametrize(
"file",
[
"stata16_118",
"stata16_be_118",
"stata16_119",
"stata16_be_119",
],
)
def test_unicode_dta_118_119(self, file, datapath):
unicode_df = self.read_dta(datapath("io", "data", "stata", f"{file}.dta"))
columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"]
values = [
["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"],
["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"],
["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"],
[" ", " ", "d", " ", "d"],
[" ", "", "a", " ", "a"],
["", "", "s", "", "s"],
["", "", " ", "", " "],
]
expected = DataFrame(values, columns=columns)
tm.assert_frame_equal(unicode_df, expected)
def test_mixed_string_strl(self, temp_file, using_infer_string):
# GH 23633
output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
output = DataFrame(output)
output.number = output.number.astype("int32")
path = temp_file
output.to_stata(path, write_index=False, version=117)
reread = read_stata(path)
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
# Check strl supports all None (null)
output["mixed"] = None
output.to_stata(path, write_index=False, convert_strl=["mixed"], version=117)
reread = read_stata(path)
expected = output.fillna("")
if using_infer_string:
expected["mixed"] = expected["mixed"].astype("str")
tm.assert_frame_equal(reread, expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_all_none_exception(self, version, temp_file):
output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
output = DataFrame(output)
output["none"] = None
with pytest.raises(ValueError, match="Column `none` cannot be exported"):
output.to_stata(temp_file, version=version)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_invalid_file_not_written(self, version, temp_file):
content = "Here is one __�__ Another one __·__ Another one __½__"
df = DataFrame([content], columns=["invalid"])
msg1 = (
r"'latin-1' codec can't encode character '\\ufffd' "
r"in position 14: ordinal not in range\(256\)"
)
msg2 = (
"'ascii' codec can't decode byte 0xef in position 14: "
r"ordinal not in range\(128\)"
)
with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"):
df.to_stata(temp_file)
def test_strl_latin1(self, temp_file):
# GH 23573, correct GSO data to reflect correct size
output = DataFrame(
[["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"]
)
output.to_stata(temp_file, version=117, convert_strl=["var_strl"])
with open(temp_file, "rb") as reread:
content = reread.read()
expected = "þâÑÐŧ"
assert expected.encode("latin-1") in content
assert expected.encode("utf-8") in content
gsos = content.split(b"strls")[1][1:-2]
for gso in gsos.split(b"GSO")[1:]:
val = gso.split(b"\x00")[-2]
size = gso[gso.find(b"\x82") + 1]
assert len(val) == size - 1
def test_encoding_latin1_118(self, datapath):
# GH 25960
msg = """
One or more strings in the dta file could not be decoded using utf-8, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
# Move path outside of read_stata, or else assert_produces_warning
# will block pytest's skip mechanism from triggering (failing the test)
# if the path is not present
path = datapath("io", "data", "stata", "stata1_encoding_118.dta")
with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w:
encoded = read_stata(path)
# with filter_level="always", produces 151 warnings which can be slow
assert len(w) == 1
assert w[0].message.args[0] == msg
expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
tm.assert_frame_equal(encoded, expected)
@pytest.mark.slow
def test_stata_119(self, datapath):
# Gzipped since contains 32,999 variables and uncompressed is 20MiB
# Just validate that the reader reports correct number of variables
# to avoid high peak memory
with gzip.open(
datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb"
) as gz:
with StataReader(gz) as reader:
reader._ensure_open()
assert reader._nvar == 32999
@pytest.mark.parametrize("version", [118, 119, None])
@pytest.mark.parametrize("byteorder", ["little", "big"])
def test_utf8_writer(self, version, byteorder, temp_file):
cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
data = DataFrame(
[
[1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"],
[2.0, 2, "ᴮ", ""],
[3.0, 3, "ᴰ", None],
],
columns=["Å", "β", "ĉ", "strls"],
)
data["ᴐᴬᵀ"] = cat
variable_labels = {
"Å": "apple",
"β": "ᵈᵉᵊ",
"ĉ": "ᴎტჄႲႳႴႶႺ",
"strls": "Long Strings",
"ᴐᴬᵀ": "",
}
data_label = "ᴅaᵀa-label"
value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}}
data["β"] = data["β"].astype(np.int32)
writer = StataWriterUTF8(
temp_file,
data,
data_label=data_label,
convert_strl=["strls"],
variable_labels=variable_labels,
write_index=False,
byteorder=byteorder,
version=version,
value_labels=value_labels,
)
writer.write_file()
reread_encoded = read_stata(temp_file)
# Missing is intentionally converted to empty strl
data["strls"] = data["strls"].fillna("")
# Variable with value labels is reread as categorical
data["β"] = (
data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered()
)
tm.assert_frame_equal(data, reread_encoded)
with StataReader(temp_file) as reader:
assert reader.data_label == data_label
assert reader.variable_labels() == variable_labels
data.to_stata(temp_file, version=version, write_index=False)
reread_to_stata = read_stata(temp_file)
tm.assert_frame_equal(data, reread_to_stata)
def test_writer_118_exceptions(self, temp_file):
df = DataFrame(np.zeros((1, 33000), dtype=np.int8))
with pytest.raises(ValueError, match="version must be either 118 or 119."):
StataWriterUTF8(temp_file, df, version=117)
with pytest.raises(ValueError, match="You must use version 119"):
StataWriterUTF8(temp_file, df, version=118)
@pytest.mark.parametrize(
"dtype_backend",
["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
)
def test_read_write_ea_dtypes(self, dtype_backend, temp_file, tmp_path):
dtype = "Int64" if dtype_backend == "numpy_nullable" else "int64[pyarrow]"
df = DataFrame(
{
"a": pd.array([1, 2, None], dtype=dtype),
"b": ["a", "b", "c"],
"c": [True, False, None],
"d": [1.5, 2.5, 3.5],
"e": pd.date_range("2020-12-31", periods=3, freq="D"),
},
index=pd.Index([0, 1, 2], name="index"),
)
df = df.convert_dtypes(dtype_backend=dtype_backend)
stata_path = tmp_path / "test_stata.dta"
df.to_stata(stata_path, version=118)
df.to_stata(temp_file)
written_and_read_again = self.read_dta(temp_file)
expected = DataFrame(
{
"a": [1, 2, np.nan],
"b": ["a", "b", "c"],
"c": [1.0, 0, np.nan],
"d": [1.5, 2.5, 3.5],
# stata stores with ms unit, so unit does not round-trip exactly
"e": pd.date_range("2020-12-31", periods=3, freq="D", unit="ms"),
},
index=pd.RangeIndex(range(3), name="index"),
)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [113, 114, 115, 117, 118, 119])
def test_read_data_int_validranges(self, version, datapath):
expected = DataFrame(
{
"byte": np.array([-127, 100], dtype=np.int8),
"int": np.array([-32767, 32740], dtype=np.int16),
"long": np.array([-2147483647, 2147483620], dtype=np.int32),
}
)
parsed = read_stata(
datapath("io", "data", "stata", f"stata_int_validranges_{version}.dta")
)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("version", [104, 105, 108, 110, 111])
def test_read_data_int_validranges_compat(self, version, datapath):
expected = DataFrame(
{
"byte": np.array([-128, 126], dtype=np.int8),
"int": np.array([-32768, 32766], dtype=np.int16),
"long": np.array([-2147483648, 2147483646], dtype=np.int32),
}
)
parsed = read_stata(
datapath("io", "data", "stata", f"stata_int_validranges_{version}.dta")
)
tm.assert_frame_equal(parsed, expected)
# The byte type was not supported prior to the 104 format
@pytest.mark.parametrize("version", [102, 103])
def test_read_data_int_validranges_compat_nobyte(self, version, datapath):
expected = DataFrame(
{
"byte": np.array([-128, 126], dtype=np.int16),
"int": np.array([-32768, 32766], dtype=np.int16),
"long": np.array([-2147483648, 2147483646], dtype=np.int32),
}
)
parsed = read_stata(
datapath("io", "data", "stata", f"stata_int_validranges_{version}.dta")
)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("version", [105, 108, 110, 111, 113, 114])
def test_backward_compat(version, datapath):
data_base = datapath("io", "data", "stata")
ref = os.path.join(data_base, "stata-compat-118.dta")
old = os.path.join(data_base, f"stata-compat-{version}.dta")
expected = read_stata(ref)
old_dta = read_stata(old)
tm.assert_frame_equal(old_dta, expected, check_dtype=False)
@pytest.mark.parametrize("version", [103, 104])
def test_backward_compat_nodateconversion(version, datapath):
# The Stata data format prior to 105 did not support a date format
# so read the raw values for comparison
data_base = datapath("io", "data", "stata")
ref = os.path.join(data_base, "stata-compat-118.dta")
old = os.path.join(data_base, f"stata-compat-{version}.dta")
expected = read_stata(ref, convert_dates=False)
old_dta = read_stata(old, convert_dates=False)
tm.assert_frame_equal(old_dta, expected, check_dtype=False)
@pytest.mark.parametrize("version", [102])
def test_backward_compat_nostring(version, datapath):
# The Stata data format prior to 105 did not support a date format
# so read the raw values for comparison
ref = datapath("io", "data", "stata", "stata-compat-118.dta")
old = datapath("io", "data", "stata", f"stata-compat-{version}.dta")
expected = read_stata(ref, convert_dates=False)
# The Stata data format prior to 103 did not support string data
expected = expected.drop(columns=["s10"])
old_dta = read_stata(old, convert_dates=False)
tm.assert_frame_equal(old_dta, expected, check_dtype=False)
@pytest.mark.parametrize("version", [105, 108, 110, 111, 113, 114, 118])
def test_bigendian(version, datapath):
ref = datapath("io", "data", "stata", f"stata-compat-{version}.dta")
big = datapath("io", "data", "stata", f"stata-compat-be-{version}.dta")
expected = read_stata(ref)
big_dta = read_stata(big)
tm.assert_frame_equal(big_dta, expected)
# Note: 102 format does not support big-endian byte order
@pytest.mark.parametrize("version", [103, 104])
def test_bigendian_nodateconversion(version, datapath):
# The Stata data format prior to 105 did not support a date format
# so read the raw values for comparison
ref = datapath("io", "data", "stata", f"stata-compat-{version}.dta")
big = datapath("io", "data", "stata", f"stata-compat-be-{version}.dta")
expected = read_stata(ref, convert_dates=False)
big_dta = read_stata(big, convert_dates=False)
tm.assert_frame_equal(big_dta, expected)
def test_direct_read(datapath, monkeypatch):
file_path = datapath("io", "data", "stata", "stata-compat-118.dta")
# Test that opening a file path doesn't buffer the file.
with StataReader(file_path) as reader:
# Must not have been buffered to memory
assert not reader.read().empty
assert not isinstance(reader._path_or_buf, io.BytesIO)
# Test that we use a given fp exactly, if possible.
with open(file_path, "rb") as fp:
with StataReader(fp) as reader:
assert not reader.read().empty
assert reader._path_or_buf is fp
# Test that we use a given BytesIO exactly, if possible.
with open(file_path, "rb") as fp:
with io.BytesIO(fp.read()) as bio:
with StataReader(bio) as reader:
assert not reader.read().empty
assert reader._path_or_buf is bio
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("use_dict", [True, False])
@pytest.mark.parametrize("infer", [True, False])
def test_compression(
compression, version, use_dict, infer, compression_to_extension, tmp_path
):
file_name = "dta_inferred_compression.dta"
if compression:
if use_dict:
file_ext = compression
else:
file_ext = compression_to_extension[compression]
file_name += f".{file_ext}"
compression_arg = compression
if infer:
compression_arg = "infer"
if use_dict:
compression_arg = {"method": compression}
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
)
df.index.name = "index"
path = tmp_path / file_name
path.touch()
df.to_stata(path, version=version, compression=compression_arg)
if compression == "gzip":
with gzip.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "zip":
with zipfile.ZipFile(path, "r") as comp:
fp = io.BytesIO(comp.read(comp.filelist[0]))
elif compression == "tar":
with tarfile.open(path) as tar:
fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read())
elif compression == "bz2":
with bz2.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "zstd":
zstd = pytest.importorskip("zstandard")
with zstd.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression == "xz":
lzma = pytest.importorskip("lzma")
with lzma.open(path, "rb") as comp:
fp = io.BytesIO(comp.read())
elif compression is None:
fp = path
reread = read_stata(fp, index_col="index")
expected = df
tm.assert_frame_equal(reread, expected)
@pytest.mark.parametrize("method", ["zip", "infer"])
@pytest.mark.parametrize("file_ext", [None, "dta", "zip"])
def test_compression_dict(method, file_ext, tmp_path):
file_name = f"test.{file_ext}"
archive_name = "test.dta"
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
)
df.index.name = "index"
compression = {"method": method, "archive_name": archive_name}
path = tmp_path / file_name
path.touch()
df.to_stata(path, compression=compression)
if method == "zip" or file_ext == "zip":
with zipfile.ZipFile(path, "r") as zp:
assert len(zp.filelist) == 1
assert zp.filelist[0].filename == archive_name
fp = io.BytesIO(zp.read(zp.filelist[0]))
else:
fp = path
reread = read_stata(fp, index_col="index")
expected = df
tm.assert_frame_equal(reread, expected)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_chunked_categorical(version, temp_file):
df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")})
df.index.name = "index"
expected = df.copy()
df.to_stata(temp_file, version=version)
with StataReader(temp_file, chunksize=2, order_categoricals=False) as reader:
for i, block in enumerate(reader):
block = block.set_index("index")
assert "cats" in block
tm.assert_series_equal(
block.cats,
expected.cats.iloc[2 * i : 2 * (i + 1)],
check_index_type=len(block) > 1,
)
def test_chunked_categorical_partial(datapath):
dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
values = ["a", "b", "a", "b", 3.0]
msg = "series with value labels are not fully labeled"
with StataReader(dta_file, chunksize=2) as reader:
with tm.assert_produces_warning(CategoricalConversionWarning, match=msg):
for i, block in enumerate(reader):
assert list(block.cats) == values[2 * i : 2 * (i + 1)]
if i < 2:
idx = pd.Index(["a", "b"])
else:
idx = pd.Index([3.0], dtype="float64")
tm.assert_index_equal(block.cats.cat.categories, idx)
with tm.assert_produces_warning(CategoricalConversionWarning, match=msg):
with StataReader(dta_file, chunksize=5) as reader:
large_chunk = reader.__next__()
direct = read_stata(dta_file)
tm.assert_frame_equal(direct, large_chunk)
@pytest.mark.parametrize("chunksize", (-1, 0, "apple"))
def test_iterator_errors(datapath, chunksize):
dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
with pytest.raises(ValueError, match="chunksize must be a positive"):
with StataReader(dta_file, chunksize=chunksize):
pass
def test_iterator_value_labels(temp_file):
# GH 31544
values = ["c_label", "b_label"] + ["a_label"] * 500
df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)})
df.to_stata(temp_file, write_index=False)
expected = pd.Index(["a_label", "b_label", "c_label"])
with read_stata(temp_file, chunksize=100) as reader:
for j, chunk in enumerate(reader):
for i in range(2):
tm.assert_index_equal(chunk.dtypes.iloc[i].categories, expected)
tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100])
def test_precision_loss(temp_file):
df = DataFrame(
[[sum(2**i for i in range(60)), sum(2**i for i in range(52))]],
columns=["big", "little"],
)
with tm.assert_produces_warning(
PossiblePrecisionLoss, match="Column converted from int64 to float64"
):
df.to_stata(temp_file, write_index=False)
reread = read_stata(temp_file)
expected_dt = Series([np.float64, np.float64], index=["big", "little"])
tm.assert_series_equal(reread.dtypes, expected_dt)
assert reread.loc[0, "little"] == df.loc[0, "little"]
assert reread.loc[0, "big"] == float(df.loc[0, "big"])
def test_compression_roundtrip(compression, temp_file):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
df.index.name = "index"
df.to_stata(temp_file, compression=compression)
reread = read_stata(temp_file, compression=compression, index_col="index")
tm.assert_frame_equal(df, reread)
# explicitly ensure file was compressed.
with tm.decompress_file(temp_file, compression) as fh:
contents = io.BytesIO(fh.read())
reread = read_stata(contents, index_col="index")
tm.assert_frame_equal(df, reread)
@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_stata_compression(
compression_only, read_infer, to_infer, compression_to_extension, tmp_path
):
compression = compression_only
ext = compression_to_extension[compression]
filename = f"test.{ext}"
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
df.index.name = "index"
to_compression = "infer" if to_infer else compression
read_compression = "infer" if read_infer else compression
path = tmp_path / filename
path.touch()
df.to_stata(path, compression=to_compression)
result = read_stata(path, compression=read_compression, index_col="index")
tm.assert_frame_equal(result, df)
def test_non_categorical_value_labels(temp_file):
data = DataFrame(
{
"fully_labelled": [1, 2, 3, 3, 1],
"partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
"Y": [7, 7, 9, 8, 10],
"Z": pd.Categorical(["j", "k", "l", "k", "j"]),
}
)
path = temp_file
value_labels = {
"fully_labelled": {1: "one", 2: "two", 3: "three"},
"partially_labelled": {1.0: "one", 2.0: "two"},
}
expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}}
writer = StataWriter(path, data, value_labels=value_labels)
writer.write_file()
with StataReader(path) as reader:
reader_value_labels = reader.value_labels()
assert reader_value_labels == expected
msg = "Can't create value labels for notY, it wasn't found in the dataset."
value_labels = {"notY": {7: "label1", 8: "label2"}}
with pytest.raises(KeyError, match=msg):
StataWriter(path, data, value_labels=value_labels)
msg = (
"Can't create value labels for Z, value labels "
"can only be applied to numeric columns."
)
value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}}
with pytest.raises(ValueError, match=msg):
StataWriter(path, data, value_labels=value_labels)
def test_non_categorical_value_label_name_conversion(temp_file):
# Check conversion of invalid variable names
data = DataFrame(
{
"invalid~!": [1, 1, 2, 3, 5, 8], # Only alphanumeric and _
"6_invalid": [1, 1, 2, 3, 5, 8], # Must start with letter or _
"invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8], # Too long
"aggregate": [2, 5, 5, 6, 6, 9], # Reserved words
(1, 2): [1, 2, 3, 4, 5, 6], # Hashable non-string
}
)
value_labels = {
"invalid~!": {1: "label1", 2: "label2"},
"6_invalid": {1: "label1", 2: "label2"},
"invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"},
"aggregate": {5: "five"},
(1, 2): {3: "three"},
}
expected = {
"invalid__": {1: "label1", 2: "label2"},
"_6_invalid": {1: "label1", 2: "label2"},
"invalid_name_longer_than_32_char": {8: "eight", 9: "nine"},
"_aggregate": {5: "five"},
"_1__2_": {3: "three"},
}
msg = "Not all pandas column names were valid Stata variable names"
with tm.assert_produces_warning(InvalidColumnName, match=msg):
data.to_stata(temp_file, value_labels=value_labels)
with StataReader(temp_file) as reader:
reader_value_labels = reader.value_labels()
assert reader_value_labels == expected
def test_non_categorical_value_label_convert_categoricals_error(temp_file):
# Mapping more than one value to the same label is valid for Stata
# labels, but can't be read with convert_categoricals=True
value_labels = {
"repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"}
}
data = DataFrame(
{
"repeated_labels": [10, 10, 20, 20, 40, 40],
}
)
data.to_stata(temp_file, value_labels=value_labels)
with StataReader(temp_file, convert_categoricals=False) as reader:
reader_value_labels = reader.value_labels()
assert reader_value_labels == value_labels
col = "repeated_labels"
repeats = "-" * 80 + "\n" + "\n".join(["More than ten"])
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
with pytest.raises(ValueError, match=msg):
read_stata(temp_file, convert_categoricals=True)
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize(
"dtype",
[
pd.BooleanDtype,
pd.Int8Dtype,
pd.Int16Dtype,
pd.Int32Dtype,
pd.Int64Dtype,
pd.UInt8Dtype,
pd.UInt16Dtype,
pd.UInt32Dtype,
pd.UInt64Dtype,
],
)
def test_nullable_support(dtype, version, temp_file):
df = DataFrame(
{
"a": Series([1.0, 2.0, 3.0]),
"b": Series([1, pd.NA, pd.NA], dtype=dtype.name),
"c": Series(["a", "b", None]),
}
)
dtype_name = df.b.dtype.numpy_dtype.name
# Only use supported names: no uint, bool or int64
dtype_name = dtype_name.replace("u", "")
if dtype_name == "int64":
dtype_name = "int32"
elif dtype_name == "bool":
dtype_name = "int8"
value = StataMissingValue.BASE_MISSING_VALUES[dtype_name]
smv = StataMissingValue(value)
expected_b = Series([1, smv, smv], dtype=object, name="b")
expected_c = Series(["a", "b", ""], name="c")
df.to_stata(temp_file, write_index=False, version=version)
reread = read_stata(temp_file, convert_missing=True)
tm.assert_series_equal(df.a, reread.a)
tm.assert_series_equal(reread.b, expected_b)
tm.assert_series_equal(reread.c, expected_c)
def test_empty_frame(temp_file):
# GH 46240
# create an empty DataFrame with int64 and float64 dtypes
df = DataFrame(data={"a": range(3), "b": [1.0, 2.0, 3.0]}).head(0)
path = temp_file
df.to_stata(path, write_index=False, version=117)
# Read entire dataframe
df2 = read_stata(path)
assert "b" in df2
# Dtypes don't match since no support for int32
dtypes = Series({"a": np.dtype("int32"), "b": np.dtype("float64")})
tm.assert_series_equal(df2.dtypes, dtypes)
# read one column of empty .dta file
df3 = read_stata(path, columns=["a"])
assert "b" not in df3
tm.assert_series_equal(df3.dtypes, dtypes.loc[["a"]])
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_many_strl(temp_file, version):
n = 65534
df = DataFrame(np.arange(n), columns=["col"])
lbls = ["".join(v) for v in itertools.product(*([string.ascii_letters] * 3))]
value_labels = {"col": {i: lbls[i] for i in range(n)}}
df.to_stata(temp_file, value_labels=value_labels, version=version)
@pytest.mark.parametrize("version", [117, 118, 119, None])
def test_strl_missings(temp_file, version):
# GH 23633
# Check that strl supports None and pd.NA
df = DataFrame(
[
{"str1": "string" * 500, "number": 0},
{"str1": None, "number": 1},
{"str1": pd.NA, "number": 1},
]
)
df.to_stata(temp_file, version=version)
@pytest.mark.parametrize("version", [117, 118, 119, None])
def test_ascii_error(temp_file, version):
# GH #61583
# Check that 2-byte unicode characters don't cause an export error
df = DataFrame({"doubleByteCol": ["§" * 1500]})
df.to_stata(temp_file, write_index=0, version=version)
df_input = read_stata(temp_file)
tm.assert_frame_equal(df, df_input)
| TestStata |
python | vyperlang__vyper | vyper/warnings.py | {
"start": 1368,
"end": 1475
} | class ____(VyperWarning):
"""
Warn if past the EIP-170 size limit
"""
pass
| ContractSizeLimit |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_variable_command.py | {
"start": 3525,
"end": 23013
} | class ____:
@classmethod
def setup_class(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
def setup_method(self):
clear_db_variables()
def teardown_method(self):
clear_db_variables()
def test_variables_set(self):
"""Test variable_set command"""
variable_command.variables_set(self.parser.parse_args(["variables", "set", "foo", "bar"]))
assert Variable.get("foo") is not None
with pytest.raises(KeyError):
Variable.get("foo1")
def test_variables_set_with_description(self):
"""Test variable_set command with optional description argument"""
expected_var_desc = "foo_bar_description"
var_key = "foo"
variable_command.variables_set(
self.parser.parse_args(["variables", "set", var_key, "bar", "--description", expected_var_desc])
)
assert Variable.get(var_key) == "bar"
with create_session() as session:
actual_var_desc = session.scalar(select(Variable.description).where(Variable.key == var_key))
assert actual_var_desc == expected_var_desc
with pytest.raises(KeyError):
Variable.get("foo1")
def test_variables_get(self, stdout_capture):
Variable.set("foo", {"foo": "bar"}, serialize_json=True)
with stdout_capture as stdout:
variable_command.variables_get(self.parser.parse_args(["variables", "get", "foo"]))
assert stdout.getvalue() == '{\n "foo": "bar"\n}\n'
def test_get_variable_default_value(self, stdout_capture):
with stdout_capture as stdout:
variable_command.variables_get(
self.parser.parse_args(["variables", "get", "baz", "--default", "bar"])
)
assert stdout.getvalue() == "bar\n"
def test_get_variable_missing_variable(self):
with pytest.raises(SystemExit):
variable_command.variables_get(self.parser.parse_args(["variables", "get", "no-existing-VAR"]))
def test_variables_set_different_types(self):
"""Test storage of various data types"""
# Set a dict
variable_command.variables_set(
self.parser.parse_args(["variables", "set", "dict", '{"foo": "oops"}'])
)
# Set a list
variable_command.variables_set(self.parser.parse_args(["variables", "set", "list", '["oops"]']))
# Set str
variable_command.variables_set(self.parser.parse_args(["variables", "set", "str", "hello string"]))
# Set int
variable_command.variables_set(self.parser.parse_args(["variables", "set", "int", "42"]))
# Set float
variable_command.variables_set(self.parser.parse_args(["variables", "set", "float", "42.0"]))
# Set true
variable_command.variables_set(self.parser.parse_args(["variables", "set", "true", "true"]))
# Set false
variable_command.variables_set(self.parser.parse_args(["variables", "set", "false", "false"]))
# Set none
variable_command.variables_set(self.parser.parse_args(["variables", "set", "null", "null"]))
# Export and then import
variable_command.variables_export(
self.parser.parse_args(["variables", "export", "variables_types.json"])
)
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", "variables_types.json"]), session=session
)
# Assert value
assert Variable.get("dict", deserialize_json=True) == {"foo": "oops"}
assert Variable.get("str") == "hello string" # cannot json.loads(str)
assert Variable.get("int", deserialize_json=True) == 42
assert Variable.get("float", deserialize_json=True) == 42.0
assert Variable.get("true", deserialize_json=True) is True
assert Variable.get("false", deserialize_json=True) is False
assert Variable.get("null", deserialize_json=True) is None
# test variable import skip existing
# set variable list to ["airflow"] and have it skip during import
variable_command.variables_set(self.parser.parse_args(["variables", "set", "list", '["airflow"]']))
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", "variables_types.json", "--action-on-existing-key", "skip"]
)
)
assert Variable.get("list", deserialize_json=True) == ["airflow"] # should not be overwritten
# test variable import fails on existing when action is set to fail
with pytest.raises(SystemExit):
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", "variables_types.json", "--action-on-existing-key", "fail"]
)
)
os.remove("variables_types.json")
def test_variables_list(self):
"""Test variable_list command"""
# Test command is received
variable_command.variables_list(self.parser.parse_args(["variables", "list"]))
def test_variables_delete(self):
"""Test variable_delete command"""
variable_command.variables_set(self.parser.parse_args(["variables", "set", "foo", "bar"]))
variable_command.variables_delete(self.parser.parse_args(["variables", "delete", "foo"]))
with pytest.raises(KeyError):
Variable.get("foo")
@pytest.mark.parametrize(
"filename",
[
os.devnull, # No extension (special file)
"variables.txt", # Unsupported .txt extension
"variables", # No extension
"variables.xml", # Unsupported .xml extension
],
)
def test_variables_import_unsupported_format(self, tmp_path, filename):
"""Test variables_import command with unsupported file formats"""
# Use devnull directly or create a file with unsupported extension
if filename == os.devnull:
file_path = filename
else:
file_path = tmp_path / filename
file_path.write_text("some content")
file_path = os.fspath(file_path)
with pytest.raises(SystemExit, match=r"Unsupported file format"):
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", file_path]), session=session
)
def test_variables_export(self):
"""Test variables_export command"""
variable_command.variables_export(self.parser.parse_args(["variables", "export", os.devnull]))
def test_variables_isolation(self, tmp_path):
"""Test isolation of variables"""
path1 = tmp_path / "testfile1.json"
path2 = tmp_path / "testfile2.json"
# First export
variable_command.variables_set(self.parser.parse_args(["variables", "set", "foo", '{"foo":"bar"}']))
variable_command.variables_set(self.parser.parse_args(["variables", "set", "bar", "original"]))
variable_command.variables_export(self.parser.parse_args(["variables", "export", os.fspath(path1)]))
variable_command.variables_set(self.parser.parse_args(["variables", "set", "bar", "updated"]))
variable_command.variables_set(self.parser.parse_args(["variables", "set", "foo", '{"foo":"oops"}']))
variable_command.variables_delete(self.parser.parse_args(["variables", "delete", "foo"]))
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(path1)]), session=session
)
assert Variable.get("bar") == "original"
assert Variable.get("foo") == '{\n "foo": "bar"\n}'
# Second export
variable_command.variables_export(self.parser.parse_args(["variables", "export", os.fspath(path2)]))
assert path1.read_text() == path2.read_text()
def test_variables_import_and_export_with_description(self, tmp_path):
"""Test variables_import with file-description parameter"""
variables_types_file = tmp_path / "variables_types.json"
variable_command.variables_set(
self.parser.parse_args(["variables", "set", "foo", "bar", "--description", "Foo var description"])
)
variable_command.variables_set(
self.parser.parse_args(["variables", "set", "foo1", "bar1", "--description", "12"])
)
variable_command.variables_set(self.parser.parse_args(["variables", "set", "foo2", "bar2"]))
variable_command.variables_export(
self.parser.parse_args(["variables", "export", os.fspath(variables_types_file)])
)
with open(variables_types_file) as f:
exported_vars = json.load(f)
assert exported_vars == {
"foo": {
"description": "Foo var description",
"value": "bar",
},
"foo1": {
"description": "12",
"value": "bar1",
},
"foo2": "bar2",
}
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(variables_types_file)]),
session=session,
)
assert Variable.get("foo") == "bar"
assert Variable.get("foo1") == "bar1"
assert Variable.get("foo2") == "bar2"
with create_session() as session:
assert (
session.scalar(select(Variable.description).where(Variable.key == "foo"))
== "Foo var description"
)
assert session.scalar(select(Variable.description).where(Variable.key == "foo1")) == "12"
@pytest.mark.parametrize("format", ["json", "yaml", "yml"])
def test_variables_import_formats(self, create_variable_file, simple_variable_data, format):
"""Test variables_import with different formats (JSON, YAML, YML)"""
file = create_variable_file(simple_variable_data, format=format)
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(file)]), session=session
)
assert Variable.get("key1") == "value1"
assert Variable.get("key2", deserialize_json=True) == {"nested": "dict", "with": ["list", "values"]}
assert Variable.get("key3", deserialize_json=True) == 123
assert Variable.get("key4", deserialize_json=True) is True
assert Variable.get("key5", deserialize_json=True) is None
@pytest.mark.parametrize("format", ["json", "yaml"])
def test_variables_import_with_descriptions(
self, create_variable_file, variable_data_with_descriptions, format
):
"""Test variables_import with descriptions in different formats (JSON, YAML)"""
file = create_variable_file(variable_data_with_descriptions, format=format)
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(file)]), session=session
)
assert Variable.get("var1") == "test_value"
assert Variable.get("var2", deserialize_json=True) == {"complex": "object"}
assert Variable.get("var3") == "simple_value"
with create_session() as session:
assert (
session.scalar(select(Variable.description).where(Variable.key == "var1"))
== "Test description for var1"
)
assert (
session.scalar(select(Variable.description).where(Variable.key == "var2"))
== "Complex variable"
)
assert session.scalar(select(Variable.description).where(Variable.key == "var3")) is None
def test_variables_import_env(self, create_variable_file, env_variable_data):
"""Test variables_import with ENV format"""
env_file = create_variable_file(env_variable_data, format="env")
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(env_file)]), session=session
)
assert Variable.get("KEY_A") == "value_a"
assert Variable.get("KEY_B") == "value with spaces"
assert Variable.get("KEY_C") == '{"json": "value", "number": 42}'
assert Variable.get("KEY_D") == "true"
assert Variable.get("KEY_E") == "123"
@pytest.mark.parametrize("format", ["json", "yaml", "yml"])
def test_variables_import_action_on_existing(self, create_variable_file, simple_variable_data, format):
"""Test variables_import with action_on_existing_key parameter for different formats"""
file = create_variable_file(simple_variable_data, format=format)
# Set up one existing variable with different value
Variable.set("key1", "original_value")
# Test skip action - existing key1 should keep original value, others should be imported
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", os.fspath(file), "--action-on-existing-key", "skip"]
),
session=session,
)
assert Variable.get("key1") == "original_value" # Should NOT be overwritten
assert Variable.get("key2", deserialize_json=True) == {"nested": "dict", "with": ["list", "values"]}
assert Variable.get("key3", deserialize_json=True) == 123
# Clean up non-existing keys for next test
for key in ["key2", "key3", "key4", "key5"]:
Variable.delete(key)
# Test overwrite action (default) - existing key1 should be overwritten
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", os.fspath(file), "--action-on-existing-key", "overwrite"]
),
session=session,
)
assert Variable.get("key1") == "value1" # Should be overwritten with new value
assert Variable.get("key2", deserialize_json=True) == {"nested": "dict", "with": ["list", "values"]}
# Test fail action - should fail when key1 already exists
Variable.set("key1", "original_value")
with pytest.raises(SystemExit, match="already exists"):
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", os.fspath(file), "--action-on-existing-key", "fail"]
),
session=session,
)
def test_variables_import_env_action_on_existing(self, tmp_path):
"""Test variables_import ENV with action_on_existing_key parameter"""
env_file = tmp_path / "variables_update.env"
env_content = """EXISTING_VAR=updated_value
NEW_VAR=fresh_value"""
env_file.write_text(env_content)
# Set up existing variable
Variable.set("EXISTING_VAR", "initial_value")
# Test skip action
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", os.fspath(env_file), "--action-on-existing-key", "skip"]
),
session=session,
)
assert Variable.get("EXISTING_VAR") == "initial_value"
assert Variable.get("NEW_VAR") == "fresh_value"
# Clean up for next test
Variable.delete("NEW_VAR")
# Test overwrite action
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(
["variables", "import", os.fspath(env_file), "--action-on-existing-key", "overwrite"]
),
session=session,
)
assert Variable.get("EXISTING_VAR") == "updated_value"
assert Variable.get("NEW_VAR") == "fresh_value"
@pytest.mark.parametrize(
("format", "invalid_content", "error_pattern"),
[
("json", '{"invalid": "json", missing_quotes: true}', "Failed to load the secret file"),
("yaml", "invalid:\n - yaml\n content: {missing", "Failed to load the secret file"),
("yml", "invalid:\n - yaml\n content: {missing", "Failed to load the secret file"),
("env", "INVALID_LINE_NO_EQUALS", "Invalid line format"),
],
)
def test_variables_import_invalid_format(self, tmp_path, format, invalid_content, error_pattern):
"""Test variables_import with invalid format files"""
invalid_file = tmp_path / f"invalid.{format}"
invalid_file.write_text(invalid_content)
with pytest.raises(SystemExit, match=error_pattern):
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(invalid_file)]),
session=session,
)
def test_variables_import_cross_format_compatibility(self, create_variable_file, simple_variable_data):
"""Test that the same variables can be imported from different formats consistently"""
# Create files in both formats using the same test data
json_file = create_variable_file(simple_variable_data, format="json")
yaml_file = create_variable_file(simple_variable_data, format="yaml")
# Test JSON import
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(json_file)]), session=session
)
json_results = {}
for key in simple_variable_data:
if key in ["key1"]: # String values don't need JSON deserialization
json_results[key] = Variable.get(key)
else:
json_results[key] = Variable.get(key, deserialize_json=True)
# Clear variables
for key in simple_variable_data:
Variable.delete(key)
# Test YAML import
with create_session() as session:
variable_command.variables_import(
self.parser.parse_args(["variables", "import", os.fspath(yaml_file)]), session=session
)
yaml_results = {}
for key in simple_variable_data:
if key in ["key1"]: # String values don't need JSON deserialization
yaml_results[key] = Variable.get(key)
else:
yaml_results[key] = Variable.get(key, deserialize_json=True)
# Compare results - both formats should produce identical results
assert json_results == yaml_results
assert json_results["key1"] == "value1"
assert json_results["key2"] == {"nested": "dict", "with": ["list", "values"]}
assert json_results["key3"] == 123
assert json_results["key4"] is True
assert json_results["key5"] is None
| TestCliVariables |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 79895,
"end": 83256
} | class ____(Response):
"""
Response of projects.get_by_id endpoint.
:param project: Project info
:type project: Project
"""
_service = "projects"
_action = "get_by_id"
_version = "2.23"
_schema = {
"definitions": {
"project": {
"properties": {
"basename": {
"description": "Project base name",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"last_update": {
"description": "Last project update time. Reflects the last time the project metadata was changed or a task in this project has changed status",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Project name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"project": {
"description": "Project info",
"oneOf": [{"$ref": "#/definitions/project"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, project: Any = None, **kwargs: Any) -> None:
super(GetByIdResponse, self).__init__(**kwargs)
self.project = project
@schema_property("project")
def project(self) -> Any:
return self._property_project
@project.setter
def project(self, value: Any) -> None:
if value is None:
self._property_project = None
return
if isinstance(value, dict):
value = Project.from_dict(value)
else:
self.assert_isinstance(value, "project", Project)
self._property_project = value
| GetByIdResponse |
python | gevent__gevent | src/gevent/events.py | {
"start": 8317,
"end": 8448
} | class ____(_AbstractMemoryEvent):
"""
Implementation of `IMemoryUsageThresholdExceeded`.
"""
| MemoryUsageThresholdExceeded |
python | uqfoundation__dill | dill/_dill.py | {
"start": 12415,
"end": 16407
} | class ____(StockPickler):
"""python's Pickler extended to interpreter sessions"""
dispatch: typing.Dict[type, typing.Callable[[Pickler, typing.Any], None]] \
= MetaCatchingDict(StockPickler.dispatch.copy())
"""The dispatch table, a dictionary of serializing functions used
by Pickler to save objects of specific types. Use :func:`pickle`
or :func:`register` to associate types to custom functions.
:meta hide-value:
"""
_session = False
from .settings import settings
def __init__(self, file, *args, **kwds):
settings = Pickler.settings
_byref = kwds.pop('byref', None)
#_strictio = kwds.pop('strictio', None)
_fmode = kwds.pop('fmode', None)
_recurse = kwds.pop('recurse', None)
StockPickler.__init__(self, file, *args, **kwds)
self._main = _main_module
self._diff_cache = {}
self._byref = settings['byref'] if _byref is None else _byref
self._strictio = False #_strictio
self._fmode = settings['fmode'] if _fmode is None else _fmode
self._recurse = settings['recurse'] if _recurse is None else _recurse
self._postproc = OrderedDict()
self._file = file
def save(self, obj, save_persistent_id=True):
# numpy hack
obj_type = type(obj)
if NumpyArrayType and not (obj_type is type or obj_type in Pickler.dispatch):
# register if the object is a numpy ufunc
# thanks to Paul Kienzle for pointing out ufuncs didn't pickle
if numpyufunc(obj_type):
@register(obj_type)
def save_numpy_ufunc(pickler, obj):
logger.trace(pickler, "Nu: %s", obj)
name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
StockPickler.save_global(pickler, obj, name=name)
logger.trace(pickler, "# Nu")
return
# NOTE: the above 'save' performs like:
# import copy_reg
# def udump(f): return f.__name__
# def uload(name): return getattr(numpy, name)
# copy_reg.pickle(NumpyUfuncType, udump, uload)
# register if the object is a numpy dtype
if numpydtype(obj_type):
@register(obj_type)
def save_numpy_dtype(pickler, obj):
logger.trace(pickler, "Dt: %s", obj)
pickler.save_reduce(_create_dtypemeta, (obj.type,), obj=obj)
logger.trace(pickler, "# Dt")
return
# NOTE: the above 'save' performs like:
# import copy_reg
# def uload(name): return type(NumpyDType(name))
# def udump(f): return uload, (f.type,)
# copy_reg.pickle(NumpyDTypeType, udump, uload)
# register if the object is a subclassed numpy array instance
if ndarraysubclassinstance(obj_type):
@register(obj_type)
def save_numpy_array(pickler, obj):
logger.trace(pickler, "Nu: (%s, %s)", obj.shape, obj.dtype)
npdict = getattr(obj, '__dict__', None)
f, args, state = obj.__reduce__()
pickler.save_reduce(_create_array, (f,args,state,npdict), obj=obj)
logger.trace(pickler, "# Nu")
return
# end numpy hack
if GENERATOR_FAIL and obj_type is GeneratorType:
msg = "Can't pickle %s: attribute lookup builtins.generator failed" % GeneratorType
raise PicklingError(msg)
StockPickler.save(self, obj, save_persistent_id)
save.__doc__ = StockPickler.save.__doc__
def dump(self, obj): #NOTE: if settings change, need to update attributes
logger.trace_setup(self)
StockPickler.dump(self, obj)
dump.__doc__ = StockPickler.dump.__doc__
| Pickler |
python | pytorch__pytorch | torch/_inductor/runtime/hints.py | {
"start": 3193,
"end": 3394
} | class ____(Enum):
PERSISTENT_REDUCTION = auto()
POINTWISE = auto()
REDUCTION = auto()
SPLIT_SCAN = auto()
TEMPLATE = auto()
USER_AUTOTUNE = auto()
FIXED = auto()
| HeuristicType |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_constructors.py | {
"start": 7367,
"end": 11645
} | class ____:
def test_constructor_positional(self):
# see GH#10758
msg = "'NoneType' object cannot be interpreted as an integer"
with pytest.raises(TypeError, match=msg):
Timestamp(2000, 1)
msg = "month must be in 1..12"
with pytest.raises(ValueError, match=msg):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError, match=msg):
Timestamp(2000, 13, 1)
if PY314:
msg = "must be in range 1..31 for month 1 in year 2000"
else:
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError, match=msg):
Timestamp(2000, 1, 32)
# see gh-11630
assert repr(Timestamp(2015, 11, 12)) == repr(Timestamp("20151112"))
assert repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) == repr(
Timestamp("2015-11-12 01:02:03.999999")
)
def test_constructor_keyword(self):
# GH#10758
msg = "function missing required argument 'day'|Required argument 'day'"
with pytest.raises(TypeError, match=msg):
Timestamp(year=2000, month=1)
msg = "month must be in 1..12"
with pytest.raises(ValueError, match=msg):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError, match=msg):
Timestamp(year=2000, month=13, day=1)
if PY314:
msg = "must be in range 1..31 for month 1 in year 2000"
else:
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError, match=msg):
Timestamp(year=2000, month=1, day=32)
assert repr(Timestamp(year=2015, month=11, day=12)) == repr(
Timestamp("20151112")
)
assert repr(
Timestamp(
year=2015,
month=11,
day=12,
hour=1,
minute=2,
second=3,
microsecond=999999,
)
) == repr(Timestamp("2015-11-12 01:02:03.999999"))
@pytest.mark.parametrize(
"arg",
[
"year",
"month",
"day",
"hour",
"minute",
"second",
"microsecond",
"nanosecond",
],
)
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
msg = "Cannot pass a date attribute keyword argument"
with pytest.raises(ValueError, match=msg):
Timestamp("2010-10-10 12:59:59.999999999", **kwarg)
@pytest.mark.parametrize("kwargs", [{}, {"year": 2020}, {"year": 2020, "month": 1}])
def test_constructor_missing_keyword(self, kwargs):
# GH#31200
# The exact error message of datetime() depends on its version
msg1 = r"function missing required argument '(year|month|day)' \(pos [123]\)"
msg2 = r"Required argument '(year|month|day)' \(pos [123]\) not found"
msg = "|".join([msg1, msg2])
with pytest.raises(TypeError, match=msg):
Timestamp(**kwargs)
def test_constructor_positional_with_tzinfo(self):
# GH#31929
ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc)
expected = Timestamp("2020-12-31", tzinfo=timezone.utc)
assert ts == expected
@pytest.mark.parametrize("kwd", ["nanosecond", "microsecond", "second", "minute"])
def test_constructor_positional_keyword_mixed_with_tzinfo(self, kwd, request):
# TODO: if we passed microsecond with a keyword we would mess up
# xref GH#45307
if kwd != "nanosecond":
# nanosecond is keyword-only as of 2.0, others are not
mark = pytest.mark.xfail(reason="GH#45307")
request.applymarker(mark)
kwargs = {kwd: 4}
ts = Timestamp(2020, 12, 31, tzinfo=timezone.utc, **kwargs)
td_kwargs = {kwd + "s": 4}
td = Timedelta(**td_kwargs)
expected = Timestamp("2020-12-31", tz=timezone.utc) + td
assert ts == expected
| TestTimestampConstructorPositionalAndKeywordSupport |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 43306,
"end": 43658
} | class ____(Element):
proto: ToastProto = field(repr=False)
icon: str
def __init__(self, proto: ToastProto, root: ElementTree) -> None:
self.proto = proto
self.key = None
self.root = root
self.type = "toast"
@property
def value(self) -> str:
return self.proto.body
@dataclass(repr=False)
| Toast |
python | getsentry__sentry | tests/sentry/auth/test_access.py | {
"start": 2163,
"end": 3237
} | class ____(TestCase):
def from_user(self, *args, **kwds):
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
return access.from_user(*args, **kwds)
return silo_from_user(*args, **kwds)
def from_request(self, *args, **kwds):
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
return access.from_request(*args, **kwds)
return silo_from_request(*args, **kwds)
@assume_test_silo_mode(SiloMode.CONTROL)
def create_api_key(self, organization: Organization, **kwds):
return ApiKey.objects.create(organization_id=organization.id, **kwds)
@assume_test_silo_mode(SiloMode.CONTROL)
def create_auth_provider(self, organization: Organization, **kwds):
return AuthProvider.objects.create(organization_id=organization.id, **kwds)
@assume_test_silo_mode(SiloMode.CONTROL)
def create_auth_identity(self, auth_provider: AuthProvider, user: User, **kwds):
return AuthIdentity.objects.create(auth_provider=auth_provider, user=user, **kwds)
@all_silo_test
| AccessFactoryTestCase |
python | jazzband__pip-tools | piptools/resolver.py | {
"start": 6258,
"end": 19485
} | class ____(BaseResolver):
"""
Wrapper for the (deprecated) legacy dependency resolver.
"""
def __init__(
self,
constraints: Iterable[InstallRequirement],
existing_constraints: dict[str, InstallRequirement],
repository: BaseRepository,
cache: DependencyCache,
prereleases: bool | None = False,
clear_caches: bool = False,
allow_unsafe: bool = False,
unsafe_packages: set[str] | None = None,
) -> None:
"""Initialize LegacyResolver.
:param constraints: the constraints given
:type constraints: Iterable[InstallRequirement]
:param existing_constraints: constraints already present
:param repository: the repository to get the constraints from
:type repository: BaseRepository
:param cache: the cache to be used
:param prereleases: whether prereleases should be taken into account when resolving
(default is :py:data:`False`)
:param clear_caches: whether to clear repository and dependency caches before resolving
(default is :py:data:`False`)
:param allow_unsafe: whether unsafe packages should be allowed in the resulting requirements
(default is :py:data:`False`)
:param unsafe_packages: packages to be considered as unsafe
(default is :py:data:`None`)
:type unsafe_packages: set[str]
:raises: ``PipToolsError`` if the legacy resolver is not enabled
"""
self.our_constraints = set(constraints)
self.their_constraints: set[InstallRequirement] = set()
self.repository = repository
self.dependency_cache = cache
self.prereleases = prereleases
self.clear_caches = clear_caches
self.allow_unsafe = allow_unsafe
self.unsafe_constraints: set[InstallRequirement] = set()
self.unsafe_packages = unsafe_packages or UNSAFE_PACKAGES
options = self.repository.options
if "legacy-resolver" not in options.deprecated_features_enabled:
raise PipToolsError("Legacy resolver deprecated feature must be enabled.")
# Make sure there is no enabled 2020-resolver
options.features_enabled = omit_list_value(
options.features_enabled, "2020-resolver"
)
@property
def constraints(self) -> set[InstallRequirement]:
return set(
self._group_constraints(chain(self.our_constraints, self.their_constraints))
)
def resolve(self, max_rounds: int = 10) -> set[InstallRequirement]:
r"""
Find concrete package versions for all the given ``InstallRequirement``\ s
and their recursive dependencies and return a set of pinned
``InstallRequirement``\ s.
Resolves constraints one round at a time, until they don't change
anymore.
:param max_rounds: break out of resolution process after the given number of rounds
to prevent infinite loops (default is 10)
"""
if self.clear_caches:
self.dependency_cache.clear()
self.repository.clear_caches()
# Ignore existing packages
with update_env_context_manager(PIP_EXISTS_ACTION="i"):
for current_round in count(start=1): # pragma: no branch
if current_round > max_rounds:
raise RuntimeError(
"No stable configuration of concrete packages "
"could be found for the given constraints after "
"{max_rounds} rounds of resolving.\n"
"This is likely a bug.".format(max_rounds=max_rounds)
)
log.debug("")
log.debug(magenta(f"{f'ROUND {current_round}':^60}"))
has_changed, best_matches = self._resolve_one_round()
log.debug("-" * 60)
log.debug(
"Result of round {}: {}".format(
current_round,
"not stable" if has_changed else "stable, done",
)
)
if not has_changed:
break
# Only include hard requirements and not pip constraints
results = {req for req in best_matches if not req.constraint}
# Filter out unsafe requirements.
if not self.allow_unsafe:
self._filter_out_unsafe_constraints(
ireqs=results,
unsafe_packages=self.unsafe_packages,
)
return results
def _group_constraints(
self, constraints: Iterable[InstallRequirement]
) -> Iterator[InstallRequirement]:
"""
Group constraints (remember, InstallRequirements!) by their key name.
Then combine their SpecifierSets into a single InstallRequirement per
package. For example, given the following constraints:
Django<1.9,>=1.4.2
django~=1.5
Flask~=0.7
This will be combined into a single entry per package:
django~=1.5,<1.9,>=1.4.2
flask~=0.7
"""
constraints = list(constraints)
for ireq in constraints:
if ireq.name is None:
# get_dependencies has side-effect of assigning name to ireq
# (so we can group by the name below).
self.repository.get_dependencies(ireq)
# Sort first by name, i.e. the groupby key. Then within each group,
# sort editables first.
# This way, we don't bother with combining editables, since the first
# ireq will be editable, if one exists.
for _, ireqs in groupby(
sorted(constraints, key=(lambda x: (key_from_ireq(x), not x.editable))),
key=key_from_ireq,
):
yield combine_install_requirements(ireqs)
def _resolve_one_round(self) -> tuple[bool, set[InstallRequirement]]:
"""
Resolve one level of the current constraints.
This is achieved by finding the best match for each package
in the repository and adding all requirements for those best
package versions. Some of these constraints may be new
or updated.
:returns: whether new constraints appeared in this round. If no
constraints were added or changed, this indicates a stable
configuration.
"""
# Sort this list for readability of terminal output
constraints = sorted(self.constraints, key=key_from_ireq)
log.debug("Current constraints:")
with log.indentation():
for constraint in constraints:
log.debug(str(constraint))
log.debug("")
log.debug("Finding the best candidates:")
with log.indentation():
best_matches = {self.get_best_match(ireq) for ireq in constraints}
# Find the new set of secondary dependencies
log.debug("")
log.debug("Finding secondary dependencies:")
their_constraints: list[InstallRequirement] = []
with log.indentation():
for best_match in best_matches:
their_constraints.extend(self._iter_dependencies(best_match))
# Grouping constraints to make clean diff between rounds
theirs = set(self._group_constraints(their_constraints))
# NOTE: We need to compare RequirementSummary objects, since
# InstallRequirement does not define equality
diff = {RequirementSummary(t) for t in theirs} - {
RequirementSummary(t) for t in self.their_constraints
}
removed = {RequirementSummary(t) for t in self.their_constraints} - {
RequirementSummary(t) for t in theirs
}
has_changed = len(diff) > 0 or len(removed) > 0
if has_changed:
log.debug("")
log.debug("New dependencies found in this round:")
with log.indentation():
for new_dependency in sorted(diff, key=key_from_ireq):
log.debug(f"adding {new_dependency}")
log.debug("Removed dependencies in this round:")
with log.indentation():
for removed_dependency in sorted(removed, key=key_from_ireq):
log.debug(f"removing {removed_dependency}")
# Store the last round's results in the their_constraints
self.their_constraints = theirs
return has_changed, best_matches
def get_best_match(self, ireq: InstallRequirement) -> InstallRequirement:
"""
Return a (pinned or editable) InstallRequirement.
This indicates the best match to use for the given
InstallRequirement (in the form of an InstallRequirement).
Example:
Given the constraint Flask>=0.10, may return Flask==0.10.1 at
a certain moment in time.
Pinned requirements will always return themselves, i.e.
Flask==0.10.1 => Flask==0.10.1
"""
if ireq.editable or is_url_requirement(ireq):
# NOTE: it's much quicker to immediately return instead of
# hitting the index server
best_match = ireq
elif is_pinned_requirement(ireq):
# NOTE: it's much quicker to immediately return instead of
# hitting the index server
best_match = ireq
elif ireq.constraint:
# NOTE: This is not a requirement (yet) and does not need
# to be resolved
best_match = ireq
else:
best_match = self.repository.find_best_match(
ireq, prereleases=self.prereleases
)
# Format the best match
log.debug(
"found candidate {} (constraint was {})".format(
format_requirement(best_match), format_specifier(ireq)
)
)
best_match.comes_from = ireq.comes_from
if hasattr(ireq, "_source_ireqs"):
best_match._source_ireqs = ireq._source_ireqs
return best_match
def _iter_dependencies(
self, ireq: InstallRequirement
) -> Iterator[InstallRequirement]:
"""
Emit all secondary dependencies for an ireq.
Given a pinned, url, or editable InstallRequirement, collects all the
secondary dependencies for them, either by looking them up in a local
cache, or by reaching out to the repository.
Editable requirements will never be looked up, as they may have
changed at any time.
"""
# Pip does not resolve dependencies of constraints. We skip handling
# constraints here as well to prevent the cache from being polluted.
# Constraints that are later determined to be dependencies will be
# marked as non-constraints in later rounds by
# `combine_install_requirements`, and will be properly resolved.
# See https://github.com/pypa/pip/
# blob/6896dfcd831330c13e076a74624d95fa55ff53f4/src/pip/_internal/
# legacy_resolve.py#L325
if ireq.constraint:
return
if ireq.editable or is_url_requirement(ireq):
dependencies = self.repository.get_dependencies(ireq)
# Don't just yield from above. Instead, use the same `markers`-stripping
# behavior as we have for cached dependencies below.
dependency_strings = sorted(str(ireq.req) for ireq in dependencies)
yield from self._ireqs_of_dependencies(ireq, dependency_strings)
return
elif not is_pinned_requirement(ireq):
raise TypeError(f"Expected pinned or editable requirement, got {ireq}")
# Now, either get the dependencies from the dependency cache (for
# speed), or reach out to the external repository to
# download and inspect the package version and get dependencies
# from there
if ireq not in self.dependency_cache:
log.debug(
f"{format_requirement(ireq)} not in cache, need to check index",
fg="yellow",
)
dependencies = self.repository.get_dependencies(ireq)
self.dependency_cache[ireq] = sorted(str(ireq.req) for ireq in dependencies)
# Example: ['Werkzeug>=0.9', 'Jinja2>=2.4']
dependency_strings = self.dependency_cache[ireq]
yield from self._ireqs_of_dependencies(ireq, dependency_strings)
def _ireqs_of_dependencies(
self, ireq: InstallRequirement, dependency_strings: list[str]
) -> Iterator[InstallRequirement]:
log.debug(
"{:25} requires {}".format(
format_requirement(ireq),
", ".join(sorted(dependency_strings, key=lambda s: s.lower())) or "-",
)
)
# This yields new InstallRequirements that are similar to those that
# produced the dependency_strings, but they lack `markers` on their
# underlying Requirements:
for dependency_string in dependency_strings:
yield install_req_from_line(
dependency_string, constraint=ireq.constraint, comes_from=ireq
)
| LegacyResolver |
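The `_group_constraints` docstring above describes merging several constraints for one package into a single combined specifier. A rough sketch of that combination idea using `packaging` directly; this only illustrates the concept and is not pip-tools' actual `combine_install_requirements` implementation:
from packaging.specifiers import SpecifierSet
# Two constraints for the same package, as in the docstring example
a = SpecifierSet(">=1.4.2,<1.9")
b = SpecifierSet("~=1.5")
# Intersecting the specifier sets yields the combined constraint
combined = a & b
print(combined)              # <1.9,>=1.4.2,~=1.5 (ordering may differ)
print("1.5.2" in combined)   # True: satisfies every specifier
print("1.9.0" in combined)   # False: excluded by <1.9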
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 91283,
"end": 91396
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
| AutoModelForTextToWaveform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol32.py | {
"start": 301,
"end": 392
} | class ____(Base1[Value], Protocol):
def method2(self, default: Value) -> Value: ...
| Base2 |
python | aio-libs__aiohttp | aiohttp/helpers.py | {
"start": 33257,
"end": 34647
} | class ____:
value: str
is_weak: bool = False
def validate_etag_value(value: str) -> None:
    if value != ETAG_ANY and not _ETAGC_RE.fullmatch(value):
        raise ValueError(
            f"Value {value!r} is not a valid etag. Maybe it contains '\"'?"
        )
def parse_http_date(date_str: str | None) -> datetime.datetime | None:
    """Process a date string, return a datetime object"""
    if date_str is not None:
        timetuple = parsedate(date_str)
        if timetuple is not None:
            with suppress(ValueError):
                return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)
    return None
@functools.lru_cache
def must_be_empty_body(method: str, code: int) -> bool:
    """Check if a request must return an empty body."""
    return (
        code in EMPTY_BODY_STATUS_CODES
        or method in EMPTY_BODY_METHODS
        or (200 <= code < 300 and method in hdrs.METH_CONNECT_ALL)
    )
def should_remove_content_length(method: str, code: int) -> bool:
    """Check if a Content-Length header should be removed.
    This should always be a subset of must_be_empty_body
    """
    # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.6-8
    # https://www.rfc-editor.org/rfc/rfc9110.html#section-15.4.5-4
    return code in EMPTY_BODY_STATUS_CODES or (
        200 <= code < 300 and method in hdrs.METH_CONNECT_ALL
    )
| ETag |
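A short usage sketch for the module-level helpers above. The import path is an assumption based on this record's file path (aiohttp/helpers.py):
from datetime import datetime, timezone
from aiohttp.helpers import parse_http_date, validate_etag_value
# parse_http_date turns an RFC 7231 date string into an aware datetime (or None)
dt = parse_http_date("Mon, 23 May 2005 22:38:34 GMT")
assert dt == datetime(2005, 5, 23, 22, 38, 34, tzinfo=timezone.utc)
assert parse_http_date(None) is None
# validate_etag_value rejects values outside the etagc grammar (e.g. embedded quotes)
validate_etag_value("686897696a7c876b7e")  # valid, returns None
try:
    validate_etag_value('bad"etag')
except ValueError as exc:
    print(exc)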
python | spyder-ide__spyder | spyder/plugins/pylint/main_widget.py | {
"start": 2164,
"end": 2299
} | class ____:
Global = "global_section"
Section = "section_section"
History = "history_section"
| PylintWidgetOptionsMenuSections |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py | {
"start": 948,
"end": 6760
} | class ____(QueryExpectation):
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py docstring">
"""Expect the expect the number of rows returned from a queried table to equal a specified value."""
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py metric_dependencies">
metric_dependencies = ("query.table",)
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py query">
query: str = """
SELECT COUNT(*)
FROM {active_batch}
"""
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py success_keys">
success_keys = (
"value",
"query",
)
# </snippet>
domain_keys = ("batch_id", "row_condition", "condition_parser")
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
super().validate_configuration(configuration)
value = configuration["kwargs"].get("value")
try:
assert value is not None, "'value' must be specified"
assert (
isinstance(value, int) and value >= 0
), "`value` must be an integer greater than or equal to zero"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py _validate function">
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py _validate function signature">
def _validate(
self,
metrics: dict,
runtime_configuration: dict | None = None,
execution_engine: ExecutionEngine | None = None,
) -> Union[ExpectationValidationResult, dict]:
# </snippet>
metrics = convert_to_json_serializable(data=metrics)
query_result = list(metrics.get("query.table")[0].values())[0]
value = self.configuration["kwargs"].get("value")
success = query_result == value
return {
"success": success,
"result": {"observed_value": query_result},
}
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py examples">
examples = [
{
"data": [
{
"data": {
"col1": [1, 2, 2, 3, 4],
"col2": ["a", "a", "b", "b", "a"],
},
},
],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 5,
},
"out": {"success": True},
"only_for": ["sqlite", "spark"],
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 2,
},
"out": {"success": False},
"only_for": ["sqlite", "spark"],
},
{
"title": "positive_test_static_data_asset",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 5,
"query": """
SELECT COUNT(*)
FROM test
""",
},
"out": {"success": True},
"only_for": ["sqlite"],
},
{
"title": "positive_test_row_condition",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 2,
"row_condition": 'col("col1")==2',
"condition_parser": "great_expectations",
},
"out": {"success": True},
"only_for": ["sqlite", "spark"],
},
],
},
]
# </snippet>
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["query-based"],
"contributors": ["@joegargery"],
}
if __name__ == "__main__":
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_table_row_count_to_be.py print_diagnostic_checklist">
ExpectQueriedTableRowCountToBe().print_diagnostic_checklist()
# </snippet>
# Note to users: code below this line is only for integration testing -- ignore!
diagnostics = ExpectQueriedTableRowCountToBe().run_diagnostics()
for check in diagnostics["tests"]:
assert check["test_passed"] is True
assert check["error_diagnostics"] is None
for check in diagnostics["errors"]:
assert check is None
for check in diagnostics["maturity_checklist"]["experimental"]:
if check["message"] == "Passes all linting checks":
continue
assert check["passed"] is True
| ExpectQueriedTableRowCountToBe |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_audio.py | {
"start": 6352,
"end": 7524
} | class ____(Data2VecAudioPreTrainedModel, Wav2Vec2Model):
def __init__(self, config: Data2VecAudioConfig):
Data2VecAudioPreTrainedModel.__init__(self, config)
self.config = config
self.feature_extractor = Data2VecAudioFeatureEncoder(config)
self.feature_projection = Data2VecAudioFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = Data2VecAudioEncoder(config)
self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.feature_extractor._freeze_parameters()
def forward(self, **super_kwargs):
return super().forward(**super_kwargs)
| Data2VecAudioModel |
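A minimal inference sketch for the model class above, assuming the public `transformers` exports (`Data2VecAudioConfig`, `Data2VecAudioModel`); the default config and the random waveform are purely illustrative:
import torch
from transformers import Data2VecAudioConfig, Data2VecAudioModel
config = Data2VecAudioConfig()          # default config, randomly initialized weights
model = Data2VecAudioModel(config)
model.freeze_feature_encoder()          # stop gradients through the conv feature extractor
waveform = torch.randn(1, 16000)        # one second of fake 16 kHz audio
with torch.no_grad():
    outputs = model(input_values=waveform)
print(outputs.last_hidden_state.shape)  # (batch, frames, hidden_size)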
python | django__django | tests/fixtures/tests.py | {
"start": 944,
"end": 1405
} | class ____(TestCase):
fixtures = ["fixture1.json", "fixture2.json"]
def test_class_fixtures(self):
"Test case has installed 3 fixture objects"
self.assertSequenceEqual(
Article.objects.values_list("headline", flat=True),
[
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
)
| TestCaseFixtureLoadingTests |
python | pydantic__pydantic | tests/typechecking/misc.py | {
"start": 79,
"end": 567
} | class ____(BaseModel):
subs: list[Sub]
def func(model: Model) -> None:
model.model_dump(
include={'a': {1: True}},
)
model.model_dump(
include={'a': {'__all__': True}},
)
model.model_dump(
include={'a': {1: {'a'}}},
)
model.model_dump(
include={'a': {1, 2}},
)
# Invalid cases, should fail but the `IncEx` alias uses `bool` due to mypy limitations:
model.model_dump(
include={'a': {1: False}},
)
| Model |
python | run-llama__llama_index | llama-index-core/tests/storage/chat_store/test_sql_schema.py | {
"start": 252,
"end": 4362
} | class ____:
"""Test schema functionality in SQLAlchemyChatStore."""
def test_schema_parameter_initialization(self):
"""Test schema parameter initialization."""
# Without schema
store_no_schema = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
)
assert store_no_schema.db_schema is None
# With schema
store_with_schema = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
assert store_with_schema.db_schema == "test_schema"
def test_schema_serialization(self):
"""Test that schema is included in serialization."""
store = SQLAlchemyChatStore(
table_name="test_table",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
db_schema="test_schema",
)
# Test dump_store
dumped = store.dump_store()
assert "db_schema" in dumped
assert dumped["db_schema"] == "test_schema"
# Test model serialization
store_dict = store.model_dump()
assert "db_schema" in store_dict
assert store_dict["db_schema"] == "test_schema"
@pytest.mark.asyncio
async def test_postgresql_schema_creation(self):
"""Test that CREATE SCHEMA SQL is called for PostgreSQL."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
db_schema="test_schema",
)
# Mock the engine and connection
async_engine = MagicMock()
async_engine.begin.return_value.__aenter__ = AsyncMock()
async_engine.begin.return_value.__aexit__ = AsyncMock()
mock_conn = MagicMock()
mock_conn.execute = AsyncMock()
mock_conn.run_sync = AsyncMock()
async_engine.begin.return_value.__aenter__.return_value = mock_conn
store._async_engine = async_engine
# Call _setup_tables
await store._setup_tables(async_engine)
# Verify schema creation was called
mock_conn.execute.assert_called()
call_args = mock_conn.execute.call_args_list[0][0][0]
assert 'CREATE SCHEMA IF NOT EXISTS "test_schema"' in str(call_args)
# Verify MetaData has schema
assert store._metadata.schema == "test_schema"
@pytest.mark.asyncio
async def test_sqlite_schema_behavior(self):
"""Test that SQLite preserves schema parameter but doesn't use it in MetaData."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
# Add a message to trigger initialization
await store.add_message("test_user", ChatMessage(role="user", content="Hello!"))
# Schema parameter is preserved
assert store.db_schema == "test_schema"
# But MetaData doesn't have schema (SQLite limitation)
assert store._metadata.schema is None
# Operations still work
messages = await store.get_messages("test_user")
assert len(messages) == 1
assert messages[0].content == "Hello!"
@pytest.mark.asyncio
async def test_basic_operations_with_schema(self):
"""Test that basic operations work with schema."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
# Add and retrieve message
await store.add_message(
"schema_user", ChatMessage(role="user", content="Hello with schema!")
)
messages = await store.get_messages("schema_user")
assert len(messages) == 1
assert messages[0].content == "Hello with schema!"
# Verify schema is preserved
assert store.db_schema == "test_schema"
| TestSQLAlchemyChatStoreSchema |
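Outside the mocked tests above, the store is used roughly as follows. The import paths are assumptions based on the test file's location, and the in-memory SQLite URI requires the aiosqlite driver:
import asyncio
from llama_index.core.llms import ChatMessage
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
async def main() -> None:
    store = SQLAlchemyChatStore(
        table_name="chat_messages",
        async_database_uri="sqlite+aiosqlite:///:memory:",
        db_schema=None,  # schemas are preserved but ignored on SQLite
    )
    await store.add_message("user-1", ChatMessage(role="user", content="Hello!"))
    messages = await store.get_messages("user-1")
    print(messages[0].content)
asyncio.run(main())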
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ismn.py | {
"start": 1842,
"end": 4481
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid ISMN (International Standard Music Number)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"979-0-3452-4680-5",
"9790060115615",
" M-2306-7118-7",
"9790345246805",
"M230671187",
],
"some_other": [
"979-0-3452-4680-5",
"9790060115615",
" M-2306-7118-7",
"9790345246805",
"abcd",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.to_be_valid_ismn"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["python-stdnum"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIsmn().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidIsmn |
python | kennethreitz__tablib | src/tablib/core.py | {
"start": 2253,
"end": 23780
} | class ____:
"""The :class:`Dataset` object is the heart of Tablib. It provides all core
functionality.
Usually you create a :class:`Dataset` instance in your main module, and append
rows as you collect data. ::
data = tablib.Dataset()
data.headers = ('name', 'age')
for (name, age) in some_collector():
data.append((name, age))
Setting columns is similar. The column data length must equal the
current height of the data and headers must be set. ::
data = tablib.Dataset()
data.headers = ('first_name', 'last_name')
data.append(('John', 'Adams'))
data.append(('George', 'Washington'))
data.append_col((90, 67), header='age')
You can also set rows and headers upon instantiation. This is useful if
dealing with dozens or hundreds of :class:`Dataset` objects. ::
headers = ('first_name', 'last_name')
data = [('John', 'Adams'), ('George', 'Washington')]
data = tablib.Dataset(*data, headers=headers)
:param \\*args: (optional) list of rows to populate Dataset
:param headers: (optional) list strings for Dataset header row
:param title: (optional) string to use as title of the Dataset
.. admonition:: Format Attributes Definition
If you look at the code, the various output/import formats are not
defined within the :class:`Dataset` object. To add support for a new format, see
:ref:`Adding New Formats <newformats>`.
"""
def __init__(self, *args, **kwargs):
self._data = list(Row(arg) for arg in args)
self.__headers = None
# ('title', index) tuples
self._separators = []
# (column, callback) tuples
self._formatters = []
self.headers = kwargs.get('headers')
self.title = kwargs.get('title')
def __len__(self):
return self.height
def __getitem__(self, key):
if isinstance(key, str):
if key in self.headers:
pos = self.headers.index(key) # get 'key' index from each data
return [row[pos] for row in self._data]
else:
raise KeyError
else:
_results = self._data[key]
if isinstance(_results, Row):
return _results.tuple
else:
return [result.tuple for result in _results]
def __setitem__(self, key, value):
self._validate(value)
self._data[key] = Row(value)
def __delitem__(self, key):
if isinstance(key, str):
if key in self.headers:
pos = self.headers.index(key)
del self.headers[pos]
for i, row in enumerate(self._data):
del row[pos]
self._data[i] = row
else:
raise KeyError
else:
del self._data[key]
def __repr__(self):
try:
return '<%s dataset>' % (self.title.lower())
except AttributeError:
return '<dataset object>'
def __str__(self):
result = []
# Add str representation of headers.
if self.__headers:
result.append([str(h) for h in self.__headers])
# Add str representation of rows.
result.extend(list(map(str, row)) for row in self._data)
lens = [list(map(len, row)) for row in result]
field_lens = list(map(max, zip(*lens)))
# delimiter between header and data
if self.__headers:
result.insert(1, ['-' * length for length in field_lens])
format_string = '|'.join('{%s:%s}' % item for item in enumerate(field_lens))
return '\n'.join(format_string.format(*row) for row in result)
# ---------
# Internals
# ---------
def _get_in_format(self, fmt_key, **kwargs):
return registry.get_format(fmt_key).export_set(self, **kwargs)
def _set_in_format(self, fmt_key, in_stream, **kwargs):
in_stream = normalize_input(in_stream)
return registry.get_format(fmt_key).import_set(self, in_stream, **kwargs)
def _validate(self, row=None, col=None, safety=False):
"""Assures size of every row in dataset is of proper proportions."""
if row:
is_valid = (len(row) == self.width) if self.width else True
elif col:
if len(col) < 1:
is_valid = True
else:
is_valid = (len(col) == self.height) if self.height else True
else:
is_valid = all(len(x) == self.width for x in self._data)
if is_valid:
return True
else:
if not safety:
raise InvalidDimensions
return False
def _package(self, dicts=True, ordered=True):
"""Packages Dataset into lists of dictionaries for transmission."""
# TODO: Dicts default to false?
_data = list(self._data)
if ordered:
dict_pack = OrderedDict
else:
dict_pack = dict
# Execute formatters
if self._formatters:
for row_i, row in enumerate(_data):
for col, callback in self._formatters:
try:
if col is None:
for j, c in enumerate(row):
_data[row_i][j] = callback(c)
else:
_data[row_i][col] = callback(row[col])
except IndexError:
raise InvalidDatasetIndex
if self.headers:
if dicts:
data = [dict_pack(list(zip(self.headers, data_row))) for data_row in _data]
else:
data = [list(self.headers)] + list(_data)
else:
data = [list(row) for row in _data]
return data
def _get_headers(self):
"""An *optional* list of strings to be used for header rows and attribute names.
This must be set manually. The given list length must equal :attr:`Dataset.width`.
"""
return self.__headers
def _set_headers(self, collection):
"""Validating headers setter."""
self._validate(collection)
if collection:
try:
self.__headers = list(collection)
except TypeError:
raise TypeError
else:
self.__headers = None
headers = property(_get_headers, _set_headers)
def _get_dict(self):
"""A native Python representation of the :class:`Dataset` object. If headers have
been set, a list of Python dictionaries will be returned. If no headers have been set,
a list of tuples (rows) will be returned instead.
A dataset object can also be imported by setting the `Dataset.dict` attribute: ::
data = tablib.Dataset()
data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}]
"""
return self._package()
def _set_dict(self, pickle):
"""A native Python representation of the Dataset object. If headers have been
set, a list of Python dictionaries will be returned. If no headers have been
set, a list of tuples (rows) will be returned instead.
A dataset object can also be imported by setting the :attr:`Dataset.dict` attribute. ::
data = tablib.Dataset()
data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}]
"""
if not len(pickle):
return
# if list of rows
if isinstance(pickle[0], list):
self.wipe()
for row in pickle:
self.append(Row(row))
# if list of objects
elif isinstance(pickle[0], dict):
self.wipe()
self.headers = list(pickle[0].keys())
for row in pickle:
self.append(Row(list(row.values())))
else:
raise UnsupportedFormat
dict = property(_get_dict, _set_dict)
def _clean_col(self, col):
"""Prepares the given column for insert/append."""
col = list(col)
if self.headers:
header = [col.pop(0)]
else:
header = []
if len(col) == 1 and hasattr(col[0], '__call__'):
col = list(map(col[0], self._data))
col = tuple(header + col)
return col
@property
def height(self):
"""The number of rows currently in the :class:`Dataset`.
Cannot be directly modified.
"""
return len(self._data)
@property
def width(self):
"""The number of columns currently in the :class:`Dataset`.
Cannot be directly modified.
"""
try:
return len(self._data[0])
except IndexError:
try:
return len(self.headers)
except TypeError:
return 0
def load(self, in_stream, format=None, **kwargs):
"""
Import `in_stream` to the :class:`Dataset` object using the `format`.
`in_stream` can be a file-like object, a string, or a bytestring.
:param \\*\\*kwargs: (optional) custom configuration to the format `import_set`.
"""
stream = normalize_input(in_stream)
if not format:
format = detect_format(stream)
fmt = registry.get_format(format)
if not hasattr(fmt, 'import_set'):
raise UnsupportedFormat(f'Format {format} cannot be imported.')
fmt.import_set(self, stream, **kwargs)
return self
def export(self, format, **kwargs):
"""
Export :class:`Dataset` object to `format`.
:param \\*\\*kwargs: (optional) custom configuration to the format `export_set`.
"""
fmt = registry.get_format(format)
if not hasattr(fmt, 'export_set'):
raise UnsupportedFormat(f'Format {format} cannot be exported.')
return fmt.export_set(self, **kwargs)
# ----
# Rows
# ----
def insert(self, index, row, tags=list()):
"""Inserts a row to the :class:`Dataset` at the given index.
Rows inserted must be the correct size (height or width).
The default behaviour is to insert the given row to the :class:`Dataset`
object at the given index.
"""
self._validate(row)
self._data.insert(index, Row(row, tags=tags))
def rpush(self, row, tags=list()):
"""Adds a row to the end of the :class:`Dataset`.
See :method:`Dataset.insert` for additional documentation.
"""
self.insert(self.height, row=row, tags=tags)
def lpush(self, row, tags=list()):
"""Adds a row to the top of the :class:`Dataset`.
See :method:`Dataset.insert` for additional documentation.
"""
self.insert(0, row=row, tags=tags)
def append(self, row, tags=list()):
"""Adds a row to the :class:`Dataset`.
See :method:`Dataset.insert` for additional documentation.
"""
self.rpush(row, tags)
def extend(self, rows, tags=list()):
"""Adds a list of rows to the :class:`Dataset` using
:method:`Dataset.append`
"""
for row in rows:
self.append(row, tags)
def lpop(self):
"""Removes and returns the first row of the :class:`Dataset`."""
cache = self[0]
del self[0]
return cache
def rpop(self):
"""Removes and returns the last row of the :class:`Dataset`."""
cache = self[-1]
del self[-1]
return cache
def pop(self):
"""Removes and returns the last row of the :class:`Dataset`."""
return self.rpop()
# -------
# Columns
# -------
def insert_col(self, index, col=None, header=None):
"""Inserts a column to the :class:`Dataset` at the given index.
Columns inserted must be the correct height.
You can also insert a column of a single callable object, which will
add a new column with the return values of the callable each as an
item in the column. ::
data.append_col(col=random.randint)
If inserting a column, and :attr:`Dataset.headers` is set, the
header attribute must be set, and will be considered the header for
that row.
See :ref:`dyncols` for an in-depth example.
.. versionchanged:: 0.9.0
If inserting a column, and :attr:`Dataset.headers` is set, the
header attribute must be set, and will be considered the header for
that row.
.. versionadded:: 0.9.0
If inserting a row, you can add :ref:`tags <tags>` to the row you are inserting.
This gives you the ability to :method:`filter <Dataset.filter>` your
:class:`Dataset` later.
"""
if col is None:
col = []
# Callable Columns...
if hasattr(col, '__call__'):
col = list(map(col, self._data))
col = self._clean_col(col)
self._validate(col=col)
if self.headers:
# pop the first item off, add to headers
if not header:
raise HeadersNeeded()
# corner case - if header is set without data
elif header and self.height == 0 and len(col):
raise InvalidDimensions
self.headers.insert(index, header)
if self.height and self.width:
for i, row in enumerate(self._data):
row.insert(index, col[i])
self._data[i] = row
else:
self._data = [Row([row]) for row in col]
def rpush_col(self, col, header=None):
"""Adds a column to the end of the :class:`Dataset`.
See :method:`Dataset.insert` for additional documentation.
"""
self.insert_col(self.width, col, header=header)
def lpush_col(self, col, header=None):
"""Adds a column to the top of the :class:`Dataset`.
See :method:`Dataset.insert` for additional documentation.
"""
self.insert_col(0, col, header=header)
def insert_separator(self, index, text='-'):
"""Adds a separator to :class:`Dataset` at given index."""
sep = (index, text)
self._separators.append(sep)
def append_separator(self, text='-'):
"""Adds a :ref:`separator <separators>` to the :class:`Dataset`."""
# change offsets if headers are or aren't defined
if not self.headers:
index = self.height if self.height else 0
else:
index = (self.height + 1) if self.height else 1
self.insert_separator(index, text)
def append_col(self, col, header=None):
"""Adds a column to the :class:`Dataset`.
See :method:`Dataset.insert_col` for additional documentation.
"""
self.rpush_col(col, header)
def get_col(self, index):
"""Returns the column from the :class:`Dataset` at the given index."""
return [row[index] for row in self._data]
# ----
# Misc
# ----
def add_formatter(self, col, handler):
"""Adds a formatter to the :class:`Dataset`.
.. versionadded:: 0.9.5
:param col: column to. Accepts index int or header str.
:param handler: reference to callback function to execute against
each cell value.
"""
if isinstance(col, str):
if col in self.headers:
col = self.headers.index(col) # get 'key' index from each data
else:
raise KeyError
if not col > self.width:
self._formatters.append((col, handler))
else:
raise InvalidDatasetIndex
return True
def filter(self, tag):
"""Returns a new instance of the :class:`Dataset`, excluding any rows
that do not contain the given :ref:`tags <tags>`.
"""
_dset = copy(self)
_dset._data = [row for row in _dset._data if row.has_tag(tag)]
return _dset
def sort(self, col, reverse=False):
"""Sort a :class:`Dataset` by a specific column, given string (for
header) or integer (for column index). The order can be reversed by
setting ``reverse`` to ``True``.
Returns a new :class:`Dataset` instance where columns have been
sorted.
"""
if isinstance(col, str):
if not self.headers:
raise HeadersNeeded
_sorted = sorted(self.dict, key=itemgetter(col), reverse=reverse)
_dset = Dataset(headers=self.headers, title=self.title)
for item in _sorted:
row = [item[key] for key in self.headers]
_dset.append(row=row)
else:
if self.headers:
col = self.headers[col]
_sorted = sorted(self.dict, key=itemgetter(col), reverse=reverse)
_dset = Dataset(headers=self.headers, title=self.title)
for item in _sorted:
if self.headers:
row = [item[key] for key in self.headers]
else:
row = item
_dset.append(row=row)
return _dset
def transpose(self):
"""Transpose a :class:`Dataset`, turning rows into columns and vice
versa, returning a new ``Dataset`` instance. The first row of the
original instance becomes the new header row."""
# Don't transpose if there is no data
if not self:
return
_dset = Dataset()
# The first element of the headers stays in the headers,
# it is our "hinge" on which we rotate the data
new_headers = [self.headers[0]] + self[self.headers[0]]
_dset.headers = new_headers
for index, column in enumerate(self.headers):
if column == self.headers[0]:
# It's in the headers, so skip it
continue
# Adding the column name as now they're a regular column
# Use `get_col(index)` in case there are repeated values
row_data = [column] + self.get_col(index)
row_data = Row(row_data)
_dset.append(row=row_data)
return _dset
def stack(self, other):
"""Stack two :class:`Dataset` instances together by
joining at the row level, and return new combined
``Dataset`` instance."""
if not isinstance(other, Dataset):
return
if self.width != other.width:
raise InvalidDimensions
# Copy the source data
_dset = copy(self)
rows_to_stack = [row for row in _dset._data]
other_rows = [row for row in other._data]
rows_to_stack.extend(other_rows)
_dset._data = rows_to_stack
return _dset
def stack_cols(self, other):
"""Stack two :class:`Dataset` instances together by
joining at the column level, and return a new
combined ``Dataset`` instance. If either ``Dataset``
has headers set, than the other must as well."""
if not isinstance(other, Dataset):
return
if self.headers or other.headers:
if not self.headers or not other.headers:
raise HeadersNeeded
if self.height != other.height:
raise InvalidDimensions
try:
new_headers = self.headers + other.headers
except TypeError:
new_headers = None
_dset = Dataset()
for column in self.headers:
_dset.append_col(col=self[column])
for column in other.headers:
_dset.append_col(col=other[column])
_dset.headers = new_headers
return _dset
def remove_duplicates(self):
"""Removes all duplicate rows from the :class:`Dataset` object
while maintaining the original order."""
seen = set()
self._data[:] = [row for row in self._data if not (tuple(row) in seen or seen.add(tuple(row)))]
def wipe(self):
"""Removes all content and headers from the :class:`Dataset` object."""
self._data = list()
self.__headers = None
def subset(self, rows=None, cols=None):
"""Returns a new instance of the :class:`Dataset`,
including only specified rows and columns.
"""
# Don't return if no data
if not self:
return
if rows is None:
rows = list(range(self.height))
if cols is None:
cols = list(self.headers)
# filter out impossible rows and columns
rows = [row for row in rows if row in range(self.height)]
cols = [header for header in cols if header in self.headers]
_dset = Dataset()
# filtering rows and columns
_dset.headers = list(cols)
_dset._data = []
for row_no, row in enumerate(self._data):
data_row = []
for key in _dset.headers:
if key in self.headers:
pos = self.headers.index(key)
data_row.append(row[pos])
else:
raise KeyError
if row_no in rows:
_dset.append(row=Row(data_row))
return _dset
| Dataset |
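Beyond the constructor usage shown in the class docstring, the `sort`, `transpose`, and `subset` helpers defined above compose as in this small sketch (assuming `from tablib import Dataset`):
from tablib import Dataset
data = Dataset(headers=('first_name', 'last_name', 'age'))
data.append(('John', 'Adams', 90))
data.append(('George', 'Washington', 67))
# sort() returns a new Dataset ordered by the named column
by_age = data.sort('age')
print(by_age['first_name'])      # ['George', 'John']
# transpose() pivots rows into columns, keeping the first header as the hinge
print(data.transpose().headers)  # ['first_name', 'John', 'George']
# subset() picks specific rows and columns into a new Dataset
print(data.subset(rows=[0], cols=['first_name', 'age']))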
python | celery__celery | celery/apps/multi.py | {
"start": 3218,
"end": 8155
} | class ____:
"""Represents a node in a cluster."""
def __init__(self, name,
cmd=None, append=None, options=None, extra_args=None):
self.name = name
self.cmd = cmd or f"-m {celery_exe('worker', '--detach')}"
self.append = append
self.extra_args = extra_args or ''
self.options = self._annotate_with_default_opts(
options or OrderedDict())
self.expander = self._prepare_expander()
self.argv = self._prepare_argv()
self._pid = None
def _annotate_with_default_opts(self, options):
options['-n'] = self.name
self._setdefaultopt(options, ['--pidfile', '-p'], '/var/run/celery/%n.pid')
self._setdefaultopt(options, ['--logfile', '-f'], '/var/log/celery/%n%I.log')
self._setdefaultopt(options, ['--executable'], sys.executable)
return options
def _setdefaultopt(self, d, alt, value):
for opt in alt[1:]:
try:
return d[opt]
except KeyError:
pass
value = d.setdefault(alt[0], os.path.normpath(value))
dir_path = os.path.dirname(value)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
return value
def _prepare_expander(self):
shortname, hostname = self.name.split('@', 1)
return build_expander(
self.name, shortname, hostname)
def _prepare_argv(self):
cmd = self.expander(self.cmd).split(' ')
i = cmd.index('celery') + 1
options = self.options.copy()
for opt, value in self.options.items():
if opt in (
'-A', '--app',
'-b', '--broker',
'--result-backend',
'--loader',
'--config',
'--workdir',
'-C', '--no-color',
'-q', '--quiet',
):
cmd.insert(i, format_opt(opt, self.expander(value)))
options.pop(opt)
cmd = [' '.join(cmd)]
argv = tuple(
cmd +
[format_opt(opt, self.expander(value))
for opt, value in options.items()] +
[self.extra_args]
)
if self.append:
argv += (self.expander(self.append),)
return argv
def alive(self):
return self.send(0)
def send(self, sig, on_error=None):
pid = self.pid
if pid:
try:
os.kill(pid, sig)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
maybe_call(on_error, self)
return False
return True
maybe_call(on_error, self)
def start(self, env=None, **kwargs):
return self._waitexec(
self.argv, path=self.executable, env=env, **kwargs)
def _waitexec(self, argv, path=sys.executable, env=None,
on_spawn=None, on_signalled=None, on_failure=None):
argstr = self.prepare_argv(argv, path)
maybe_call(on_spawn, self, argstr=' '.join(argstr), env=env)
pipe = Popen(argstr, env=env)
return self.handle_process_exit(
pipe.wait(),
on_signalled=on_signalled,
on_failure=on_failure,
)
def handle_process_exit(self, retcode, on_signalled=None, on_failure=None):
if retcode < 0:
maybe_call(on_signalled, self, -retcode)
return -retcode
elif retcode > 0:
maybe_call(on_failure, self, retcode)
return retcode
def prepare_argv(self, argv, path):
args = ' '.join([path] + list(argv))
return shlex.split(from_utf8(args), posix=not IS_WINDOWS)
def getopt(self, *alt):
for opt in alt:
try:
return self.options[opt]
except KeyError:
pass
raise KeyError(alt[0])
def __repr__(self):
return f'<{type(self).__name__}: {self.name}>'
@cached_property
def pidfile(self):
return self.expander(self.getopt('--pidfile', '-p'))
@cached_property
def logfile(self):
return self.expander(self.getopt('--logfile', '-f'))
@property
def pid(self):
if self._pid is not None:
return self._pid
try:
return Pidfile(self.pidfile).read_pid()
except ValueError:
pass
@pid.setter
def pid(self, value):
self._pid = value
@cached_property
def executable(self):
return self.options['--executable']
@cached_property
def argv_with_executable(self):
return (self.executable,) + self.argv
@classmethod
def from_kwargs(cls, name, **kwargs):
return cls(name, options=_kwargs_to_command_line(kwargs))
def maybe_call(fun, *args, **kwargs):
if fun is not None:
fun(*args, **kwargs)
| Node |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/emitter.py | {
"start": 1485,
"end": 2856
} | class ____:
# replacement for the list based stack of None/int
def __init__(self):
# type: () -> None
self.values = [] # type: List[Tuple[Any, bool]]
def append(self, val, seq):
# type: (Any, Any) -> None
self.values.append((val, seq))
def pop(self):
# type: () -> Any
return self.values.pop()[0]
def last_seq(self):
# type: () -> bool
# return the seq(uence) value for the element added before the last one
# in increase_indent()
try:
return self.values[-2][1]
except IndexError:
return False
def seq_flow_align(self, seq_indent, column, pre_comment=False):
# type: (int, int, Optional[bool]) -> int
# extra spaces because of dash
# nprint('seq_flow_align', self.values, pre_comment)
if len(self.values) < 2 or not self.values[-1][1]:
if len(self.values) == 0 or not pre_comment:
return 0
base = self.values[-1][0] if self.values[-1][0] is not None else 0
if pre_comment:
return base + seq_indent # type: ignore
# return (len(self.values)) * seq_indent
# -1 for the dash
return base + seq_indent - column - 1 # type: ignore
def __len__(self):
# type: () -> int
return len(self.values)
| Indents |
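A minimal sketch of how the emitter drives this indent stack; the pushed values are illustrative and assume the `Indents` class above is in scope:
indents = Indents()
indents.append(0, True)    # entered a block sequence indented at column 0
indents.append(2, False)   # entered a mapping nested inside that sequence
assert len(indents) == 2
assert indents.last_seq() is True    # the element added before the last one was a sequence
assert indents.pop() == 2            # pop() returns only the indent value, not the seq flag
assert indents.last_seq() is False   # with a single entry left there is no [-2] element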
python | tiangolo__fastapi | docs_src/security/tutorial003_an_py310.py | {
"start": 776,
"end": 914
} | class ____(BaseModel):
username: str
email: str | None = None
full_name: str | None = None
disabled: bool | None = None
| User |
python | sqlalchemy__sqlalchemy | test/ext/test_horizontal_shard.py | {
"start": 29916,
"end": 31497
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String(30))
deferred_data = deferred(Column(String(30)))
@classmethod
def insert_data(cls, connection):
A = cls.classes.A
s = Session(connection)
s.add(A(data="d1", deferred_data="d2"))
s.commit()
def _session_fixture(self, **kw):
# the "fake" key here is to ensure that neither id_chooser
# nor query_chooser are actually used, only shard_chooser
# should be used.
return ShardedSession(
shards={"main": testing.db},
shard_chooser=lambda *args: "main",
identity_chooser=lambda *args: ["fake", "main"],
execute_chooser=lambda *args: ["fake", "main"],
**kw,
)
def test_refresh(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
session.refresh(a1)
def test_deferred(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
eq_(a1.deferred_data, "d2")
def test_unexpire(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
session.expire(a1)
eq_(a1.data, "d1")
| RefreshDeferExpireTest |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 37536,
"end": 40879
} | class ____:
def test_basic(self, xp):
# Test against hardcoded data
for k, v in dpss_data.items():
win, ratios = windows.dpss(*k, return_ratios=True, xp=xp)
xp_assert_close(win, v[0], atol=1e-7, err_msg=k)
xp_assert_close(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k)
def test_unity(self, xp):
# Test unity value handling (gh-2221)
for M in range(1, 21):
# corrected w/approximation (default)
win = windows.dpss(M, M / 2.1, xp=xp)
expected = M % 2 # one for odd, none for even
xp_assert_equal(np.isclose(win, 1.).sum(), expected,
err_msg=f'{win}')
# corrected w/subsample delay (slower)
win_sub = windows.dpss(M, M / 2.1, norm='subsample', xp=xp)
if M > 2:
# @M=2 the subsample doesn't do anything
xp_assert_equal(np.isclose(win_sub, 1.).sum(), expected,
err_msg=f'{win_sub}')
xp_assert_close(win, win_sub, rtol=0.03) # within 3%
# not the same, l2-norm
win_2 = windows.dpss(M, M / 2.1, norm=2, xp=xp)
expected = 1 if M == 1 else 0
xp_assert_equal(np.isclose(win_2, 1.).sum(), expected,
err_msg=f'{win_2}')
def test_extremes(self, xp):
# Test extremes of alpha
lam = windows.dpss(31, 6, 4, return_ratios=True, xp=xp)[1]
xp_assert_close(lam, xp.ones_like(lam))
lam = windows.dpss(31, 7, 4, return_ratios=True, xp=xp)[1]
xp_assert_close(lam, xp.ones_like(lam))
lam = windows.dpss(31, 8, 4, return_ratios=True, xp=xp)[1]
xp_assert_close(lam, xp.ones_like(lam))
def test_degenerate(self, xp):
# Test failures
assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax
assert_raises(ValueError, windows.dpss, 4, 1.5, -5)
assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)
assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2.
assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos
assert_raises(ValueError, windows.dpss, 3, 0, 3)
assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M
@skip_xp_backends(np_only=True)
    def test_degenerate_single_samples(self, xp):
# Single samples
w = windows.dpss(1, 1.)
xp_assert_equal(w, [1.])
w, ratio = windows.dpss(1, 1., return_ratios=True)
xp_assert_equal(w, [1.])
assert ratio == 1.
w, ratio = windows.dpss(1, 1., Kmax=4, return_ratios=True)
xp_assert_equal(w, [1.])
assert isinstance(ratio, np.ndarray)
xp_assert_equal(ratio, [1.])
assert_raises(ValueError, windows.dpss, 4, 1.5, -1, xp=xp) # Bad Kmax
assert_raises(ValueError, windows.dpss, 4, 1.5, -5, xp=xp)
assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1, xp=xp)
assert_raises(ValueError, windows.dpss, 3, 1.5, 3, xp=xp) # NW must be < N/2.
assert_raises(ValueError, windows.dpss, 3, -1, 3, xp=xp) # NW must be pos
assert_raises(ValueError, windows.dpss, 3, 0, 3, xp=xp)
assert_raises(ValueError, windows.dpss, -1, 1, 3, xp=xp) # negative M
@make_xp_test_case(windows.lanczos)
| TestDPSS |
python | rq__rq | rq/repeat.py | {
"start": 287,
"end": 4303
} | class ____:
"""Defines repeat behavior for scheduled jobs.
Attributes:
times (int): The number of times to repeat the job. Must be greater than 0.
intervals (Union[int, List[int]]): The intervals between job executions in seconds.
Can be a single integer value or a list of intervals. If a list is provided and it's
shorter than (times-1), the last interval will be reused for remaining repeats.
"""
times: int
intervals: list[int]
def __init__(self, times: int, interval: Optional[Union[int, Iterable[int]]] = 0):
"""Initialize a Repeat instance.
Args:
times (int): The number of times to repeat the job. Must be greater than 0.
interval (Optional[Union[int, Iterable[int]]], optional): The intervals between job executions in seconds.
Can be a single integer value or a list of intervals. Defaults to 0 (immediately repeated).
Raises:
ValueError: If times is less than 1 or if intervals contains negative values.
"""
if times < 1:
raise ValueError('times: please enter a value greater than 0')
if isinstance(interval, int):
if interval < 0:
raise ValueError('intervals: negative numbers are not allowed')
self.intervals = [interval]
elif isinstance(interval, Iterable):
interval_list = list(interval)
for i in interval_list:
if i < 0:
raise ValueError('intervals: negative numbers are not allowed')
self.intervals = interval_list
else:
raise TypeError('intervals must be an int or iterable of ints')
self.times = times
@classmethod
def get_interval(cls, count: int, intervals: list[int]) -> int:
"""Returns the appropriate interval based on the repeat count.
Args:
count (int): Current repeat count (0-based)
intervals (List[int]): List of intervals
Returns:
int: The interval to use
"""
if count >= len(intervals):
return intervals[-1] # Use the last interval if we've run out
return intervals[count]
@classmethod
def schedule(cls, job: 'Job', queue: 'Queue', pipeline: Optional['Pipeline'] = None):
"""Schedules a job to repeat based on its repeat configuration.
This decrements the job's repeats_left counter and either enqueues
it immediately (if interval is 0) or schedules it to run after the
specified interval.
Args:
job (Job): The job to repeat
queue (Queue): The queue to enqueue/schedule the job on
pipeline (Optional[Pipeline], optional): Redis pipeline to use. Defaults to None.
Returns:
scheduled_time (Optional[datetime]): When the job was scheduled to run, or None if not scheduled
"""
if job.repeats_left is None or job.repeats_left <= 0:
raise ValueError(f'Cannot schedule job {job.id}: no repeats left')
pipe = pipeline if pipeline is not None else job.connection.pipeline()
# Get the interval for this repeat based on remaining repeats
repeat_count = job.repeats_left - 1 # Count from the end (0-indexed)
interval = 0
if job.repeat_intervals:
interval = cls.get_interval(repeat_count, job.repeat_intervals)
# Decrement repeats_left
job.repeats_left = job.repeats_left - 1
job.save(pipeline=pipe)
if interval == 0:
# Enqueue the job immediately
queue._enqueue_job(job, pipeline=pipe)
else:
# Schedule the job to run after the interval
scheduled_time = datetime.now() + timedelta(seconds=interval)
queue.schedule_job(job, scheduled_time, pipeline=pipe)
# Execute the pipeline if we created it
if pipeline is None:
pipe.execute()
| Repeat |
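A short sketch of the validation and interval-selection behaviour defined above; the import path follows this record's file location (rq/repeat.py):
from rq.repeat import Repeat
# times must be >= 1 and every interval must be non-negative
repeat = Repeat(times=3, interval=[5, 30])
print(repeat.times)      # 3
print(repeat.intervals)  # [5, 30]
# If fewer intervals than repeats are given, the last interval is reused
print(Repeat.get_interval(0, [5, 30]))  # 5
print(Repeat.get_interval(1, [5, 30]))  # 30
print(Repeat.get_interval(2, [5, 30]))  # 30 (last value reused)
try:
    Repeat(times=0)
except ValueError as exc:
    print(exc)  # times: please enter a value greater than 0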
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 11483,
"end": 12152
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
        A transformer that checks, elementwise, whether each value is less than 0.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
result = X < 0
return result.astype(float)
| LTTransformer |
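A quick usage sketch for the transformer above, assuming the class is in scope (e.g. importable from tpot.builtin_modules.arithmetictransformer). The comparison is applied elementwise, and a 1-D input is promoted to a single row:
import numpy as np
lt = LTTransformer()
X = np.array([[1.0, -2.0],
              [-3.0, 0.0]])
# Each cell becomes 1.0 where the value is negative, otherwise 0.0
print(lt.fit(X).transform(X))
# [[0. 1.]
#  [1. 0.]]
# A 1-D input is expanded to shape (1, n) before the comparison
print(lt.transform(np.array([-1.0, 2.0])))
# [[1. 0.]]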
python | kamyu104__LeetCode-Solutions | Python/longest-palindromic-subsequence-ii.py | {
"start": 31,
"end": 837
} | class ____(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
dp = [[[0]*26 for _ in xrange(len(s))] for _ in xrange(2)]
for i in reversed(xrange(len(s))):
for j in xrange(i+1, len(s)):
if i == j-1:
if s[j] == s[i]:
dp[i%2][j][ord(s[i])-ord('a')] = 2
else:
for k in xrange(26):
if s[j] == s[i] and ord(s[j])-ord('a') != k:
dp[i%2][j][ord(s[j])-ord('a')] = max(dp[i%2][j][ord(s[j])-ord('a')], dp[(i+1)%2][j-1][k]+2)
dp[i%2][j][k] = max(dp[i%2][j][k], dp[i%2][j-1][k], dp[(i+1)%2][j][k], dp[(i+1)%2][j-1][k])
return max(dp[0][-1])
| Solution |
python | pypa__hatch | tests/backend/builders/hooks/test_version.py | {
"start": 6246,
"end": 8966
} | class ____:
def test_default(self, temp_dir, helpers):
config = {"path": "baz.py", "pattern": True}
metadata = ProjectMetadata(
str(temp_dir),
PluginManager(),
{
"project": {"name": "foo", "dynamic": ["version"]},
"tool": {"hatch": {"metadata": {"hooks": {"custom": {}}}}},
},
)
file_path = temp_dir / DEFAULT_BUILD_SCRIPT
file_path.write_text(
helpers.dedent(
"""
from hatchling.metadata.plugin.interface import MetadataHookInterface
class CustomHook(MetadataHookInterface):
def update(self, metadata):
metadata['version'] = '1.2.3'
"""
)
)
version_file = temp_dir / "baz.py"
version_file.write_text(
helpers.dedent(
"""
__version__ = '0.0.0'
"""
)
)
build_data = {"artifacts": []}
hook = VersionBuildHook(str(temp_dir), config, None, metadata, "", "")
hook.initialize([], build_data)
assert version_file.read_text() == helpers.dedent(
"""
__version__ = '1.2.3'
"""
)
assert build_data["artifacts"] == ["/baz.py"]
def test_custom(self, temp_dir, helpers):
config = {"path": "baz.py", "pattern": 'v = "(?P<version>.+)"'}
metadata = ProjectMetadata(
str(temp_dir),
PluginManager(),
{
"project": {"name": "foo", "dynamic": ["version"]},
"tool": {"hatch": {"metadata": {"hooks": {"custom": {}}}}},
},
)
file_path = temp_dir / DEFAULT_BUILD_SCRIPT
file_path.write_text(
helpers.dedent(
"""
from hatchling.metadata.plugin.interface import MetadataHookInterface
class CustomHook(MetadataHookInterface):
def update(self, metadata):
metadata['version'] = '1.2.3'
"""
)
)
version_file = temp_dir / "baz.py"
version_file.write_text(
helpers.dedent(
"""
v = "0.0.0"
"""
)
)
build_data = {"artifacts": []}
hook = VersionBuildHook(str(temp_dir), config, None, metadata, "", "")
hook.initialize([], build_data)
assert version_file.read_text() == helpers.dedent(
"""
v = "1.2.3"
"""
)
assert build_data["artifacts"] == ["/baz.py"]
| TestPattern |
python | huggingface__transformers | src/transformers/models/ijepa/modular_ijepa.py | {
"start": 4462,
"end": 5541
} | class ____(IJepaPreTrainedModel, ViTModel):
def __init__(self, config: IJepaConfig, add_pooling_layer: bool = False, use_mask_token: bool = False):
r"""
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token)
@auto_docstring(
custom_intro="""
IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)
e.g. for ImageNet.
<Tip>
Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by
setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
position embeddings to the higher resolution.
</Tip>
"""
)
| IJepaModel |
python | huggingface__transformers | src/transformers/models/reformer/modeling_reformer.py | {
"start": 80334,
"end": 90087
} | class ____(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
assert self.config.num_hidden_layers > 0, (
"`config.attn_layers` is empty. Select at least one attn layer from ['lsh', 'local']"
)
self.embeddings = ReformerEmbeddings(config)
self.encoder = ReformerEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
num_hashes: Optional[int] = None,
past_buckets_states: Optional[ReformerDynamicCache] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ReformerModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
past_buckets_states (`ReformerDynamicCache`, *optional*):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
assert len(input_shape) == 2, (
f"`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}"
)
if past_buckets_states is not None:
assert not self.training, "`past_buckets_states` can only be used for inference, not for training`."
# original sequence length for padding
orig_sequence_length = input_shape[-1]
# if needs padding
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
min_chunk_length = _get_min_chunk_len(self.config)
must_pad_to_match_chunk_length = (
input_shape[-1] % least_common_mult_chunk_length != 0
and input_shape[-1] > min_chunk_length
and past_buckets_states is None
)
if must_pad_to_match_chunk_length:
padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length
if self.training is True:
raise ValueError(
f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple "
f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length "
f"of {input_shape[-1] + padding_length}."
)
# pad input
input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(
input_ids,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
input_shape=input_shape,
padding_length=padding_length,
padded_seq_length=least_common_mult_chunk_length,
device=device,
)
# start index for position encoding depends on incremental decoding
start_idx_pos_encodings = past_buckets_states.get_start_idx() if past_buckets_states is not None else 0
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
start_idx_pos_encodings=start_idx_pos_encodings,
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
)
sequence_output = encoder_outputs.hidden_states
# if padding was applied
if must_pad_to_match_chunk_length:
sequence_output = sequence_output[:, :orig_sequence_length]
past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
attentions = encoder_outputs.all_attentions if output_attentions else None
if not return_dict:
return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)
return ReformerModelOutput(
last_hidden_state=sequence_output,
past_buckets_states=past_buckets_states,
hidden_states=hidden_states,
attentions=attentions,
)
def _pad_to_mult_of_chunk_length(
self,
input_ids,
inputs_embeds=None,
attention_mask=None,
position_ids=None,
input_shape=None,
padding_length=None,
padded_seq_length=None,
device=None,
):
logger.warning_once(
f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
f"multiple of `config.chunk_length`: {padded_seq_length}"
)
padded_input_ids = torch.full(
(input_shape[0], padding_length),
self.config.pad_token_id,
device=device,
dtype=torch.long,
)
# Extend `attention_mask`
if attention_mask is not None:
pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)
attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
else:
attention_mask = torch.cat(
[
torch.ones(input_shape, device=device, dtype=torch.bool),
torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.bool),
],
dim=-1,
)
# Extend `input_ids` with padding to match least common multiple chunk_length
if input_ids is not None:
input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
input_shape = input_ids.size()
# Pad position ids if given
if position_ids is not None:
padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)
padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)
# Extend `inputs_embeds` with padding to match least common multiple chunk_length
if inputs_embeds is not None:
padded_inputs_embeds = self.get_input_embeddings()(padded_input_ids)
inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
input_shape = inputs_embeds.size()
return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
@auto_docstring(
custom_intro="""
Reformer Model with a `language modeling` head on top.
"""
)
| ReformerModel |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 15870,
"end": 15975
} | class ____(PydanticValueError):
msg_template = 'value is not a valid IPv4 interface'
| IPv4InterfaceError |
python | spack__spack | lib/spack/spack/cmd/style.py | {
"start": 17117,
"end": 28392
} | class ____(TokenBase):
"""Reconstructs the tokens for previous specs, so we can reuse code to rotate them"""
# Dependency
START_EDGE_PROPERTIES = r"(?:\^\[)"
END_EDGE_PROPERTIES = r"(?:\])"
DEPENDENCY = r"(?:\^)"
# Version
VERSION_HASH_PAIR = SpecTokens.VERSION_HASH_PAIR.regex
GIT_VERSION = SpecTokens.GIT_VERSION.regex
VERSION = SpecTokens.VERSION.regex
# Variants
PROPAGATED_BOOL_VARIANT = SpecTokens.PROPAGATED_BOOL_VARIANT.regex
BOOL_VARIANT = SpecTokens.BOOL_VARIANT.regex
PROPAGATED_KEY_VALUE_PAIR = SpecTokens.PROPAGATED_KEY_VALUE_PAIR.regex
KEY_VALUE_PAIR = SpecTokens.KEY_VALUE_PAIR.regex
# Compilers
COMPILER_AND_VERSION = rf"(?:%\s*(?:{NAME})(?:[\s]*)@\s*(?:{VERSION_LIST}))"
COMPILER = rf"(?:%\s*(?:{NAME}))"
# FILENAME
FILENAME = SpecTokens.FILENAME.regex
# Package name
FULLY_QUALIFIED_PACKAGE_NAME = SpecTokens.FULLY_QUALIFIED_PACKAGE_NAME.regex
UNQUALIFIED_PACKAGE_NAME = SpecTokens.UNQUALIFIED_PACKAGE_NAME.regex
# DAG hash
DAG_HASH = SpecTokens.DAG_HASH.regex
# White spaces
WS = SpecTokens.WS.regex
# Unexpected character(s)
UNEXPECTED = SpecTokens.UNEXPECTED.regex
def _spec_str_reorder_compiler(idx: int, blocks: List[List[Token]]) -> None:
# only move the compiler to the back if it exists and is not already at the end
if not 0 <= idx < len(blocks) - 1:
return
# if there's only whitespace after the compiler, don't move it
if all(token.kind == _LegacySpecTokens.WS for block in blocks[idx + 1 :] for token in block):
return
# rotate left and always add at least one WS token between compiler and previous token
compiler_block = blocks.pop(idx)
if compiler_block[0].kind != _LegacySpecTokens.WS:
compiler_block.insert(0, Token(_LegacySpecTokens.WS, " "))
# delete the WS tokens from the new first block if it was at the very start, to prevent leading
# WS tokens.
while idx == 0 and blocks[0][0].kind == _LegacySpecTokens.WS:
blocks[0].pop(0)
blocks.append(compiler_block)
def _spec_str_format(spec_str: str) -> Optional[str]:
"""Given any string, try to parse as spec string, and rotate the compiler token to the end
of each spec instance. Returns the formatted string if it was changed, otherwise None."""
# We parse blocks of tokens that include leading whitespace, and move the compiler block to
# the end when we hit a dependency ^... or the end of a string.
# [@3.1][ +foo][ +bar][ %gcc@3.1][ +baz]
# [@3.1][ +foo][ +bar][ +baz][ %gcc@3.1]
current_block: List[Token] = []
blocks: List[List[Token]] = []
compiler_block_idx = -1
in_edge_attr = False
legacy_tokenizer = Tokenizer(_LegacySpecTokens)
for token in legacy_tokenizer.tokenize(spec_str):
if token.kind == _LegacySpecTokens.UNEXPECTED:
# parsing error, we cannot fix this string.
return None
elif token.kind in (_LegacySpecTokens.COMPILER, _LegacySpecTokens.COMPILER_AND_VERSION):
# multiple compilers are not supported in Spack v0.x, so early return
if compiler_block_idx != -1:
return None
current_block.append(token)
blocks.append(current_block)
current_block = []
compiler_block_idx = len(blocks) - 1
elif token.kind in (
_LegacySpecTokens.START_EDGE_PROPERTIES,
_LegacySpecTokens.DEPENDENCY,
_LegacySpecTokens.UNQUALIFIED_PACKAGE_NAME,
_LegacySpecTokens.FULLY_QUALIFIED_PACKAGE_NAME,
):
_spec_str_reorder_compiler(compiler_block_idx, blocks)
compiler_block_idx = -1
if token.kind == _LegacySpecTokens.START_EDGE_PROPERTIES:
in_edge_attr = True
current_block.append(token)
blocks.append(current_block)
current_block = []
elif token.kind == _LegacySpecTokens.END_EDGE_PROPERTIES:
in_edge_attr = False
current_block.append(token)
blocks.append(current_block)
current_block = []
elif in_edge_attr:
current_block.append(token)
elif token.kind in (
_LegacySpecTokens.VERSION_HASH_PAIR,
_LegacySpecTokens.GIT_VERSION,
_LegacySpecTokens.VERSION,
_LegacySpecTokens.PROPAGATED_BOOL_VARIANT,
_LegacySpecTokens.BOOL_VARIANT,
_LegacySpecTokens.PROPAGATED_KEY_VALUE_PAIR,
_LegacySpecTokens.KEY_VALUE_PAIR,
_LegacySpecTokens.DAG_HASH,
):
current_block.append(token)
blocks.append(current_block)
current_block = []
elif token.kind == _LegacySpecTokens.WS:
current_block.append(token)
else:
raise ValueError(f"unexpected token {token}")
if current_block:
blocks.append(current_block)
_spec_str_reorder_compiler(compiler_block_idx, blocks)
new_spec_str = "".join(token.value for block in blocks for token in block)
return new_spec_str if spec_str != new_spec_str else None
SpecStrHandler = Callable[[str, int, int, str, str], None]
def _spec_str_default_handler(path: str, line: int, col: int, old: str, new: str):
"""A SpecStrHandler that prints formatted spec strings and their locations."""
print(f"{path}:{line}:{col}: `{old}` -> `{new}`")
def _spec_str_fix_handler(path: str, line: int, col: int, old: str, new: str):
"""A SpecStrHandler that updates formatted spec strings in files."""
with open(path, "r", encoding="utf-8") as f:
lines = f.readlines()
new_line = lines[line - 1].replace(old, new)
if new_line == lines[line - 1]:
tty.warn(f"{path}:{line}:{col}: could not apply fix: `{old}` -> `{new}`")
return
lines[line - 1] = new_line
print(f"{path}:{line}:{col}: fixed `{old}` -> `{new}`")
with open(path, "w", encoding="utf-8") as f:
f.writelines(lines)
def _spec_str_ast(path: str, tree: ast.AST, handler: SpecStrHandler) -> None:
"""Walk the AST of a Python file and apply handler to formatted spec strings."""
for node in ast.walk(tree):
if sys.version_info >= (3, 8):
if isinstance(node, ast.Constant) and isinstance(node.value, str):
current_str = node.value
else:
continue
elif isinstance(node, ast.Str):
current_str = node.s
else:
continue
if not IS_PROBABLY_COMPILER.search(current_str):
continue
new = _spec_str_format(current_str)
if new is not None:
handler(path, node.lineno, node.col_offset, current_str, new)
def _spec_str_json_and_yaml(path: str, data: dict, handler: SpecStrHandler) -> None:
"""Walk a YAML or JSON data structure and apply handler to formatted spec strings."""
queue = [data]
seen = set()
while queue:
current = queue.pop(0)
if id(current) in seen:
continue
seen.add(id(current))
if isinstance(current, dict):
queue.extend(current.values())
queue.extend(current.keys())
elif isinstance(current, list):
queue.extend(current)
elif isinstance(current, str) and IS_PROBABLY_COMPILER.search(current):
new = _spec_str_format(current)
if new is not None:
mark = getattr(current, "_start_mark", None)
if mark:
line, col = mark.line + 1, mark.column + 1
else:
line, col = 0, 0
handler(path, line, col, current, new)
def _check_spec_strings(
paths: List[str], handler: SpecStrHandler = _spec_str_default_handler
) -> None:
"""Open Python, JSON and YAML files, and format their string literals that look like spec
strings. A handler is called for each formatting, which can be used to print or apply fixes."""
for path in paths:
is_json_or_yaml = path.endswith(".json") or path.endswith(".yaml") or path.endswith(".yml")
is_python = path.endswith(".py")
if not is_json_or_yaml and not is_python:
continue
try:
with open(path, "r", encoding="utf-8") as f:
# skip files that are likely too large to be user code or config
if os.fstat(f.fileno()).st_size > 1024 * 1024:
warnings.warn(f"skipping {path}: too large.")
continue
if is_json_or_yaml:
_spec_str_json_and_yaml(path, spack.util.spack_yaml.load_config(f), handler)
elif is_python:
_spec_str_ast(path, ast.parse(f.read()), handler)
except (OSError, spack.util.spack_yaml.SpackYAMLError, SyntaxError, ValueError):
warnings.warn(f"skipping {path}")
continue
def style(parser, args):
if args.spec_strings:
if not args.files:
tty.die("No files provided to check spec strings.")
handler = _spec_str_fix_handler if args.fix else _spec_str_default_handler
return _check_spec_strings(args.files, handler)
# save initial working directory for relativizing paths later
args.initial_working_dir = os.getcwd()
# ensure that the config files we need actually exist in the spack prefix.
# assertions b/c users should not ever see these errors -- they're checked in CI.
assert os.path.isfile(os.path.join(spack.paths.prefix, "pyproject.toml"))
assert os.path.isfile(os.path.join(spack.paths.prefix, ".flake8"))
# validate spack root if the user provided one
args.root = os.path.realpath(args.root) if args.root else spack.paths.prefix
spack_script = os.path.join(args.root, "bin", "spack")
if not os.path.exists(spack_script):
tty.die("This does not look like a valid spack root.", "No such file: '%s'" % spack_script)
file_list = args.files
if file_list:
def prefix_relative(path):
return os.path.relpath(os.path.abspath(os.path.realpath(path)), args.root)
file_list = [prefix_relative(p) for p in file_list]
# process --tool and --skip arguments
selected = set(tool_names)
if args.tool is not None:
selected = validate_toolset(args.tool)
if args.skip is not None:
selected -= validate_toolset(args.skip)
if not selected:
tty.msg("Nothing to run.")
return
tools_to_run = [t for t in tool_names if t in selected]
if missing_tools(tools_to_run):
_bootstrap_dev_dependencies()
return_code = 0
with working_dir(args.root):
if not file_list:
file_list = changed_files(args.base, args.untracked, args.all)
print_style_header(file_list, args, tools_to_run)
for tool_name in tools_to_run:
tool = tools[tool_name]
print_tool_header(tool_name)
return_code |= tool.fun(tool.executable, file_list, args)
if return_code == 0:
tty.msg(color.colorize("@*{spack style checks were clean}"))
else:
tty.error(color.colorize("@*{spack style found errors}"))
return return_code
| _LegacySpecTokens |
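To make the compiler-rotation behaviour of `_spec_str_format` above concrete, here are a few hedged, made-up spec strings with the result one would expect (None meaning the string is returned unchanged, mirroring the function's contract):

# Hypothetical before/after pairs for _spec_str_format (None = no change):
expected = {
    "pkg@3.1 +foo %gcc@3.1 +baz": "pkg@3.1 +foo +baz %gcc@3.1",  # compiler rotated to the end
    "pkg@3.1 +foo %gcc@3.1": None,   # compiler already last, left alone
    "pkg %gcc@10 %clang@15": None,   # two compilers on one node: unsupported, bail out
}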
python | sqlalchemy__sqlalchemy | examples/vertical/dictlike.py | {
"start": 1765,
"end": 5093
} | class ____:
"""Adds obj[key] access to a mapped class.
This class basically proxies dictionary access to an attribute
called ``_proxied``. The class which inherits this class
should have an attribute called ``_proxied`` which points to a dictionary.
"""
def __len__(self):
return len(self._proxied)
def __iter__(self):
return iter(self._proxied)
def __getitem__(self, key):
return self._proxied[key]
def __contains__(self, key):
return key in self._proxied
def __setitem__(self, key, value):
self._proxied[key] = value
def __delitem__(self, key):
del self._proxied[key]
if __name__ == "__main__":
Base = declarative_base()
class AnimalFact(Base):
"""A fact about an animal."""
__tablename__ = "animal_fact"
animal_id = Column(ForeignKey("animal.id"), primary_key=True)
key = Column(Unicode(64), primary_key=True)
value = Column(UnicodeText)
class Animal(ProxiedDictMixin, Base):
"""an Animal"""
__tablename__ = "animal"
id = Column(Integer, primary_key=True)
name = Column(Unicode(100))
facts = relationship(
"AnimalFact", collection_class=attribute_keyed_dict("key")
)
_proxied = association_proxy(
"facts",
"value",
creator=lambda key, value: AnimalFact(key=key, value=value),
)
def __init__(self, name):
self.name = name
def __repr__(self):
return "Animal(%r)" % self.name
@classmethod
def with_characteristic(self, key, value):
return self.facts.any(key=key, value=value)
engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = Session(bind=engine)
stoat = Animal("stoat")
stoat["color"] = "reddish"
stoat["cuteness"] = "somewhat"
# dict-like assignment transparently creates entries in the
# stoat.facts collection:
print(stoat.facts["color"])
session.add(stoat)
session.commit()
critter = session.query(Animal).filter(Animal.name == "stoat").one()
print(critter["color"])
print(critter["cuteness"])
critter["cuteness"] = "very"
print("changing cuteness:")
marten = Animal("marten")
marten["color"] = "brown"
marten["cuteness"] = "somewhat"
session.add(marten)
shrew = Animal("shrew")
shrew["cuteness"] = "somewhat"
shrew["poisonous-part"] = "saliva"
session.add(shrew)
loris = Animal("slow loris")
loris["cuteness"] = "fairly"
loris["poisonous-part"] = "elbows"
session.add(loris)
q = session.query(Animal).filter(
Animal.facts.any(
and_(AnimalFact.key == "color", AnimalFact.value == "reddish")
)
)
print("reddish animals", q.all())
q = session.query(Animal).filter(
Animal.with_characteristic("color", "brown")
)
print("brown animals", q.all())
q = session.query(Animal).filter(
~Animal.with_characteristic("poisonous-part", "elbows")
)
print("animals without poisonous-part == elbows", q.all())
q = session.query(Animal).filter(Animal.facts.any(value="somewhat"))
print('any animal with any .value of "somewhat"', q.all())
| ProxiedDictMixin |
python | weaviate__weaviate-python-client | weaviate/collections/batch/client.py | {
"start": 8019,
"end": 13742
} | class ____(_BatchWrapper):
def __init__(
self,
connection: ConnectionSync,
config: "_Collections",
consistency_level: Optional[ConsistencyLevel],
):
super().__init__(connection, consistency_level)
self.__config = config
self._vectorizer_batching: Optional[bool] = None
self.__executor = ThreadPoolExecutor()
# define one executor per client with it shared between all child batch contexts
def __create_batch_and_reset(
self, batch_client: Union[Type[_BatchClient], Type[_BatchClientNew]]
):
if self._vectorizer_batching is None or not self._vectorizer_batching:
try:
configs = self.__config.list_all(simple=True)
vectorizer_batching = False
for config in configs.values():
if config.vector_config is not None:
vectorizer_batching = False
for vec_config in config.vector_config.values():
if vec_config.vectorizer.vectorizer is not Vectorizers.NONE:
vectorizer_batching = True
break
vectorizer_batching = vectorizer_batching
else:
vectorizer_batching = any(
config.vectorizer_config is not None for config in configs.values()
)
if vectorizer_batching:
break
self._vectorizer_batching = vectorizer_batching
except UnexpectedStatusCodeError as e:
# we might not have the rights to query all collections
if e.status_code != 403:
raise e
self._vectorizer_batching = False
self._batch_data = _BatchDataWrapper() # clear old data
return _ContextManagerWrapper(
batch_client(
connection=self._connection,
consistency_level=self._consistency_level,
results=self._batch_data,
batch_mode=self._batch_mode,
executor=self.__executor,
vectorizer_batching=self._vectorizer_batching,
)
)
def dynamic(
self, consistency_level: Optional[ConsistencyLevel] = None
) -> ClientBatchingContextManager:
"""Configure dynamic batching.
When you exit the context manager, the final batch will be sent automatically.
Args:
consistency_level: The consistency level to be used to send batches. If not provided, the default value is `None`.
"""
self._batch_mode: _BatchMode = _DynamicBatching()
self._consistency_level = consistency_level
return self.__create_batch_and_reset(_BatchClient)
def fixed_size(
self,
batch_size: int = 100,
concurrent_requests: int = 2,
consistency_level: Optional[ConsistencyLevel] = None,
) -> ClientBatchingContextManager:
"""Configure fixed size batches. Note that the default is dynamic batching.
When you exit the context manager, the final batch will be sent automatically.
Args:
batch_size: The number of objects/references to be sent in one batch. If not provided, the default value is 100.
concurrent_requests: The number of concurrent requests when sending batches. This controls the number of concurrent requests
made to Weaviate and not the speed of batch creation within Python.
consistency_level: The consistency level to be used to send batches. If not provided, the default value is `None`.
"""
self._batch_mode = _FixedSizeBatching(batch_size, concurrent_requests)
self._consistency_level = consistency_level
return self.__create_batch_and_reset(_BatchClient)
def rate_limit(
self,
requests_per_minute: int,
consistency_level: Optional[ConsistencyLevel] = None,
) -> ClientBatchingContextManager:
"""Configure batches with a rate limited vectorizer.
When you exit the context manager, the final batch will be sent automatically.
Args:
requests_per_minute: The number of requests that the vectorizer can process per minute.
consistency_level: The consistency level to be used to send batches. If not provided, the default value is `None`.
"""
self._batch_mode = _RateLimitedBatching(requests_per_minute)
self._consistency_level = consistency_level
return self.__create_batch_and_reset(_BatchClient)
def experimental(
self,
*,
concurrency: Optional[int] = None,
consistency_level: Optional[ConsistencyLevel] = None,
) -> ClientBatchingContextManager:
"""Configure the batching context manager using the experimental server-side batching mode.
When you exit the context manager, the final batch will be sent automatically.
"""
if self._connection._weaviate_version.is_lower_than(1, 34, 0):
raise WeaviateUnsupportedFeatureError(
"Server-side batching", str(self._connection._weaviate_version), "1.34.0"
)
self._batch_mode = _ServerSideBatching(
# concurrency=concurrency
# if concurrency is not None
# else len(self._cluster.get_nodes_status())
concurrency=1, # hard-code until client-side multi-threading is fixed
)
self._consistency_level = consistency_level
return self.__create_batch_and_reset(_BatchClientNew)
| _BatchClientWrapper |
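A hedged usage sketch of the fixed-size batching mode documented above. `connect_to_local` and `batch.add_object` follow the public weaviate-client v4 API, but the collection name and property shape here are invented; check the exact signatures against the installed client version.

import weaviate

client = weaviate.connect_to_local()
try:
    # Objects are flushed in batches of 200, with two requests in flight at a time.
    with client.batch.fixed_size(batch_size=200, concurrent_requests=2) as batch:
        for i in range(1000):
            batch.add_object(collection="Article", properties={"title": f"doc {i}"})
finally:
    client.close()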
python | allegroai__clearml | examples/frameworks/pytorch/pytorch_mnist.py | {
"start": 334,
"end": 5610
} | class ____(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
Logger.current_logger().report_scalar(
"train", "loss", iteration=(epoch * len(train_loader) + batch_idx), value=loss.item())
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader, epoch):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
Logger.current_logger().report_scalar(
"test", "loss", iteration=epoch, value=test_loss)
Logger.current_logger().report_scalar(
"test", "accuracy", iteration=epoch, value=(correct / len(test_loader.dataset)))
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='examples', task_name='PyTorch MNIST train')
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join('..', 'data'), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join('..', 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader, epoch)
if args.save_model:
torch.save(model.state_dict(), os.path.join(gettempdir(), "mnist_cnn.pt"))
if __name__ == '__main__':
main()
| Net |
python | justquick__django-activity-stream | actstream/drf/views.py | {
"start": 7118,
"end": 10574
} | class ____(DefaultModelViewSet):
queryset = models.Follow.objects.order_by('-started', '-id').prefetch_related()
serializer_class = serializers.FollowSerializer
permission_classes = [permissions.IsAuthenticated]
@action(detail=False, permission_classes=[permissions.IsAuthenticated], methods=['POST'])
def follow(self, request):
"""
Creates the follow relationship.
The current user is recorded as the follower and the target is identified by the content_type_id/object_id pair
"""
data = request.data.dict()
if 'content_type_id' not in data:
return Response(status=400)
ctype = get_object_or_404(ContentType, id=data.pop('content_type_id'))
obj = ctype.get_object_for_this_type(pk=data.pop('object_id'))
follow_action(request.user, obj, **data)
return Response(status=201)
@action(detail=False, permission_classes=[permissions.IsAuthenticated],
url_path='is_following/(?P<content_type_id>[^/.]+)/(?P<object_id>[^/.]+)', name='True if user is following object')
def is_following(self, request, content_type_id, object_id):
"""
Returns a JSON response indicating whether the current user is following the object identified by the content_type_id/object_id pair
"""
ctype = get_object_or_404(ContentType, id=content_type_id)
instance = ctype.get_object_for_this_type(pk=object_id)
following = models.Follow.objects.is_following(request.user, instance)
data = {'is_following': following}
return Response(json.dumps(data))
@action(detail=False, permission_classes=[permissions.IsAuthenticated],
url_path='following', name='List of instances I follow')
def following(self, request):
"""
Returns a JSON list of the objects the current user is following
"""
qs = models.Follow.objects.following_qs(request.user)
return Response(serializers.FollowingSerializer(qs, many=True).data)
@action(detail=False, permission_classes=[permissions.IsAuthenticated],
url_path='followers', name='List of followers for current user')
def followers(self, request):
"""
Returns a JSON list of the users who follow the current user
"""
user_model = get_user_model()
if user_model not in serializers.registered_serializers:
raise ModelNotRegistered(f'Auth user "{user_model.__name__}" not registered with actstream')
serializer = serializers.registered_serializers[user_model]
followers = models.Follow.objects.followers(request.user)
return Response(serializer(followers, many=True).data)
def viewset_factory(model_class, queryset=None):
"""
Returns a subclass of `ModelViewSet` for each model class in the registry
"""
if queryset is None:
queryset = model_class.objects.prefetch_related()
serializer_class = serializers.registered_serializers[model_class]
model_label = label(model_class)
if model_label in DRF_SETTINGS['VIEWSETS']:
return import_obj(DRF_SETTINGS['VIEWSETS'][model_label])
return type(f'{model_class.__name__}ViewSet', (DefaultModelViewSet,), {
'queryset': queryset,
'serializer_class': serializer_class,
})
registered_viewsets = serializers.registry_factory(viewset_factory)
| FollowViewSet |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_managers.py | {
"start": 8133,
"end": 10761
} | class ____(TestBuildManagerBase):
"""
Queries using External Manager should only include External Version builds.
It will only include pull/merge request Version builds in the queries.
"""
def test_all(self):
query = Build.external.all()
external_builds = {
self.build_private_external,
self.another_build_private_external,
self.shared_build_private_external,
self.build_public_external,
self.another_build_public_external,
self.shared_build_public_external,
}
self.assertEqual(query.count(), len(external_builds))
self.assertEqual(set(query), external_builds)
def test_public(self):
self.shared_project.external_builds_privacy_level = PRIVATE
self.shared_project.save()
query = Build.external.public()
public_external_builds = {
self.build_public_external,
self.build_private_external,
self.another_build_public_external,
self.another_build_private_external,
}
self.assertEqual(query.count(), len(public_external_builds))
self.assertEqual(set(query), public_external_builds)
def test_public_user(self):
self.project.external_builds_privacy_level = PRIVATE
self.project.save()
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
query = Build.external.public(user=self.user)
builds = {
self.build_private_external,
self.shared_build_private_external,
self.build_public_external,
self.shared_build_public_external,
}
self.assertEqual(query.count(), len(builds))
self.assertEqual(set(query), builds)
def test_public_project(self):
query = Build.external.public(user=self.user, project=self.project)
builds = {
self.build_private_external,
self.build_public_external,
}
self.assertEqual(query.count(), len(builds))
self.assertEqual(set(query), builds)
def test_api(self):
self.another_project.external_builds_privacy_level = PRIVATE
self.another_project.save()
query = Build.external.api(user=self.user)
builds = {
self.build_private_external,
self.shared_build_private_external,
self.build_public_external,
self.shared_build_public_external,
}
self.assertEqual(query.count(), len(builds))
self.assertEqual(set(query), builds)
| TestExternalBuildManager |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper_fxir.py | {
"start": 2800,
"end": 3910
} | class ____:
"""
Stores metadata about Triton kernels for use in FX.
"""
tuner: CachingAutotuner
wrapped: TraceableTritonKernelWrapper
def replace_floor_div(expr: sympy.Expr) -> sympy.Expr:
"""
Replace sympy.floor with FloorDiv.
"""
def replace(expr: sympy.Expr) -> sympy.Expr:
expr = sympy.together(expr)
# Division is represented as a Mul with a Rational factor or a Pow with negative
# exponent. We convert floor(Mul(...)) to FloorDiv(numerator, denominator) by
# partitioning factors into the numerator and denominator.
(numerator, denominator) = (sympy.S.One,) * 2
for arg in sympy.Mul.make_args(expr):
if isinstance(arg, sympy.Rational):
numerator *= arg.numerator
denominator *= arg.denominator
elif isinstance(arg, sympy.Pow) and arg.exp.is_negative:
denominator *= arg.base**-arg.exp
else:
numerator *= arg
return FloorDiv(numerator, denominator)
return expr.replace(sympy.floor, replace)
| TritonKernel |
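To make the numerator/denominator partitioning inside `replace_floor_div` concrete, a small standalone sympy sketch (it only mirrors the Rational-splitting step; `FloorDiv` itself is an inductor construct and is not used here):

import sympy

x = sympy.Symbol("x")
expr = sympy.together(x / 4)            # Mul(Rational(1, 4), x)
numerator, denominator = sympy.S.One, sympy.S.One
for arg in sympy.Mul.make_args(expr):
    if isinstance(arg, sympy.Rational):
        numerator *= arg.p              # rational numerator
        denominator *= arg.q            # rational denominator
    else:
        numerator *= arg
print(numerator, denominator)           # x 4  -> floor(x/4) becomes FloorDiv(x, 4)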
python | wandb__wandb | wandb/errors/errors.py | {
"start": 782,
"end": 889
} | class ____(UsageError):
"""Raised when trying to use a feature that is not supported."""
| UnsupportedError |
python | tensorflow__tensorflow | tensorflow/core/function/transform/transform_test.py | {
"start": 1904,
"end": 2398
} | class ____(module_lib.Module):
@def_function.function
def f(self, x, y, add_2):
r = math_ops.add(x, y, name="x_plus_y")
if add_2:
return r + 2
else:
return r
def apply_transform(f, transform_fn):
"""Wrapper to apply a transformation on every traced tf.function."""
@def_function.function
def wrapped(*args):
updated_cf = transform.transform_function(
f, inputs=args, transform_fn=transform_fn)
return updated_cf(*args)
return wrapped
| Model |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/trivial_install_test_package/package.py | {
"start": 216,
"end": 686
} | class ____(Package):
"""This package is a stub with a trivial install method. It allows us
to test the install and uninstall logic of spack."""
homepage = "http://www.example.com/trivial_install"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
touch(join_path(prefix, "an_installation_file"))
| TrivialInstallTestPackage |
python | huggingface__transformers | src/transformers/models/convnextv2/modeling_convnextv2.py | {
"start": 7743,
"end": 9327
} | class ____(nn.Module):
"""ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
Args:
config ([`ConvNextV2Config`]): Model configuration class.
in_channels (`int`): Number of input channels.
out_channels (`int`): Number of output channels.
depth (`int`): Number of residual blocks.
drop_path_rates(`list[float]`): Stochastic depth rates for each layer.
"""
def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
super().__init__()
if in_channels != out_channels or stride > 1:
self.downsampling_layer = nn.ModuleList(
[
ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
]
)
else:
self.downsampling_layer = nn.ModuleList()
drop_path_rates = drop_path_rates or [0.0] * depth
self.layers = nn.ModuleList(
[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer in self.downsampling_layer:
features = layer(features)
for layer in self.layers:
features = layer(features)
return features
# Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
| ConvNextV2Stage |
python | vyperlang__vyper | vyper/semantics/types/function.py | {
"start": 1363,
"end": 1434
} | class ____(_FunctionArg):
pass
@dataclass(kw_only=True)
| PositionalArg |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/lambda15.py | {
"start": 279,
"end": 519
} | class ____:
def __new__(cls, *args: Any, **kwargs: Any) -> "ClassA":
return super().__new__(*args, **kwargs)
def __init__(self, x: Callable[[float], float]) -> None:
self.x = x
ClassA(lambda r: identity(r) + 1)
| ClassA |
python | modin-project__modin | modin/core/dataframe/pandas/metadata/dtypes.py | {
"start": 38609,
"end": 44586
} | class ____(pandas.CategoricalDtype):
"""
A lazy proxy representing ``pandas.CategoricalDtype``.
Parameters
----------
categories : list-like, optional
ordered : bool, default: False
Notes
-----
Important note! One shouldn't use the class' constructor to instantiate a proxy instance,
it's intended only for compatibility purposes! In order to create a new proxy instance
use the appropriate class method `._build_proxy(...)`.
"""
def __init__(self, categories=None, ordered=False):
# These will be initialized later inside of the `._build_proxy()` method
self._parent, self._column_name, self._categories_val, self._materializer = (
None,
None,
None,
None,
)
super().__init__(categories, ordered)
@staticmethod
def update_dtypes(dtypes, new_parent):
"""
Update a parent for categorical proxies in a dtype object.
Parameters
----------
dtypes : dict-like
A dict-like object describing dtypes. The method will walk through every dtype
an update parents for categorical proxies inplace.
new_parent : object
"""
for key, value in dtypes.items():
if isinstance(value, LazyProxyCategoricalDtype):
dtypes[key] = value._update_proxy(new_parent, column_name=key)
def _update_proxy(self, parent, column_name):
"""
Create a new proxy, if either parent or column name are different.
Parameters
----------
parent : object
Source object to extract categories on demand.
column_name : str
Column name of the categorical column in the source object.
Returns
-------
pandas.CategoricalDtype or LazyProxyCategoricalDtype
"""
if self._is_materialized:
# The parent has been materialized, we don't need a proxy anymore.
return pandas.CategoricalDtype(self.categories, ordered=self._ordered)
elif parent is self._parent and column_name == self._column_name:
return self
else:
return self._build_proxy(parent, column_name, self._materializer)
@classmethod
def _build_proxy(cls, parent, column_name, materializer, dtype=None):
"""
Construct a lazy proxy.
Parameters
----------
parent : object
Source object to extract categories on demand.
column_name : str
Column name of the categorical column in the source object.
materializer : callable(parent, column_name) -> pandas.CategoricalDtype
A function to call in order to extract categorical values.
dtype : dtype, optional
The categories dtype.
Returns
-------
LazyProxyCategoricalDtype
"""
result = cls()
result._parent = parent
result._column_name = column_name
result._materializer = materializer
result._dtype = dtype
return result
def _get_dtype(self):
"""
Get the categories dtype.
Returns
-------
dtype
"""
if self._dtype is None:
self._dtype = self.categories.dtype
return self._dtype
def __reduce__(self):
"""
Serialize an object of this class.
Returns
-------
tuple
Notes
-----
This object is serialized into a ``pandas.CategoricalDtype`` as an actual proxy can't be
properly serialized because of the references it stores for its potentially distributed parent.
"""
return (pandas.CategoricalDtype, (self.categories, self.ordered))
@property
def _categories(self):
"""
Get materialized categorical values.
Returns
-------
pandas.Index
"""
if not self._is_materialized:
self._materialize_categories()
return self._categories_val
@_categories.setter
def _categories(self, categories):
"""
Set new categorical values.
Parameters
----------
categories : list-like
"""
self._categories_val = categories
self._parent = None # The parent is not required any more
self._materializer = None
self._dtype = None
@property
def _is_materialized(self) -> bool:
"""
Check whether categorical values were already materialized.
Returns
-------
bool
"""
return self._categories_val is not None
def _materialize_categories(self):
"""Materialize actual categorical values."""
ErrorMessage.catch_bugs_and_request_email(
failure_condition=self._parent is None,
extra_log="attempted to materialize categories with parent being 'None'",
)
categoricals = self._materializer(self._parent, self._column_name)
self._categories = categoricals.categories
self._ordered = categoricals.ordered
def get_categories_dtype(
cdt: Union[LazyProxyCategoricalDtype, pandas.CategoricalDtype],
) -> DtypeObj:
"""
Get the categories dtype.
Parameters
----------
cdt : LazyProxyCategoricalDtype or pandas.CategoricalDtype
Returns
-------
dtype
"""
return (
cdt._get_dtype()
if isinstance(cdt, LazyProxyCategoricalDtype)
else cdt.categories.dtype
)
def extract_dtype(value) -> DtypeObj | pandas.Series:
"""
Extract dtype(s) from the passed `value`.
Parameters
----------
value : object
Returns
-------
DtypeObj or pandas.Series of DtypeObj
"""
try:
dtype = pandas.api.types.pandas_dtype(value)
except (TypeError, ValueError):
dtype = pandas.Series(value).dtype
return dtype
| LazyProxyCategoricalDtype |
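A small sketch of how the lazy proxy above defers work until the categories are first accessed. The plain-pandas materializer is a stand-in for Modin's real one, and the class from the snippet is assumed to be in scope.

import pandas

def materializer(parent, column_name):
    # Stand-in: in Modin the parent is a distributed frame, here plain pandas suffices.
    return parent[column_name].astype("category").dtype

df = pandas.DataFrame({"col": ["a", "b", "a"]})
proxy = LazyProxyCategoricalDtype._build_proxy(df, "col", materializer)
print(proxy._is_materialized)    # False - nothing computed yet
print(list(proxy.categories))    # ['a', 'b'] - first access runs the materializer
print(proxy._is_materialized)    # True - the parent reference has been dropped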
python | pypa__warehouse | tests/unit/manage/test_forms.py | {
"start": 25107,
"end": 27719
} | class ____:
def test_validate(self):
macaroon_service = pretend.stub(
find_macaroon=pretend.call_recorder(lambda id: pretend.stub())
)
request = pretend.stub()
user_service = pretend.stub(
find_userid=lambda *a, **kw: 1, check_password=lambda *a, **kw: True
)
form = forms.DeleteMacaroonForm(
formdata=MultiDict(
{
"password": "password",
"username": "username",
"macaroon_id": pretend.stub(),
}
),
request=request,
macaroon_service=macaroon_service,
user_service=user_service,
)
assert form.request is request
assert form.macaroon_service is macaroon_service
assert form.user_service is user_service
assert form.validate(), str(form.errors)
def test_validate_macaroon_id_invalid(self):
macaroon_service = pretend.stub(
find_macaroon=pretend.call_recorder(lambda id: None)
)
user_service = pretend.stub(
find_userid=lambda *a, **kw: 1, check_password=lambda *a, **kw: True
)
request = pretend.stub(
remote_addr=REMOTE_ADDR, banned=pretend.stub(by_ip=lambda ip_address: False)
)
form = forms.DeleteMacaroonForm(
formdata=MultiDict({"macaroon_id": pretend.stub(), "password": "password"}),
request=request,
macaroon_service=macaroon_service,
user_service=user_service,
username="username",
)
assert not form.validate()
assert form.macaroon_id.errors.pop() == "No such macaroon"
def test_validate_macaroon_id(self):
macaroon_service = pretend.stub(
find_macaroon=pretend.call_recorder(lambda id: pretend.stub())
)
user_service = pretend.stub(
find_userid=lambda *a, **kw: 1, check_password=lambda *a, **kw: True
)
request = pretend.stub(
remote_addr=REMOTE_ADDR, banned=pretend.stub(by_ip=lambda ip_address: False)
)
form = forms.DeleteMacaroonForm(
formdata=MultiDict(
{
"macaroon_id": pretend.stub(),
"username": "username",
"password": "password",
}
),
request=request,
macaroon_service=macaroon_service,
user_service=user_service,
)
assert form.validate(), str(form.errors)
| TestDeleteMacaroonForm |
python | huggingface__transformers | tests/utils/test_import_structure.py | {
"start": 1213,
"end": 9716
} | class ____(unittest.TestCase):
base_transformers_path = Path(__file__).parent.parent.parent
models_path = base_transformers_path / "src" / "transformers" / "models"
models_import_structure = spread_import_structure(define_import_structure(models_path))
def test_definition(self):
import_structure = define_import_structure(import_structures)
valid_frozensets: dict[frozenset | frozenset[str], dict[str, set[str]]] = {
frozenset(): {
"import_structure_raw_register": {"A0", "A4", "a0"},
"import_structure_register_with_comments": {"B0", "b0"},
},
frozenset({"random_item_that_should_not_exist"}): {"failing_export": {"A0"}},
frozenset({"torch"}): {
"import_structure_raw_register": {"A1", "A2", "A3", "a1", "a2", "a3"},
"import_structure_register_with_duplicates": {"C0", "C1", "C2", "C3", "c0", "c1", "c2", "c3"},
"import_structure_register_with_comments": {"B1", "B2", "B3", "b1", "b2", "b3"},
},
frozenset({"torch>=2.5"}): {"import_structure_raw_register_with_versions": {"D0", "d0"}},
frozenset({"torch>2.5"}): {"import_structure_raw_register_with_versions": {"D1", "d1"}},
frozenset({"torch<=2.5"}): {"import_structure_raw_register_with_versions": {"D2", "d2"}},
frozenset({"torch<2.5"}): {"import_structure_raw_register_with_versions": {"D3", "d3"}},
frozenset({"torch==2.5"}): {"import_structure_raw_register_with_versions": {"D4", "d4"}},
frozenset({"torch!=2.5"}): {"import_structure_raw_register_with_versions": {"D5", "d5"}},
frozenset({"torch>=2.5", "accelerate<0.20"}): {
"import_structure_raw_register_with_versions": {"D6", "d6"}
},
}
self.assertEqual(len(import_structure.keys()), len(valid_frozensets.keys()))
for _frozenset in valid_frozensets:
self.assertTrue(_frozenset in import_structure)
self.assertListEqual(
sorted(import_structure[_frozenset].keys()), sorted(valid_frozensets[_frozenset].keys())
)
for module, objects in valid_frozensets[_frozenset].items():
self.assertTrue(module in import_structure[_frozenset])
self.assertSetEqual(objects, import_structure[_frozenset][module])
def test_transformers_specific_model_import(self):
"""
This test ensures that there is equivalence between what is written down in __all__ and what is
written down with register().
It doesn't test the backends attributed to register().
"""
for architecture in os.listdir(self.models_path):
if (
os.path.isfile(self.models_path / architecture)
or architecture.startswith("_")
or architecture == "deprecated"
):
continue
with self.subTest(f"Testing arch {architecture}"):
import_structure = define_import_structure(self.models_path / architecture)
backend_agnostic_import_structure = {}
for module_object_mapping in import_structure.values():
for module, objects in module_object_mapping.items():
if module not in backend_agnostic_import_structure:
backend_agnostic_import_structure[module] = []
backend_agnostic_import_structure[module].extend(objects)
for module, objects in backend_agnostic_import_structure.items():
with open(self.models_path / architecture / f"{module}.py") as f:
content = f.read()
_all = fetch__all__(content)
if _all is None:
raise ValueError(f"{module} doesn't have __all__ defined.")
error_message = (
f"self.models_path / architecture / f'{module}.py doesn't seem to be defined correctly:\n"
f"Defined in __all__: {sorted(_all)}\nDefined with register: {sorted(objects)}"
)
self.assertListEqual(sorted(objects), sorted(_all), msg=error_message)
def test_import_spread(self):
"""
This test is specifically designed to test that varying levels of depth across import structures are
respected.
In this instance, frozensets are at respective depths of 1, 2 and 3, for example:
- models.{frozensets}
- models.albert.{frozensets}
- models.deprecated.transfo_xl.{frozensets}
"""
initial_import_structure = {
frozenset(): {"dummy_non_model": {"DummyObject"}},
"models": {
frozenset(): {"dummy_config": {"DummyConfig"}},
"albert": {
frozenset(): {"configuration_albert": {"AlbertConfig"}},
frozenset({"torch"}): {
"modeling_albert": {
"AlbertForMaskedLM",
}
},
},
"llama": {
frozenset(): {"configuration_llama": {"LlamaConfig"}},
frozenset({"torch"}): {
"modeling_llama": {
"LlamaForCausalLM",
}
},
},
"deprecated": {
"transfo_xl": {
frozenset({"torch"}): {
"modeling_transfo_xl": {
"TransfoXLModel",
}
},
frozenset(): {
"configuration_transfo_xl": {"TransfoXLConfig"},
"tokenization_transfo_xl": {"TransfoXLCorpus", "TransfoXLTokenizer"},
},
},
"deta": {
frozenset({"torch"}): {
"modeling_deta": {"DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel"}
},
frozenset(): {"configuration_deta": {"DetaConfig"}},
frozenset({"vision"}): {"image_processing_deta": {"DetaImageProcessor"}},
},
},
},
}
ground_truth_spread_import_structure = {
frozenset(): {
"dummy_non_model": {"DummyObject"},
"models.dummy_config": {"DummyConfig"},
"models.albert.configuration_albert": {"AlbertConfig"},
"models.llama.configuration_llama": {"LlamaConfig"},
"models.deprecated.transfo_xl.configuration_transfo_xl": {"TransfoXLConfig"},
"models.deprecated.transfo_xl.tokenization_transfo_xl": {"TransfoXLCorpus", "TransfoXLTokenizer"},
"models.deprecated.deta.configuration_deta": {"DetaConfig"},
},
frozenset({"torch"}): {
"models.albert.modeling_albert": {"AlbertForMaskedLM"},
"models.llama.modeling_llama": {"LlamaForCausalLM"},
"models.deprecated.transfo_xl.modeling_transfo_xl": {"TransfoXLModel"},
"models.deprecated.deta.modeling_deta": {"DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel"},
},
frozenset({"vision"}): {"models.deprecated.deta.image_processing_deta": {"DetaImageProcessor"}},
}
newly_spread_import_structure = spread_import_structure(initial_import_structure)
self.assertEqual(ground_truth_spread_import_structure, newly_spread_import_structure)
@pytest.mark.parametrize(
"backend,package_name,version_comparison,version",
[
pytest.param(Backend("torch>=2.5 "), "torch", VersionComparison.GREATER_THAN_OR_EQUAL.value, "2.5"),
pytest.param(Backend("torchvision==0.19.1"), "torchvision", VersionComparison.EQUAL.value, "0.19.1"),
],
)
def test_backend_specification(backend: Backend, package_name: str, version_comparison: Callable, version: str):
assert backend.package_name == package_name
assert VersionComparison.from_string(backend.version_comparison) == version_comparison
assert backend.version == version
| TestImportStructures |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 8475,
"end": 8542
} | class ____(HTTPClientError):
status_code = 411
| HTTPLengthRequired |
python | redis__redis-py | tests/test_asyncio/test_connection_pool.py | {
"start": 23410,
"end": 28215
} | class ____:
async def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
await bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._reader
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
async def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
await r.execute_command("DEBUG", "ERROR", "LOADING fake message")
if r.connection:
assert not r.connection._reader
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
async def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
await pipe.immediate_execute_command(
"DEBUG", "ERROR", "LOADING fake message"
)
pool = r.connection_pool
assert pipe.connection
assert pipe.connection in pool._in_use_connections
assert not pipe.connection._reader
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
async def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command("DEBUG", "ERROR", "LOADING fake message")
with pytest.raises(redis.BusyLoadingError):
await pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._reader
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
async def test_read_only_error(self, r):
"""READONLY errors get turned into ReadOnlyError exceptions"""
with pytest.raises(redis.ReadOnlyError):
await r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
@skip_if_redis_enterprise()
async def test_oom_error(self, r):
"""OOM errors get turned into OutOfMemoryError exceptions"""
with pytest.raises(redis.OutOfMemoryError):
# note: don't use the DEBUG OOM command since it's not the same
# as the db being full
await r.execute_command("DEBUG", "ERROR", "OOM blah blah")
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url("redis://localhost:6379?db=0")
pool = connection.connection_pool
assert re.match(
r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
).groups() == (
"ConnectionPool",
"Connection",
"db=0,host=localhost,port=6379",
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
assert re.match(
r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
).groups() == (
"ConnectionPool",
"UnixDomainSocketConnection",
"path=/path/to/socket",
)
@skip_if_redis_enterprise()
async def test_connect_no_auth_supplied_when_required(self, r):
"""
AuthenticationError should be raised when the server requires a
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
await r.execute_command(
"DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
)
@skip_if_redis_enterprise()
async def test_connect_invalid_password_supplied(self, r):
"""AuthenticationError should be raised when sending the wrong password"""
with pytest.raises(redis.AuthenticationError):
await r.execute_command("DEBUG", "ERROR", "ERR invalid password")
@pytest.mark.onlynoncluster
| TestConnection |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 2243,
"end": 2759
} | class ____(BaseEvent):
"""
LLMCompletionStartEvent.
Args:
prompt (str): The prompt to be completed.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
prompt: str
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionStartEvent"
| LLMCompletionStartEvent |
python | getsentry__sentry | tests/sentry/test_no_create_or_update_usage.py | {
"start": 2013,
"end": 5954
} | class ____(ast.NodeVisitor):
def __init__(self, module_qualname: str) -> None:
self.module_qualname = module_qualname
self.context_stack: list[str] = []
self.usages: list[Usage] = []
def visit_ClassDef(self, node: ast.ClassDef) -> Any:
self.context_stack.append(node.name)
self.generic_visit(node)
self.context_stack.pop()
def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
self.context_stack.append(node.name)
self.generic_visit(node)
self.context_stack.pop()
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any:
self.context_stack.append(node.name)
self.generic_visit(node)
self.context_stack.pop()
def visit_Call(self, node: ast.Call) -> Any:
func = node.func
is_create_or_update = False
if isinstance(func, ast.Name):
is_create_or_update = func.id == "create_or_update"
elif isinstance(func, ast.Attribute):
is_create_or_update = func.attr == "create_or_update"
if is_create_or_update:
context = ".".join(self.context_stack) if self.context_stack else "<module>"
qualified = (
f"{self.module_qualname}.{context}"
if context != "<module>"
else self.module_qualname
)
# file_path is filled in by the scanner
self.usages.append(
Usage(
file_path="",
line=node.lineno,
col=getattr(node, "col_offset", 0),
qualified_context=qualified,
)
)
self.generic_visit(node)
def _iter_python_files(root: Path) -> Iterable[Path]:
yield from root.rglob("*.py")
def _module_qualname_from_path(repo_root: Path, file_path: Path) -> str:
rel = file_path.relative_to(repo_root)
# strip .py and convert / to .
parts = list(rel.parts)
# remove trailing .py
parts[-1] = parts[-1][:-3]
return ".".join(parts)
def _scan_create_or_update(repo_root: Path, src_root: Path) -> list[Usage]:
results: list[Usage] = []
for file_path in _iter_python_files(src_root):
text = file_path.read_text(encoding="utf-8")
try:
tree = ast.parse(text)
except SyntaxError:
# Ignore unparsable files (should not happen under src/)
continue
module_qualname = _module_qualname_from_path(repo_root, file_path)
visitor = CreateOrUpdateVisitor(module_qualname)
visitor.visit(tree)
rel_path = str(file_path.relative_to(repo_root))
for u in visitor.usages:
# fill file path for each usage
u.file_path = rel_path
results.extend(visitor.usages)
return results
def test_no_new_create_or_update_usage() -> None:
repo_root = Path(__file__).resolve().parents[2]
src_root = repo_root / "src"
usages = _scan_create_or_update(repo_root=repo_root, src_root=src_root)
violations: list[str] = []
for u in usages:
file_allowed = u.file_path in ALLOWLIST_FILES
if not file_allowed:
violations.append(
f"{u.file_path}:{u.line}:{u.col}: create_or_update used in {u.qualified_context}. "
f"Use Django's update_or_create instead."
)
if violations:
header = (
"Found disallowed uses of create_or_update. New code must use Django's update_or_create.\n"
"See Django docs: https://docs.djangoproject.com/en/5.2/ref/models/querysets/#update-or-create\n"
"If this is legacy code, add the specific function or file to the allowlist in "
"tests/sentry/test_no_create_or_update_usage.py and plan its refactor.\n\n"
)
detail = "\n".join(sorted(violations))
raise AssertionError(header + detail)
| CreateOrUpdateVisitor |
python | ansible__ansible | test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/deprecation.py | {
"start": 156,
"end": 331
} | class ____(object):
DOCUMENTATION = r"""
options: {}
deprecated:
alternative: Use some other module
why: Test deprecation
removed_in: '3.0.0'
"""
| ModuleDocFragment |
python | mkdocs__mkdocs | mkdocs/tests/livereload_tests.py | {
"start": 271,
"end": 1841
} | class ____:
def __init__(self, content):
self.in_file = io.BytesIO(content.encode())
self.out_file = io.BytesIO()
self.out_file.close = lambda: None
def makefile(self, *args, **kwargs):
return self.in_file
def sendall(self, data):
self.out_file.write(data)
@contextlib.contextmanager
def testing_server(root, builder=lambda: None, mount_path="/"):
"""Create the server and start most of its parts, but don't listen on a socket."""
with mock.patch("socket.socket"):
server = LiveReloadServer(
builder,
host="localhost",
port=0,
root=root,
mount_path=mount_path,
polling_interval=0.2,
)
server.server_name = "localhost"
server.server_port = 0
server.setup_environ()
server.observer.start()
thread = threading.Thread(target=server._build_loop, daemon=True)
thread.start()
yield server
server.shutdown()
thread.join()
def do_request(server, content):
request = FakeRequest(content + " HTTP/1.1")
server.RequestHandlerClass(request, ("127.0.0.1", 0), server)
response = request.out_file.getvalue()
headers, _, content = response.partition(b"\r\n\r\n")
status, _, headers = headers.partition(b"\r\n")
status = status.split(None, 1)[1].decode()
headers = email.message_from_bytes(headers)
headers["_status"] = status
return headers, content.decode()
SCRIPT_REGEX = r'<script>[\S\s]+?livereload\([0-9]+, [0-9]+\);\s*</script>'
| FakeRequest |
python | huggingface__transformers | tests/models/phimoe/test_modeling_phimoe.py | {
"start": 2856,
"end": 2987
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = PhimoeModel
@require_torch
| PhimoeModelTester |
python | PyCQA__pycodestyle | tests/test_E901.py | {
"start": 137,
"end": 913
} | class ____(unittest.TestCase):
def test_closing_brace(self):
errors = errors_from_src('}\n')
if sys.version_info < (3, 12): # pragma: <3.12 cover
self.assertEqual(errors, ['E901:2:1'])
else: # pragma: >=3.12 cover
self.assertEqual(errors, [])
def test_unclosed_brace(self):
src = '''\
if msg:
errmsg = msg % progress.get(cr_dbname))
def lasting(self, duration=300):
progress = self._progress.setdefault('foo', {}
'''
errors = errors_from_src(src)
if sys.version_info < (3, 12): # pragma: <3.12 cover
expected = ['E122:4:1']
else: # pragma: >=3.12 cover
expected = ['E122:4:1', 'E901:5:1'] # noqa: E501
self.assertEqual(errors, expected)
| E901Test |
python | pytorch__pytorch | torch/_inductor/runtime/triton_heuristics.py | {
"start": 1790,
"end": 2062
} | class ____(Config):
"""Inductor-specific Triton config with additional control flags"""
def __init__(self, *args, dynamic_scale_rblock=True, **kwargs):
super().__init__(*args, **kwargs)
self.dynamic_scale_rblock = dynamic_scale_rblock
| InductorConfig |
python | kubernetes-client__python | kubernetes/client/models/v1_certificate_signing_request.py | {
"start": 383,
"end": 7800
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1CertificateSigningRequestSpec',
'status': 'V1CertificateSigningRequestStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1CertificateSigningRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1CertificateSigningRequest. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1CertificateSigningRequest. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1CertificateSigningRequest.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1CertificateSigningRequest. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1CertificateSigningRequest. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1CertificateSigningRequest. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CertificateSigningRequest.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1CertificateSigningRequest. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1CertificateSigningRequest. # noqa: E501
:return: The metadata of this V1CertificateSigningRequest. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1CertificateSigningRequest.
:param metadata: The metadata of this V1CertificateSigningRequest. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1CertificateSigningRequest. # noqa: E501
:return: The spec of this V1CertificateSigningRequest. # noqa: E501
:rtype: V1CertificateSigningRequestSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1CertificateSigningRequest.
:param spec: The spec of this V1CertificateSigningRequest. # noqa: E501
:type: V1CertificateSigningRequestSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1CertificateSigningRequest. # noqa: E501
:return: The status of this V1CertificateSigningRequest. # noqa: E501
:rtype: V1CertificateSigningRequestStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CertificateSigningRequest.
:param status: The status of this V1CertificateSigningRequest. # noqa: E501
:type: V1CertificateSigningRequestStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CertificateSigningRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CertificateSigningRequest):
return True
return self.to_dict() != other.to_dict()
| V1CertificateSigningRequest |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_graphs_test.py | {
"start": 1360,
"end": 1880
} | class ____(test_util.TensorFlowTestCase):
def testParseTensorNameInputWorks(self):
self.assertEqual("a", debug_graphs.get_node_name("a:0"))
self.assertEqual(0, debug_graphs.get_output_slot("a:0"))
self.assertEqual("_b", debug_graphs.get_node_name("_b:1"))
self.assertEqual(1, debug_graphs.get_output_slot("_b:1"))
def testParseNodeNameInputWorks(self):
self.assertEqual("a", debug_graphs.get_node_name("a"))
self.assertEqual(0, debug_graphs.get_output_slot("a"))
| GetNodeNameAndOutputSlotTest |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 84605,
"end": 84866
} | class ____(BaseTest):
def test_inlineasm(self):
llvm.initialize_native_asmparser()
m = self.module(asm=asm_inlineasm)
tm = self.target_machine(jit=False)
asm = tm.emit_assembly(m)
self.assertIn('nop', asm)
| TestInlineAsm |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 3311,
"end": 3805
} | class ____(Protocol):
"""Interchange buffer object."""
@property
def bufsize(self) -> int:
"""Buffer size in bytes."""
@property
def ptr(self) -> int:
"""Pointer to start of the buffer as an integer."""
def __dlpack__(self) -> Any:
"""Represent this structure as DLPack interface."""
def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""Device type and device ID for where the data in the buffer resides."""
| Buffer |
python | pytorch__pytorch | torch/ao/quantization/fx/custom_config.py | {
"start": 1454,
"end": 15949
} | class ____:
"""
Custom configuration for :func:`~torch.ao.quantization.quantize_fx.prepare_fx` and
:func:`~torch.ao.quantization.quantize_fx.prepare_qat_fx`.
Example usage::
prepare_custom_config = PrepareCustomConfig() \
.set_standalone_module_name("module1", qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_standalone_module_class(MyStandaloneModule, qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_float_to_observed_mapping(FloatCustomModule, ObservedCustomModule) \
.set_non_traceable_module_names(["module2", "module3"]) \
.set_non_traceable_module_classes([NonTraceableModule1, NonTraceableModule2]) \
.set_input_quantized_indexes([0]) \
.set_output_quantized_indexes([0]) \
.set_preserved_attributes(["attr1", "attr2"])
"""
def __init__(self) -> None:
self.standalone_module_names: dict[str, StandaloneModuleConfigEntry] = {}
self.standalone_module_classes: dict[type, StandaloneModuleConfigEntry] = {}
self.float_to_observed_mapping: dict[QuantType, dict[type, type]] = {}
self.non_traceable_module_names: list[str] = []
self.non_traceable_module_classes: list[type] = []
self.input_quantized_indexes: list[int] = []
self.output_quantized_indexes: list[int] = []
self.preserved_attributes: list[str] = []
def __repr__(self):
dict_nonempty = {k: v for k, v in self.__dict__.items() if len(v) > 0}
return f"PrepareCustomConfig({dict_nonempty})"
def set_standalone_module_name(
self,
module_name: str,
qconfig_mapping: QConfigMapping | None,
example_inputs: tuple[Any, ...],
prepare_custom_config: PrepareCustomConfig | None,
backend_config: BackendConfig | None,
) -> PrepareCustomConfig:
"""
Set the configuration for running a standalone module identified by ``module_name``.
If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead.
If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used.
If ``backend_config`` is None, the parent ``backend_config`` will be used instead.
"""
self.standalone_module_names[module_name] = StandaloneModuleConfigEntry(
qconfig_mapping, example_inputs, prepare_custom_config, backend_config
)
return self
def set_standalone_module_class(
self,
module_class: type,
qconfig_mapping: QConfigMapping | None,
example_inputs: tuple[Any, ...],
prepare_custom_config: PrepareCustomConfig | None,
backend_config: BackendConfig | None,
) -> PrepareCustomConfig:
"""
Set the configuration for running a standalone module identified by ``module_class``.
If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead.
If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used.
If ``backend_config`` is None, the parent ``backend_config`` will be used instead.
"""
self.standalone_module_classes[module_class] = StandaloneModuleConfigEntry(
qconfig_mapping, example_inputs, prepare_custom_config, backend_config
)
return self
def set_float_to_observed_mapping(
self,
float_class: type,
observed_class: type,
quant_type: QuantType = QuantType.STATIC,
) -> PrepareCustomConfig:
"""
Set the mapping from a custom float module class to a custom observed module class.
The observed module class must have a ``from_float`` class method that converts the float module class
to the observed module class. This is currently only supported for static quantization.
"""
if quant_type != QuantType.STATIC:
raise ValueError(
"set_float_to_observed_mapping is currently only supported for static quantization"
)
if quant_type not in self.float_to_observed_mapping:
self.float_to_observed_mapping[quant_type] = {}
self.float_to_observed_mapping[quant_type][float_class] = observed_class
return self
def set_non_traceable_module_names(
self, module_names: list[str]
) -> PrepareCustomConfig:
"""
Set the modules that are not symbolically traceable, identified by name.
"""
self.non_traceable_module_names = module_names
return self
def set_non_traceable_module_classes(
self, module_classes: list[type]
) -> PrepareCustomConfig:
"""
Set the modules that are not symbolically traceable, identified by class.
"""
self.non_traceable_module_classes = module_classes
return self
def set_input_quantized_indexes(self, indexes: list[int]) -> PrepareCustomConfig:
"""
Set the indexes of the inputs of the graph that should be quantized.
Inputs are otherwise assumed to be in fp32 by default instead.
"""
self.input_quantized_indexes = indexes
return self
def set_output_quantized_indexes(self, indexes: list[int]) -> PrepareCustomConfig:
"""
Set the indexes of the outputs of the graph that should be quantized.
Outputs are otherwise assumed to be in fp32 by default instead.
"""
self.output_quantized_indexes = indexes
return self
def set_preserved_attributes(self, attributes: list[str]) -> PrepareCustomConfig:
"""
Set the names of the attributes that will persist in the graph module even if they are not used in
the model's ``forward`` method.
"""
self.preserved_attributes = attributes
return self
# TODO: remove this
@classmethod
def from_dict(
cls, prepare_custom_config_dict: dict[str, Any]
) -> PrepareCustomConfig:
"""
Create a ``PrepareCustomConfig`` from a dictionary with the following items:
"standalone_module_name": a list of (module_name, qconfig_mapping, example_inputs,
child_prepare_custom_config, backend_config) tuples
"standalone_module_class" a list of (module_class, qconfig_mapping, example_inputs,
child_prepare_custom_config, backend_config) tuples
"float_to_observed_custom_module_class": a nested dictionary mapping from quantization
mode to an inner mapping from float module classes to observed module classes, e.g.
{"static": {FloatCustomModule: ObservedCustomModule}}
"non_traceable_module_name": a list of modules names that are not symbolically traceable
"non_traceable_module_class": a list of module classes that are not symbolically traceable
"input_quantized_idxs": a list of indexes of graph inputs that should be quantized
"output_quantized_idxs": a list of indexes of graph outputs that should be quantized
"preserved_attributes": a list of attributes that persist even if they are not used in ``forward``
This function is primarily for backward compatibility and may be removed in the future.
"""
def _get_qconfig_mapping(obj: Any, dict_key: str) -> QConfigMapping | None:
"""
Convert the given object into a QConfigMapping if possible, else throw an exception.
"""
if isinstance(obj, QConfigMapping) or obj is None:
return obj
if isinstance(obj, dict):
return QConfigMapping.from_dict(obj)
raise ValueError(
f"Expected QConfigMapping in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'"
)
def _get_prepare_custom_config(
obj: Any, dict_key: str
) -> PrepareCustomConfig | None:
"""
Convert the given object into a PrepareCustomConfig if possible, else throw an exception.
"""
if isinstance(obj, PrepareCustomConfig) or obj is None:
return obj
if isinstance(obj, dict):
return PrepareCustomConfig.from_dict(obj)
raise ValueError(
f"Expected PrepareCustomConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'"
)
def _get_backend_config(obj: Any, dict_key: str) -> BackendConfig | None:
"""
Convert the given object into a BackendConfig if possible, else throw an exception.
"""
if isinstance(obj, BackendConfig) or obj is None:
return obj
if isinstance(obj, dict):
return BackendConfig.from_dict(obj)
raise ValueError(
f"Expected BackendConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'"
)
conf = cls()
for (
module_name,
qconfig_dict,
example_inputs,
_prepare_custom_config_dict,
backend_config_dict,
) in prepare_custom_config_dict.get(STANDALONE_MODULE_NAME_DICT_KEY, []):
qconfig_mapping = _get_qconfig_mapping(
qconfig_dict, STANDALONE_MODULE_NAME_DICT_KEY
)
prepare_custom_config = _get_prepare_custom_config(
_prepare_custom_config_dict, STANDALONE_MODULE_NAME_DICT_KEY
)
backend_config = _get_backend_config(
backend_config_dict, STANDALONE_MODULE_NAME_DICT_KEY
)
conf.set_standalone_module_name(
module_name,
qconfig_mapping,
example_inputs,
prepare_custom_config,
backend_config,
)
for (
module_class,
qconfig_dict,
example_inputs,
_prepare_custom_config_dict,
backend_config_dict,
) in prepare_custom_config_dict.get(STANDALONE_MODULE_CLASS_DICT_KEY, []):
qconfig_mapping = _get_qconfig_mapping(
qconfig_dict, STANDALONE_MODULE_CLASS_DICT_KEY
)
prepare_custom_config = _get_prepare_custom_config(
_prepare_custom_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY
)
backend_config = _get_backend_config(
backend_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY
)
conf.set_standalone_module_class(
module_class,
qconfig_mapping,
example_inputs,
prepare_custom_config,
backend_config,
)
for quant_type_name, custom_module_mapping in prepare_custom_config_dict.get(
FLOAT_TO_OBSERVED_DICT_KEY, {}
).items():
quant_type = _quant_type_from_str(quant_type_name)
for float_class, observed_class in custom_module_mapping.items():
conf.set_float_to_observed_mapping(
float_class, observed_class, quant_type
)
conf.set_non_traceable_module_names(
prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_NAME_DICT_KEY, [])
)
conf.set_non_traceable_module_classes(
prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_CLASS_DICT_KEY, [])
)
conf.set_input_quantized_indexes(
prepare_custom_config_dict.get(INPUT_QUANTIZED_INDEXES_DICT_KEY, [])
)
conf.set_output_quantized_indexes(
prepare_custom_config_dict.get(OUTPUT_QUANTIZED_INDEXES_DICT_KEY, [])
)
conf.set_preserved_attributes(
prepare_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])
)
return conf
def to_dict(self) -> dict[str, Any]:
"""
Convert this ``PrepareCustomConfig`` to a dictionary with the items described in
:func:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig.from_dict`.
"""
def _make_tuple(key: Any, e: StandaloneModuleConfigEntry):
qconfig_dict = e.qconfig_mapping.to_dict() if e.qconfig_mapping else None
prepare_custom_config_dict = (
e.prepare_custom_config.to_dict() if e.prepare_custom_config else None
)
return (
key,
qconfig_dict,
e.example_inputs,
prepare_custom_config_dict,
e.backend_config,
)
d: dict[str, Any] = {}
for module_name, sm_config_entry in self.standalone_module_names.items():
if STANDALONE_MODULE_NAME_DICT_KEY not in d:
d[STANDALONE_MODULE_NAME_DICT_KEY] = []
d[STANDALONE_MODULE_NAME_DICT_KEY].append(
_make_tuple(module_name, sm_config_entry)
)
for module_class, sm_config_entry in self.standalone_module_classes.items():
if STANDALONE_MODULE_CLASS_DICT_KEY not in d:
d[STANDALONE_MODULE_CLASS_DICT_KEY] = []
d[STANDALONE_MODULE_CLASS_DICT_KEY].append(
_make_tuple(module_class, sm_config_entry)
)
for (
quant_type,
float_to_observed_mapping,
) in self.float_to_observed_mapping.items():
if FLOAT_TO_OBSERVED_DICT_KEY not in d:
d[FLOAT_TO_OBSERVED_DICT_KEY] = {}
d[FLOAT_TO_OBSERVED_DICT_KEY][_get_quant_type_to_str(quant_type)] = (
float_to_observed_mapping
)
if len(self.non_traceable_module_names) > 0:
d[NON_TRACEABLE_MODULE_NAME_DICT_KEY] = self.non_traceable_module_names
if len(self.non_traceable_module_classes) > 0:
d[NON_TRACEABLE_MODULE_CLASS_DICT_KEY] = self.non_traceable_module_classes
if len(self.input_quantized_indexes) > 0:
d[INPUT_QUANTIZED_INDEXES_DICT_KEY] = self.input_quantized_indexes
if len(self.output_quantized_indexes) > 0:
d[OUTPUT_QUANTIZED_INDEXES_DICT_KEY] = self.output_quantized_indexes
if len(self.preserved_attributes) > 0:
d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
return d
| PrepareCustomConfig |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/pydantic/types.py | {
"start": 9488,
"end": 12475
} | class ____:
path_type: Literal['file', 'dir', 'new']
def __pydantic_modify_json_schema__(self, field_schema: dict[str, Any]) -> None:
format_conversion = {'file': 'file-path', 'dir': 'directory-path'}
field_schema.update(format=format_conversion.get(self.path_type, 'path'), type='string')
def __get_pydantic_core_schema__(
self, schema: core_schema.CoreSchema, **_kwargs: Any
) -> core_schema.FunctionSchema:
function_lookup = {
'file': cast(core_schema.ValidatorFunction, self.validate_file),
'dir': cast(core_schema.ValidatorFunction, self.validate_directory),
'new': cast(core_schema.ValidatorFunction, self.validate_new),
}
return core_schema.function_after_schema(
schema,
function_lookup[self.path_type],
)
@staticmethod
def validate_file(path: Path, _: core_schema.ValidationInfo) -> Path:
if path.is_file():
return path
else:
raise PydanticCustomError('path_not_file', 'Path does not point to a file')
@staticmethod
def validate_directory(path: Path, _: core_schema.ValidationInfo) -> Path:
if path.is_dir():
return path
else:
raise PydanticCustomError('path_not_directory', 'Path does not point to a directory')
@staticmethod
def validate_new(path: Path, _: core_schema.ValidationInfo) -> Path:
if path.exists():
raise PydanticCustomError('path_exists', 'path already exists')
elif not path.parent.exists():
raise PydanticCustomError('parent_does_not_exist', 'Parent directory does not exist')
else:
return path
FilePath = Annotated[Path, PathType('file')]
DirectoryPath = Annotated[Path, PathType('dir')]
NewPath = Annotated[Path, PathType('new')]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
Json = Annotated[AnyType, ...] # Json[list[str]] will be recognized by type checkers as list[str]
else:
class Json:
@classmethod
def __class_getitem__(cls, item: AnyType) -> AnyType:
return Annotated[item, cls()]
@classmethod
def __get_pydantic_core_schema__(
cls, schema: core_schema.CoreSchema | None = None, **_kwargs: Any
) -> core_schema.JsonSchema:
return core_schema.json_schema(schema)
@classmethod
def __pydantic_modify_json_schema__(cls, field_schema: dict[str, Any]) -> None:
field_schema.update(type='string', format='json-string')
def __repr__(self) -> str:
return 'Json'
def __hash__(self) -> int:
return hash(type(self))
def __eq__(self, other: Any) -> bool:
return type(other) == type(self)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SecretType = TypeVar('SecretType', str, bytes)
| PathType |
python | apache__airflow | providers/trino/src/airflow/providers/trino/hooks/trino.py | {
"start": 2678,
"end": 2986
} | class ____(Exception):
"""Trino exception."""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == "false":
return False
if value.lower() == "true":
return True
return value
| TrinoException |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 15305,
"end": 16318
} | class ____(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
):
super().__init__()
self.norm_layer = nn.GroupNorm(
num_channels=out_channels,
num_groups=32,
eps=1e-6,
affine=True,
)
self.conv_y = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.conv_b = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode="nearest")
hidden_states = self.norm_layer(hidden_states)
hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
return hidden_states
| Emu3VQVAESpatialNorm |
python | modin-project__modin | modin/tests/config/docs_module/classes.py | {
"start": 911,
"end": 1031
} | class ____:
def isna(self):
"""This is a test of the documentation module for Series."""
return
| Series |
python | allegroai__clearml | clearml/hyperdatasets/data_entry_image.py | {
"start": 19498,
"end": 31936
} | class ____(DataEntry):
def __init__(
self,
data_entry_id: Optional[str] = None,
metadata: Optional[dict] = None,
) -> None:
super(DataEntryImage, self).__init__(data_entry_id=data_entry_id, metadata=metadata)
# optional global annotations storage for the entry level
self._global_annotations: List[Dict[str, Any]] = []
def __repr__(self) -> str:
meta_keys = sorted((getattr(self, "_metadata", {}) or {}).keys())
sub_names = [sub.name for sub in (self.sub_data_entries or [])]
return (
f"{self.__class__.__name__}(id={self.id!r}, sub_entries={sub_names}, "
f"metadata_keys={meta_keys})"
)
def add_global_annotation(
self,
poly2d_xy: Optional[Values] = None,
poly3d_xyz: Optional[Values] = None,
points2d_xy: Optional[Values] = None,
points3d_xyz: Optional[Values] = None,
box2d_xywh: Optional[Values] = None,
box3d_xyzwhxyzwh: Optional[Values] = None,
ellipse2d_xyrrt: Optional[Values] = None,
mask_rgb: Optional[Values] = None,
frame_class: Optional[Sequence[str]] = None,
id: Optional[str] = None,
labels: Optional[Sequence[str]] = None,
confidence: Optional[float] = None,
metadata: Optional[dict] = None,
) -> List[int]:
"""
Broadcast an annotation request to all sub-entries and aggregate their indices.
:param poly2d_xy: 2D polygon coordinates
:param poly3d_xyz: 3D polygon coordinates
:param points2d_xy: 2D keypoint coordinates
:param points3d_xyz: 3D keypoint coordinates
:param box2d_xywh: 2D bounding box definition
:param box3d_xyzwhxyzwh: 3D bounding box definition
:param ellipse2d_xyrrt: 2D ellipse definition
:param mask_rgb: RGB mask values
:param frame_class: Optional frame-level class labels
:param id: Annotation identifier
:param labels: Sequence of label names
:param confidence: Optional confidence value
:param metadata: Extra metadata mapping to attach to the annotation
:return: List of annotation indices returned by the sub-entries
"""
idxs: List[int] = []
for sub in (self.sub_data_entries or []):
if hasattr(sub, "add_annotation"):
idxs.extend(
sub.add_annotation(
poly2d_xy=poly2d_xy,
poly3d_xyz=poly3d_xyz,
points2d_xy=points2d_xy,
points3d_xyz=points3d_xyz,
box2d_xywh=box2d_xywh,
box3d_xyzwhxyzwh=box3d_xyzwhxyzwh,
ellipse2d_xyrrt=ellipse2d_xyrrt,
mask_rgb=mask_rgb,
frame_class=frame_class,
id=id,
labels=labels,
confidence=confidence,
metadata=metadata,
)
)
return idxs
def remove_global_annotation(self, index: Optional[int] = None, **kwargs: Any) -> Any:
"""
Remove the first matching annotation across sub-entries.
:param index: Annotation index to remove
:param kwargs: Alternative filters such as id=...
:return: Removed annotation payload or None when nothing matched
"""
removed = None
for sub in (self.sub_data_entries or []):
r = sub.remove_annotation(index=index, **kwargs)
removed = removed or r
return removed
def remove_global_annotations(
self, id: Optional[str] = None, label: Optional[str] = None, labels: Optional[Sequence[str]] = None
) -> Sequence[Any]:
"""
Remove annotations across sub-entries using the provided filters.
:param id: Annotation identifier to match
:param label: Single label to match
:param labels: Sequence of labels to match
:return: Sequence of removed annotation payloads
"""
removed: List[Any] = []
for sub in (self.sub_data_entries or []):
if hasattr(sub, "remove_annotations"):
removed.extend(sub.remove_annotations(id=id, label=label, labels=labels))
return removed
def get_all_global_annotations(self) -> Sequence[Any]:
"""
Return every annotation collected from all sub-entries.
:return: Sequence of annotation payloads across all sub-entries
"""
anns: List[Any] = []
for sub in (self.sub_data_entries or []):
get_all = getattr(sub, "get_all_annotations", None)
if callable(get_all):
anns.extend(get_all())
else:
anns.extend(getattr(sub, "_annotations", []) or [])
return anns
def get_global_annotations(self, id: Optional[str] = None, index: Optional[int] = None) -> Sequence[Any]:
"""
Return global annotations filtered by identifier or index.
:param id: Annotation identifier to filter by
:param index: Annotation index to fetch
:return: Sequence of matching annotation payloads
"""
if id is None and index is None:
return self.get_all_global_annotations()
anns: List[Any] = []
for sub in (self.sub_data_entries or []):
if hasattr(sub, "get_annotations"):
anns.extend(sub.get_annotations(id=id, index=index))
return anns
def to_api_object(self) -> dict:
# Build SaveFramesRequest-compatible frame dict
entry_meta = _copy_without_keys(getattr(self, "_metadata", {}) or {}, ENTRY_CLASS_KEY)
entry_meta[ENTRY_CLASS_KEY] = _get_class_identifier(self)
frame: Dict[str, Any] = {"id": self.id, "meta": entry_meta}
sources: List[Dict[str, Any]] = []
rois: List[Dict[str, Any]] = []
context_id: Optional[str] = None
for sub in (self.sub_data_entries or []):
# merge per-subentry metadata under its name
sub_meta: Dict[str, Any] = {}
if isinstance(getattr(sub, "_metadata", None), dict):
sub_meta = _copy_without_keys(sub._metadata, SUB_ENTRY_CLASS_KEY)
sub_meta[SUB_ENTRY_CLASS_KEY] = _get_class_identifier(sub)
frame["meta"][sub.name] = sub_meta
s: Dict[str, Any] = {"id": sub.name, "uri": sub.get_source("source")}
if sub.get_source("preview_source"):
s["preview"] = {"uri": sub.get_source("preview_source")}
# dimensions/timestamp
if getattr(sub, "_width", None) is not None:
s["width"] = sub._width
if getattr(sub, "_height", None) is not None:
s["height"] = sub._height
if getattr(sub, "_timestamp", None) is not None:
s["timestamp"] = sub._timestamp
# hashes metadata
sh = sub.get_hash("source")
ph = sub.get_hash("preview_source")
if sh or ph:
s_meta: Dict[str, Any] = {"hash": {}}
if sh:
s_meta["hash"]["source"] = sh
if ph:
s_meta["hash"]["preview_source"] = ph
s["meta"] = s_meta
# masks
masks_src: Dict[str, str] = {}
get_masks = getattr(sub, "get_masks_source_dict", None)
if callable(get_masks):
masks_src = get_masks()
else:
masks_src = getattr(sub, "_masks_source", {}) or {}
if masks_src:
s["masks"] = [{"id": mid, "uri": muri} for mid, muri in sorted(masks_src.items())]
sources.append(s)
# aggregate rois
for ann in getattr(sub, "_annotations", []) or []:
rois.append(ann)
# prefer first non-empty context
if context_id is None and getattr(sub, "_context_id", None):
context_id = sub._context_id
frame["sources"] = sources
if rois:
frame["rois"] = rois
if context_id is not None:
frame["context_id"] = context_id
return frame
@classmethod
def from_api_object(cls, frame: Any) -> "DataEntryImage":
"""
Convert backend frame (dict/object) to DataEntryImage + DataSubEntryImage tree.
"""
log = logging.getLogger("DataView")
def _get(obj, key, default=None):
if isinstance(obj, dict):
return obj.get(key, default)
return getattr(obj, key, default)
try:
raw_meta = _get(frame, "meta") or {}
metadata = _copy_without_keys(raw_meta, ENTRY_CLASS_KEY)
resolved_entry_cls = None
if isinstance(raw_meta, dict):
resolved_entry_cls = _resolve_class(raw_meta.get(ENTRY_CLASS_KEY), DataEntry)
if resolved_entry_cls and not issubclass(resolved_entry_cls, DataEntry):
resolved_entry_cls = None
entry = cls(data_entry_id=_get(frame, "id"), metadata=metadata)
ctx = _get(frame, "context_id")
sources = _get(frame, "sources") or []
sub_entries: List[DataSubEntry] = []
for idx, s in enumerate(sources):
name = _get(s, "id") or f"image_{idx}"
sub_meta_raw = raw_meta.get(name) if isinstance(raw_meta, dict) else None
sub_meta_clean: Optional[Dict[str, Any]] = None
resolved_cls: Optional[type] = None
if isinstance(sub_meta_raw, dict):
class_path = sub_meta_raw.get(SUB_ENTRY_CLASS_KEY)
sub_meta_clean = _copy_without_keys(sub_meta_raw, SUB_ENTRY_CLASS_KEY)
resolved_cls = _resolve_class(class_path, DataSubEntry)
metadata[name] = sub_meta_clean
sub = DataSubEntryImage.from_api_object(
s,
frame_meta=raw_meta if isinstance(raw_meta, dict) else {},
context_id=ctx,
name_fallback=name,
)
if isinstance(sub_meta_clean, dict):
sub._metadata = sub_meta_clean
if resolved_cls and issubclass(resolved_cls, DataSubEntry):
try:
sub.__class__ = resolved_cls
except TypeError:
log.warning(
"Could not assign image sub-entry %s to class '%s'",
name,
resolved_cls.__name__,
)
sub_entries.append(sub)
if sub_entries:
entry.add_sub_entries(sub_entries)
# Map ROIs to sub-entries by sources list
rois = _get(frame, "rois") or []
by_name = {se.name: se for se in sub_entries}
for roi in rois:
srcs = _get(roi, "sources") or []
targets = srcs or list(by_name.keys())[:1]
for sid in targets:
se = by_name.get(sid)
if not se:
continue
if not hasattr(se, "_annotations"):
se._annotations = [] # type: ignore[attr-defined]
try:
se._annotations.append(roi)
except Exception as ex:
log.error("Failed attaching ROI to subentry %s: %s", sid, ex)
if (
resolved_entry_cls
and isinstance(entry, DataEntry)
and issubclass(resolved_entry_cls, DataEntry)
and entry.__class__ is not resolved_entry_cls
):
try:
entry.__class__ = resolved_entry_cls
except TypeError:
log.warning(
"Could not assign data entry %s to class '%s'",
getattr(entry, "id", None),
resolved_entry_cls.__name__,
)
return entry
except Exception as ex:
log.exception("from_api_object conversion failed: %s", ex)
return cls()
| DataEntryImage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-asana/source_asana/config_migration.py | {
"start": 419,
"end": 3046
} | class ____:
"""
This class stands for migrating the config at runtime,
while providing the backward compatibility when falling back to the previous source version.
"""
message_repository: MessageRepository = InMemoryMessageRepository()
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""
based on the source spec.
Returns:
> True, if the transformation is necessary
> False, otherwise.
> Raises the Exception if the structure could not be migrated.
"""
return "access_token" in config # if access_token is directly given in config, transform it into PAT credentials
@classmethod
def modify(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
if "credentials" not in config:
if "personal_access_token" not in config:
config["credentials"] = {"option_title": "PAT Credentials", "personal_access_token": config["access_token"]}
else:
raise ValueError(f"Invalid config. got {config}")
return config
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
# modify the config
migrated_config = cls.modify(config)
# save the config
source.write_config(migrated_config, config_path)
# return modified config
return migrated_config
@classmethod
def emit_control_message(cls, migrated_config: Mapping[str, Any]) -> None:
# add the Airbyte Control Message to message repo
cls.message_repository.emit_message(create_connector_config_control_message(migrated_config))
# emit the Airbyte Control Message from message queue to stdout
for message in cls.message_repository._message_queue:
print(message.json(exclude_unset=True))
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
This method checks the input args, should the config be migrated,
transform if necessary and emit the CONTROL message.
"""
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls.should_migrate(config):
cls.emit_control_message(
cls.modify_and_save(config_path, source, config),
)
| AsanaConfigMigration |
python | celery__celery | celery/backends/rpc.py | {
"start": 778,
"end": 942
} | class ____(Exception):
"""Too much state history to fast-forward."""
def _on_after_fork_cleanup_backend(backend):
backend._after_fork()
| BacklogLimitExceeded |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_nonempty_geometries.py | {
"start": 992,
"end": 1696
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.nonempty_geometries"
condition_value_keys = ()
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
# We need to use shape and mapping to convert the GeoJSON to a Shapely object and vice versa
column = column.apply(shape)
# Use the ~ operator to check if each value is not empty
return ~column.apply(lambda x: x.is_empty)
# This class defines the Expectation itself
| ColumnValuesNonemptyGeometries |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 207863,
"end": 208067
} | class ____(InMemoryNetCDFWithGroups):
engine: T_NetcdfEngine = "h5netcdf"
@requires_h5netcdf
@requires_dask
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
| TestH5NetCDFInMemoryData |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ001.py | {
"start": 2391,
"end": 2682
} | class ____(models.Model):
charfield: models.CharField = models.CharField(max_length=255, null=True)
textfield: models.TextField = models.TextField(max_length=255, null=True)
slugfield: models.SlugField = models.SlugField(max_length=255, null=True)
| IncorrectModelWithSimpleAnnotations |
python | pytorch__pytorch | test/test_fake_tensor.py | {
"start": 48891,
"end": 60788
} | class ____(TestCase):
def get_aten_op(self, schema):
namespace, name = schema.name.split("::")
overload = schema.overload_name if schema.overload_name else "default"
assert namespace == "aten"
return getattr(getattr(torch.ops.aten, name), overload)
def get_all_aten_schemas(self):
for schema in torch._C._jit_get_all_schemas():
namespace = schema.name.split("::")[0]
if namespace != "aten":
continue
yield schema
def test_non_kwarg_only_device(self):
for schema in self.get_all_aten_schemas():
ten_type = torch._C.TensorType.get()
if not any(
contains_type(arg.type, ten_type)
for arg in itertools.chain(schema.arguments, schema.returns)
):
continue
opt_device = torch._C.OptionalType(torch._C.DeviceObjType.get())
has_non_kwarg_device = any(
not arg.kwarg_only and arg.type.isSubtypeOf(opt_device)
for arg in schema.arguments
)
if has_non_kwarg_device:
self.assertTrue(
self.get_aten_op(schema)
in torch._subclasses.fake_tensor._device_not_kwarg_ops
)
def test_tensor_constructors_all_have_kwarg_device(self):
for schema in self.get_all_aten_schemas():
op = self.get_aten_op(schema)
if not torch._subclasses.fake_tensor._is_tensor_constructor(op):
continue
opt_device = torch._C.OptionalType(torch._C.DeviceObjType.get())
has_kwarg_device = any(
arg.kwarg_only and arg.type.isSubtypeOf(opt_device)
for arg in schema.arguments
)
self.assertTrue(
has_kwarg_device or op == torch.ops.aten._list_to_tensor.default
)
@unittest.expectedFailure
def test_sparse_new(self):
with FakeTensorMode():
indices = torch.randn(1, 1, dtype=torch.int64)
values = torch.randn(1)
extra = (2,)
sparse = torch.randn(1).to_sparse()
# This used to segfault, now it does not, but it still raises an
# error
sparse2 = sparse.new(indices, values, extra)
def test_tensor_new(self):
with FakeTensorMode():
x = torch.Tensor([1, 2, 3])
self.assertIsInstance(x, FakeTensor)
def test_like_ops(self):
for schema in self.get_all_aten_schemas():
if "_like" == schema.name[-5:]:
op = self.get_aten_op(schema)
self.assertIn(
op, torch._subclasses.fake_tensor._like_tensor_constructors
)
def test_str_storage(self):
x = torch.zeros(3)
with FakeTensorMode() as m:
y = m.from_tensor(x)
self.assertExpectedInline(
str(x.storage()),
"""\
0.0
0.0
0.0
[torch.storage.TypedStorage(dtype=torch.float32, device=cpu) of size 3]""",
)
self.assertExpectedInline(
str(y.storage()),
"""\
...
[torch.storage.TypedStorage(dtype=torch.float32, device=meta) of size 3]""",
)
self.assertExpectedInline(
str(y.storage()),
"""\
...
[torch.storage.TypedStorage(dtype=torch.float32, device=meta) of size 3]""",
)
# at::_embedding_bag has no op info,
# and returns extra tensors that at::embedding bag throws away
def test_embedding_bag_private(self):
args = [
torch.ones(6, 1),
torch.ones(6, dtype=torch.int64),
torch.arange(2, dtype=torch.int64),
False,
2, # mode = max
]
ref_out = torch.ops.aten._embedding_bag(*args)
with FakeTensorMode() as m:
meta_args = [
m.from_tensor(a) if isinstance(a, torch.Tensor) else a for a in args
]
meta_out = torch.ops.aten._embedding_bag(*meta_args)
self.assertEqual(len(ref_out), len(meta_out))
for ref_o, meta_o in zip(ref_out, meta_out):
self.assertEqual(ref_o.size(), meta_o.size())
def test_cross_entropy_loss(self):
inp = torch.randn(3, 5)
target = torch.randint(5, (3,), dtype=torch.long)
weight = torch.rand(5)
fn = torch.nn.functional.cross_entropy
for w in (weight, None):
args = (inp, target, w)
ref = fn(*args)
with FakeTensorMode() as m:
meta_args = [
m.from_tensor(a) if isinstance(a, torch.Tensor) else a for a in args
]
meta_out = torch.nn.functional.cross_entropy(
*meta_args, label_smoothing=0.5
)
self.assertEqual(ref.size(), meta_out.size())
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION,
"Does not support SDPA or pre-SM80 hardware",
)
def test_flash_attention(self):
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, arg1, arg2, arg3):
torch.ops.aten._scaled_dot_product_flash_attention(
arg1, arg2, arg3, scale=0.17677669529663687
)
args_new = [
[
((1, 48, 64, 64), (0, 4096, 64, 1), torch.float16, "cuda"),
((1, 48, 64, 64), (0, 4096, 64, 1), torch.float16, "cuda"),
((1, 48, 64, 64), (0, 4096, 64, 1), torch.float16, "cuda"),
],
[
((4, 2, 16, 32), (1024, 512, 32, 1), torch.float16, "cuda"),
((4, 2, 16, 32), (1024, 512, 32, 1), torch.float16, "cuda"),
((4, 2, 16, 32), (1024, 512, 32, 1), torch.float16, "cuda"),
],
]
for args_list in args_new:
args = [
rand_strided(bsz, num_heads, seq_len, head_dim)
for (bsz, num_heads, seq_len, head_dim) in args_list
]
try:
with torch._subclasses.CrossRefFakeMode():
Repro()(*args)
except MetadataMismatchError as e:
# We expect the cross ref to succeed for the first output to fail
# for the rng state, see Note [Seed and Offset]
self.assertTrue("output[0]" not in str(e))
if self.__class__.__name__.startswith("PropagateRealTensors"):
self.assertTrue(
"Real tensor propagation found a metadata mismatch" in str(e)
)
else:
self.assertTrue(
"found mismatched tensor metadata for output" in str(e)
)
# IMPORTANT!!! Always run even if CUDA is not available
def test_fake_gpu_no_init(self):
# Skip this test, we will try to run CUDA operations to real prop so
# it clearly will not work on CPU runner
if torch._functorch.config.fake_tensor_propagate_real_tensors:
self.skipTest("Propagate real tensor not supported")
with FakeTensorMode(allow_non_fake_inputs=True):
self.assertEqual(torch.empty(10, device=GPU_TYPE).device.type, GPU_TYPE)
self.assertEqual(torch.ones(10, device=GPU_TYPE).device.type, GPU_TYPE)
self.assertEqual(torch.zeros(10, device=GPU_TYPE).device.type, GPU_TYPE)
self.assertEqual(torch.rand(10, device=GPU_TYPE).device.type, GPU_TYPE)
self.assertEqual(torch.tensor(3.14, device=GPU_TYPE).device.type, GPU_TYPE)
self.assertEqual(
torch.tensor([[3.14, 2], [1, 2]], device=GPU_TYPE).device.type, GPU_TYPE
)
@unittest.skipIf(not torch.backends.cuda.is_built(), "requires CUDA build")
def test_move_module_under_fake(self):
if torch._functorch.config.fake_tensor_propagate_real_tensors:
self.skipTest("Propagate real tensor not supported")
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(2, 2)
self.buffer = torch.nn.Buffer(torch.rand(2, 2))
self.param = torch.nn.Parameter(torch.rand(2, 2))
def forward(self, x):
return self.linear(x) + self.buffer + self.param
m = Module()
input = torch.rand(2, 2)
gpu_device = torch.device(GPU_TYPE, 0)
with FakeTensorMode(allow_non_fake_inputs=True):
m.to(device=gpu_device)
arg = input.to(device=gpu_device)
out = m(arg)
for p in m.parameters():
self.assertTrue(isinstance(p, FakeTensor))
self.assertEqual(p.device, gpu_device)
for b in m.buffers():
self.assertTrue(isinstance(b, FakeTensor))
self.assertEqual(b.device, gpu_device)
self.assertTrue(isinstance(out, FakeTensor))
self.assertEqual(out.device, gpu_device)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_move_meta_tensor(self):
if torch._functorch.config.fake_tensor_propagate_real_tensors:
self.skipTest("Propagate real tensor not supported")
meta_tensor = torch.ones(2, device="meta")
with FakeTensorMode(allow_non_fake_inputs=True):
self.assertEqual(meta_tensor.to(device="cpu").device.type, "cpu")
self.assertEqual(meta_tensor.to(device=GPU_TYPE).device.type, GPU_TYPE)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_conv_c1_backward(self):
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, arg1, arg2, arg3):
torch.ops.aten.convolution_backward.default(
arg1,
arg2,
arg3,
[1],
[1, 1],
[1, 1],
[1, 1],
False,
[0, 0],
1,
[True, True, False],
)
args_new = [
((16, 1, 128, 128), (16384, 16384, 128, 1), torch.float16, "cuda"),
((16, 64, 128, 128), (1048576, 1, 8192, 64), torch.float16, "cuda"),
((1, 64, 3, 3), (576, 9, 3, 1), torch.float16, "cuda"),
]
args = [rand_strided(sh, st, dt, dev) for (sh, st, dt, dev) in args_new]
with torch._subclasses.CrossRefFakeMode():
Repro()(*args)
def test_no_dispatch_with_like_function(self):
class CountingMode(TorchDispatchMode):
def __init__(self) -> None:
self.count = 0
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
self.count += 1
return func(*args, **kwargs)
with FakeTensorMode():
x = torch.randn(2)
with CountingMode() as mode:
with no_dispatch():
torch.zeros_like(x)
self.assertEqual(mode.count, 0)
# PropagateRealTensors installs weakrefs
@expectedFailurePropagateRealTensors
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_module_to(self):
def _check_device(sd, device_type):
for v in sd.values():
self.assertEqual(v.device.type, device_type)
with FakeTensorMode():
m = torch.nn.Linear(2, 2)
_check_device(m.state_dict(), "cpu")
m.to("cuda")
_check_device(m.state_dict(), "cuda")
make_propagate_real_tensors_cls(FakeTensorOperatorInvariants)
| FakeTensorOperatorInvariants |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/index1.py | {
"start": 1898,
"end": 1983
} | class ____:
def __call__(self, *args, **kwargs) -> Self:
return self
| ClassH |