language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 101129,
"end": 101999
}
|
class ____(TypedDict, total=False):
    """
    :class:`altair.GeoJsonFeatureCollection` ``TypedDict`` wrapper.

    Parameters
    ----------
    features
        The member features of the collection.
    type
        Specifies the type of GeoJSON object.
    bbox
        Bounding box of the coordinate range of the object's Geometries, Features, or
        Feature Collections. The value of the bbox member is an array of length 2*n where n
        is the number of dimensions represented in the contained geometries, with all axes
        of the most southwesterly point followed by all axes of the more northeasterly
        point. The axes order of a bbox follows the axes order of geometries.
        https://tools.ietf.org/html/rfc7946#section-5
    """

    # total=False: every key below is optional when constructing the dict.
    features: Sequence[FeatureGeometryGeoJsonPropertiesKwds]
    type: Literal["FeatureCollection"]
    bbox: Sequence[float]
|
GeoJsonFeatureCollectionKwds
|
python
|
django__django
|
django/test/utils.py
|
{
"start": 25483,
"end": 29537
}
|
class ____(TestContextDecorator):
    """Suppress matching warnings for the duration of a test or context.

    Keyword arguments are forwarded to the warnings filter. Passing
    ``message`` or ``module`` requires ``warnings.filterwarnings``; any
    other combination can use the simpler ``warnings.simplefilter``.
    """

    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        needs_full_filter = "message" in kwargs or "module" in kwargs
        self.filter_func = (
            warnings.filterwarnings if needs_full_filter else warnings.simplefilter
        )
        super().__init__()

    def enable(self):
        # Snapshot the global warning-filter state; restored in disable().
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func("ignore", **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
# Decorator for tests/TestCases that need to change the process timezone.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that.",
)
@contextmanager
def extend_sys_path(*paths):
    """Temporarily append *paths* to sys.path, restoring the original on exit."""
    saved_path = list(sys.path)
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = saved_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Run a block with *lru_cache_object* emptied both before and after."""
    clear = lru_cache_object.cache_clear
    clear()
    try:
        yield
    finally:
        clear()
@contextmanager
def captured_output(stream_name):
    """Swap the ``sys`` stream named *stream_name* for a StringIO and yield it.

    Backs the ``captured_stdout``/``captured_stderr``/``captured_stdin``
    helpers below; adapted from CPython's ``test.support`` module.
    """
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, original)
def captured_stdout():
    """Capture the output of sys.stdout:

    with captured_stdout() as stdout:
        print("hello")
    self.assertEqual(stdout.getvalue(), "hello\n")
    """
    # Thin wrapper over captured_output().
    return captured_output("stdout")


def captured_stderr():
    """Capture the output of sys.stderr:

    with captured_stderr() as stderr:
        print("hello", file=sys.stderr)
    self.assertEqual(stderr.getvalue(), "hello\n")
    """
    # Thin wrapper over captured_output().
    return captured_output("stderr")


def captured_stdin():
    """Capture the input to sys.stdin:

    with captured_stdin() as stdin:
        stdin.write('hello\n')
        stdin.seek(0)
        # call test code that consumes from sys.stdin
        captured = input()
    self.assertEqual(captured, "hello")
    """
    # Thin wrapper over captured_output(); the caller writes then rewinds.
    return captured_output("stdin")
@contextmanager
def freeze_time(t):
    """Freeze ``time.time()`` at *t* for the duration of the block.

    Only the ``time.time`` attribute of the time module is patched; code that
    imported the function directly (``from time import time``) still sees the
    real clock. Internal helper for Django's test suite, not a public API.
    """
    real_time_fn = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = real_time_fn
def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
    """
    # Skip outright when the optional jinja2 dependency is not installed.
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    # Both engines resolve templates from app directories (APP_DIRS).
    # keep_trailing_newline presumably aligns Jinja2 output with the Django
    # engine for comparisons — confirm against the template tests.
    return override_settings(
        TEMPLATES=[
            {
                "BACKEND": "django.template.backends.django.DjangoTemplates",
                "APP_DIRS": True,
            },
            {
                "BACKEND": "django.template.backends.jinja2.Jinja2",
                "APP_DIRS": True,
                "OPTIONS": {"keep_trailing_newline": True},
            },
        ]
    )(test_func)
|
ignore_warnings
|
python
|
TheAlgorithms__Python
|
other/number_container_system.py
|
{
"start": 308,
"end": 6296
}
|
class ____:
    """Track number→indexes and index→number assignments with sorted lookups.

    ``change`` assigns a number to an index; ``find`` returns the smallest
    index currently holding a number (or -1). Two dicts are kept in sync:
    ``numbermap`` (number -> ascending list of indexes) and ``indexmap``
    (index -> number).
    """

    def __init__(self) -> None:
        # numbermap keys are the number and its values are lists of indexes sorted
        # in ascending order
        self.numbermap: dict[int, list[int]] = {}
        # indexmap keys are an index and it's values are the number at that index
        self.indexmap: dict[int, int] = {}

    def binary_search_delete(self, array: list | str | range, item: int) -> list[int]:
        """
        Removes the item from the sorted array and returns
        the new array.

        >>> NumberContainer().binary_search_delete([1,2,3], 2)
        [1, 3]
        >>> NumberContainer().binary_search_delete([0, 0, 0], 0)
        [0, 0]
        >>> NumberContainer().binary_search_delete([-1, -1, -1], -1)
        [-1, -1]
        >>> NumberContainer().binary_search_delete([-1, 0], 0)
        [-1]
        >>> NumberContainer().binary_search_delete([-1, 0], -1)
        [0]
        >>> NumberContainer().binary_search_delete(range(7), 3)
        [0, 1, 2, 4, 5, 6]
        >>> NumberContainer().binary_search_delete([1.1, 2.2, 3.3], 2.2)
        [1.1, 3.3]
        >>> NumberContainer().binary_search_delete("abcde", "c")
        ['a', 'b', 'd', 'e']
        >>> NumberContainer().binary_search_delete([0, -1, 2, 4], 0)
        Traceback (most recent call last):
        ...
        ValueError: Either the item is not in the array or the array was unsorted
        >>> NumberContainer().binary_search_delete([2, 0, 4, -1, 11], -1)
        Traceback (most recent call last):
        ...
        ValueError: Either the item is not in the array or the array was unsorted
        >>> NumberContainer().binary_search_delete(125, 1)
        Traceback (most recent call last):
        ...
        TypeError: binary_search_delete() only accepts either a list, range or str
        """
        if isinstance(array, (range, str)):
            array = list(array)
        elif not isinstance(array, list):
            raise TypeError(
                "binary_search_delete() only accepts either a list, range or str"
            )

        low = 0
        high = len(array) - 1

        while low <= high:
            mid = (low + high) // 2
            if array[mid] == item:
                # Found: remove in place and return the mutated list.
                array.pop(mid)
                return array
            elif array[mid] < item:
                low = mid + 1
            else:
                high = mid - 1
        # Search exhausted: the item is absent, or the sorted-input
        # precondition was violated.
        raise ValueError(
            "Either the item is not in the array or the array was unsorted"
        )

    def binary_search_insert(self, array: list | str | range, index: int) -> list[int]:
        """
        Inserts the index into the sorted array
        at the correct position.

        >>> NumberContainer().binary_search_insert([1,2,3], 2)
        [1, 2, 2, 3]
        >>> NumberContainer().binary_search_insert([0,1,3], 2)
        [0, 1, 2, 3]
        >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 103], 51)
        [-5, -3, 0, 0, 11, 51, 103]
        >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 100, 103], 101)
        [-5, -3, 0, 0, 11, 100, 101, 103]
        >>> NumberContainer().binary_search_insert(range(10), 4)
        [0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9]
        >>> NumberContainer().binary_search_insert("abd", "c")
        ['a', 'b', 'c', 'd']
        >>> NumberContainer().binary_search_insert(131, 23)
        Traceback (most recent call last):
        ...
        TypeError: binary_search_insert() only accepts either a list, range or str
        """
        if isinstance(array, (range, str)):
            array = list(array)
        elif not isinstance(array, list):
            raise TypeError(
                "binary_search_insert() only accepts either a list, range or str"
            )

        low = 0
        high = len(array) - 1

        while low <= high:
            mid = (low + high) // 2
            if array[mid] == index:
                # If the item already exists in the array,
                # insert it after the existing item
                array.insert(mid + 1, index)
                return array
            elif array[mid] < index:
                low = mid + 1
            else:
                high = mid - 1

        # If the item doesn't exist in the array, insert it at the appropriate position
        array.insert(low, index)
        return array

    def change(self, index: int, number: int) -> None:
        """
        Changes (sets) the index as number

        >>> cont = NumberContainer()
        >>> cont.change(0, 10)
        >>> cont.change(0, 20)
        >>> cont.change(-13, 20)
        >>> cont.change(-100030, 20032903290)
        """
        # Remove previous index
        if index in self.indexmap:
            n = self.indexmap[index]
            if len(self.numbermap[n]) == 1:
                # Last index for the old number: drop the whole entry.
                del self.numbermap[n]
            else:
                self.numbermap[n] = self.binary_search_delete(self.numbermap[n], index)

        # Set new index
        self.indexmap[index] = number

        # Number not seen before or empty so insert number value
        if number not in self.numbermap:
            self.numbermap[number] = [index]
        # Here we need to perform a binary search insertion in order to insert
        # The item in the correct place
        else:
            self.numbermap[number] = self.binary_search_insert(
                self.numbermap[number], index
            )

    def find(self, number: int) -> int:
        """
        Returns the smallest index where the number is.

        >>> cont = NumberContainer()
        >>> cont.find(10)
        -1
        >>> cont.change(0, 10)
        >>> cont.find(10)
        0
        >>> cont.change(0, 20)
        >>> cont.find(10)
        -1
        >>> cont.find(20)
        0
        """
        # Simply return the 0th index (smallest) of the indexes found (or -1)
        return self.numbermap.get(number, [-1])[0]
if __name__ == "__main__":
    import doctest

    # Run the docstring examples above as this module's test suite.
    doctest.testmod()
|
NumberContainer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/tests/core/test_runner.py
|
{
"start": 43890,
"end": 48934
}
|
class ____:
    """Test asset creation functionality.

    Every test patches ``prefect_dbt.core.runner.Asset`` so no real Asset is
    constructed; assertions only check that the runner returns the patched
    instance and calls the constructor exactly once.
    """

    def test_create_asset_from_node_creates_asset(self, mock_manifest_node):
        """Test that assets are created correctly from manifest nodes."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"

        with patch("prefect_dbt.core.runner.Asset") as mock_asset_class:
            mock_asset = Mock(spec=Asset)
            mock_asset_class.return_value = mock_asset

            result = runner._create_asset_from_node(mock_manifest_node, adapter_type)

            assert result == mock_asset
            mock_asset_class.assert_called_once()

    def test_create_asset_from_source_definition_creates_asset(
        self, mock_source_definition
    ):
        """Test that assets are created correctly from source definitions."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"

        with patch("prefect_dbt.core.runner.Asset") as mock_asset_class:
            mock_asset = Mock(spec=Asset)
            mock_asset_class.return_value = mock_asset

            # Source definitions go through the same node entry point.
            result = runner._create_asset_from_node(
                mock_source_definition, adapter_type
            )

            assert result == mock_asset
            mock_asset_class.assert_called_once()

    def test_create_asset_from_source_definition_with_owner(
        self, mock_source_definition
    ):
        """Test that assets are created correctly from source definitions with owner."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"
        mock_source_definition.meta = {"owner": "test_owner"}

        with patch("prefect_dbt.core.runner.Asset") as mock_asset_class:
            mock_asset = Mock(spec=Asset)
            mock_asset_class.return_value = mock_asset

            result = runner._create_asset_from_node(
                mock_source_definition, adapter_type
            )

            assert result == mock_asset
            mock_asset_class.assert_called_once()

    def test_create_asset_from_source_definition_without_owner(
        self, mock_source_definition
    ):
        """Test that assets are created correctly from source definitions without owner."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"
        mock_source_definition.meta = {}

        with patch("prefect_dbt.core.runner.Asset") as mock_asset_class:
            mock_asset = Mock(spec=Asset)
            mock_asset_class.return_value = mock_asset

            result = runner._create_asset_from_node(
                mock_source_definition, adapter_type
            )

            assert result == mock_asset
            mock_asset_class.assert_called_once()

    def test_create_asset_from_source_definition_with_non_string_owner(
        self, mock_source_definition
    ):
        """Test that assets are created correctly from source definitions with non-string owner."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"
        mock_source_definition.meta = {"owner": 123}  # Non-string owner

        with patch("prefect_dbt.core.runner.Asset") as mock_asset_class:
            mock_asset = Mock(spec=Asset)
            mock_asset_class.return_value = mock_asset

            result = runner._create_asset_from_node(
                mock_source_definition, adapter_type
            )

            assert result == mock_asset
            mock_asset_class.assert_called_once()

    def test_create_asset_from_node_with_missing_relation_name_raises_error(
        self, mock_manifest_node
    ):
        """Test that missing relation_name raises an error when creating assets."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"
        mock_manifest_node.relation_name = None

        with pytest.raises(ValueError, match="Relation name not found in manifest"):
            runner._create_asset_from_node(mock_manifest_node, adapter_type)

    def test_create_asset_from_source_definition_with_missing_relation_name_raises_error(
        self, mock_source_definition
    ):
        """Test that missing relation_name raises an error when creating assets from source definitions."""
        runner = PrefectDbtRunner()
        adapter_type = "snowflake"
        mock_source_definition.relation_name = None

        with pytest.raises(ValueError, match="Relation name not found in manifest"):
            runner._create_asset_from_node(mock_source_definition, adapter_type)

    def test_create_task_options_creates_options(self, mock_manifest_node):
        """Test that task options are created correctly."""
        runner = PrefectDbtRunner()
        upstream_assets = [Mock(spec=Asset)]

        with patch("prefect_dbt.core.runner.TaskOptions") as mock_options_class:
            mock_options = Mock()
            mock_options_class.return_value = mock_options

            result = runner._create_task_options(mock_manifest_node, upstream_assets)

            assert result == mock_options
            mock_options_class.assert_called_once()
|
TestPrefectDbtRunnerAssetCreation
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-linnworks/source_linnworks/streams.py
|
{
"start": 2909,
"end": 3756
}
|
class ____(HttpSubStream, StockLocations):
    # https://apps.linnworks.net/Api/Method/Locations-GetLocation
    # Response: StockLocation https://apps.linnworks.net/Api/Class/linnworks-spa-commondata-Locations-ClassBase-StockLocation
    # Allows 150 calls per minute
    primary_key = "StockLocationIntId"

    def __init__(self, **kwargs):
        # Parent HttpSubStream yields one slice per StockLocations record.
        super().__init__(StockLocations(**kwargs), **kwargs)

    def path(
        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> str:
        return "/api/Locations/GetLocation"

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        # Annotation fixed: was Mapping[str, any] — builtin any() is not a type.
        # NOTE(review): the trailing space in the "pkStockLocationId " key is
        # sent verbatim to the API; looks accidental — confirm before changing.
        return {"pkStockLocationId ": stream_slice["parent"]["StockLocationId"]}
|
StockLocationDetails
|
python
|
mitmproxy__pdoc
|
test/testdata/misc_py313.py
|
{
"start": 34,
"end": 136
}
|
class ____(dict):
    """A ``dict`` subclass with no added behavior."""

    pass
@deprecated("Do not use this anymore")
def deprecated_func():
    """No-op function marked deprecated via the ``@deprecated`` decorator."""
    pass
|
MyDict
|
python
|
ethereum__web3.py
|
web3/_utils/abi.py
|
{
"start": 9751,
"end": 18625
}
|
class ____(encoding.TextStringEncoder):
    """Text-string encoder that also accepts ``bytes`` decodable as text."""

    @classmethod
    def validate_value(cls, value: Any) -> None:
        # Accept bytes by round-tripping through to_text(); bytes that fail to
        # decode are rejected here rather than deeper in the base class.
        if is_bytes(value):
            try:
                value = to_text(value)
            except UnicodeDecodeError:
                cls.invalidate_value(
                    value,
                    msg="not decodable as unicode string",
                )

        super().validate_value(value)  # type: ignore[no-untyped-call]
# Matches "tuple" optionally followed by array suffixes such as "[2]" or "[]".
TUPLE_TYPE_STR_RE = re.compile(r"^(tuple)((\[([1-9]\d*\b)?])*)??$")


def get_tuple_type_str_parts(s: str) -> tuple[str, str | None] | None:
    """Split a JSON ABI type string into its tuple prefix and array dims.

    Returns ``None`` for any non-tuple type string. For a bare ``"tuple"``
    the dims part is ``None``; otherwise it is the raw suffix, e.g. "[2][]".
    """
    matched = TUPLE_TYPE_STR_RE.match(s)
    if matched is None:
        return None
    return matched.group(1), matched.group(2)
def _align_abi_input(
    arg_abi: ABIComponent | ABIComponentIndexed, arg: Any
) -> tuple[Any, ...]:
    """
    Aligns the values of any mapping at any level of nesting in ``arg``
    according to the layout of the corresponding abi spec.

    Non-tuple args are returned unchanged; tuple (and tuple-array) args are
    recursively re-ordered so mapping values follow ABI component order.
    """
    tuple_parts = get_tuple_type_str_parts(arg_abi["type"])
    if tuple_parts is None:
        # Arg is non-tuple. Just return value.
        return arg

    tuple_prefix, tuple_dims = tuple_parts
    if tuple_dims is None:
        # Arg is non-list tuple. Each sub arg in `arg` will be aligned
        # according to its corresponding abi.
        sub_abis = arg_abi["components"]
    else:
        num_dims = tuple_dims.count("[")

        # Arg is list tuple. A non-list version of its abi will be used to
        # align each element in `arg`.
        new_abi = copy.copy(arg_abi)
        new_abi["type"] = tuple_prefix + "[]" * (num_dims - 1)

        # BUGFIX: this was a *list comprehension* over itertools.repeat, which
        # never terminates. Keep the repeat lazy so zip() below bounds it.
        sub_abis = itertools.repeat(cast(ABIComponent, new_abi))

    if isinstance(arg, abc.Mapping):
        # Arg is mapping. Align values according to abi order.
        # (Only reachable for the finite `components` case above.)
        aligned_arg = tuple(arg[abi["name"]] for abi in sub_abis)
    else:
        aligned_arg = arg

    if not is_list_like(aligned_arg):
        raise Web3TypeError(
            f'Expected non-string sequence for "{arg_abi.get("type")}" '
            f"component type: got {aligned_arg}"
        )

    # Rebuild with the input's own sequence type, collapsing NamedTuples to
    # plain tuples. (Renamed from `typing`, which shadowed the stdlib module.)
    container_type = tuple if isinstance(aligned_arg, tuple) else type(aligned_arg)

    return container_type(
        _align_abi_input(sub_abi, sub_arg)
        for sub_abi, sub_arg in zip(sub_abis, aligned_arg)
    )
def find_constructor_abi_element_by_type(contract_abi: ABI) -> ABIConstructor:
    """
    Find the constructor ABI element in the contract ABI.

    This function is often used in place of `web3.utils.abi.get_abi_element` to find
    a constructor without considering its argument types. This is used prior to
    encoding the abi, since the argument types are not known at that time.

    Returns ``None`` when the ABI declares no constructor; raises
    ``Web3ValueError`` when more than one constructor is present.
    """
    candidates = [abi for abi in contract_abi if abi["type"] == "constructor"]
    if len(candidates) > 1:
        raise Web3ValueError("Found multiple constructors.")
    # Previously an exhaustive if/elif chain followed by an unreachable
    # `return None`; collapsed to the two reachable outcomes.
    return candidates[0] if candidates else None
# Vocabulary of ABI type names used by the is_*_type() predicates below.
DYNAMIC_TYPES = ["bytes", "string"]

INT_SIZES = range(8, 257, 8)  # 8, 16, ..., 256 bits
BYTES_SIZES = range(1, 33)  # bytes1 .. bytes32
UINT_TYPES = [f"uint{i}" for i in INT_SIZES]
INT_TYPES = [f"int{i}" for i in INT_SIZES]
# NOTE(review): "bytes32.byte" looks like a legacy special case — confirm.
BYTES_TYPES = [f"bytes{i}" for i in BYTES_SIZES] + ["bytes32.byte"]

STATIC_TYPES = list(
    itertools.chain(
        ["address", "bool"],
        UINT_TYPES,
        INT_TYPES,
        BYTES_TYPES,
    )
)

# Alternation over every recognized base type; the lookahead prevents e.g.
# "uint8" from matching as a prefix of "uint80".
BASE_TYPE_REGEX = "|".join(
    _type + "(?![a-z0-9])" for _type in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)
)

# One array suffix, sized or not: "[", optional digits, "]".
SUB_TYPE_REGEX = r"\[" "[0-9]*" r"\]"

# Full type grammar: a base type plus any number of array suffixes.
TYPE_REGEX = ("^" "(?:{base_type})" "(?:(?:{sub_type})*)?" "$").format(
    base_type=BASE_TYPE_REGEX,
    sub_type=SUB_TYPE_REGEX,
)
def is_recognized_type(abi_type: TypeStr) -> bool:
    """True when ``abi_type`` matches the full ABI type grammar (TYPE_REGEX)."""
    return bool(re.match(TYPE_REGEX, abi_type))


def is_bool_type(abi_type: TypeStr) -> bool:
    """True for the exact type string "bool"."""
    return abi_type == "bool"


def is_uint_type(abi_type: TypeStr) -> bool:
    """True for sized unsigned integer types ("uint8" .. "uint256")."""
    return abi_type in UINT_TYPES


def is_int_type(abi_type: TypeStr) -> bool:
    """True for sized signed integer types ("int8" .. "int256")."""
    return abi_type in INT_TYPES


def is_address_type(abi_type: TypeStr) -> bool:
    """True for the exact type string "address"."""
    return abi_type == "address"


def is_bytes_type(abi_type: TypeStr) -> bool:
    """True for sized bytes types plus the dynamic "bytes" type."""
    return abi_type in BYTES_TYPES + ["bytes"]


def is_string_type(abi_type: TypeStr) -> bool:
    """True for the exact type string "string"."""
    return abi_type == "string"


@curry
def is_length(target_length: int, value: abc.Sized) -> bool:
    """Curried predicate: does ``value`` have length ``target_length``?"""
    return len(value) == target_length
def size_of_type(abi_type: TypeStr) -> int:
    """
    Returns size in bits of abi_type.

    ``None`` is returned for string, bytes, and array types, which have no
    single fixed bit size.
    """
    if any(marker in abi_type for marker in ("string", "byte", "[")):
        return None
    fixed_sizes = {"bool": 8, "address": 160}
    if abi_type in fixed_sizes:
        return fixed_sizes[abi_type]
    # Remaining types are sized ints/uints: the digits are the bit width.
    return int(re.sub(r"\D", "", abi_type))
# Matches the final "[...]" suffix of an array type, e.g. "[3]" in "uint8[2][3]".
END_BRACKETS_OF_ARRAY_TYPE_REGEX = r"\[[^]]*\]$"


def sub_type_of_array_type(abi_type: TypeStr) -> str:
    """Strip the final array dimension: "uint8[2][3]" -> "uint8[2]"."""
    if not is_array_type(abi_type):
        raise Web3ValueError(f"Cannot parse subtype of nonarray abi-type: {abi_type}")

    return re.sub(END_BRACKETS_OF_ARRAY_TYPE_REGEX, "", abi_type, count=1)


def length_of_array_type(abi_type: TypeStr) -> int:
    """Return the declared length of the final array dimension.

    ``None`` is returned for dynamically sized arrays such as "uint8[]".
    """
    if not is_array_type(abi_type):
        raise Web3ValueError(f"Cannot parse length of nonarray abi-type: {abi_type}")

    inner_brackets = (
        re.search(END_BRACKETS_OF_ARRAY_TYPE_REGEX, abi_type).group(0).strip("[]")
    )
    if not inner_brackets:
        return None
    else:
        return int(inner_brackets)


# A name followed by one or more array suffixes.
ARRAY_REGEX = ("^" "[a-zA-Z0-9_]+" "({sub_type})+" "$").format(sub_type=SUB_TYPE_REGEX)


def is_array_type(abi_type: TypeStr) -> bool:
    """True when the type string ends in one or more array suffixes."""
    return bool(re.match(ARRAY_REGEX, abi_type))
# A valid identifier: leading letter/underscore, then letters/digits/underscores.
NAME_REGEX = "[a-zA-Z_][a-zA-Z0-9_]*"

# Exactly two identifiers joined by a single dot, e.g. "MyLibrary.MyEnum".
ENUM_REGEX = r"^{lib}\.{enum}$".format(lib=NAME_REGEX, enum=NAME_REGEX)


def is_probably_enum(abi_type: TypeStr) -> bool:
    """Heuristic: a type shaped like ``Library.Name`` is likely an enum."""
    return re.match(ENUM_REGEX, abi_type) is not None
@to_tuple
def normalize_event_input_types(
    abi_args: Collection[ABIEvent],
) -> Iterable[ABIEvent | dict[TypeStr, Any]]:
    """Rewrite probable enum input types to "uint8"; pass everything else through."""
    for arg in abi_args:
        if is_recognized_type(arg["type"]):
            yield arg
        elif is_probably_enum(arg["type"]):
            # Replace only the "type" value, keeping all other keys intact.
            yield {k: "uint8" if k == "type" else v for k, v in arg.items()}
        else:
            yield arg
########################################################
#
# Conditionally modifying data, tagged with ABI Types
#
########################################################
@curry
def map_abi_data(
    normalizers: Iterable[Callable[[TypeStr, Any], tuple[TypeStr, Any]]],
    types: Iterable[TypeStr],
    data: Iterable[Any],
) -> Any:
    """
    Applies normalizers to your data, in the context of the relevant types.

    Each normalizer is in the format:

        def normalizer(datatype, data):
            # Conditionally modify data
            return (datatype, data)

    Where datatype is a valid ABI type string, like "uint".

    In case of an array, like "bool[2]", normalizer will receive `data`
    as an iterable of typed data, like `[("bool", True), ("bool", False)]`.

    Internals
    ---
    This is accomplished by:

    1. Decorating the data tree with types
    2. Recursively mapping each of the normalizers to the data
    3. Stripping the types back out of the tree
    """
    # @curry allows partial application, e.g. map_abi_data(normalizers, types).
    return pipe(
        data,
        # 1. Decorating the data tree with types
        abi_data_tree(types),
        # 2. Recursively mapping each of the normalizers to the data
        *map(data_tree_map, normalizers),
        # 3. Stripping the types back out of the tree
        strip_abi_types,
    )
@curry
def abi_data_tree(
    types: Iterable[TypeStr], data: Iterable[Any]
) -> list["ABITypedData"]:
    """
    Decorate the data tree with pairs of (type, data). The pair tuple is actually an
    ABITypedData, but can be accessed as a tuple.

    As an example:

    >>> abi_data_tree(types=["bool[2]", "uint"], data=[[True, False], 0])
    [("bool[2]", [("bool", True), ("bool", False)]), ("uint256", 0)]
    """
    # Pairs each top-level type with its datum; abi_sub_tree recurses into arrays.
    return list(map(abi_sub_tree, types, data))


@curry
def data_tree_map(
    func: Callable[[TypeStr, Any], tuple[TypeStr, Any]], data_tree: Any
) -> "ABITypedData":
    """
    Map func to every ABITypedData element in the tree. func will
    receive two args: abi_type, and data
    """

    def map_to_typed_data(elements: Any) -> "ABITypedData":
        # Untyped leaves pass through unchanged.
        if isinstance(elements, ABITypedData) and elements.abi_type is not None:
            return ABITypedData(func(*elements))
        else:
            return elements

    return recursive_map(map_to_typed_data, data_tree)
|
TextStringEncoder
|
python
|
wandb__wandb
|
tests/system_tests/test_api/conftest.py
|
{
"start": 243,
"end": 867
}
|
class ____(http.server.SimpleHTTPRequestHandler):
    """HTTP handler that serves parquet files from memory.

    Populate ``parquet_files`` (request path without leading slash -> raw
    bytes) before starting the server.
    """

    # Shared class-level registry of in-memory files.
    parquet_files: dict[str, bytes] = {}

    def do_GET(self):  # noqa: N802
        path = self.path.lstrip("/")
        content = self.parquet_files.get(path)
        if content is not None:
            self.send_response(200)
            # Previously the 200 path sent no headers while the 404 path set
            # Content-Type; label the body and advertise its size so clients
            # (and keep-alive connections) can frame the response.
            self.send_header("Content-Type", "application/octet-stream")
            self.send_header("Content-Length", str(len(content)))
            self.end_headers()
            self.wfile.write(content)
        else:
            body = b"File not found"
            self.send_response(404)
            self.send_header("Content-Type", "text/plain")
            self.send_header("Content-Length", str(len(body)))
            self.end_headers()
            self.wfile.write(body)
|
ParquetFileHandler
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/queue.py
|
{
"start": 1287,
"end": 1381
}
|
class ____(Exception):
    """Exception raised by Queue.put(block=0)/put_nowait()."""

    pass
|
Full
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_date05.py
|
{
"start": 342,
"end": 1961
}
|
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_date05.xlsx")

        # Excel may serialize the number format slightly differently; exclude
        # the <c:formatCode> element from the XML comparison.
        self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "line"})
        date_format = workbook.add_format({"num_format": 14})

        # Pin axis IDs so the generated XML matches the reference file.
        chart.axis_ids = [45937408, 45939328]

        worksheet.set_column("A:A", 12)

        dates = [
            date(2013, 1, 1),
            date(2013, 1, 2),
            date(2013, 1, 3),
            date(2013, 1, 4),
            date(2013, 1, 5),
            date(2013, 1, 6),
            date(2013, 1, 7),
            date(2013, 1, 8),
            date(2013, 1, 9),
            date(2013, 1, 10),
        ]

        values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]

        worksheet.write_column("A1", dates, date_format)
        worksheet.write_column("B1", values)

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$10",
                "values": "=Sheet1!$B$1:$B$10",
            }
        )

        chart.set_x_axis(
            {
                "text_axis": True,
                "num_format": "dd/mm/yyyy",
                "num_format_linked": True,
            }
        )

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py
|
{
"start": 8640,
"end": 10586
}
|
class ____(Serializable):
    """Map an example, or row in the dataset, to the inputs of an evaluation."""

    # Key within ``example.outputs`` holding the reference label; when None,
    # the example must have exactly one output.
    reference_key: str | None = None

    @property
    def output_keys(self) -> list[str]:
        """The keys this mapper produces."""
        return ["reference"]

    def serialize_chat_messages(self, messages: list[dict]) -> str:
        """Serialize chat-message dicts into a single buffer string."""
        chat_messages = _get_messages_from_run_dict(messages)
        return get_buffer_string(chat_messages)

    def map(self, example: Example) -> dict[str, str]:
        """Maps the Example, or dataset row to a dictionary.

        Raises ValueError when the example has no outputs, has multiple
        outputs without ``reference_key`` set, or is missing that key.
        """
        if not example.outputs:
            msg = f"Example {example.id} has no outputs to use as a reference."
            raise ValueError(msg)
        if self.reference_key is None:
            if len(example.outputs) > 1:
                msg = (
                    f"Example {example.id} has multiple outputs, so you must"
                    " specify a reference_key."
                )
                raise ValueError(msg)
            output = next(iter(example.outputs.values()))
        elif self.reference_key not in example.outputs:
            msg = (
                f"Example {example.id} does not have reference key"
                f" {self.reference_key}."
            )
            raise ValueError(msg)
        else:
            output = example.outputs[self.reference_key]
        # Chat-message payloads (dicts with "type" and "data") are flattened
        # to a buffer string; anything else is passed through unchanged.
        return {
            "reference": self.serialize_chat_messages([output])
            if isinstance(output, dict) and output.get("type") and output.get("data")
            else output,
        }

    def __call__(self, example: Example) -> dict[str, str]:
        """Maps the Run and Example to a dictionary."""
        if not example.outputs:
            # Fixed message typo: was "as areference label."
            msg = f"Example {example.id} has no outputs to use as a reference label."
            raise ValueError(msg)
        return self.map(example)
|
StringExampleMapper
|
python
|
doocs__leetcode
|
solution/1100-1199/1130.Minimum Cost Tree From Leaf Values/Solution3.py
|
{
"start": 0,
"end": 493
}
|
class ____:
    def mctFromLeafValues(self, arr: List[int]) -> int:
        """Interval DP over leaf ranges.

        cost[l][r] is the minimal sum of non-leaf node values for the subtree
        built from leaves arr[l..r]; peak[l][r] is the largest leaf in that
        range (each internal node's value is the product of the peaks of its
        two child ranges).
        """
        n = len(arr)
        cost = [[0] * n for _ in range(n)]
        peak = [[0] * n for _ in range(n)]
        for left in range(n - 1, -1, -1):
            peak[left][left] = arr[left]
            for right in range(left + 1, n):
                peak[left][right] = max(peak[left][right - 1], arr[right])
                cost[left][right] = min(
                    cost[left][split]
                    + cost[split + 1][right]
                    + peak[left][split] * peak[split + 1][right]
                    for split in range(left, right)
                )
        return cost[0][n - 1]
|
Solution
|
python
|
Lightning-AI__lightning
|
examples/pytorch/servable_module/production.py
|
{
"start": 2120,
"end": 2697
}
|
class ____:
    # Optional resize target: when BOTH height and width are set, the decoded
    # image is resized before tensor conversion.
    height: Optional[int] = None
    width: Optional[int] = None
    extension: str = "JPEG"
    mode: str = "RGB"
    channel_first: bool = False

    def deserialize(self, data: str) -> torch.Tensor:
        """Decode a base64-encoded image string into a float tensor with a
        leading batch dimension (via ToTensor + unsqueeze(0))."""
        # Appending "===" pads the payload to a multiple of 4 so b64decode
        # accepts input whose original padding was stripped.
        encoded_with_padding = (data + "===").encode("UTF-8")
        img = base64.b64decode(encoded_with_padding)
        buffer = BytesIO(img)
        img = PILImage.open(buffer, mode="r")
        if self.height and self.width:
            # PIL's resize takes (width, height) — order is intentional here.
            img = img.resize((self.width, self.height))
        arr = np.array(img)
        return T.ToTensor()(arr).unsqueeze(0)
|
Image
|
python
|
cherrypy__cherrypy
|
cherrypy/test/test_core.py
|
{
"start": 30247,
"end": 30898
}
|
class ____:
    """Tests for server socket-binding behavior."""

    def test_bind_ephemeral_port(self):
        """
        A server configured to bind to port 0 will bind to an ephemeral
        port and indicate that port number on startup.
        """
        cherrypy.config.reset()
        bind_ephemeral_conf = {
            'server.socket_port': 0,
        }
        cherrypy.config.update(bind_ephemeral_conf)
        cherrypy.engine.start()
        # While running, bound_addr carries the OS-assigned port, so it
        # differs from the configured bind_addr (which requested port 0).
        assert cherrypy.server.bound_addr != cherrypy.server.bind_addr
        _host, port = cherrypy.server.bound_addr
        assert port > 0
        cherrypy.engine.stop()
        # After stop, the resolved ephemeral address is discarded again.
        assert cherrypy.server.bind_addr == cherrypy.server.bound_addr
|
TestBinding
|
python
|
joke2k__faker
|
tests/providers/test_address.py
|
{
"start": 101422,
"end": 103017
}
|
class ____:
    """Test fr_CA address provider methods.

    Each test draws ``num_samples`` random values from the faker fixture and
    checks type plus membership in the provider's source lists.
    """

    def test_province(self, faker, num_samples):
        for _ in range(num_samples):
            province = faker.province()
            assert isinstance(province, str)
            assert province in FrCaAddressProvider.provinces

    def test_province_abbr(self, faker, num_samples):
        for _ in range(num_samples):
            province_abbr = faker.province_abbr()
            assert isinstance(province_abbr, str)
            assert province_abbr in FrCaAddressProvider.provinces_abbr

    def test_city_prefixes(self, faker, num_samples):
        for _ in range(num_samples):
            city_prefix = faker.city_prefix()
            assert isinstance(city_prefix, str)
            assert city_prefix in FrCaAddressProvider.city_prefixes

    def test_city_suffixes(self, faker, num_samples):
        for _ in range(num_samples):
            city_suffixes = faker.city_suffix()
            assert isinstance(city_suffixes, str)
            assert city_suffixes in FrCaAddressProvider.city_suffixes

    def test_street_prefixes(self, faker, num_samples):
        for _ in range(num_samples):
            street_prefix = faker.street_prefix()
            assert isinstance(street_prefix, str)
            assert street_prefix in FrCaAddressProvider.street_prefixes

    def test_administrative_unit(self, faker, num_samples):
        # administrative_unit is an alias over the same provinces list.
        for _ in range(num_samples):
            province = faker.administrative_unit()
            assert isinstance(province, str)
            assert province in FrCaAddressProvider.provinces
|
TestFrCa
|
python
|
spyder-ide__spyder
|
spyder/plugins/projects/widgets/qcookiecutter.py
|
{
"start": 2720,
"end": 14021
}
|
class ____(QtWidgets.QWidget):
"""
QWidget to display cookiecutter.json options.
cookiecutter_settings: dict
A cookiecutter.json settings content.
pre_gen_code: str
The code of the pregeneration script.
"""
sig_validated = QtCore.Signal(int, str)
"""
This signal is emitted after validation has been executed.
It provides the process exit code and the output captured.
"""
def __init__(self, parent, cookiecutter_settings=None, pre_gen_code=None):
super().__init__(parent)
# Attributes
self._parent = parent
self._cookiecutter_settings = cookiecutter_settings
self._pre_gen_code = pre_gen_code
self._widgets = OrderedDict()
self._defined_settings = OrderedDict()
self._rendered_settings = OrderedDict()
self._process = None
self._tempfile = tempfile.mkstemp(suffix=".py")[-1]
# Cookiecutter special variables
self._extensions = None
self._copy_without_render = None
self._new_lines = None
self._private_vars = None
self._rendered_private_var = None
# Layout
self._form_layout = QtWidgets.QFormLayout()
self._form_layout.setFieldGrowthPolicy(
self._form_layout.AllNonFixedFieldsGrow)
self.setLayout(self._form_layout)
# --- Helpers
# ------------------------------------------------------------------------
def _check_jinja_options(self):
"""
Check which values are Jinja2 expressions.
"""
if self._cookiecutter_settings:
# https://cookiecutter.readthedocs.io/en/latest/advanced/template_extensions.html
self._extensions = self._cookiecutter_settings.pop("_extensions",
[])
# https://cookiecutter.readthedocs.io/en/latest/advanced/copy_without_render.html
self._copy_without_render = self._cookiecutter_settings.pop(
"_copy_without_render", [])
# https://cookiecutter.readthedocs.io/en/latest/advanced/new_line_characters.html
self._new_lines = self._cookiecutter_settings.pop("_new_lines", "")
for setting, value in self._cookiecutter_settings.items():
# Treat everything like a list for convenience
if isinstance(value, dict):
# https://cookiecutter.readthedocs.io/en/latest/advanced/dict_variables.html
list_values = list(value.keys())
elif not isinstance(value, list):
list_values = [value]
else:
list_values = value
are_rendered_values = []
if list_values and value:
for list_value in list_values:
template = Template(list_value)
rendered_value = template.render(
cookiecutter=Namespace(
**self._cookiecutter_settings))
are_rendered_values.append(
list_value != rendered_value)
if any(are_rendered_values):
self._rendered_settings[setting] = value
else:
self._defined_settings[setting] = value
def _is_jinja(self, setting):
"""
Check if option contains jinja2 code.
"""
return setting in self._rendered_settings
def _parse_bool_text(self, text):
"""
Convert a text value into a boolean.
"""
value = None
if text.lower() in ["n", "no", "false"]:
value = False
elif text.lower() in ["y", "yes", "true"]:
value = True
return value
    def _create_textbox(self, setting, label, default=None):
        """
        Create a textbox field.

        Long defaults (> 30 chars) get a multi-line ``QTextEdit``, short ones
        a single-line ``QLineEdit``; both are normalized to the same
        ``setText``/``text``/``get_value``/``set_value`` interface.
        """
        if default is not None and len(default) > 30:
            box = QtWidgets.QTextEdit(parent=self)
            # Alias the QTextEdit API so it matches QLineEdit's.
            box.setText = box.setPlainText
            box.text = box.toPlainText
        else:
            box = QtWidgets.QLineEdit(parent=self)

        box.setting = setting
        if default is not None:
            box.setText(default)

        # Re-render Jinja2-driven fields whenever the user types.
        box.textChanged.connect(lambda x=None: self.render())
        box.get_value = box.text
        box.set_value = lambda text: box.setText(text)
        return box
    def _create_checkbox(self, setting, label, default=None):
        """
        Create a checkbox field.

        ``get_value`` reports the original text value (or an "other-value-"
        marker for the opposite state) instead of a boolean, so callers keep
        working with cookiecutter's y/n style strings.
        """
        box = QtWidgets.QCheckBox(parent=self)
        box.setting = setting
        if default is not None:
            new_default = self._parse_bool_text(default)
            box.setChecked(new_default)

        def _get_value():
            # NOTE(review): `default` can be None here, in which case
            # _parse_bool_text(None) would raise when get_value is called —
            # confirm callers always pass a default for checkbox settings.
            bool_to_values = {
                self._parse_bool_text(default): default,
                not self._parse_bool_text(default): "other-value-" + default
            }
            return bool_to_values[box.isChecked()]

        box.get_value = _get_value
        return box
def _create_combobox(self, setting, label, choices, default=None):
"""
Create a combobox field.
"""
box = SpyderComboBox(parent=self)
if isinstance(choices, dict):
temp = OrderedDict()
for choice, choice_value in choices.items():
box.addItem(choice, {choice: choice_value})
else:
for choice in choices:
box.addItem(choice, choice)
box.setting = setting
box.get_value = box.currentData
return box
def _create_field(self, setting, value):
"""
Create a form field.
"""
label = " ".join(setting.split("_")).capitalize()
if isinstance(value, (list, dict)):
# https://cookiecutter.readthedocs.io/en/latest/advanced/choice_variables.html
widget = self._create_combobox(setting, label, value)
elif isinstance(value, str):
if value.lower() in ["y", "yes", "true", "n", "no", "false"]:
widget = self._create_checkbox(setting, label, default=value)
else:
default = None if self._is_jinja(setting) else value
widget = self._create_textbox(setting, label, default=default)
else:
raise Exception(
"Cookiecutter option '{}'cannot be processed".format(setting))
self._widgets[setting] = (label, widget)
return label, widget
    def _on_process_finished(self):
        """
        Process output of the validation (pre_gen_project) script.

        Emits ``sig_validated`` with the process exit code and a single-line
        message combining stdout and stderr.
        """
        if self._process is not None:
            out = bytes(self._process.readAllStandardOutput()).decode()
            error = bytes(self._process.readAllStandardError()).decode()
            message = ""
            if out:
                message += out
            if error:
                message += error

            # Collapse newlines so the message fits on one status line.
            message = message.replace("\r\n", " ")
            message = message.replace("\n", " ")
            self.sig_validated.emit(self._process.exitCode(), message)
# --- API
# ------------------------------------------------------------------------
def setup(self, cookiecutter_settings):
"""
Setup the widget using options.
"""
self._cookiecutter_settings = cookiecutter_settings
self._check_jinja_options()
for setting, value in self._cookiecutter_settings.items():
if not setting.startswith(("__", "_")):
label, widget = self._create_field(setting, value)
self._form_layout.addRow(label, widget)
self.render()
def set_pre_gen_code(self, pre_gen_code):
"""
Set the cookiecutter pregeneration code.
"""
self._pre_gen_code = pre_gen_code
    def render(self):
        """
        Render text that contains Jinja2 expressions and set their values.

        Re-evaluates every setting classified as "rendered" by
        ``_check_jinja_options`` against the current form values and pushes
        the result into the corresponding widget.
        """
        cookiecutter_settings = self.get_values()
        for setting, value in self._rendered_settings.items():
            if not setting.startswith(("__", "_")):
                template = Template(value)
                val = template.render(
                    cookiecutter=Namespace(**cookiecutter_settings))

                __, widget = self._widgets[setting]
                widget.set_value(val)
def get_values(self):
"""
Return all entered and generated values.
"""
cookiecutter_settings = cs = OrderedDict()
if self._cookiecutter_settings:
for setting, value in self._cookiecutter_settings.items():
if setting.startswith(("__", "_")):
cookiecutter_settings[setting] = value
else:
__, widget = self._widgets[setting]
cookiecutter_settings[setting] = widget.get_value()
# Cookiecutter special variables
cookiecutter_settings["_extensions"] = self._extensions
cookiecutter_settings["_copy_without_render"] = (
self._copy_without_render)
cookiecutter_settings["_new_lines"] = self._new_lines
return cookiecutter_settings
    def validate(self):
        """
        Run the pre generation script and provide information on finished.

        Renders ``self._pre_gen_code`` with the current form values, writes
        it to the instance's temporary file and executes it with the current
        Python interpreter in a ``QProcess``; results are reported through
        ``_on_process_finished``.
        """
        if self._pre_gen_code is not None:
            cookiecutter_settings = self.get_values()
            template = Template(self._pre_gen_code)
            val = template.render(
                cookiecutter=Namespace(**cookiecutter_settings))

            with open(self._tempfile, "w") as fh:
                fh.write(val)

            if self._process is not None:
                # Stop any still-running previous validation first.
                self._process.close()
                self._process.waitForFinished(1000)

            self._process = QtCore.QProcess(self)
            self._process.setProgram(sys.executable)
            self._process.setArguments([self._tempfile])
            self._process.finished.connect(self._on_process_finished)
            self._process.start()
if __name__ == "__main__":
    # Manual smoke test: show the dialog with one example of every supported
    # cookiecutter option type (choice list, booleans, fixed/rendered
    # strings, dict variable and private/rendered-private options), plus a
    # pre-generation script that deliberately fails with exit code 10.
    from spyder.utils.qthelpers import qapplication

    app = qapplication()
    dlg = CookiecutterDialog(parent=None)
    dlg.setup(
        {
            "list_option": ["1", "2", "3"],
            "checkbox_option": "y",
            "checkbox_option_2": "false",
            "fixed_option": "goanpeca",
            "rendered_option": "{{ cookiecutter.fixed_option|upper }}",
            "dict_option": {
                "png": {
                    "name": "Portable Network Graphic",
                    "library": "libpng",
                    "apps": [
                        "GIMP"
                    ]
                },
                "bmp": {
                    "name": "Bitmap",
                    "library": "libbmp",
                    "apps": [
                        "Paint",
                        "GIMP"
                    ]
                }
            },
            "_private": "{{ cookiecutter.fixed_option }}",
            "__private_rendered": "{{ cookiecutter.fixed_option }}",
        }
    )
    dlg.set_pre_gen_code('''
import sys
print("HELP!")  # spyder: test-skip
sys.exit(10)''')
    dlg.show()
    sys.exit(app.exec_())
|
CookiecutterWidget
|
python
|
rushter__MLAlgorithms
|
mla/naive_bayes.py
|
{
"start": 119,
"end": 1899
}
|
class NaiveBayesClassifier(BaseEstimator):
    """Gaussian Naive Bayes for binary targets."""

    # Binary problem.
    n_classes = 2

    def fit(self, X, y=None):
        self._setup_input(X, y)
        # Only binary labels {0, 1} are supported.
        assert list(np.unique(y)) == [0, 1]

        # Per (class, feature) Gaussian parameters plus per-class priors.
        shape = (self.n_classes, self.n_features)
        self._mean = np.zeros(shape, dtype=np.float64)
        self._var = np.zeros(shape, dtype=np.float64)
        self._priors = np.zeros(self.n_classes, dtype=np.float64)

        total_rows = float(X.shape[0])
        for label in range(self.n_classes):
            # Rows belonging to the current class.
            class_rows = X[y == label]
            self._mean[label, :] = class_rows.mean(axis=0)
            self._var[label, :] = class_rows.var(axis=0)
            self._priors[label] = class_rows.shape[0] / total_rows

    def _predict(self, X=None):
        # Joint log-likelihood per row, normalized into probabilities that
        # sum to 1.0.
        log_joint = np.apply_along_axis(self._predict_row, 1, X)
        return softmax(log_joint)

    def _predict_row(self, x):
        """Return the joint log-likelihood of *x* for every class."""
        scores = []
        for label in range(self.n_classes):
            log_prior = np.log(self._priors[label])
            log_likelihood = np.log(self._pdf(label, x)).sum()
            scores.append(log_prior + log_likelihood)
        return scores

    def _pdf(self, n_class, x):
        """Gaussian probability density of *x* under class *n_class*."""
        mean = self._mean[n_class]
        var = self._var[n_class]

        numerator = np.exp(-((x - mean) ** 2) / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator
|
NaiveBayesClassifier
|
python
|
django__django
|
django/template/library.py
|
{
"start": 10678,
"end": 11244
}
|
class SimpleBlockNode(SimpleNode):
    def __init__(self, nodelist, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.nodelist = nodelist

    def get_resolved_arguments(self, context):
        resolved_args, resolved_kwargs = super().get_resolved_arguments(context)
        # Re-insert the rendered block contents as the "content" argument.
        # Its position shifts by one when takes_context is set, because the
        # context is then the first argument.
        insert_at = 1 if self.takes_context else 0
        resolved_args.insert(insert_at, self.nodelist.render(context))
        return resolved_args, resolved_kwargs
|
SimpleBlockNode
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/ops/iterator_ops.py
|
{
"start": 23463,
"end": 26806
}
|
class IteratorBase(
    collections_abc.Iterator,
    trackable.Trackable,
    composite_tensor.CompositeTensor,
    metaclass=abc.ABCMeta):
  """Represents an iterator of a `tf.data.Dataset`.

  `tf.data.Iterator` is the primary mechanism for enumerating elements of a
  `tf.data.Dataset`. It supports the Python Iterator protocol, which means
  it can be iterated over using a for-loop:

  >>> dataset = tf.data.Dataset.range(2)
  >>> for element in dataset:
  ...   print(element)
  tf.Tensor(0, shape=(), dtype=int64)
  tf.Tensor(1, shape=(), dtype=int64)

  or by fetching individual elements explicitly via `get_next()`:

  >>> dataset = tf.data.Dataset.range(2)
  >>> iterator = iter(dataset)
  >>> print(iterator.get_next())
  tf.Tensor(0, shape=(), dtype=int64)
  >>> print(iterator.get_next())
  tf.Tensor(1, shape=(), dtype=int64)

  In addition, non-raising iteration is supported via `get_next_as_optional()`,
  which returns the next element (if available) wrapped in a
  `tf.experimental.Optional`.

  >>> dataset = tf.data.Dataset.from_tensors(42)
  >>> iterator = iter(dataset)
  >>> optional = iterator.get_next_as_optional()
  >>> print(optional.has_value())
  tf.Tensor(True, shape=(), dtype=bool)
  >>> optional = iterator.get_next_as_optional()
  >>> print(optional.has_value())
  tf.Tensor(False, shape=(), dtype=bool)
  """

  # `abc.abstractproperty` is deprecated; `@property` stacked on
  # `@abc.abstractmethod` is the documented equivalent.
  @property
  @abc.abstractmethod
  def element_spec(self):
    """The type specification of an element of this iterator.

    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> iterator.element_spec
    tf.TensorSpec(shape=(), dtype=tf.int32, name=None)

    For more information,
    read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).

    Returns:
      A (nested) structure of `tf.TypeSpec` objects matching the structure of an
      element of this iterator, specifying the type of individual components.
    """
    raise NotImplementedError("Iterator.element_spec")

  @abc.abstractmethod
  def get_next(self):
    """Returns the next element.

    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> print(iterator.get_next())
    tf.Tensor(42, shape=(), dtype=int32)

    Returns:
      A (nested) structure of values matching `tf.data.Iterator.element_spec`.

    Raises:
      `tf.errors.OutOfRangeError`: If the end of the iterator has been reached.
    """
    raise NotImplementedError("Iterator.get_next()")

  @abc.abstractmethod
  def get_next_as_optional(self):
    """Returns the next element wrapped in `tf.experimental.Optional`.

    If the iterator has reached the end of the sequence, the returned
    `tf.experimental.Optional` will have no value.

    >>> dataset = tf.data.Dataset.from_tensors(42)
    >>> iterator = iter(dataset)
    >>> optional = iterator.get_next_as_optional()
    >>> print(optional.has_value())
    tf.Tensor(True, shape=(), dtype=bool)
    >>> print(optional.get_value())
    tf.Tensor(42, shape=(), dtype=int32)
    >>> optional = iterator.get_next_as_optional()
    >>> print(optional.has_value())
    tf.Tensor(False, shape=(), dtype=bool)

    Returns:
      A `tf.experimental.Optional` object representing the next element.
    """
    raise NotImplementedError("Iterator.get_next_as_optional()")
@saveable_compat.legacy_saveable_name("ITERATOR")
|
IteratorBase
|
python
|
tornadoweb__tornado
|
tornado/test/routing_test.py
|
{
"start": 7823,
"end": 8827
}
|
class WSGIContainerTestCase(AsyncHTTPTestCase):
    def get_app(self):
        wsgi_app = WSGIContainer(self.wsgi_app)

        class Handler(RequestHandler):
            def get(self, *args, **kwargs):
                self.finish(self.reverse_url("tornado"))

        tornado_app = Application([(r"/tornado/test", Handler, {}, "tornado")])
        rules = [
            (PathMatches("/tornado.*"), tornado_app),
            (PathMatches("/wsgi"), wsgi_app),
        ]
        return RuleRouter(rules)

    def wsgi_app(self, environ, start_response):
        start_response("200 OK", [])
        return [b"WSGI"]

    def test_wsgi_container(self):
        # Tornado branch echoes its own reversed URL; WSGI branch answers
        # with a fixed body.
        tornado_response = self.fetch("/tornado/test")
        self.assertEqual(tornado_response.body, b"/tornado/test")
        wsgi_response = self.fetch("/wsgi")
        self.assertEqual(wsgi_response.body, b"WSGI")

    def test_delegate_not_found(self):
        self.assertEqual(self.fetch("/404").code, 404)
|
WSGIContainerTestCase
|
python
|
pytorch__pytorch
|
torch/_subclasses/complex_tensor/_core.py
|
{
"start": 4692,
"end": 5074
}
|
class Complex(Function):
    """Autograd ``Function`` that packs two real tensors into a ``ComplexTensor``."""

    @staticmethod
    def forward(ctx: FunctionCtx, real: Tensor, imag: Tensor) -> ComplexTensor:  # type: ignore[bad-override]
        return ComplexTensor(real, imag)

    @staticmethod
    def backward(ctx: FunctionCtx, grad_output: ComplexTensor) -> tuple[Tensor, Tensor]:  # type: ignore[bad-override]
        # The mapping (real, imag) -> ComplexTensor is the identity on each
        # component, so the gradient just splits back into its parts.
        return grad_output.real, grad_output.imag
|
Complex
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-reddit/llama_index/readers/reddit/base.py
|
{
"start": 226,
"end": 1924
}
|
class RedditReader(BaseReader):
    """
    Subreddit post and top-level comments reader for Reddit.
    """

    def load_data(
        self,
        subreddits: List[str],
        search_keys: List[str],
        # Fixed: default was the mutable list `[10]`, contradicting both the
        # declared int type and the documented default of 10.
        post_limit: Optional[int] = 10,
    ) -> List[Document]:
        """
        Load text from relevant posts and top-level comments in subreddit(s), given keyword(s) for search.

        Args:
            subreddits (List[str]): List of subreddits you'd like to read from
            search_keys (List[str]): List of keywords you'd like to use to search from subreddit(s)
            post_limit (Optional[int]): Maximum number of posts per subreddit you'd like to read from, defaults to 10

        """
        import os

        import praw
        from praw.models import MoreComments

        # Credentials come from the environment so they never live in code.
        reddit = praw.Reddit(
            client_id=os.getenv("REDDIT_CLIENT_ID"),
            client_secret=os.getenv("REDDIT_CLIENT_SECRET"),
            user_agent=os.getenv("REDDIT_USER_AGENT"),
            username=os.getenv("REDDIT_USERNAME"),
            password=os.getenv("REDDIT_PASSWORD"),
        )

        posts = []

        for sr in subreddits:
            ml_subreddit = reddit.subreddit(sr)
            for kw in search_keys:
                relevant_posts = ml_subreddit.search(kw, limit=post_limit)
                for post in relevant_posts:
                    posts.append(Document(text=post.selftext))
                    for top_level_comment in post.comments:
                        # Skip "load more comments" placeholders.
                        if isinstance(top_level_comment, MoreComments):
                            continue
                        posts.append(Document(text=top_level_comment.body))

        return posts
|
RedditReader
|
python
|
django__django
|
tests/file_storage/tests.py
|
{
"start": 2246,
"end": 23501
}
|
class FileStorageTests(SimpleTestCase):
    """Exercise the FileSystemStorage backend against a temp directory."""

    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        self.storage = self.storage_class(
            location=self.temp_dir, base_url="/test_media_url/"
        )

    def test_empty_location(self):
        """
        Makes sure an exception is raised if the location is empty
        """
        storage = self.storage_class(location="")
        self.assertEqual(storage.base_location, "")
        self.assertEqual(storage.location, os.getcwd())

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists("storage_test"))
        f = self.storage.open("storage_test", "w")
        f.write("storage contents")
        f.close()
        self.assertTrue(self.storage.exists("storage_test"))

        f = self.storage.open("storage_test", "r")
        self.assertEqual(f.read(), "storage contents")
        f.close()

        self.storage.delete("storage_test")
        self.assertFalse(self.storage.exists("storage_test"))

    def _test_file_time_getter(self, getter):
        # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
        # The tests are similar since they both set up a situation where the
        # system time zone, Django's TIME_ZONE, and UTC are distinct.
        self._test_file_time_getter_tz_handling_on(getter)
        self._test_file_time_getter_tz_handling_off(getter)

    @override_settings(USE_TZ=True, TIME_ZONE="Africa/Algiers")
    def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists("test.file.tz.on"))

            f = ContentFile("custom contents")
            f_name = self.storage.save("test.file.tz.on", f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())

            # The three timezones are indeed distinct.
            naive_now = datetime.datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = datetime.UTC.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and now should be the same effective time.
            self.assertLess(abs(dt - now), datetime.timedelta(seconds=2))

    @override_settings(USE_TZ=False, TIME_ZONE="Africa/Algiers")
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists("test.file.tz.off"))

            f = ContentFile("custom contents")
            f_name = self.storage.save("test.file.tz.off", f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))

            # The three timezones are indeed distinct.
            naive_now = datetime.datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = datetime.UTC.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), datetime.timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), datetime.timedelta(seconds=2))

    def test_file_get_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists("test.file"))

        f = ContentFile("custom contents")
        f_name = self.storage.save("test.file", f)
        self.addCleanup(self.storage.delete, f_name)
        path = self.storage.path(f_name)
        atime = self.storage.get_accessed_time(f_name)

        self.assertAlmostEqual(
            atime,
            datetime.datetime.fromtimestamp(os.path.getatime(path)),
            delta=datetime.timedelta(seconds=1),
        )
        self.assertAlmostEqual(
            atime,
            timezone.now(),
            delta=datetime.timedelta(seconds=1),
        )

    @requires_tz_support
    def test_file_get_accessed_time_timezone(self):
        self._test_file_time_getter(self.storage.get_accessed_time)

    def test_file_get_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists("test.file"))

        f = ContentFile("custom contents")
        f_name = self.storage.save("test.file", f)
        self.addCleanup(self.storage.delete, f_name)
        path = self.storage.path(f_name)
        ctime = self.storage.get_created_time(f_name)

        self.assertAlmostEqual(
            ctime,
            datetime.datetime.fromtimestamp(os.path.getctime(path)),
            delta=datetime.timedelta(seconds=1),
        )
        self.assertAlmostEqual(
            ctime,
            timezone.now(),
            delta=datetime.timedelta(seconds=1),
        )

    @requires_tz_support
    def test_file_get_created_time_timezone(self):
        self._test_file_time_getter(self.storage.get_created_time)

    def test_file_get_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists("test.file"))

        f = ContentFile("custom contents")
        f_name = self.storage.save("test.file", f)
        self.addCleanup(self.storage.delete, f_name)
        path = self.storage.path(f_name)
        mtime = self.storage.get_modified_time(f_name)

        self.assertAlmostEqual(
            mtime,
            datetime.datetime.fromtimestamp(os.path.getmtime(path)),
            delta=datetime.timedelta(seconds=1),
        )
        self.assertAlmostEqual(
            mtime,
            timezone.now(),
            delta=datetime.timedelta(seconds=1),
        )

    @requires_tz_support
    def test_file_get_modified_time_timezone(self):
        self._test_file_time_getter(self.storage.get_modified_time)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists("test.file"))

        f = ContentFile("custom contents")
        f.name = "test.file"

        storage_f_name = self.storage.save(None, f)

        self.assertEqual(storage_f_name, f.name)

        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))

        self.storage.delete(storage_f_name)

    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists("path/to"))
        self.storage.save("path/to/test.file", ContentFile("file saved with path"))

        self.assertTrue(self.storage.exists("path/to"))
        with self.storage.open("path/to/test.file") as f:
            self.assertEqual(f.read(), b"file saved with path")

        self.assertTrue(
            os.path.exists(os.path.join(self.temp_dir, "path", "to", "test.file"))
        )

        self.storage.delete("path/to/test.file")

    @unittest.skipUnless(
        symlinks_supported(), "Must be able to symlink to run this test."
    )
    def test_file_save_broken_symlink(self):
        """A new path is created on save when a broken symlink is supplied."""
        nonexistent_file_path = os.path.join(self.temp_dir, "nonexistent.txt")
        broken_symlink_file_name = "symlink.txt"
        broken_symlink_path = os.path.join(self.temp_dir, broken_symlink_file_name)
        os.symlink(nonexistent_file_path, broken_symlink_path)
        f = ContentFile("some content")
        f_name = self.storage.save(broken_symlink_file_name, f)
        self.assertIs(os.path.exists(os.path.join(self.temp_dir, f_name)), True)

    def test_save_doesnt_close(self):
        with TemporaryUploadedFile("test", "text/plain", 1, "utf8") as file:
            file.write(b"1")
            file.seek(0)
            self.assertFalse(file.closed)
            self.storage.save("path/to/test.file", file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

        file = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1, "utf8")
        with file:
            self.assertFalse(file.closed)
            self.storage.save("path/to/test.file", file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists("test.file"))

        f = ContentFile("custom contents")
        f_name = self.storage.save("test.file", f)

        self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))

        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the web.
        """
        self.assertEqual(
            self.storage.url("test.file"), self.storage.base_url + "test.file"
        )

        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(
            self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
            "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file",
        )
        self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")

        # should translate os path separator(s) to the url path separator
        self.assertEqual(
            self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file"
        )

        # #25905: remove leading slashes from file names to prevent unsafe url
        # output
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")

        self.assertEqual(self.storage.url(None), "/test_media_url/")

    def test_base_url(self):
        """
        File storage returns a url even when its base_url is unset or modified.
        """
        self.storage.base_url = None
        with self.assertRaises(ValueError):
            self.storage.url("test.file")

        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(
            location=self.temp_dir, base_url="/no_ending_slash"
        )
        self.assertEqual(
            storage.url("test.file"), "%s%s" % (storage.base_url, "test.file")
        )

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists("storage_test_1"))
        self.assertFalse(self.storage.exists("storage_test_2"))
        self.assertFalse(self.storage.exists("storage_dir_1"))

        self.storage.save("storage_test_1", ContentFile("custom content"))
        self.storage.save("storage_test_2", ContentFile("custom content"))
        os.mkdir(os.path.join(self.temp_dir, "storage_dir_1"))

        self.addCleanup(self.storage.delete, "storage_test_1")
        self.addCleanup(self.storage.delete, "storage_test_2")

        for directory in ("", Path("")):
            with self.subTest(directory=directory):
                dirs, files = self.storage.listdir(directory)
                self.assertEqual(set(dirs), {"storage_dir_1"})
                self.assertEqual(set(files), {"storage_test_1", "storage_test_2"})

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed
        if they're below the storage location).
        """
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists("..")
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists("/etc/passwd")

    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        temp_dir2 = tempfile.mkdtemp(suffix="aBc")
        self.addCleanup(shutil.rmtree, temp_dir2)
        other_temp_storage = self.storage_class(location=temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = "CaSe_SeNsItIvE"
        file = other_temp_storage.open(mixed_case, "w")
        file.write("storage contents")
        file.close()
        self.assertEqual(
            os.path.join(temp_dir2, mixed_case),
            other_temp_storage.path(mixed_case),
        )
        other_temp_storage.delete(mixed_case)

    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race
        conditions.
        """
        real_makedirs = os.makedirs

        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path, mode=0o777, exist_ok=False):
            if path == os.path.join(self.temp_dir, "normal"):
                real_makedirs(path, mode, exist_ok)
            elif path == os.path.join(self.temp_dir, "raced"):
                real_makedirs(path, mode, exist_ok)
                if not exist_ok:
                    raise FileExistsError()
            elif path == os.path.join(self.temp_dir, "error"):
                raise PermissionError()
            else:
                self.fail("unexpected argument %r" % path)

        try:
            os.makedirs = fake_makedirs

            self.storage.save("normal/test.file", ContentFile("saved normally"))
            with self.storage.open("normal/test.file") as f:
                self.assertEqual(f.read(), b"saved normally")

            self.storage.save("raced/test.file", ContentFile("saved with race"))
            with self.storage.open("raced/test.file") as f:
                self.assertEqual(f.read(), b"saved with race")

            # Exceptions aside from FileExistsError are raised.
            with self.assertRaises(PermissionError):
                self.storage.save("error/test.file", ContentFile("not saved"))
        finally:
            os.makedirs = real_makedirs

    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove

        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, "normal.file"):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, "raced.file"):
                real_remove(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, "error.file"):
                raise PermissionError()
            else:
                self.fail("unexpected argument %r" % path)

        try:
            os.remove = fake_remove

            self.storage.save("normal.file", ContentFile("delete normally"))
            self.storage.delete("normal.file")
            self.assertFalse(self.storage.exists("normal.file"))

            self.storage.save("raced.file", ContentFile("delete with race"))
            self.storage.delete("raced.file")
            # Fixed copy-paste bug: this previously re-checked "normal.file"
            # instead of the file that was just deleted.
            self.assertFalse(self.storage.exists("raced.file"))

            # Exceptions aside from FileNotFoundError are raised.
            self.storage.save("error.file", ContentFile("delete with error"))
            with self.assertRaises(PermissionError):
                self.storage.delete("error.file")
        finally:
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile("chunks fails")

        def failing_chunks():
            raise OSError

        f1.chunks = failing_chunks
        with self.assertRaises(OSError):
            self.storage.save("error.file", f1)

    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        msg = "The name must be given to delete()."
        with self.assertRaisesMessage(ValueError, msg):
            self.storage.delete(None)
        with self.assertRaisesMessage(ValueError, msg):
            self.storage.delete("")

    def test_delete_deletes_directories(self):
        tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
        self.storage.delete(tmp_dir)
        self.assertFalse(os.path.exists(tmp_dir))

    @override_settings(
        MEDIA_ROOT="media_root",
        MEDIA_URL="media_url/",
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties using settings values as defaults should be updated on
        referenced settings change while specified values should be unchanged.
        """
        storage = self.storage_class(
            location="explicit_location",
            base_url="explicit_base_url/",
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            "MEDIA_ROOT": "overridden_media_root",
            "MEDIA_URL": "/overridden_media_url/",
            "FILE_UPLOAD_PERMISSIONS": 0o333,
            "FILE_UPLOAD_DIRECTORY_PERMISSIONS": 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, "explicit_location")
            self.assertIn("explicit_location", storage.location)
            self.assertEqual(storage.base_url, "explicit_base_url/")
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings["MEDIA_ROOT"])
            self.assertIn(settings["MEDIA_ROOT"], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings["MEDIA_URL"])
            self.assertEqual(
                defaults_storage.file_permissions_mode,
                settings["FILE_UPLOAD_PERMISSIONS"],
            )
            self.assertEqual(
                defaults_storage.directory_permissions_mode,
                settings["FILE_UPLOAD_DIRECTORY_PERMISSIONS"],
            )

    def test_file_methods_pathlib_path(self):
        p = Path("test.file")
        self.assertFalse(self.storage.exists(p))
        f = ContentFile("custom contents")
        f_name = self.storage.save(p, f)
        # Storage basic methods.
        self.assertEqual(self.storage.path(p), os.path.join(self.temp_dir, p))
        self.assertEqual(self.storage.size(p), 15)
        self.assertEqual(self.storage.url(p), self.storage.base_url + f_name)
        with self.storage.open(p) as f:
            self.assertEqual(f.read(), b"custom contents")
        self.addCleanup(self.storage.delete, p)
|
FileStorageTests
|
python
|
django__django
|
tests/serializers/models/multi_table.py
|
{
"start": 166,
"end": 401
}
|
class Parent(models.Model):
    # Unique field used as the natural key by natural_key() below.
    parent_data = models.CharField(max_length=30, unique=True)
    parent_m2m = models.ManyToManyField("self")

    objects = ParentManager()

    def natural_key(self):
        """Return the natural key: the unique ``parent_data`` value."""
        return (self.parent_data,)
|
Parent
|
python
|
spack__spack
|
lib/spack/spack/installer.py
|
{
"start": 44127,
"end": 50535
}
|
class ____(Task):
"""Class for representing a build task for a package."""
process_handle: Optional["spack.build_environment.BuildProcess"] = None
started: bool = False
no_op: bool = False
tmpdir = None
backup_dir = None
def start(self):
"""Attempt to use the binary cache to install
requested spec and/or dependency if requested.
Otherwise, start a process for of the requested spec and/or
dependency represented by the BuildTask."""
self.record.start()
if self.install_action == InstallAction.OVERWRITE:
self.tmpdir = tempfile.mkdtemp(dir=os.path.dirname(self.pkg.prefix), prefix=".backup")
self.backup_dir = os.path.join(self.tmpdir, "backup")
os.rename(self.pkg.prefix, self.backup_dir)
assert not self.started, "Cannot start a task that has already been started."
self.started = True
self.start_time = self.start_time or time.time()
install_args = self.request.install_args
unsigned = install_args.get("unsigned")
pkg, pkg_id = self.pkg, self.pkg_id
tests = install_args.get("tests")
pkg.run_tests = tests is True or tests and pkg.name in tests
# Use the binary cache to install if requested,
# save result to be handled in BuildTask.complete()
# TODO: change binary installs to occur in subprocesses rather than the main Spack process
policy = self.install_policy
if policy != "source_only":
if _install_from_cache(pkg, self.explicit, unsigned):
self.success_result = ExecuteResult.SUCCESS
return
elif policy == "cache_only":
self.error_result = spack.error.InstallError(
"No binary found when cache-only was specified", pkg=pkg
)
return
else:
tty.msg(f"No binary for {pkg_id} found: installing from source")
# if there's an error result, don't start a new process, and leave
if self.error_result is not None:
return
# Create stage object now and let it be serialized for the child process. That
# way monkeypatch in tests works correctly.
pkg.stage
self._setup_install_dir(pkg)
# Create a child process to do the actual installation.
self._start_build_process()
def _start_build_process(self):
self.process_handle = spack.build_environment.start_build_process(
self.pkg, build_process, self.request.install_args
)
# Identify the child process
self.child_pid = self.process_handle.pid
def poll(self):
"""Check if task has successfully executed, caused an InstallError,
or the child process has information ready to receive."""
assert (
self.started or self.no_op
), "Can't call `poll()` before `start()` or identified no-operation task"
return self.no_op or self.success_result or self.error_result or self.process_handle.poll()
def succeed(self):
self.record.succeed()
# delete the temporary backup for an overwrite
# see spack.llnl.util.filesystem.restore_directory_transaction
if self.install_action == InstallAction.OVERWRITE:
shutil.rmtree(self.tmpdir, ignore_errors=True)
def fail(self, inner_exception):
self.record.fail(inner_exception)
if self.install_action != InstallAction.OVERWRITE:
raise inner_exception
# restore the overwrite directory from backup
# see spack.llnl.util.filesystem.restore_directory_transaction
try:
if os.path.exists(self.pkg.prefix):
shutil.rmtree(self.pkg.prefix)
os.rename(self.backup_dir, self.pkg.prefix)
except Exception as outer_exception:
raise fs.CouldNotRestoreDirectoryBackup(inner_exception, outer_exception)
raise inner_exception
def complete(self):
"""
Complete the installation of the requested spec and/or dependency
represented by the build task.
"""
assert (
self.started or self.no_op
), "Can't call `complete()` before `start()` or identified no-operation task"
pkg = self.pkg
self.status = BuildStatus.INSTALLING
# If task has been identified as a no operation,
# return ExecuteResult.NOOP
if self.no_op:
# This is one exit point that does not need to call
# self.succeed/fail. Job is either a no_op (external, upstream)
# or requeued.
return ExecuteResult.NO_OP
# If installing a package from binary cache is successful,
# return ExecuteResult.SUCCESS
if self.success_result is not None:
self.succeed()
return self.success_result
# If an error arises from installing a package,
# raise spack.error.InstallError
if self.error_result is not None:
self.fail(self.error_result)
# hook that allows tests to inspect the Package before installation
# see unit_test_check() docs.
if not pkg.unit_test_check():
self.succeed()
return ExecuteResult.FAILED
try:
# Check if the task's child process has completed
spack.package_base.PackageBase._verbose = self.process_handle.complete()
# Note: PARENT of the build process adds the new package to
# the database, so that we don't need to re-read from file.
spack.store.STORE.db.add(pkg.spec, explicit=self.explicit)
except spack.error.StopPhase as e:
# A StopPhase exception means that do_install was asked to
# stop early from clients, and is not an error at this point
pid = f"{self.pid}: " if tty.show_pid() else ""
tty.debug(f"{pid}{str(e)}")
tty.debug(f"Package stage directory: {pkg.stage.source_path}")
except (Exception, KeyboardInterrupt, SystemExit) as e:
self.fail(e)
self.succeed()
return ExecuteResult.SUCCESS
def terminate(self) -> None:
"""Terminate any processes this task still has running."""
if self.process_handle:
self.process_handle.terminate()
|
BuildTask
|
python
|
facebookresearch__faiss
|
faiss/gpu/test/test_cagra.py
|
{
"start": 3334,
"end": 5774
}
|
class ____(unittest.TestCase):
def do_interop(self, metric, numeric_type):
d = 64
k = 12
if numeric_type == faiss.Int8:
data_base_nt = np.random.randint(
-128, 128, size=(10000, d), dtype=np.int8)
data_query_nt = np.random.randint(
-128, 128, size=(100, d), dtype=np.int8)
data_base = data_base_nt.astype(np.float32)
data_query = data_query_nt.astype(np.float32)
else:
ds = datasets.SyntheticDataset(d, 0, 10000, 100)
data_base = ds.get_database() # fp32
data_query = ds.get_queries() # fp32
if numeric_type == faiss.Float16:
data_base_nt = data_base.astype(np.float16)
data_query_nt = data_query.astype(np.float16)
elif numeric_type == faiss.Float32:
data_base_nt = data_base
data_query_nt = data_query
res = faiss.StandardGpuResources()
index = faiss.GpuIndexCagra(res, d, metric)
index.train(data_base_nt, numeric_type=numeric_type)
Dnew, Inew = index.search(data_query_nt, k, numeric_type=numeric_type)
cpu_index = faiss.index_gpu_to_cpu(index)
# cpu index always search in fp32
Dref, Iref = cpu_index.search(data_query, k)
evaluation.check_ref_knn_with_draws(Dref, Iref, Dnew, Inew, k)
deserialized_index = faiss.deserialize_index(
faiss.serialize_index(cpu_index))
gpu_index = faiss.index_cpu_to_gpu(
res, 0, deserialized_index
)
Dnew2, Inew2 = gpu_index.search(data_query_nt, k, numeric_type=numeric_type)
evaluation.check_ref_knn_with_draws(Dnew2, Inew2, Dnew, Inew, k)
def test_interop_L2(self):
self.do_interop(faiss.METRIC_L2, faiss.Float32)
def test_interop_IP(self):
self.do_interop(faiss.METRIC_INNER_PRODUCT, faiss.Float32)
def test_interop_L2_FP16(self):
self.do_interop(faiss.METRIC_L2, faiss.Float16)
def test_interop_IP_FP16(self):
self.do_interop(faiss.METRIC_INNER_PRODUCT, faiss.Float16)
def test_interop_L2_Int8(self):
self.do_interop(faiss.METRIC_L2, faiss.Int8)
def test_interop_IP_Int8(self):
self.do_interop(faiss.METRIC_INNER_PRODUCT, faiss.Int8)
@unittest.skipIf(
"CUVS" not in faiss.get_compile_options(),
"only if cuVS is compiled in")
|
TestInterop
|
python
|
pytorch__pytorch
|
torch/_numpy/_ndarray.py
|
{
"start": 763,
"end": 8071
}
|
class ____:
def __init__(self, flag_to_value: dict):
assert all(k in FLAGS for k in flag_to_value) # sanity check
self._flag_to_value = flag_to_value
def __getattr__(self, attr: str):
if attr.islower() and attr.upper() in FLAGS:
return self[attr.upper()]
else:
raise AttributeError(f"No flag attribute '{attr}'")
def __getitem__(self, key):
if key in SHORTHAND_TO_FLAGS:
key = SHORTHAND_TO_FLAGS[key]
if key in FLAGS:
try:
return self._flag_to_value[key]
except KeyError as e:
raise NotImplementedError(f"{key=}") from e
else:
raise KeyError(f"No flag key '{key}'")
def __setattr__(self, attr, value):
if attr.islower() and attr.upper() in FLAGS:
self[attr.upper()] = value
else:
super().__setattr__(attr, value)
def __setitem__(self, key, value):
if key in FLAGS or key in SHORTHAND_TO_FLAGS:
raise NotImplementedError("Modifying flags is not implemented")
else:
raise KeyError(f"No flag key '{key}'")
def create_method(fn, name=None):
name = name or fn.__name__
def f(*args, **kwargs):
return fn(*args, **kwargs)
f.__name__ = name
f.__qualname__ = f"ndarray.{name}"
return f
# Map ndarray.name_method -> np.name_func
# If name_func == None, it means that name_method == name_func
methods = {
"clip": None,
"nonzero": None,
"repeat": None,
"round": None,
"squeeze": None,
"swapaxes": None,
"ravel": None,
# linalg
"diagonal": None,
"dot": None,
"trace": None,
# sorting
"argsort": None,
"searchsorted": None,
# reductions
"argmax": None,
"argmin": None,
"any": None,
"all": None,
"max": None,
"min": None,
"ptp": None,
"sum": None,
"prod": None,
"mean": None,
"var": None,
"std": None,
# scans
"cumsum": None,
"cumprod": None,
# advanced indexing
"take": None,
"choose": None,
}
dunder = {
"abs": "absolute",
"invert": None,
"pos": "positive",
"neg": "negative",
"gt": "greater",
"lt": "less",
"ge": "greater_equal",
"le": "less_equal",
}
# dunder methods with right-looking and in-place variants
ri_dunder = {
"add": None,
"sub": "subtract",
"mul": "multiply",
"truediv": "divide",
"floordiv": "floor_divide",
"pow": "power",
"mod": "remainder",
"and": "bitwise_and",
"or": "bitwise_or",
"xor": "bitwise_xor",
"lshift": "left_shift",
"rshift": "right_shift",
"matmul": None,
}
def _upcast_int_indices(index):
if isinstance(index, torch.Tensor):
if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
return index.to(torch.int64)
elif isinstance(index, tuple):
return tuple(_upcast_int_indices(i) for i in index)
return index
def _has_advanced_indexing(index):
"""Check if there's any advanced indexing"""
return any(
isinstance(idx, (Sequence, bool))
or (isinstance(idx, torch.Tensor) and (idx.dtype == torch.bool or idx.ndim > 0))
for idx in index
)
def _numpy_compatible_indexing(index):
"""Convert scalar indices to lists when advanced indexing is present for NumPy compatibility."""
if not isinstance(index, tuple):
index = (index,)
# Check if there's any advanced indexing (sequences, booleans, or tensors)
has_advanced = _has_advanced_indexing(index)
if not has_advanced:
return index
# Convert integer scalar indices to single-element lists when advanced indexing is present
# Note: Do NOT convert boolean scalars (True/False) as they have special meaning in NumPy
converted = []
for idx in index:
if isinstance(idx, int) and not isinstance(idx, bool):
# Integer scalars should be converted to lists
converted.append([idx])
elif (
isinstance(idx, torch.Tensor)
and idx.ndim == 0
and not torch.is_floating_point(idx)
and idx.dtype != torch.bool
):
# Zero-dimensional tensors holding integers should be treated the same as integer scalars
converted.append([idx])
else:
# Everything else (booleans, lists, slices, etc.) stays as is
converted.append(idx)
return tuple(converted)
def _get_bool_depth(s):
"""Returns the depth of a boolean sequence/tensor"""
if isinstance(s, bool):
return True, 0
if isinstance(s, torch.Tensor) and s.dtype == torch.bool:
return True, s.ndim
if not (isinstance(s, Sequence) and s and s[0] != s):
return False, 0
is_bool, depth = _get_bool_depth(s[0])
return is_bool, depth + 1
def _numpy_empty_ellipsis_patch(index, tensor_ndim):
"""
Patch for NumPy-compatible ellipsis behavior when ellipsis doesn't match any dimensions.
In NumPy, when an ellipsis (...) doesn't actually match any dimensions of the input array,
it still acts as a separator between advanced indices. PyTorch doesn't have this behavior.
This function detects when we have:
1. Advanced indexing on both sides of an ellipsis
2. The ellipsis doesn't actually match any dimensions
"""
if not isinstance(index, tuple):
index = (index,)
# Find ellipsis position
ellipsis_pos = None
for i, idx in enumerate(index):
if idx is Ellipsis:
ellipsis_pos = i
break
# If no ellipsis, no patch needed
if ellipsis_pos is None:
return index, lambda x: x, lambda x: x
# Count non-ellipsis dimensions consumed by the index
consumed_dims = 0
for idx in index:
is_bool, depth = _get_bool_depth(idx)
if is_bool:
consumed_dims += depth
elif idx is Ellipsis or idx is None:
continue
else:
consumed_dims += 1
# Calculate how many dimensions the ellipsis should match
ellipsis_dims = tensor_ndim - consumed_dims
# Check if ellipsis doesn't match any dimensions
if ellipsis_dims == 0:
# Check if we have advanced indexing on both sides of ellipsis
left_advanced = _has_advanced_indexing(index[:ellipsis_pos])
right_advanced = _has_advanced_indexing(index[ellipsis_pos + 1 :])
if left_advanced and right_advanced:
# This is the case where NumPy and PyTorch differ
# We need to ensure the advanced indices are treated as separated
new_index = index[:ellipsis_pos] + (None,) + index[ellipsis_pos + 1 :]
end_ndims = 1 + sum(
1 for idx in index[ellipsis_pos + 1 :] if isinstance(idx, slice)
)
def squeeze_fn(x):
return x.squeeze(-end_ndims)
def unsqueeze_fn(x):
if isinstance(x, torch.Tensor) and x.ndim >= end_ndims:
return x.unsqueeze(-end_ndims)
return x
return new_index, squeeze_fn, unsqueeze_fn
return index, lambda x: x, lambda x: x
# Used to indicate that a parameter is unspecified (as opposed to explicitly
# `None`)
|
Flags
|
python
|
cython__cython
|
Cython/Compiler/ExprNodes.py
|
{
"start": 495282,
"end": 497576
}
|
class ____(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal UnicodeNode # internal
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
literal = UnicodeNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def analyse_as_type(self, env):
self.operand = self.operand.analyse_types(env)
return self.operand.type
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
matmul_operator = operator.matmul
except AttributeError:
def matmul_operator(a, b):
try:
func = a.__matmul__
except AttributeError:
func = b.__rmatmul__
return func(a, b)
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'@': matmul_operator,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
|
TypeofNode
|
python
|
PyCQA__pylint
|
tests/functional/n/not_async_context_manager.py
|
{
"start": 511,
"end": 583
}
|
class ____(Portocala):
def __aenter__(self):
pass
|
UnknownBases
|
python
|
ansible__ansible
|
test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/test/test_test.py
|
{
"start": 72,
"end": 241
}
|
class ____(object):
""" Ansible core jinja2 tests """
def tests(self):
return {
# failure testing
'yolo': yolo,
}
|
TestModule
|
python
|
scipy__scipy
|
scipy/sparse/linalg/_interface.py
|
{
"start": 22747,
"end": 23621
}
|
class ____(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.args = (A, B)
super().__init__(_get_dtype([A, B]), A.shape)
def _matvec(self, x):
return self.args[0].matvec(x) + self.args[1].matvec(x)
def _rmatvec(self, x):
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
def _rmatmat(self, x):
return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
def _matmat(self, x):
return self.args[0].matmat(x) + self.args[1].matmat(x)
def _adjoint(self):
A, B = self.args
return A.H + B.H
|
_SumLinearOperator
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/tests/call_to_lambda_function_test.py
|
{
"start": 952,
"end": 1393
}
|
class ____(reference_test_base.TestCase):
def test_inline(self):
self.assertFunctionMatchesEager(inline_lambda, 1)
self.assertFunctionMatchesEager(inline_lambda, tf.constant(1))
def test_external(self):
self.assertFunctionMatchesEager(external_lambda, 1, lambda x: x == 0)
self.assertFunctionMatchesEager(
external_lambda, tf.constant(1), lambda x: x == 0)
if __name__ == '__main__':
tf.test.main()
|
ReferenceTest
|
python
|
huggingface__transformers
|
src/transformers/models/conditional_detr/modeling_conditional_detr.py
|
{
"start": 1891,
"end": 3521
}
|
class ____(BaseModelOutputWithCrossAttentions):
r"""
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`):
Reference points (reference points of each layer of the decoder).
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to
Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder
layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding
losses.
"""
)
|
ConditionalDetrDecoderOutput
|
python
|
psf__black
|
tests/data/cases/dummy_implementations.py
|
{
"start": 380,
"end": 981
}
|
class ____(Protocol):
def foo(self, a: int) -> int:
...
def bar(self, b: str) -> str: ...
def baz(self, c: bytes) -> str:
...
def dummy_two():
...
@dummy
def dummy_three():
...
def dummy_four():
...
@overload
def b(arg: int) -> int: ...
@overload
def b(arg: str) -> str: ...
@overload
def b(arg: object) -> NoReturn: ...
def b(arg: Union[int, str, object]) -> Union[int, str]:
if not isinstance(arg, (int, str)):
raise TypeError
return arg
def has_comment():
... # still a dummy
if some_condition:
...
if already_dummy: ...
|
Proto
|
python
|
huggingface__transformers
|
src/transformers/models/llava_onevision/modular_llava_onevision.py
|
{
"start": 9307,
"end": 9386
}
|
class ____(LlavaNextVideoPreTrainedModel):
pass
|
LlavaOnevisionPreTrainedModel
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/issueowners_assignment.py
|
{
"start": 79,
"end": 261
}
|
class ____(analytics.Event):
organization_id: int
project_id: int
group_id: int
updated_assignment: bool
analytics.register(IssueOwnersAssignment)
|
IssueOwnersAssignment
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-jumps-to-reach-end-via-prime-teleportation.py
|
{
"start": 544,
"end": 1673
}
|
class ____(object):
def minJumps(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
adj = collections.defaultdict(list)
for i, x in enumerate(nums):
while x != 1:
p = SPF[x]
while x%p == 0:
x //= p
adj[p].append(i)
dist = [-1]*len(nums)
dist[0] = 0
q = [0]
while q:
new_q = []
for i in q:
if i == len(nums)-1:
return dist[-1]
for di in (-1, +1):
ni = i+di
if 0 <= ni < len(nums) and dist[ni] == -1:
dist[ni] = dist[i]+1
new_q.append(ni)
p = nums[i]
if SPF[p] != p or p not in adj:
continue
for ni in adj[p]:
if dist[ni] != -1:
continue
dist[ni] = dist[i]+1
new_q.append(ni)
del adj[p]
q = new_q
return -1
|
Solution
|
python
|
scipy__scipy
|
scipy/stats/tests/test_continuous.py
|
{
"start": 78833,
"end": 82121
}
|
class ____:
@pytest.mark.fail_slow(20) # Moments require integration
def test_order_statistic(self):
rng = np.random.default_rng(7546349802439582)
X = Uniform(a=0, b=1)
n = 5
r = np.asarray([[1], [3], [5]])
Y = stats.order_statistic(X, n=n, r=r)
Y0 = stats.beta(r, n + 1 - r)
y = Y0.rvs((3, 10), random_state=rng)
p = Y0.cdf(y)
# log methods need some attention before merge
assert_allclose(np.exp(Y.logentropy()), Y0.entropy())
assert_allclose(Y.entropy(), Y0.entropy())
assert_allclose(Y.mean(), Y0.mean())
assert_allclose(Y.variance(), Y0.var())
assert_allclose(Y.skewness(), Y0.stats('s'), atol=1e-15)
assert_allclose(Y.kurtosis(), Y0.stats('k') + 3, atol=1e-15)
assert_allclose(Y.median(), Y0.ppf(0.5))
assert_allclose(Y.support(), Y0.support())
assert_allclose(Y.pdf(y), Y0.pdf(y))
assert_allclose(Y.cdf(y, method='formula'), Y.cdf(y, method='quadrature'))
assert_allclose(Y.ccdf(y, method='formula'), Y.ccdf(y, method='quadrature'))
assert_allclose(Y.icdf(p, method='formula'), Y.icdf(p, method='inversion'))
assert_allclose(Y.iccdf(p, method='formula'), Y.iccdf(p, method='inversion'))
assert_allclose(Y.logpdf(y), Y0.logpdf(y))
assert_allclose(Y.logcdf(y), Y0.logcdf(y))
assert_allclose(Y.logccdf(y), Y0.logsf(y))
with np.errstate(invalid='ignore', divide='ignore'):
assert_allclose(Y.ilogcdf(np.log(p),), Y0.ppf(p))
assert_allclose(Y.ilogccdf(np.log(p)), Y0.isf(p))
message = "`r` and `n` must contain only positive integers."
with pytest.raises(ValueError, match=message):
stats.order_statistic(X, n=n, r=-1)
with pytest.raises(ValueError, match=message):
stats.order_statistic(X, n=-1, r=r)
with pytest.raises(ValueError, match=message):
stats.order_statistic(X, n=n, r=1.5)
with pytest.raises(ValueError, match=message):
stats.order_statistic(X, n=1.5, r=r)
def test_support_gh22037(self):
# During review of gh-22037, it was noted that the `support` of
# an `OrderStatisticDistribution` returned incorrect results;
# this was resolved by overriding `_support`.
Uniform = stats.make_distribution(stats.uniform)
X = Uniform()
Y = X*5 + 2
Z = stats.order_statistic(Y, r=3, n=5)
assert_allclose(Z.support(), Y.support())
def test_composition_gh22037(self):
# During review of gh-22037, it was noted that an error was
# raised when creating an `OrderStatisticDistribution` from
# a `TruncatedDistribution`. This was resolved by overriding
# `_update_parameters`.
Normal = stats.make_distribution(stats.norm)
TruncatedNormal = stats.make_distribution(stats.truncnorm)
a, b = [-2, -1], 1
r, n = 3, [[4], [5]]
x = [[[-0.3]], [[0.1]]]
X1 = Normal()
Y1 = stats.truncate(X1, a, b)
Z1 = stats.order_statistic(Y1, r=r, n=n)
X2 = TruncatedNormal(a=a, b=b)
Z2 = stats.order_statistic(X2, r=r, n=n)
np.testing.assert_allclose(Z1.cdf(x), Z2.cdf(x))
|
TestOrderStatistic
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_dms.py
|
{
"start": 2228,
"end": 6679
}
|
class ____:
TASK_DATA = {
"replication_task_id": "task_id",
"source_endpoint_arn": "source_endpoint",
"target_endpoint_arn": "target_endpoint",
"replication_instance_arn": "replication_arn",
"table_mappings": {
"rules": [
{
"rule-type": "selection",
"rule-id": "1",
"rule-name": "1",
"object-locator": {
"schema-name": "test",
"table-name": "%",
},
"rule-action": "include",
}
]
},
}
def test_init(self):
op = DmsCreateTaskOperator(
task_id="create_task",
**self.TASK_DATA,
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="ca-west-1",
verify=True,
botocore_config={"read_timeout": 42},
)
assert op.replication_task_id == self.TASK_DATA["replication_task_id"]
assert op.source_endpoint_arn == self.TASK_DATA["source_endpoint_arn"]
assert op.target_endpoint_arn == self.TASK_DATA["target_endpoint_arn"]
assert op.replication_instance_arn == self.TASK_DATA["replication_instance_arn"]
assert op.migration_type == "full-load"
assert op.table_mappings == self.TASK_DATA["table_mappings"]
assert op.hook.client_type == "dms"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "ca-west-1"
assert op.hook._verify is True
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = DmsCreateTaskOperator(task_id="create_task", **self.TASK_DATA)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(DmsHook, "get_task_status", side_effect=("ready",))
@mock.patch.object(DmsHook, "create_replication_task", return_value=TASK_ARN)
@mock.patch.object(DmsHook, "get_conn")
def test_create_task(self, mock_conn, mock_create_replication_task, mock_get_task_status):
dms_hook = DmsHook()
create_task = DmsCreateTaskOperator(task_id="create_task", **self.TASK_DATA)
create_task.execute(None)
mock_create_replication_task.assert_called_once_with(**self.TASK_DATA, migration_type="full-load")
assert dms_hook.get_task_status(TASK_ARN) == "ready"
@mock.patch.object(DmsHook, "get_task_status", side_effect=("ready",))
@mock.patch.object(DmsHook, "create_replication_task", return_value=TASK_ARN)
@mock.patch.object(DmsHook, "get_conn")
def test_create_task_with_migration_type(
self, mock_conn, mock_create_replication_task, mock_get_task_status
):
migration_type = "cdc"
dms_hook = DmsHook()
create_task = DmsCreateTaskOperator(
task_id="create_task", migration_type=migration_type, **self.TASK_DATA
)
create_task.execute(None)
mock_create_replication_task.assert_called_once_with(**self.TASK_DATA, migration_type=migration_type)
assert dms_hook.get_task_status(TASK_ARN) == "ready"
def test_template_fields(self):
op = DmsCreateTaskOperator(
task_id="create_task",
**self.TASK_DATA,
aws_conn_id="fake-conn-id",
region_name="ca-west-1",
verify=True,
botocore_config={"read_timeout": 42},
)
validate_template_fields(op)
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = DmsCreateTaskOperator(
task_id="dms_create_task_operator",
**self.TASK_DATA,
aws_conn_id=OVERWRITTEN_CONN,
verify=True,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == OVERWRITTEN_CONN
def test_default_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = DmsCreateTaskOperator(
task_id="dms_create_task_operator",
**self.TASK_DATA,
verify=True,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == DEFAULT_CONN
|
TestDmsCreateTaskOperator
|
python
|
huggingface__transformers
|
src/transformers/models/mbart50/tokenization_mbart50.py
|
{
"start": 1611,
"end": 14806
}
|
class ____(TokenizersBackend):
"""
Construct a MBart50 tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
Examples:
```python
>>> from transformers import MBart50Tokenizer
>>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
>>> # model(**model_inputs) should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(
self,
src_lang=None,
tgt_lang=None,
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
vocab=None,
merges=None, # Ignored for Unigram
vocab_file=None,
**kwargs,
):
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.vocab_file = vocab_file
# Do not pass language codes via extra_special_tokens to super().__init__.
# We will mark them as special AFTER backend construction to avoid re-adding tokens
# when loading from pretrained files.
# Always construct a tokenizer_object without referencing external tokenizer files
if vocab is not None:
# MBart50 uses fairseq vocab alignment matching MBart50Converter:
# <s>=0, <pad>=1, </s>=2, <unk>=3, then tokens, lang codes, <mask>
vocab = [(str(item[0]), float(item[1])) for item in vocab]
vocab_tokens = [item[0] for item in vocab]
has_language_codes = any(lang_code in vocab_tokens for lang_code in FAIRSEQ_LANGUAGE_CODES)
if has_language_codes:
self._vocab_scores = vocab
else:
# Vocab from SentencePieceExtractor is in sentencepiece format:
# <unk>=0, <s>=1, </s>=2, then tokens
# We need to reorder to fairseq format: <s>=0, <pad>=1, </s>=2, <unk>=3, then tokens
# Reorder: fairseq expects <s>, <pad>, </s>, <unk>, then rest of vocab starting from index 3
vocab_list = [
(str(cls_token), 0.0), # 0: <s>
(str(pad_token), 0.0), # 1: <pad>
(str(eos_token), 0.0), # 2: </s>
(str(unk_token), 0.0), # 3: <unk>
]
# Add remaining tokens from position 3 onwards (skip <unk>, <s>, </s> from sentencepiece)
vocab_list.extend(vocab[3:])
# Add language codes
for lang_code in FAIRSEQ_LANGUAGE_CODES:
vocab_list.append((str(lang_code), 0.0))
# Add mask token
vocab_list.append((str(mask_token), 0.0))
self._vocab_scores = vocab_list
else:
# Minimal fallback: small vocab with specials and language codes
self._vocab_scores = [
(str(cls_token), 0.0),
(str(pad_token), 0.0),
(str(eos_token), 0.0),
(str(unk_token), 0.0),
("▁", -2.0),
]
for lang_code in FAIRSEQ_LANGUAGE_CODES:
self._vocab_scores.append((lang_code, 0.0))
self._vocab_scores.append((str(mask_token), 0.0))
# Build backend tokenizer from self._vocab_scores (both branches above set it)
self._tokenizer = Tokenizer(
Unigram(
self._vocab_scores,
unk_id=3,
byte_fallback=False,
)
)
# Set normalizer equivalent to Precompiled + Strip + Replace from tokenizer.json
# When loading from pretrained, this will be overridden by the tokenizer.json config
# When creating from extractor (vocab), this provides equivalent behavior
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace(Regex(r"[\n\r\t]"), " "), # Precompiled converts newlines/tabs to spaces
normalizers.NFKC(), # Precompiled does NFKC normalization
normalizers.Strip(left=False, right=True), # Strip trailing whitespace (matches tokenizer.json)
normalizers.Replace(
Regex(r" {2,}"), "▁"
), # Replace multiple spaces with underscore (matches tokenizer.json)
]
)
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always", split=True)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always", split=True)
super().__init__(
tokenizer_object=self._tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.fairseq_offset = 1
# Mark language codes as extra special tokens without re-adding them to the backend.
# Merge with any pre-existing extra_special_tokens (e.g., restored from config on load).
try:
lang_tokens = [AddedToken(code, special=True) for code in FAIRSEQ_LANGUAGE_CODES]
except Exception:
lang_tokens = list(FAIRSEQ_LANGUAGE_CODES)
existing_extra = getattr(self, "_extra_special_tokens", []) or []
# Preserve order: keep existing, append missing language codes
existing_strs = {str(t) for t in existing_extra}
merged_extra = list(existing_extra) + [t for t in lang_tokens if str(t) not in existing_strs]
self._extra_special_tokens = merged_extra
self._src_lang = src_lang if src_lang is not None else "en_XX"
self.tgt_lang = tgt_lang
# Build language code mappings and fairseq mappings
# This will be called again in _post_init after tokenizer.json is loaded
self._build_language_code_mappings()
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.set_src_lang_special_tokens(self._src_lang)
def _build_language_code_mappings(self):
"""Build language code to ID mappings and fairseq compatibility mappings."""
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
# Build fairseq token mappings for backward compatibility
self.fairseq_tokens_to_ids = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
mask_token = getattr(self, "mask_token", "<mask>")
self.fairseq_tokens_to_ids["<mask>"] = self.convert_tokens_to_ids(str(mask_token))
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _post_init(self):
"""Called after tokenizer.json is loaded in from_pretrained."""
# Rebuild language code mappings with the loaded tokenizer
self._build_language_code_mappings()
# Update cur_lang_code_id with the correct ID
if hasattr(self, "_src_lang"):
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.set_src_lang_special_tokens(self._src_lang)
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def prepare_seq2seq_batch(
self,
src_texts: list[str],
src_lang: str = "en_XX",
tgt_texts: Optional[list[str]] = None,
tgt_lang: str = "ro_RO",
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
if self.tgt_lang is None:
self.tgt_lang = self._src_lang
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang)
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
"""Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang)
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
__all__ = ["MBart50Tokenizer"]
# Backward alias
MBart50TokenizerFast = MBart50Tokenizer
|
MBart50Tokenizer
|
python
|
openai__openai-python
|
src/openai/types/vector_stores/vector_store_file.py
|
{
"start": 304,
"end": 547
}
|
class ____(BaseModel):
code: Literal["server_error", "unsupported_file", "invalid_file"]
"""One of `server_error`, `unsupported_file`, or `invalid_file`."""
message: str
"""A human-readable description of the error."""
|
LastError
|
python
|
django-mptt__django-mptt
|
tests/myapp/models.py
|
{
"start": 1396,
"end": 1499
}
|
class ____(models.Model):
item = models.ForeignKey(Item, null=True, on_delete=models.CASCADE)
|
SubItem
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_export.py
|
{
"start": 19962,
"end": 20541
}
|
class ____(AdminTestMixin, TestCase):
def test_selectable_fields_rendered_with_resource_index_attribute(self) -> None:
response = self._get_url_response(self.book_export_url)
form_resources = response.context["form"].resources
content = response.content.decode()
for index, resource in enumerate(form_resources):
resource_fields = resource().get_export_order()
self.assertEqual(
content.count(f'resource-index="{index}"'),
len(resource_fields),
)
|
TestSelectableFieldsExportPage
|
python
|
pytorch__pytorch
|
test/fx/test_matcher_utils.py
|
{
"start": 886,
"end": 10823
}
|
class ____(JitTestCase):
def test_subgraph_matcher_with_attributes(self):
class LargeModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self._weight = torch.nn.Parameter(torch.ones(3, 3))
self._bias = torch.nn.Parameter(torch.ones(3, 3))
def forward(self, x):
return torch.ops.aten.addmm.default(self._bias, x, self._weight)
# Large Model graph:
# opcode name target args kwargs
# ------------- ------------- ------------------ ------------------- --------
# placeholder x x () {}
# get_attr _bias _bias () {}
# get_attr _weight _weight () {}
# call_function addmm_default aten.addmm.default (_bias, x, _weight) {}
# output output output (addmm_default,) {}
large_model_graph = symbolic_trace(LargeModel()).graph
class PatternModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self._weight_1 = torch.nn.Parameter(torch.ones(5, 5))
self._bias_1 = torch.nn.Parameter(torch.ones(5, 5))
def forward(self, x):
return torch.ops.aten.addmm.default(self._bias_1, x, self._weight_1)
pattern_graph = torch.fx.symbolic_trace(PatternModel()).graph
subgraph_matcher = SubgraphMatcher(pattern_graph)
match_result = subgraph_matcher.match(large_model_graph)
self.assertEqual(len(match_result), 1)
def test_subgraph_matcher_with_list(self):
def original(x, y):
return torch.ops.aten.view(x, [5, y.shape[0]])
original_graph = torch.fx.symbolic_trace(original).graph
def pattern(x, y, z):
return torch.ops.aten.view(x, [z, y.shape[0]])
pattern_graph = torch.fx.symbolic_trace(pattern).graph
subgraph_matcher = SubgraphMatcher(pattern_graph)
match_result = subgraph_matcher.match(original_graph)
self.assertEqual(len(match_result), 1)
def test_subgraph_matcher_with_list_bad(self):
def original(x, y):
return torch.ops.aten._reshape_alias_copy.default(
x, [1, y.shape[0]], [y.shape[1], y.shape[1]]
)
original_graph = torch.fx.symbolic_trace(original).graph
def pattern(x, y, b):
return torch.ops.aten._reshape_alias_copy.default(
x, [b, y.shape[0], y.shape[1]], [y.shape[1]]
)
pattern_graph = torch.fx.symbolic_trace(pattern).graph
subgraph_matcher = SubgraphMatcher(pattern_graph)
match_result = subgraph_matcher.match(original_graph)
self.assertEqual(len(match_result), 0)
def test_subgraph_matcher_ignore_literals(self):
def original(x):
return x + 1
original_graph = make_fx(original)(torch.ones(3, 3)).graph
original_graph.eliminate_dead_code()
def pattern(x):
return x + 2
pattern_graph = make_fx(pattern)(torch.ones(4, 4)).graph
pattern_graph.eliminate_dead_code()
subgraph_matcher = SubgraphMatcher(pattern_graph)
match_result = subgraph_matcher.match(original_graph)
self.assertEqual(len(match_result), 0)
subgraph_matcher = SubgraphMatcher(pattern_graph, ignore_literals=True)
match_result = subgraph_matcher.match(original_graph)
self.assertEqual(len(match_result), 1)
def test_variatic_arg_matching(self):
inputs = (torch.randn(20, 16, 50, 32),)
def maxpool(x, kernel_size, stride, padding, dilation):
return torch.ops.aten.max_pool2d_with_indices.default(
x, kernel_size, stride, padding, dilation
)
maxpool_graph = torch.fx.symbolic_trace(maxpool).graph
maxpool_matcher = SubgraphMatcher(maxpool_graph)
match_result = maxpool_matcher.match(maxpool_graph)
self.assertEqual(len(match_result), 1)
# Graph only contains "stride" argument
maxpool_s = torch.nn.MaxPool2d(kernel_size=2, stride=1).eval()
maxpool_s_graph = make_fx(maxpool_s)(*inputs).graph
match_s_result = maxpool_matcher.match(maxpool_s_graph)
self.assertEqual(len(match_s_result), 1)
# Graph only contains "padding" argument
maxpool_p = torch.nn.MaxPool2d(kernel_size=2, padding=1)
maxpool_p_graph = make_fx(maxpool_p)(*inputs).graph
match_p_result = maxpool_matcher.match(maxpool_p_graph)
self.assertEqual(len(match_p_result), 1)
# Graph only contains "stride, padding" argument
maxpool_sp = torch.nn.MaxPool2d(kernel_size=2, stride=1, padding=1)
maxpool_sp_graph = make_fx(maxpool_sp)(*inputs).graph
match_sp_result = maxpool_matcher.match(maxpool_sp_graph)
self.assertEqual(len(match_sp_result), 1)
@unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
def test_split_to_graph_and_name_node_map(self):
"""Testing the internal helper function for splitting the pattern graph"""
from torch.fx.passes.utils.matcher_with_name_node_map_utils import (
_split_to_graph_and_name_node_map,
)
def pattern(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu_mul_by_two = relu * 2
return relu, relu_mul_by_two, {"conv": conv, "relu": relu}
example_inputs = (
torch.randn(1, 3, 3, 3) * 10,
torch.randn(3, 3, 3, 3),
)
pattern_gm = export(
WrapperModule(pattern), example_inputs, strict=True
).module()
before_split_res = pattern_gm(*example_inputs)
pattern_gm, _ = _split_to_graph_and_name_node_map(pattern_gm)
after_split_res = pattern_gm(*example_inputs)
self.assertEqual(before_split_res[0], after_split_res[0])
self.assertEqual(before_split_res[1], after_split_res[1])
@unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
def test_matcher_with_name_node_map_function(self):
"""Testing SubgraphMatcherWithNameNodeMap with function pattern"""
def target_graph(x, weight):
x = x * 2
weight = weight * 3
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu2 = relu * 2
return relu + relu2
def pattern(x, weight):
conv = F.conv2d(x, weight)
relu = F.relu(conv)
relu_mul_by_two = relu * 2
return relu, relu_mul_by_two, {"conv": conv, "relu": relu}
example_inputs = (
torch.randn(1, 3, 3, 3) * 10,
torch.randn(3, 3, 3, 3),
)
pattern_gm = export(
WrapperModule(pattern), example_inputs, strict=True
).module()
matcher = SubgraphMatcherWithNameNodeMap(pattern_gm)
target_gm = export(
WrapperModule(target_graph), example_inputs, strict=True
).module()
internal_matches = matcher.match(target_gm.graph)
for internal_match in internal_matches:
name_node_map = internal_match.name_node_map
assert "conv" in name_node_map
assert "relu" in name_node_map
name_node_map["conv"].meta["custom_annotation"] = "annotation"
# check if we correctly annotated the target graph module
for n in target_gm.graph.nodes:
if n == name_node_map["conv"]:
assert (
"custom_annotation" in n.meta
and n.meta["custom_annotation"] == "annotation"
)
@unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
def test_matcher_with_name_node_map_module(self):
"""Testing SubgraphMatcherWithNameNodeMap with module pattern"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
class Pattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
linear = self.linear(x)
# Note: we can't put "weight": self.linear.weight in dictionary since
# nn.Parameter is not an allowed output type in dynamo
return linear, {"linear": linear, "x": x}
example_inputs = (torch.randn(3, 5),)
pattern_gm = export(Pattern(), example_inputs, strict=True).module()
matcher = SubgraphMatcherWithNameNodeMap(pattern_gm)
target_gm = export(M(), example_inputs, strict=True).module()
internal_matches = matcher.match(target_gm.graph)
for internal_match in internal_matches:
name_node_map = internal_match.name_node_map
assert "linear" in name_node_map
assert "x" in name_node_map
name_node_map["linear"].meta["custom_annotation"] = "annotation"
# check if we correctly annotated the target graph module
for n in target_gm.graph.nodes:
if n == name_node_map["linear"]:
assert (
"custom_annotation" in n.meta
and n.meta["custom_annotation"] == "annotation"
)
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
|
TestMatcher
|
python
|
pyparsing__pyparsing
|
examples/searchparser.py
|
{
"start": 2514,
"end": 6843
}
|
class ____:
def __init__(self):
self._methods = {
"and": self.evaluateAnd,
"or": self.evaluateOr,
"not": self.evaluateNot,
"parenthesis": self.evaluateParenthesis,
"quotes": self.evaluateQuotes,
"word": self.evaluateWord,
"wordwildcard": self.evaluateWordWildcard,
}
self._parser = self.parser()
def parser(self):
"""
This function returns a parser.
The grammar should be like most full text search engines (Google, Tsearch, Lucene).
Grammar:
- a query consists of alphanumeric words, with an optional '*' wildcard
at the end of a word
- a sequence of words between quotes is a literal string
- words can be used together by using operators ('and' or 'or')
- words with operators can be grouped with parenthesis
- a word or group of words can be preceded by a 'not' operator
- the 'and' operator precedes an 'or' operator
- if an operator is missing, use an 'and' operator
"""
operatorOr = Forward()
operatorWord = Group(Combine(Word(alphanums) + Suppress("*"))).set_results_name(
"wordwildcard"
) | Group(Word(alphanums)).set_results_name("word")
operatorQuotesContent = Forward()
operatorQuotesContent << ((operatorWord + operatorQuotesContent) | operatorWord)
operatorQuotes = (
Group(Suppress('"') + operatorQuotesContent + Suppress('"')).set_results_name(
"quotes"
)
| operatorWord
)
operatorParenthesis = (
Group(Suppress("(") + operatorOr + Suppress(")")).set_results_name(
"parenthesis"
)
| operatorQuotes
)
operatorNot = Forward()
operatorNot << (
Group(Suppress(Keyword("not", caseless=True)) + operatorNot).set_results_name(
"not"
)
| operatorParenthesis
)
operatorAnd = Forward()
operatorAnd << (
Group(
operatorNot + Suppress(Keyword("and", caseless=True)) + operatorAnd
).set_results_name("and")
| Group(
operatorNot + OneOrMore(~one_of("and or") + operatorAnd)
).set_results_name("and")
| operatorNot
)
operatorOr << (
Group(
operatorAnd + Suppress(Keyword("or", caseless=True)) + operatorOr
).set_results_name("or")
| operatorAnd
)
return operatorOr.parse_string
def evaluateAnd(self, argument):
return self.evaluate(argument[0]).intersection(self.evaluate(argument[1]))
def evaluateOr(self, argument):
return self.evaluate(argument[0]).union(self.evaluate(argument[1]))
def evaluateNot(self, argument):
return self.GetNot(self.evaluate(argument[0]))
def evaluateParenthesis(self, argument):
return self.evaluate(argument[0])
def evaluateQuotes(self, argument):
"""Evaluate quoted strings
First is does an 'and' on the indidual search terms, then it asks the
function GetQuoted to only return the subset of ID's that contain the
literal string.
"""
r = set()
search_terms = []
for item in argument:
search_terms.append(item[0])
if len(r) == 0:
r = self.evaluate(item)
else:
r = r.intersection(self.evaluate(item))
return self.GetQuotes(" ".join(search_terms), r)
def evaluateWord(self, argument):
return self.GetWord(argument[0])
def evaluateWordWildcard(self, argument):
return self.GetWordWildcard(argument[0])
def evaluate(self, argument):
return self._methods[argument.get_name()](argument)
def Parse(self, query):
# print self._parser(query)[0]
return self.evaluate(self._parser(query)[0])
def GetWord(self, word):
return set()
def GetWordWildcard(self, word):
return set()
def GetQuotes(self, search_string, tmp_result):
return set()
def GetNot(self, not_set):
return set().difference(not_set)
|
SearchQueryParser
|
python
|
pytest-dev__pytest
|
testing/test_junitxml.py
|
{
"start": 35630,
"end": 60148
}
|
class ____:
@parametrize_families
def test_summing_simple(
self, pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makeconftest(
"""
import pytest
def pytest_collect_file(file_path, parent):
if file_path.suffix == ".xyz":
return MyItem.from_parent(name=file_path.name, parent=parent)
class MyItem(pytest.Item):
def runtest(self):
raise ValueError(42)
def repr_failure(self, excinfo):
return "custom item runtest failed"
"""
)
pytester.path.joinpath("myfile.xyz").write_text("hello", encoding="utf-8")
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.get_first_by_tag("testsuite")
node.assert_attr(errors=0, failures=1, skipped=0, tests=1)
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(name="myfile.xyz")
fnode = tnode.get_first_by_tag("failure")
fnode.assert_attr(message="custom item runtest failed")
assert "custom item runtest failed" in fnode.toxml()
@pytest.mark.parametrize("junit_logging", ["no", "system-out"])
def test_nullbyte(pytester: Pytester, junit_logging: str) -> None:
# A null byte cannot occur in XML (see section 2.2 of the spec)
pytester.makepyfile(
"""
import sys
def test_print_nullbyte():
sys.stdout.write('Here the null -->' + chr(0) + '<--')
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
assert False
"""
)
xmlf = pytester.path.joinpath("junit.xml")
pytester.runpytest(f"--junitxml={xmlf}", "-o", f"junit_logging={junit_logging}")
text = xmlf.read_text(encoding="utf-8")
assert "\x00" not in text
if junit_logging == "system-out":
assert "#x00" in text
if junit_logging == "no":
assert "#x00" not in text
@pytest.mark.parametrize("junit_logging", ["no", "system-out"])
def test_nullbyte_replace(pytester: Pytester, junit_logging: str) -> None:
# Check if the null byte gets replaced
pytester.makepyfile(
"""
import sys
def test_print_nullbyte():
sys.stdout.write('Here the null -->' + chr(0) + '<--')
sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
assert False
"""
)
xmlf = pytester.path.joinpath("junit.xml")
pytester.runpytest(f"--junitxml={xmlf}", "-o", f"junit_logging={junit_logging}")
text = xmlf.read_text(encoding="utf-8")
if junit_logging == "system-out":
assert "#x0" in text
if junit_logging == "no":
assert "#x0" not in text
def test_invalid_xml_escape() -> None:
# Test some more invalid xml chars, the full range should be
# tested really but let's just test the edges of the ranges
# instead.
# XXX This only tests low unicode character points for now as
# there are some issues with the testing infrastructure for
# the higher ones.
# XXX Testing 0xD (\r) is tricky as it overwrites the just written
# line in the output, so we skip it too.
invalid = (
0x00,
0x1,
0xB,
0xC,
0xE,
0x19,
27, # issue #126
0xD800,
0xDFFF,
0xFFFE,
0x0FFFF,
) # , 0x110000)
valid = (0x9, 0xA, 0x20)
# 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
for i in invalid:
got = bin_xml_escape(chr(i))
if i <= 0xFF:
expected = f"#x{i:02X}"
else:
expected = f"#x{i:04X}"
assert got == expected
for i in valid:
assert chr(i) == bin_xml_escape(chr(i))
def test_logxml_path_expansion(tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
home_tilde = Path(os.path.expanduser("~")).joinpath("test.xml")
xml_tilde = LogXML(Path("~", "test.xml"), None)
assert xml_tilde.logfile == str(home_tilde)
monkeypatch.setenv("HOME", str(tmp_path))
home_var = os.path.normpath(os.path.expandvars("$HOME/test.xml"))
xml_var = LogXML(Path("$HOME", "test.xml"), None)
assert xml_var.logfile == str(home_var)
def test_logxml_changingdir(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_func():
import os
os.chdir("a")
"""
)
pytester.mkdir("a")
result = pytester.runpytest("--junitxml=a/x.xml")
assert result.ret == 0
assert pytester.path.joinpath("a/x.xml").exists()
def test_logxml_makedir(pytester: Pytester) -> None:
"""--junitxml should automatically create directories for the xml file"""
pytester.makepyfile(
"""
def test_pass():
pass
"""
)
result = pytester.runpytest("--junitxml=path/to/results.xml")
assert result.ret == 0
assert pytester.path.joinpath("path/to/results.xml").exists()
def test_logxml_check_isdir(pytester: Pytester) -> None:
"""Give an error if --junit-xml is a directory (#2089)"""
result = pytester.runpytest("--junit-xml=.")
result.stderr.fnmatch_lines(["*--junitxml must be a filename*"])
def test_escaped_parametrized_names_xml(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""\
import pytest
@pytest.mark.parametrize('char', ["\\x00"])
def test_func(char):
assert char
"""
)
result, dom = run_and_parse()
assert result.ret == 0
node = dom.get_first_by_tag("testcase")
node.assert_attr(name="test_func[\\x00]")
def test_double_colon_split_function_issue469(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('param', ["double::colon"])
def test_func(param):
pass
"""
)
result, dom = run_and_parse()
assert result.ret == 0
node = dom.get_first_by_tag("testcase")
node.assert_attr(classname="test_double_colon_split_function_issue469")
node.assert_attr(name="test_func[double::colon]")
def test_double_colon_split_method_issue469(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import pytest
class TestClass(object):
@pytest.mark.parametrize('param', ["double::colon"])
def test_func(self, param):
pass
"""
)
result, dom = run_and_parse()
assert result.ret == 0
node = dom.get_first_by_tag("testcase")
node.assert_attr(classname="test_double_colon_split_method_issue469.TestClass")
node.assert_attr(name="test_func[double::colon]")
def test_unicode_issue368(pytester: Pytester) -> None:
path = pytester.path.joinpath("test.xml")
log = LogXML(str(path), None)
ustr = "ВНИ!"
class Report(BaseReport):
longrepr = ustr
sections: list[tuple[str, str]] = []
nodeid = "something"
location = "tests/filename.py", 42, "TestClass.method"
when = "teardown"
test_report = cast(TestReport, Report())
# hopefully this is not too brittle ...
log.pytest_sessionstart()
node_reporter = log._opentestcase(test_report)
node_reporter.append_failure(test_report)
node_reporter.append_collect_error(test_report)
node_reporter.append_collect_skipped(test_report)
node_reporter.append_error(test_report)
test_report.longrepr = "filename", 1, ustr
node_reporter.append_skipped(test_report)
test_report.longrepr = "filename", 1, "Skipped: 卡嘣嘣"
node_reporter.append_skipped(test_report)
test_report.wasxfail = ustr
node_reporter.append_skipped(test_report)
log.pytest_sessionfinish()
def test_record_property(pytester: Pytester, run_and_parse: RunAndParse) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def other(record_property):
record_property("bar", 1)
def test_record(record_property, other):
record_property("foo", "<1");
"""
)
result, dom = run_and_parse()
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
psnode = tnode.get_first_by_tag("properties")
pnodes = psnode.find_by_tag("property")
pnodes[0].assert_attr(name="bar", value="1")
pnodes[1].assert_attr(name="foo", value="<1")
result.stdout.fnmatch_lines(["*= 1 passed in *"])
def test_record_property_on_test_and_teardown_failure(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def other(record_property):
record_property("bar", 1)
yield
assert 0
def test_record(record_property, other):
record_property("foo", "<1")
assert 0
"""
)
result, dom = run_and_parse()
node = dom.get_first_by_tag("testsuite")
tnodes = node.find_by_tag("testcase")
for tnode in tnodes:
psnode = tnode.get_first_by_tag("properties")
assert psnode, f"testcase didn't had expected properties:\n{tnode}"
pnodes = psnode.find_by_tag("property")
pnodes[0].assert_attr(name="bar", value="1")
pnodes[1].assert_attr(name="foo", value="<1")
result.stdout.fnmatch_lines(["*= 1 failed, 1 error *"])
def test_record_property_same_name(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
def test_record_with_same_name(record_property):
record_property("foo", "bar")
record_property("foo", "baz")
"""
)
_result, dom = run_and_parse()
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
psnode = tnode.get_first_by_tag("properties")
pnodes = psnode.find_by_tag("property")
pnodes[0].assert_attr(name="foo", value="bar")
pnodes[1].assert_attr(name="foo", value="baz")
@pytest.mark.parametrize("fixture_name", ["record_property", "record_xml_attribute"])
def test_record_fixtures_without_junitxml(
pytester: Pytester, fixture_name: str
) -> None:
pytester.makepyfile(
f"""
def test_record({fixture_name}):
{fixture_name}("foo", "bar")
"""
)
result = pytester.runpytest()
assert result.ret == 0
@pytest.mark.filterwarnings("default")
def test_record_attribute(pytester: Pytester, run_and_parse: RunAndParse) -> None:
pytester.makeini(
"""
[pytest]
junit_family = xunit1
"""
)
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def other(record_xml_attribute):
record_xml_attribute("bar", 1)
def test_record(record_xml_attribute, other):
record_xml_attribute("foo", "<1");
"""
)
result, dom = run_and_parse()
node = dom.get_first_by_tag("testsuite")
tnode = node.get_first_by_tag("testcase")
tnode.assert_attr(bar="1")
tnode.assert_attr(foo="<1")
result.stdout.fnmatch_lines(
["*test_record_attribute.py:6:*record_xml_attribute is an experimental feature"]
)
@pytest.mark.filterwarnings("default")
@pytest.mark.parametrize("fixture_name", ["record_xml_attribute", "record_property"])
def test_record_fixtures_xunit2(
pytester: Pytester, fixture_name: str, run_and_parse: RunAndParse
) -> None:
"""Ensure record_xml_attribute and record_property drop values when outside of legacy family."""
pytester.makeini(
"""
[pytest]
junit_family = xunit2
"""
)
pytester.makepyfile(
f"""
import pytest
@pytest.fixture
def other({fixture_name}):
{fixture_name}("bar", 1)
def test_record({fixture_name}, other):
{fixture_name}("foo", "<1");
"""
)
result, _dom = run_and_parse(family=None)
expected_lines = []
if fixture_name == "record_xml_attribute":
expected_lines.append(
"*test_record_fixtures_xunit2.py:6:*record_xml_attribute is an experimental feature"
)
expected_lines = [
f"*test_record_fixtures_xunit2.py:6:*{fixture_name} is incompatible "
"with junit_family 'xunit2' (use 'legacy' or 'xunit1')"
]
result.stdout.fnmatch_lines(expected_lines)
def test_random_report_log_xdist(
pytester: Pytester, monkeypatch: MonkeyPatch, run_and_parse: RunAndParse
) -> None:
"""`xdist` calls pytest_runtest_logreport as they are executed by the workers,
with nodes from several nodes overlapping, so junitxml must cope with that
to produce correct reports (#1064)."""
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
pytester.makepyfile(
"""
import pytest, time
@pytest.mark.parametrize('i', list(range(30)))
def test_x(i):
assert i != 22
"""
)
_, dom = run_and_parse("-n2")
suite_node = dom.get_first_by_tag("testsuite")
failed = []
for case_node in suite_node.find_by_tag("testcase"):
if case_node.find_first_by_tag("failure"):
failed.append(case_node["name"])
assert failed == ["test_x[22]"]
@parametrize_families
def test_root_testsuites_tag(
pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
def test_x():
pass
"""
)
_, dom = run_and_parse(family=xunit_family)
root = dom.get_unique_child
assert root.tag == "testsuites"
root.assert_attr(name="pytest tests")
suite_node = root.get_unique_child
assert suite_node.tag == "testsuite"
def test_runs_twice(pytester: Pytester, run_and_parse: RunAndParse) -> None:
f = pytester.makepyfile(
"""
def test_pass():
pass
"""
)
result, dom = run_and_parse("--keep-duplicates", f, f)
result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = (x["classname"] for x in dom.find_by_tag("testcase"))
assert first == second
def test_runs_twice_xdist(
pytester: Pytester, monkeypatch: MonkeyPatch, run_and_parse: RunAndParse
) -> None:
pytest.importorskip("xdist")
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
f = pytester.makepyfile(
"""
def test_pass():
pass
"""
)
result, dom = run_and_parse(f, "--dist", "each", "--tx", "2*popen")
result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = (x["classname"] for x in dom.find_by_tag("testcase"))
assert first == second
def test_fancy_items_regression(pytester: Pytester, run_and_parse: RunAndParse) -> None:
# issue 1259
pytester.makeconftest(
"""
import pytest
class FunItem(pytest.Item):
def runtest(self):
pass
class NoFunItem(pytest.Item):
def runtest(self):
pass
class FunCollector(pytest.File):
def collect(self):
return [
FunItem.from_parent(name='a', parent=self),
NoFunItem.from_parent(name='a', parent=self),
NoFunItem.from_parent(name='b', parent=self),
]
def pytest_collect_file(file_path, parent):
if file_path.suffix == '.py':
return FunCollector.from_parent(path=file_path, parent=parent)
"""
)
pytester.makepyfile(
"""
def test_pass():
pass
"""
)
result, dom = run_and_parse()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
items = sorted(
f"{x['classname']} {x['name']}"
# dom is a DomNode not a mapping, it's not possible to ** it.
for x in dom.find_by_tag("testcase")
)
import pprint
pprint.pprint(items)
assert items == [
"conftest a",
"conftest a",
"conftest b",
"test_fancy_items_regression a",
"test_fancy_items_regression a",
"test_fancy_items_regression b",
"test_fancy_items_regression test_pass",
]
@parametrize_families
def test_global_properties(pytester: Pytester, xunit_family: str) -> None:
path = pytester.path.joinpath("test_global_properties.xml")
log = LogXML(str(path), None, family=xunit_family)
class Report(BaseReport):
sections: list[tuple[str, str]] = []
nodeid = "test_node_id"
log.pytest_sessionstart()
log.add_global_property("foo", "1")
log.add_global_property("bar", "2")
log.pytest_sessionfinish()
dom = minidom.parse(str(path))
properties = dom.getElementsByTagName("properties")
assert properties.length == 1, "There must be one <properties> node"
property_list = dom.getElementsByTagName("property")
assert property_list.length == 2, "There most be only 2 property nodes"
expected = {"foo": "1", "bar": "2"}
actual = {}
for p in property_list:
k = str(p.getAttribute("name"))
v = str(p.getAttribute("value"))
actual[k] = v
assert actual == expected
def test_url_property(pytester: Pytester) -> None:
test_url = "http://www.github.com/pytest-dev"
path = pytester.path.joinpath("test_url_property.xml")
log = LogXML(str(path), None)
class Report(BaseReport):
longrepr = "FooBarBaz"
sections: list[tuple[str, str]] = []
nodeid = "something"
location = "tests/filename.py", 42, "TestClass.method"
url = test_url
test_report = cast(TestReport, Report())
log.pytest_sessionstart()
node_reporter = log._opentestcase(test_report)
node_reporter.append_failure(test_report)
log.pytest_sessionfinish()
test_case = minidom.parse(str(path)).getElementsByTagName("testcase")[0]
assert test_case.getAttribute("url") == test_url, (
"The URL did not get written to the xml"
)
@parametrize_families
def test_record_testsuite_property(
pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makepyfile(
"""
def test_func1(record_testsuite_property):
record_testsuite_property("stats", "all good")
def test_func2(record_testsuite_property):
record_testsuite_property("stats", 10)
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testsuite")
properties_node = node.get_first_by_tag("properties")
p1_node, p2_node = properties_node.find_by_tag(
"property",
)[:2]
p1_node.assert_attr(name="stats", value="all good")
p2_node.assert_attr(name="stats", value="10")
def test_record_testsuite_property_junit_disabled(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_func1(record_testsuite_property):
record_testsuite_property("stats", "all good")
"""
)
result = pytester.runpytest()
assert result.ret == 0
@pytest.mark.parametrize("junit", [True, False])
def test_record_testsuite_property_type_checking(
pytester: Pytester, junit: bool
) -> None:
pytester.makepyfile(
"""
def test_func1(record_testsuite_property):
record_testsuite_property(1, 2)
"""
)
args = ("--junitxml=tests.xml",) if junit else ()
result = pytester.runpytest(*args)
assert result.ret == 1
result.stdout.fnmatch_lines(
["*TypeError: name parameter needs to be a string, but int given"]
)
@pytest.mark.parametrize("suite_name", ["my_suite", ""])
@parametrize_families
def test_set_suite_name(
pytester: Pytester, suite_name: str, run_and_parse: RunAndParse, xunit_family: str
) -> None:
if suite_name:
pytester.makeini(
f"""
[pytest]
junit_suite_name={suite_name}
junit_family={xunit_family}
"""
)
expected = suite_name
else:
expected = "pytest"
pytester.makepyfile(
"""
import pytest
def test_func():
pass
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testsuite")
node.assert_attr(name=expected)
def test_escaped_skipreason_issue3533(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip(reason='1 <> 2')
def test_skip():
pass
"""
)
_, dom = run_and_parse()
node = dom.get_first_by_tag("testcase")
snode = node.get_first_by_tag("skipped")
assert "1 <> 2" in snode.text
snode.assert_attr(message="1 <> 2")
def test_bin_escaped_skipreason(pytester: Pytester, run_and_parse: RunAndParse) -> None:
"""Escape special characters from mark.skip reason (#11842)."""
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip("\33[31;1mred\33[0m")
def test_skip():
pass
"""
)
_, dom = run_and_parse()
node = dom.get_first_by_tag("testcase")
snode = node.get_first_by_tag("skipped")
assert "#x1B[31;1mred#x1B[0m" in snode.text
snode.assert_attr(message="#x1B[31;1mred#x1B[0m")
def test_escaped_setup_teardown_error(
pytester: Pytester, run_and_parse: RunAndParse
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def my_setup():
raise Exception("error: \033[31mred\033[m")
def test_esc(my_setup):
pass
"""
)
_, dom = run_and_parse()
node = dom.get_first_by_tag("testcase")
snode = node.get_first_by_tag("error")
assert "#x1B[31mred#x1B[m" in snode["message"]
assert "#x1B[31mred#x1B[m" in snode.text
@parametrize_families
def test_logging_passing_tests_disabled_does_not_log_test_output(
pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str
) -> None:
pytester.makeini(
f"""
[pytest]
junit_log_passing_tests=False
junit_logging=system-out
junit_family={xunit_family}
"""
)
pytester.makepyfile(
"""
import pytest
import logging
import sys
def test_func():
sys.stdout.write('This is stdout')
sys.stderr.write('This is stderr')
logging.warning('hello')
"""
)
result, dom = run_and_parse(family=xunit_family)
assert result.ret == 0
node = dom.get_first_by_tag("testcase")
assert len(node.find_by_tag("system-err")) == 0
assert len(node.find_by_tag("system-out")) == 0
@parametrize_families
@pytest.mark.parametrize("junit_logging", ["no", "system-out", "system-err"])
def test_logging_passing_tests_disabled_logs_output_for_failing_test_issue5430(
pytester: Pytester,
junit_logging: str,
run_and_parse: RunAndParse,
xunit_family: str,
) -> None:
pytester.makeini(
f"""
[pytest]
junit_log_passing_tests=False
junit_family={xunit_family}
"""
)
pytester.makepyfile(
"""
import pytest
import logging
import sys
def test_func():
logging.warning('hello')
assert 0
"""
)
result, dom = run_and_parse(
"-o", f"junit_logging={junit_logging}", family=xunit_family
)
assert result.ret == 1
node = dom.get_first_by_tag("testcase")
if junit_logging == "system-out":
assert len(node.find_by_tag("system-err")) == 0
assert len(node.find_by_tag("system-out")) == 1
elif junit_logging == "system-err":
assert len(node.find_by_tag("system-err")) == 1
assert len(node.find_by_tag("system-out")) == 0
else:
assert junit_logging == "no"
assert len(node.find_by_tag("system-err")) == 0
assert len(node.find_by_tag("system-out")) == 0
def test_no_message_quiet(pytester: Pytester) -> None:
"""Do not show the summary banner when --quiet is given (#13700)."""
pytester.makepyfile("def test(): pass")
result = pytester.runpytest("--junitxml=pytest.xml")
result.stdout.fnmatch_lines("* generated xml file: *")
result = pytester.runpytest("--junitxml=pytest.xml", "--quiet")
result.stdout.no_fnmatch_line("* generated xml file: *")
|
TestNonPython
|
python
|
cython__cython
|
Cython/Compiler/FlowControl.py
|
{
"start": 9801,
"end": 9971
}
|
class ____:
def __init__(self, next_block, loop_block):
self.next_block = next_block
self.loop_block = loop_block
self.exceptions = []
|
LoopDescr
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 135289,
"end": 137726
}
|
class ____(Request):
"""
Remove a task from its queue.
Fails if task status is not queued.
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "dequeue"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task: str, status_reason: Optional[str] = None, status_message: Optional[str] = None, **kwargs: Any
) -> None:
super(DequeueRequest, self).__init__(**kwargs)
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
|
DequeueRequest
|
python
|
etianen__django-reversion
|
reversion/models.py
|
{
"start": 1669,
"end": 4652
}
|
class ____(models.Model):
"""A group of related serialized versions."""
date_created = models.DateTimeField(
db_index=True,
verbose_name=_("date created"),
help_text="The date and time this revision was created.",
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.SET_NULL,
verbose_name=_("user"),
help_text="The user who created this revision.",
)
comment = models.TextField(
blank=True,
verbose_name=_("comment"),
help_text="A text comment on this revision.",
)
def get_comment(self):
try:
LogEntry = apps.get_model('admin.LogEntry')
return LogEntry(change_message=self.comment).get_change_message()
except LookupError:
return self.comment
def revert(self, delete=False):
# Group the models by the database of the serialized model.
versions_by_db = defaultdict(list)
for version in self.version_set.iterator():
versions_by_db[version.db].append(version)
# For each db, perform a separate atomic revert.
for version_db, versions in versions_by_db.items():
with transaction.atomic(using=version_db):
# Optionally delete objects no longer in the current revision.
if delete:
# Get a set of all objects in this revision.
old_revision = set()
for version in versions:
model = version._model
try:
# Load the model instance from the same DB as it was saved under.
old_revision.add(model._default_manager.using(version.db).get(pk=version.object_id))
except model.DoesNotExist:
pass
# Calculate the set of all objects that are in the revision now.
current_revision = chain.from_iterable(
_follow_relations_recursive(obj)
for obj in old_revision
)
# Delete objects that are no longer in the current revision.
collector = Collector(using=version_db)
new_objs = [item for item in current_revision
if item not in old_revision]
for model, group in groupby(new_objs, type):
collector.collect(list(group))
collector.delete()
# Attempt to revert all revisions.
_safe_revert(versions)
def __str__(self):
return ", ".join(force_str(version) for version in self.version_set.all())
class Meta:
verbose_name = _('revision')
verbose_name_plural = _('revisions')
app_label = "reversion"
ordering = ("-pk",)
|
Revision
|
python
|
py-pdf__pypdf
|
pypdf/generic/_fit.py
|
{
"start": 78,
"end": 5515
}
|
class ____:
def __init__(
self, fit_type: str, fit_args: tuple[Union[None, float, Any], ...] = ()
) -> None:
from ._base import FloatObject, NameObject, NullObject, NumberObject # noqa: PLC0415
self.fit_type = NameObject(fit_type)
self.fit_args: list[Union[NullObject, FloatObject, NumberObject]] = [
NullObject() if is_null_or_none(a) else FloatObject(a) for a in fit_args
]
@classmethod
def xyz(
cls,
left: Optional[float] = None,
top: Optional[float] = None,
zoom: Optional[float] = None,
) -> "Fit":
"""
Display the page designated by page, with the coordinates (left, top)
positioned at the upper-left corner of the window and the contents
of the page magnified by the factor zoom.
A null value for any of the parameters left, top, or zoom specifies
that the current value of that parameter is to be retained unchanged.
A zoom value of 0 has the same meaning as a null value.
Args:
left:
top:
zoom:
Returns:
The created fit object.
"""
return Fit(fit_type="/XYZ", fit_args=(left, top, zoom))
@classmethod
def fit(cls) -> "Fit":
"""
Display the page designated by page, with its contents magnified just
enough to fit the entire page within the window both horizontally and
vertically.
If the required horizontal and vertical magnification factors are
different, use the smaller of the two, centering the page within the
window in the other dimension.
"""
return Fit(fit_type="/Fit")
@classmethod
def fit_horizontally(cls, top: Optional[float] = None) -> "Fit":
"""
Display the page designated by page, with the vertical coordinate top
positioned at the top edge of the window and the contents of the page
magnified just enough to fit the entire width of the page within the
window.
A null value for ``top`` specifies that the current value of that
parameter is to be retained unchanged.
Args:
top:
Returns:
The created fit object.
"""
return Fit(fit_type="/FitH", fit_args=(top,))
@classmethod
def fit_vertically(cls, left: Optional[float] = None) -> "Fit":
return Fit(fit_type="/FitV", fit_args=(left,))
@classmethod
def fit_rectangle(
cls,
left: Optional[float] = None,
bottom: Optional[float] = None,
right: Optional[float] = None,
top: Optional[float] = None,
) -> "Fit":
"""
Display the page designated by page, with its contents magnified
just enough to fit the rectangle specified by the coordinates
left, bottom, right, and top entirely within the window
both horizontally and vertically.
If the required horizontal and vertical magnification factors are
different, use the smaller of the two, centering the rectangle within
the window in the other dimension.
A null value for any of the parameters may result in unpredictable
behavior.
Args:
left:
bottom:
right:
top:
Returns:
The created fit object.
"""
return Fit(fit_type="/FitR", fit_args=(left, bottom, right, top))
@classmethod
def fit_box(cls) -> "Fit":
"""
Display the page designated by page, with its contents magnified just
enough to fit its bounding box entirely within the window both
horizontally and vertically.
If the required horizontal and vertical magnification factors are
different, use the smaller of the two, centering the bounding box
within the window in the other dimension.
"""
return Fit(fit_type="/FitB")
@classmethod
def fit_box_horizontally(cls, top: Optional[float] = None) -> "Fit":
"""
Display the page designated by page, with the vertical coordinate top
positioned at the top edge of the window and the contents of the page
magnified just enough to fit the entire width of its bounding box
within the window.
A null value for top specifies that the current value of that parameter
is to be retained unchanged.
Args:
top:
Returns:
The created fit object.
"""
return Fit(fit_type="/FitBH", fit_args=(top,))
@classmethod
def fit_box_vertically(cls, left: Optional[float] = None) -> "Fit":
"""
Display the page designated by page, with the horizontal coordinate
left positioned at the left edge of the window and the contents of the
page magnified just enough to fit the entire height of its bounding box
within the window.
A null value for left specifies that the current value of that
parameter is to be retained unchanged.
Args:
left:
Returns:
The created fit object.
"""
return Fit(fit_type="/FitBV", fit_args=(left,))
def __str__(self) -> str:
if not self.fit_args:
return f"Fit({self.fit_type})"
return f"Fit({self.fit_type}, {self.fit_args})"
DEFAULT_FIT = Fit.fit()
|
Fit
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/config_migrations.py
|
{
"start": 7521,
"end": 9305
}
|
class ____:
"""
Runtime config migrator that **removes** the deprecated
``action_report_time`` property.
The field was deprecated starting in version 3.5.0.
"""
migrate_key: str = "action_report_time"
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""Return ``True`` when the deprecated key is present."""
return any(cls.migrate_key in report for report in config.get("custom_insights", []))
@classmethod
def transform(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Return a copy of *config* without the deprecated key.
The original mapping is left untouched.
"""
config_copy: Dict[str, Any] = dict(config)
for report in config_copy["custom_insights"]:
report.pop(cls.migrate_key, None)
return config_copy
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
# modify the config
migrated_config = cls.transform(config)
# save the config
source.write_config(migrated_config, config_path)
# return modified config
return migrated_config
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls.should_migrate(config):
emit_configuration_as_airbyte_control_message(cls.modify_and_save(config_path, source, config))
|
RemoveActionReportTimeMigration
|
python
|
spyder-ide__spyder
|
spyder/utils/programs.py
|
{
"start": 1044,
"end": 41232
}
|
class ____(Exception):
pass
def get_temp_dir(suffix=None):
"""
Return temporary Spyder directory, checking previously that it exists.
"""
to_join = [tempfile.gettempdir()]
if os.name == 'nt':
to_join.append('spyder')
else:
username = encoding.to_unicode_from_fs(getuser())
to_join.append('spyder-' + username)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
if suffix is not None:
to_join.append(suffix)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
return tempdir
def is_program_installed(basename, extra_paths=None):
"""
Return program absolute path if installed in PATH.
Otherwise, return None.
Also searches specific platform dependent paths that are not already in
PATH. This permits general use without assuming user profiles are
sourced (e.g. .bash_Profile), such as when login shells are not used to
launch Spyder. Additionally, extra_paths are searched.
On macOS systems, a .app is considered installed if it exists.
"""
extra_paths = [] if extra_paths is None else extra_paths
home = get_home_dir()
if (
sys.platform == 'darwin'
and basename.endswith('.app')
and osp.exists(basename)
):
return basename
pixi = [osp.join(home, '.pixi', 'bin')]
if os.name == 'posix':
pyenv = [
osp.join(home, '.pyenv', 'bin'),
osp.join('/usr', 'local', 'bin'),
]
a = [home, osp.join(home, 'opt'), '/opt']
b = ['mambaforge', 'miniforge3', 'miniforge',
'miniconda3', 'anaconda3', 'miniconda', 'anaconda']
else:
pyenv = [osp.join(home, '.pyenv', 'pyenv-win', 'bin')]
a = [home, osp.join(home, 'AppData', 'Local'),
'C:\\', osp.join('C:\\', 'ProgramData')]
b = ['Mambaforge', 'Miniforge3', 'Miniforge',
'Miniconda3', 'Anaconda3', 'Miniconda', 'Anaconda']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
for path in (
extra_paths
+ conda
+ pyenv
+ pixi
+ os.getenv("PATH", []).split(os.pathsep)
):
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename, extra_paths=None):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
extra_paths = [] if extra_paths is None else extra_paths
names = [basename]
if os.name == 'nt':
# Windows platforms
extensions = ('.exe', '.bat', '.cmd')
if not basename.endswith(extensions):
names = [basename + ext for ext in extensions] + [basename]
for name in names:
path = is_program_installed(name, extra_paths)
if path:
return path
def get_full_command_for_program(path):
"""
Return the list of tokens necessary to open the program
at a given path.
On macOS systems, this function prefixes .app paths with
'open -a', which is necessary to run the application.
On all other OS's, this function has no effect.
:str path: The path of the program to run.
:return: The list of tokens necessary to run the program.
"""
if sys.platform == 'darwin' and path.endswith('.app'):
return ['open', '-a', path]
return [path]
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
# Ensure Windows subprocess environment has certain variables
if "env" in kwargs:
env = CaseInsensitiveDict(kwargs.get("env"))
for env_var in ['SYSTEMROOT', 'SYSTEMDRIVE', 'USERPROFILE']:
env.setdefault(env_var, os.getenv(env_var))
kwargs["env"] = dict(env)
else:
# linux and macOS
if "env" in kwargs:
kwargs["env"].setdefault("HOME", get_home_dir())
return kwargs
def run_shell_command(cmdstr, asynchronous=False, **subprocess_kwargs):
"""
Execute the given shell command.
Note that *args and **kwargs will be passed to the subprocess call.
If 'shell' is given in subprocess_kwargs it must be True,
otherwise ProgramError will be raised.
If 'executable' is not given in subprocess_kwargs, it will
be set to the value of the SHELL environment variable.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
Parameters
----------
cmdstr : str
The string run as a shell command.
asynchronous : bool (False)
Whether to return a subprocess.Popen or asyncio.subprocess.Process.
**subprocess_kwargs : keyword arguments
These will be passed to subprocess.Popen.
"""
popen = subprocess.Popen
pipe = subprocess.PIPE
if asynchronous:
popen = asyncio.create_subprocess_shell
pipe = asyncio.subprocess.PIPE
if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:
raise ProgramError('The "shell" kwarg may be omitted, but if '
'provided it must be True.')
else:
subprocess_kwargs['shell'] = True
# Don't pass SHELL to subprocess on Windows because it makes this
# fumction fail in Git Bash (where SHELL is declared; other Windows
# shells don't set it).
if not os.name == 'nt':
if 'executable' not in subprocess_kwargs:
subprocess_kwargs['executable'] = os.getenv('SHELL')
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, pipe)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return popen(cmdstr, **subprocess_kwargs)
def run_program(program, args=None, **subprocess_kwargs):
"""
Run program in a separate process.
NOTE: returns the process object created by
`subprocess.Popen()`. This can be used with
`proc.communicate()` for example.
If 'shell' appears in the kwargs, it must be False,
otherwise ProgramError will be raised.
If only the program name is given and not the full path,
a lookup will be performed to find the program. If the
lookup fails, ProgramError will be raised.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str program: The name of the program to run.
:list args: The program arguments.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
raise ProgramError("This function is only for non-shell programs, "
"use run_shell_command() instead.")
fullcmd = find_program(program)
if not fullcmd:
raise ProgramError("Program %s was not found" % program)
# As per subprocess, we make a complete list of prog+args
fullcmd = get_full_command_for_program(fullcmd) + (args or [])
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(fullcmd, **subprocess_kwargs)
def parse_linux_desktop_entry(fpath):
"""Load data from desktop entry with xdg specification."""
from xdg.DesktopEntry import DesktopEntry
try:
entry = DesktopEntry(fpath)
entry_data = {}
entry_data['name'] = entry.getName()
entry_data['icon_path'] = entry.getIcon()
entry_data['exec'] = entry.getExec()
entry_data['type'] = entry.getType()
entry_data['hidden'] = entry.getHidden()
entry_data['fpath'] = fpath
except Exception:
entry_data = {
'name': '',
'icon_path': '',
'hidden': '',
'exec': '',
'type': '',
'fpath': fpath
}
return entry_data
def _get_mac_application_icon_path(app_bundle_path):
"""Parse mac application bundle and return path for *.icns file."""
import plistlib
contents_path = info_path = os.path.join(app_bundle_path, 'Contents')
info_path = os.path.join(contents_path, 'Info.plist')
pl = {}
if os.path.isfile(info_path):
try:
# readPlist is deprecated but needed for py27 compat
pl = plistlib.readPlist(info_path)
except Exception:
pass
icon_file = pl.get('CFBundleIconFile')
icon_path = None
if icon_file:
icon_path = os.path.join(contents_path, 'Resources', icon_file)
# Some app bundles seem to list the icon name without extension
if not icon_path.endswith('.icns'):
icon_path = icon_path + '.icns'
if not os.path.isfile(icon_path):
icon_path = None
return icon_path
def get_username():
"""Return current session username."""
if os.name == 'nt':
username = os.getlogin()
else:
import pwd
username = pwd.getpwuid(os.getuid())[0]
return username
def _get_win_reg_info(key_path, hive, flag, subkeys):
"""
See: https://stackoverflow.com/q/53132434
"""
import winreg
reg = winreg.ConnectRegistry(None, hive)
software_list = []
try:
key = winreg.OpenKey(reg, key_path, 0, winreg.KEY_READ | flag)
count_subkey = winreg.QueryInfoKey(key)[0]
for index in range(count_subkey):
software = {}
try:
subkey_name = winreg.EnumKey(key, index)
if not (subkey_name.startswith('{')
and subkey_name.endswith('}')):
software['key'] = subkey_name
subkey = winreg.OpenKey(key, subkey_name)
for property in subkeys:
try:
value = winreg.QueryValueEx(subkey, property)[0]
software[property] = value
except EnvironmentError:
software[property] = ''
software_list.append(software)
except EnvironmentError:
continue
except Exception:
pass
return software_list
def _clean_win_application_path(path):
"""Normalize windows path and remove extra quotes."""
path = path.replace('\\', '/').lower()
# Check for quotes at start and end
if path[0] == '"' and path[-1] == '"':
path = literal_eval(path)
return path
def _get_win_applications():
"""Return all system installed windows applications."""
import winreg
# See:
# https://docs.microsoft.com/en-us/windows/desktop/shell/app-registration
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths'
# Hive and flags
hfs = [
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_32KEY),
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_64KEY),
(winreg.HKEY_CURRENT_USER, 0),
]
subkeys = [None]
sort_key = 'key'
app_paths = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
if software[None]:
key = software['key'].capitalize().replace('.exe', '')
expanded_fpath = os.path.expandvars(software[None])
expanded_fpath = _clean_win_application_path(expanded_fpath)
app_paths[key] = expanded_fpath
# See:
# https://www.blog.pythonlibrary.org/2010/03/03/finding-installed-software-using-python/
# https://stackoverflow.com/q/53132434
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
subkeys = ['DisplayName', 'InstallLocation', 'DisplayIcon']
sort_key = 'DisplayName'
apps = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
location = software['InstallLocation']
name = software['DisplayName']
icon = software['DisplayIcon']
key = software['key']
if name and icon:
icon = icon.replace('"', '')
icon = icon.split(',')[0]
if location == '' and icon:
location = os.path.dirname(icon)
if not os.path.isfile(icon):
icon = ''
if location and os.path.isdir(location):
files = [f for f in os.listdir(location)
if os.path.isfile(os.path.join(location, f))]
if files:
for fname in files:
fn_low = fname.lower()
valid_file = fn_low.endswith(('.exe', '.com', '.bat'))
if valid_file and not fn_low.startswith('unins'):
fpath = os.path.join(location, fname)
expanded_fpath = os.path.expandvars(fpath)
expanded_fpath = _clean_win_application_path(
expanded_fpath)
apps[name + ' (' + fname + ')'] = expanded_fpath
# Join data
values = list(zip(*apps.values()))[-1]
for name, fpath in app_paths.items():
if fpath not in values:
apps[name] = fpath
return apps
def _get_linux_applications():
"""Return all system installed linux applications."""
# See:
# https://standards.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# https://askubuntu.com/q/433609
apps = {}
desktop_app_paths = [
'/usr/share/**/*.desktop',
'~/.local/share/**/*.desktop',
]
all_entries_data = []
for path in desktop_app_paths:
fpaths = glob.glob(path)
for fpath in fpaths:
entry_data = parse_linux_desktop_entry(fpath)
all_entries_data.append(entry_data)
for entry_data in sorted(all_entries_data, key=lambda x: x['name']):
if not entry_data['hidden'] and entry_data['type'] == 'Application':
apps[entry_data['name']] = entry_data['fpath']
return apps
def _get_mac_applications():
"""Return all system installed osx applications."""
apps = {}
app_folders = [
'/**/*.app',
'/Users/{}/**/*.app'.format(get_username())
]
fpaths = []
for path in app_folders:
fpaths += glob.glob(path)
for fpath in fpaths:
if os.path.isdir(fpath):
name = os.path.basename(fpath).split('.app')[0]
apps[name] = fpath
return apps
def get_application_icon(fpath):
"""Return application icon or default icon if not found."""
from qtpy.QtGui import QIcon
from spyder.utils.icon_manager import ima
if os.path.isfile(fpath) or os.path.isdir(fpath):
icon = ima.icon('no_match')
if sys.platform == 'darwin':
icon_path = _get_mac_application_icon_path(fpath)
if icon_path and os.path.isfile(icon_path):
icon = QIcon(icon_path)
elif os.name == 'nt':
pass
else:
entry_data = parse_linux_desktop_entry(fpath)
icon_path = entry_data['icon_path']
if icon_path:
if os.path.isfile(icon_path):
icon = QIcon(icon_path)
else:
icon = QIcon.fromTheme(icon_path)
else:
icon = ima.icon('help')
return icon
def get_installed_applications():
"""
Return all system installed applications.
The return value is a list of tuples where the first item is the icon path
and the second item is the program executable path.
"""
apps = {}
if sys.platform == 'darwin':
apps = _get_mac_applications()
elif os.name == 'nt':
apps = _get_win_applications()
else:
apps = _get_linux_applications()
if sys.platform == 'darwin':
apps = {key: val for (key, val) in apps.items() if osp.isdir(val)}
else:
apps = {key: val for (key, val) in apps.items() if osp.isfile(val)}
return apps
def open_files_with_application(app_path, fnames):
"""
Generalized method for opening files with a specific application.
Returns a dictionary of the command used and the return code.
A code equal to 0 means the application executed successfully.
"""
return_codes = {}
if os.name == 'nt':
fnames = [fname.replace('\\', '/') for fname in fnames]
if sys.platform == 'darwin':
if not (app_path.endswith('.app') and os.path.isdir(app_path)):
raise ValueError('`app_path` must point to a valid OSX '
'application!')
cmd = ['open', '-a', app_path] + fnames
try:
return_code = subprocess.call(cmd)
except Exception:
return_code = 1
return_codes[' '.join(cmd)] = return_code
elif os.name == 'nt':
if not (app_path.endswith(('.exe', '.bat', '.com', '.cmd'))
and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Windows '
'executable!')
cmd = [app_path] + fnames
try:
return_code = subprocess.call(cmd)
except OSError:
return_code = 1
return_codes[' '.join(cmd)] = return_code
else:
if not (app_path.endswith('.desktop') and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Linux '
'application!')
entry = parse_linux_desktop_entry(app_path)
app_path = entry['exec']
multi = []
extra = []
if len(fnames) == 1:
fname = fnames[0]
if '%u' in app_path:
cmd = app_path.replace('%u', fname)
elif '%f' in app_path:
cmd = app_path.replace('%f', fname)
elif '%U' in app_path:
cmd = app_path.replace('%U', fname)
elif '%F' in app_path:
cmd = app_path.replace('%F', fname)
else:
cmd = app_path
extra = fnames
elif len(fnames) > 1:
if '%U' in app_path:
cmd = app_path.replace('%U', ' '.join(fnames))
elif '%F' in app_path:
cmd = app_path.replace('%F', ' '.join(fnames))
if '%u' in app_path:
for fname in fnames:
multi.append(app_path.replace('%u', fname))
elif '%f' in app_path:
for fname in fnames:
multi.append(app_path.replace('%f', fname))
else:
cmd = app_path
extra = fnames
if multi:
for cmd in multi:
try:
return_code = subprocess.call([cmd], shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
else:
try:
return_code = subprocess.call([cmd] + extra, shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
return return_codes
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
if package is None:
spec = importlib.util.find_spec(module)
if spec:
path = spec.origin
else:
path = None
else:
spec = importlib.util.find_spec(package)
if spec:
path = osp.join(spec.origin, module) + '.py'
else:
path = None
if path:
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path
def run_python_script(package=None, module=None, args=None, p_args=None):
    """
    Run Python script in a separate process

    package=None -> module is in sys.path (standard library modules)
    """
    assert module is not None
    # Normalize the optional argument lists before validating them.
    args = [] if args is None else args
    p_args = [] if p_args is None else p_args
    assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))
    script_path = python_script_exists(package, module)
    run_program(sys.executable, p_args + [script_path] + args)
def shell_split(text):
    """
    Split the string `text` using shell-like syntax

    This avoids breaking single/double-quoted strings (e.g. containing
    strings with spaces). This function is almost equivalent to the shlex.split
    function (see standard library `shlex`) except that it is supporting
    unicode strings (shlex does not support unicode until Python 2.7.3).
    """
    assert isinstance(text, str)  # in case a QString is passed...
    # Capture runs of whitespace and (non-escaped) quoted substrings as
    # separators, so quoted spans survive the split as single tokens.
    separators = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
    return [
        chunk.strip('"').strip("'")
        for chunk in re.split(separators, text)
        if chunk.strip()
    ]
def get_python_args(fname, python_args, interact, debug, end_args):
    """Construct Python interpreter arguments"""
    result = []
    if python_args is not None:
        result.extend(python_args.split())
    if interact:
        result.append('-i')
    if debug:
        result += ['-m', 'pdb']
    if fname is not None:
        if os.name == 'nt' and debug:
            # When calling pdb on Windows, one has to replace backslashes by
            # slashes to avoid confusion with escape characters (otherwise,
            # for example, '\t' will be interpreted as a tabulation):
            result.append(osp.normpath(fname).replace(os.sep, '/'))
        else:
            result.append(fname)
    if end_args:
        result += shell_split(end_args)
    return result
def run_general_file_in_terminal(
    executable: str,
    args: str,
    fname: str,
    script_args: str,
    wdir: str,
    close_after_exec: bool = False,
    windows_shell: str = "cmd.exe /K"
):
    """
    Run a file on a given CLI executable.

    Arguments
    ---------
    executable: str
        Name or path to the executable.
    args: str
        Arguments to pass to the executable.
    fname: str
        Path to the file to execute in the shell interpreter/executable.
    script_args: str
        File arguments
    wdir: str
        Working directory path from which the file will be executed.
    close_after_exec: bool
        Whether the terminal may close right after execution; when False a
        "press enter" pause is appended so output stays visible.
    windows_shell: str
        Name of the executable to use as shell (Windows only).
    """
    # Quote fname in case it has spaces (all platforms)
    # fname = f'"{fname}"'
    wdir = None if not wdir else wdir  # Cannot be empty string
    args = shell_split(args)
    script_args = shell_split(script_args)
    p_args = args + [fname] + script_args
    p_args = [f'"{x}"' for x in p_args]

    if os.name == 'nt':
        if wdir is not None:
            # wdir can come with / as os.sep, so we need to take care of it.
            wdir = wdir.replace('/', '\\')

        # python_exe must be quoted in case it has spaces
        cmd = f'start {windows_shell} ""{executable}" '
        cmd += ' '.join(p_args)  # + '"'
        logger.info('Executing on external console: %s', cmd)
        try:
            run_shell_command(cmd, cwd=wdir)
        except WindowsError:
            from qtpy.QtWidgets import QMessageBox
            QMessageBox.critical(
                None,
                _('Run'),
                _("It was not possible to run this file in an external "
                  "terminal"),
                QMessageBox.Ok
            )
    elif sys.platform.startswith('linux'):
        programs = [{'cmd': 'gnome-terminal', 'execute-option': '--'},
                    {'cmd': 'konsole', 'execute-option': '-e'},
                    {'cmd': 'xfce4-terminal', 'execute-option': '-x'},
                    {'cmd': 'xterm', 'execute-option': '-e'}]
        for program in programs:
            if is_program_installed(program['cmd']):
                command = ' '.join([executable] + p_args)
                logger.info('Executing on external console: %s', command)
                f = None
                if not close_after_exec:
                    # Wrap the command in a helper script that pauses at the
                    # end, so the terminal window stays open for the user.
                    # NOTE: previously this wrote to `f` unconditionally,
                    # which crashed with AttributeError (f is None) whenever
                    # close_after_exec was True.
                    f = tempfile.NamedTemporaryFile(
                        'wt',
                        prefix='run_spyder_',
                        suffix='.sh',
                        dir=get_temp_dir(),
                        delete=False
                    )
                    f.write(command + '\n')
                    f.write(f'read -p "{_("Press enter to continue...")}"\n')
                    executable = '/usr/bin/bash'
                    p_args = [f.name]
                cmd = [program['cmd'], program['execute-option'], executable]
                cmd.extend(p_args)
                run_shell_command(' '.join(cmd), cwd=wdir)
                if f:
                    f.close()
                return
    elif sys.platform == 'darwin':
        f = tempfile.NamedTemporaryFile(
            'wt',
            prefix='run_spyder_',
            suffix='.sh',
            dir=get_temp_dir(),
            delete=False
        )
        if wdir:
            f.write('cd "{}"\n'.format(wdir))
        f.write(' '.join([executable] + p_args) + '\n')
        if not close_after_exec:
            f.write(f'read -p "{_("Press enter to continue...")}"\n')
        f.close()
        os.chmod(f.name, 0o777)

        def run_terminal_thread():
            proc = run_shell_command(f'open -a Terminal.app {f.name}')
            # Prevent race condition
            time.sleep(3)
            proc.wait()
            os.remove(f.name)

        thread = threading.Thread(target=run_terminal_thread)
        thread.start()
    else:
        raise NotImplementedError
def run_python_script_in_terminal(fname, wdir, args, interact, debug,
                                  python_args, executable=None, pypath=None):
    """
    Run Python script in an external system terminal.

    :str wdir: working directory, may be empty.
    """
    if executable is None:
        executable = get_python_executable()

    # Start from the current environment but drop PYTHONPATH so Spyder's own
    # path entries don't leak into the child; re-add it only if the caller
    # explicitly provided one.
    env = {**os.environ}
    env.pop('PYTHONPATH', None)
    if pypath is not None:
        pypath = os.pathsep.join(pypath)
        env['PYTHONPATH'] = pypath

    # Quote fname in case it has spaces (all platforms)
    fname = f'"{fname}"'

    wdir = None if not wdir else wdir  # Cannot be empty string
    p_args = get_python_args(fname, python_args, interact, debug, args)

    if os.name == 'nt':
        if wdir is not None:
            # wdir can come with / as os.sep, so we need to take care of it.
            wdir = wdir.replace('/', '\\')

            # UNC paths start with \\
            if osp.splitdrive(wdir)[0].startswith("\\\\"):
                from qtpy.QtWidgets import QMessageBox
                QMessageBox.critical(
                    None,
                    _('Run'),
                    _("External terminals does not support a UNC file path as "
                      "the working directory."),
                    QMessageBox.Ok
                )
                return

        # python_exe must be quoted in case it has spaces
        cmd = f'start cmd.exe /K ""{executable}" '
        cmd += ' '.join(p_args) + '"' + ' ^&^& exit'
        try:
            run_shell_command(cmd, cwd=wdir, env=env)
        except WindowsError:
            from qtpy.QtWidgets import QMessageBox
            QMessageBox.critical(
                None,
                _('Run'),
                _("It was not possible to run this file in an external "
                  "terminal"),
                QMessageBox.Ok
            )
    elif sys.platform.startswith('linux'):
        # Try the known terminal emulators in order and use the first one
        # that is installed; each needs its own "execute command" flag.
        programs = [{'cmd': 'gnome-terminal', 'execute-option': '--'},
                    {'cmd': 'konsole', 'execute-option': '-e'},
                    {'cmd': 'xfce4-terminal', 'execute-option': '-x'},
                    {'cmd': 'xterm', 'execute-option': '-e'}]
        for program in programs:
            if is_program_installed(program['cmd']):
                cmd = [program['cmd'], program['execute-option'], executable]
                cmd.extend(p_args)
                run_shell_command(' '.join(cmd), cwd=wdir, env=env)
                return
    elif sys.platform == 'darwin':
        # macOS: write a throwaway shell script and open it with Terminal.app,
        # since Terminal.app cannot be handed a command line directly.
        f = tempfile.NamedTemporaryFile('wt', prefix='run_spyder_',
                                        suffix='.sh', dir=get_temp_dir(),
                                        delete=False)
        if wdir:
            f.write('cd "{}"\n'.format(wdir))
        if pypath is not None:
            f.write(f'export PYTHONPATH={pypath}\n')
        f.write(' '.join([executable] + p_args))
        f.close()
        os.chmod(f.name, 0o777)

        def run_terminal_thread():
            proc = run_shell_command(f'open -a Terminal.app {f.name}')
            # Prevent race condition
            time.sleep(3)
            proc.wait()
            os.remove(f.name)

        thread = threading.Thread(target=run_terminal_thread)
        thread.start()
    else:
        raise NotImplementedError
def check_version_range(module_version, version_range):
    """
    Check if a module's version lies in `version_range`.

    `version_range` is one condition like '>=0.13' or several separated by
    commas, e.g. '>=0.13,<1.0'; a missing operator means equality.
    """
    if ',' in version_range:
        conditions = version_range.split(',')
    else:
        conditions = [version_range]
    satisfied = True
    for condition in conditions:
        # Locate where the numeric part begins; everything before it is the
        # comparison operator. Note: every condition is validated even after
        # the result is already known to be False.
        digit = re.search(r'[0-9]', condition)
        assert digit is not None, "Invalid version number"
        operator = condition[:digit.start()] or '='
        if operator not in ['>=', '>', '=', '<', '<=', '!=']:
            raise RuntimeError(f"Invalid version condition '{operator}'")
        required = condition[digit.start():]
        satisfied = satisfied and check_version(module_version, required,
                                                operator)
    return satisfied
def check_version(actver, version, cmp_op):
    """
    Check version string of an active module against a required version.

    If dev/prerelease tags result in TypeError for string-number comparison,
    it is assumed that the dependency is satisfied.
    Users on dev branches are responsible for keeping their own packages up to
    date.

    Copyright (C) 2013 The IPython Development Team
    Distributed under the terms of the BSD License.
    """
    if isinstance(actver, tuple):
        actver = '.'.join(str(part) for part in actver)

    # Dispatch table instead of an if/elif chain; unknown operators yield
    # False, matching the original fall-through behavior.
    comparators = {
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '!=': lambda a, b: a != b,
    }
    try:
        compare = comparators.get(cmp_op)
        if compare is None:
            return False
        return compare(parse(actver), parse(version))
    except TypeError:
        return True
def get_module_version(module_name, interpreter=None):
    """Return module version or None if version can't be retrieved.

    If `interpreter` is given, the module is probed in that interpreter by
    running a small script in a subprocess; raises RuntimeError if the module
    cannot be imported there. Otherwise the module is imported in the current
    process and its `__version__`/`VERSION` attribute (or distribution
    metadata) is used.
    """
    if interpreter:
        # Script executed in the target interpreter: prints 'No Module' on
        # import failure, otherwise prints the version attribute (or 'None').
        cmd = dedent(
            """
            try:
                import {} as mod
            except Exception:
                print('No Module') # spyder: test-skip
            print(
                getattr(mod, '__version__', getattr(mod, 'VERSION', None))
            ) # spyder: test-skip
            """
        ).format(module_name)

        # Use clean environment while preserving basic environment variables
        # needed to properly detect installed modules like `spyder-kernels`
        # from system-wide Python installations on Windows.
        # See spyder-ide/spyder#20968
        env = {}
        if os.name == "nt":
            if "USERPROFILE" in os.environ:
                env["USERPROFILE"] = os.environ["USERPROFILE"]
            if "APPDATA" in os.environ:
                env["APPDATA"] = os.environ["APPDATA"]

        proc = run_program(interpreter, ['-c', cmd], env=env)
        stdout, stderr = proc.communicate()
        stdout = stdout.decode().strip()
        if 'No Module' in stdout:
            raise RuntimeError("No module named " + str(module_name))
        if stdout != 'None':
            # the module is installed and it has a version attribute
            return stdout
        return None

    # No interpreter given: import here and read the version attribute,
    # falling back to the installed distribution's metadata.
    mod = __import__(module_name)
    ver = getattr(mod, '__version__', getattr(mod, 'VERSION', None))
    if not ver:
        ver = get_package_version(module_name)
    return ver
def get_package_version(package_name):
    """Return the installed distribution version of `package_name`.

    Returns None when no distribution metadata is found for that name.
    """
    try:
        version = package_version(package_name)
    except PackageNotFoundError:
        version = None
    return version
def is_module_installed(module_name, version=None, interpreter=None,
                        distribution_name=None):
    """
    Return True if module ``module_name`` is installed

    If ``version`` is not None, checks that the module's installed version is
    consistent with ``version``. The module must have an attribute named
    '__version__' or 'VERSION'.

    ``version`` may start with =, >=, > or < to specify the exact requirement;
    multiple conditions may be separated by ',' (e.g. '>=0.13,<1.0')

    If ``interpreter`` is not None, checks if a module is installed with a
    given ``version`` in the ``interpreter``'s environment. Otherwise checks
    in Spyder's environment.

    ``distribution_name`` is the distribution name of a package. For instance,
    for pylsp_black that name is python_lsp_black.
    """
    if interpreter is not None:
        if is_python_interpreter(interpreter):
            try:
                module_version = get_module_version(module_name, interpreter)
            except Exception:
                # Module can't be imported in that interpreter.
                return False
        else:
            # Try to not take a wrong decision if interpreter check fails
            return True
    else:
        # interpreter is None, just get module version in Spyder environment
        try:
            module_version = get_module_version(module_name)
        except Exception:
            # Module is not installed
            return False

        # This can happen if a package was not uninstalled correctly. For
        # instance, if it's __pycache__ main directory is left behind.
        try:
            mod = __import__(module_name)
            if not getattr(mod, '__file__', None):
                return False
        except Exception:
            # Best effort only — an import failure here must not flip the
            # earlier "installed" determination.
            pass

        # Try to get the module version from its distribution name. For
        # instance, pylsp_black doesn't have a version but that can be
        # obtained from its distribution, called python_lsp_black.
        if not module_version and distribution_name:
            module_version = get_package_version(distribution_name)

    if version is None:
        return True
    else:
        return check_version_range(module_version, version)
def is_python_interpreter_valid_name(filename):
    """Check that the python interpreter file has a valid name.

    Accepts names ending in python, optionally followed by a version
    (e.g. 3 or 3.11), a 'w' suffix and/or '.exe' — case-insensitive.
    """
    valid_name = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
    return re.match(valid_name, filename, flags=re.I) is not None
def is_python_interpreter(filename):
    """Evaluate whether a file is a python interpreter or not.

    Follows symlinks, validates the file name, then distinguishes real
    binaries from text files (pythonw differs per platform) and finally
    confirms the candidate by actually running it.
    """
    real_filename = os.path.realpath(filename)  # To follow symlink if existent

    if (not osp.isfile(real_filename) or
            not is_python_interpreter_valid_name(real_filename)):
        return False

    # File exists and has valid name
    is_text_file = encoding.is_text_file(real_filename)

    if is_pythonw(real_filename):
        if os.name == 'nt':
            # pythonw is a binary on Windows
            if not is_text_file:
                return True
            else:
                return False
        elif sys.platform == 'darwin':
            # pythonw is a text file in Anaconda but a binary in
            # the system
            if is_conda_env(pyexec=real_filename) and is_text_file:
                return True
            elif not is_text_file:
                return True
            else:
                return False
        else:
            # There's no pythonw in other systems
            return False
    elif is_text_file:
        # At this point we can't have a text file
        return False
    else:
        # Binary with a plausible name: confirm by executing it.
        return check_python_help(real_filename)
def is_pythonw(filename):
    """Check that the python interpreter has 'pythonw'.

    Matches names ending in pythonw (optionally with a version number and/or
    '.exe'), case-insensitively.
    """
    pythonw_name = r'.*python(\d\.?\d*)?w(.exe)?$'
    return re.match(pythonw_name, filename, flags=re.I) is not None
def check_python_help(filename):
    """Check that the python interpreter can compile and provide the zen."""
    # Running `import this` prints the Zen of Python; a genuine interpreter
    # must produce all of these opening lines.
    zen_lines = (
        'Beautiful is better than ugly.',
        'Explicit is better than implicit.',
        'Simple is better than complex.',
        'Complex is better than complicated.',
    )
    try:
        proc = run_program(filename, ['-c', 'import this'], env={})
        output, _ = proc.communicate()
        output = str(output)
        return all(line in output for line in zen_lines)
    except Exception:
        # Any failure to launch or communicate means it's not a usable
        # interpreter.
        return False
def is_spyder_process(pid):
    """
    Test whether given PID belongs to a Spyder process.

    This is checked by testing the first three command line arguments. This
    function returns a bool. If there is no process with this PID or its
    command line cannot be accessed (perhaps because the process is owned by
    another user), then the function returns False.
    """
    try:
        process = psutil.Process(int(pid))

        # Valid names for main script
        main_scripts = {'spyder', 'spyder3', 'spyder.exe', 'spyder3.exe',
                        'bootstrap.py', 'spyder-script.py',
                        'spyder-script.pyw', 'Spyder.launch.pyw'}
        if running_under_pytest():
            main_scripts.add('runtests.py')

        # Check the first three command line arguments
        leading_args = {os.path.basename(arg)
                        for arg in process.cmdline()[:3]}
        return bool(main_scripts & leading_args)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return False
def get_interpreter_info(path):
    """Return version information of the selected Python interpreter."""
    try:
        raw, __ = run_program(path, ['-V']).communicate()
        info = raw.decode().strip()
        # This is necessary to prevent showing unexpected output.
        # See spyder-ide/spyder#19000
        if not re.search(r'^Python \d+\.\d+\.\d+$', info):
            info = ''
    except Exception:
        info = ''
    return info.strip()
def find_git():
    """Find git executable in the system."""
    if sys.platform != 'darwin':
        return find_program('git')

    # On macOS, run the bundled check script first and only report git when
    # it succeeds.
    result = subprocess.run(
        osp.join(HERE, "check-git.sh"), capture_output=True)
    if result.returncode != 0:
        return None
    return find_program('git')
|
ProgramError
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/ext/hybrid.py
|
{
"start": 44940,
"end": 47594
}
|
class ____(interfaces.InspectionAttrInfo, Generic[_P, _R]):
"""A decorator which allows definition of a Python object method with both
instance-level and class-level behavior.
"""
is_attribute = True
extension_type = HybridExtensionType.HYBRID_METHOD
def __init__(
self,
func: Callable[Concatenate[Any, _P], _R],
expr: Optional[
Callable[Concatenate[Any, _P], SQLCoreOperations[_R]]
] = None,
):
"""Create a new :class:`.hybrid_method`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_method
class SomeClass:
@hybrid_method
def value(self, x, y):
return self._value + x + y
@value.expression
@classmethod
def value(cls, x, y):
return func.some_function(cls._value, x, y)
"""
self.func = func
if expr is not None:
self.expression(expr)
else:
self.expression(func) # type: ignore
@property
def inplace(self) -> Self:
"""Return the inplace mutator for this :class:`.hybrid_method`.
The :class:`.hybrid_method` class already performs "in place" mutation
when the :meth:`.hybrid_method.expression` decorator is called,
so this attribute returns Self.
.. versionadded:: 2.0.4
.. seealso::
:ref:`hybrid_pep484_naming`
"""
return self
@overload
def __get__(
self, instance: Literal[None], owner: Type[object]
) -> Callable[_P, SQLCoreOperations[_R]]: ...
@overload
def __get__(
self, instance: object, owner: Type[object]
) -> Callable[_P, _R]: ...
def __get__(
self, instance: Optional[object], owner: Type[object]
) -> Union[Callable[_P, _R], Callable[_P, SQLCoreOperations[_R]]]:
if instance is None:
return self.expr.__get__(owner, owner) # type: ignore
else:
return self.func.__get__(instance, owner) # type: ignore
def expression(
self, expr: Callable[Concatenate[Any, _P], SQLCoreOperations[_R]]
) -> hybrid_method[_P, _R]:
"""Provide a modifying decorator that defines a
SQL-expression producing method."""
self.expr = expr
if not self.expr.__doc__:
self.expr.__doc__ = self.func.__doc__
return self
def _unwrap_classmethod(meth: _T) -> _T:
if isinstance(meth, classmethod):
return meth.__func__ # type: ignore
else:
return meth
|
hybrid_method
|
python
|
bokeh__bokeh
|
src/bokeh/server/contexts.py
|
{
"start": 12613,
"end": 14591
}
|
class ____:
_arguments: dict[str, list[bytes]]
_cookies: dict[str, str]
_headers: dict[str, str | list[str]]
def __init__(
self,
request: HTTPServerRequest,
arguments: dict[str, bytes | list[bytes]] | None = None,
cookies: dict[str, str] | None = None,
headers: dict[str, str | list[str]] | None = None,
) -> None:
self._request = request
if arguments is not None:
self._arguments = arguments
elif hasattr(request, 'arguments'):
self._arguments = dict(request.arguments)
else:
self._arguments = {}
if 'bokeh-session-id' in self._arguments:
del self._arguments['bokeh-session-id']
if cookies is not None:
self._cookies = cookies
elif hasattr(request, 'cookies'):
# Django cookies are plain strings, tornado cookies are objects with a value
self._cookies = {k: v if isinstance(v, str) else v.value for k, v in request.cookies.items()}
else:
self._cookies = {}
if headers is not None:
self._headers = headers
elif hasattr(request, 'headers'):
self._headers = dict(request.headers)
else:
self._headers = {}
@property
def arguments(self) -> dict[str, list[bytes]]:
return self._arguments
@property
def cookies(self) -> dict[str, str]:
return self._cookies
@property
def headers(self) -> dict[str, str | list[str]]:
return self._headers
def __getattr__(self, name: str) -> Any:
if not name.startswith("_"):
val = getattr(self._request, name, None)
if val is not None:
return val
return super().__getattr__(name)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
_RequestProxy
|
python
|
jazzband__django-waffle
|
waffle/tests/test_testutils.py
|
{
"start": 11466,
"end": 11793
}
|
class ____(OverrideSwitchOnClassTestCase):
"""
Extend ``OverrideSwitchOnClassTestCase``
and make sure ``override_switch`` change still works.
"""
def test_child_undecorated_method_is_set_properly_for_switch(self):
self.assertFalse(waffle.switch_is_active('foo'))
|
InheritanceOverrideSwitchOnClassTests
|
python
|
numba__numba
|
numba/tests/test_parfors.py
|
{
"start": 151971,
"end": 156122
}
|
class ____(TestCase):
"""
Tests chunksize handling in ParallelAccelerator.
"""
_numba_parallel_test_ = False
def setUp(self):
set_parallel_chunksize(0)
def tearDown(self):
set_parallel_chunksize(0)
def test_python_parallel_chunksize_basic(self):
# Test basic chunksize operations outside njit.
self.assertEqual(get_parallel_chunksize(), 0)
set_parallel_chunksize(8)
self.assertEqual(get_parallel_chunksize(), 8)
set_parallel_chunksize(0)
self.assertEqual(get_parallel_chunksize(), 0)
def test_python_with_chunksize(self):
# Test "with parallel_chunksize" outside njit.
self.assertEqual(get_parallel_chunksize(), 0)
with parallel_chunksize(8):
self.assertEqual(get_parallel_chunksize(), 8)
self.assertEqual(get_parallel_chunksize(), 0)
def test_njit_parallel_chunksize_basic(self):
# Test basic chunksize operations inside njit.
@njit
def get_cs():
return get_parallel_chunksize()
@njit
def set_cs(x):
return set_parallel_chunksize(x)
self.assertEqual(get_cs(), 0)
set_cs(8)
self.assertEqual(get_cs(), 8)
set_cs(0)
self.assertEqual(get_cs(), 0)
def test_njit_with_chunksize(self):
# Test "with parallel_chunksize" inside njit.
@njit
def test_impl(x):
cs1 = get_parallel_chunksize()
with parallel_chunksize(8):
cs2 = get_parallel_chunksize()
cs3 = get_parallel_chunksize()
return cs1, cs2, cs3
cs1, cs2, cs3 = test_impl(8)
self.assertEqual(cs1, 0)
self.assertEqual(cs2, 8)
self.assertEqual(cs3, 0)
def test_all_iterations_reset_chunksize(self):
""" Test that all the iterations get run if you set the
chunksize. Also check that the chunksize that each
worker thread sees has been reset to 0. """
@njit(parallel=True)
def test_impl(cs, n):
res = np.zeros(n)
inner_cs = np.full(n, -13)
with numba.parallel_chunksize(cs):
for i in numba.prange(n):
inner_cs[i] = numba.get_parallel_chunksize()
res[i] = 13
return res, inner_cs
# Test a variety of array and chunk sizes.
# 1000 is a round number, 997 is prime, 943 is product of two
# primes, 961 is square of a prime.
for j in [1000, 997, 943, 961]:
for i in range(15):
res, inner_cs = test_impl(i+1, j)
self.assertTrue(np.all(res == 13))
self.assertTrue(np.all(inner_cs == 0))
def test_njit_parallel_chunksize_negative(self):
# Test negative set_parallel_chunksize inside njit.
with self.assertRaises(ValueError) as raised:
@njit
def neg_test():
set_parallel_chunksize(-1)
neg_test()
msg = "chunksize must be greater than or equal to zero"
self.assertIn(msg, str(raised.exception))
def test_python_parallel_chunksize_negative(self):
# Test negative set_parallel_chunksize outside njit.
with self.assertRaises(ValueError) as raised:
set_parallel_chunksize(-1)
msg = "chunksize must be greater than or equal to zero"
self.assertIn(msg, str(raised.exception))
def test_njit_parallel_chunksize_invalid_type(self):
with self.assertRaises(errors.TypingError) as raised:
@njit
def impl():
set_parallel_chunksize('invalid_type')
impl()
msg = "The parallel chunksize must be an integer"
self.assertIn(msg, str(raised.exception))
def test_python_parallel_chunksize_invalid_type(self):
with self.assertRaises(TypeError) as raised:
set_parallel_chunksize('invalid_type')
msg = "The parallel chunksize must be an integer"
self.assertIn(msg, str(raised.exception))
@skip_parfors_unsupported
@x86_only
|
TestParforChunksizing
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 21187,
"end": 21477
}
|
class ____(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = "No version number specified in file. Assuming {}"
default_args = ("1.1",)
|
W20
|
python
|
getsentry__sentry
|
src/sentry/interfaces/contexts.py
|
{
"start": 6178,
"end": 6318
}
|
class ____(ContextType):
type = "runtime"
context_to_tag_mapping = {"": "{runtime}", "name": "{name}"}
@contexttype
|
RuntimeContextType
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py
|
{
"start": 681,
"end": 758
}
|
class ____:
"""GetNewArgsEx through the metaclass."""
|
ThirdGoodGetNewArgsEx
|
python
|
pytorch__pytorch
|
torch/_inductor/runtime/debug_utils.py
|
{
"start": 219,
"end": 4275
}
|
class ____:
"""
Tracks inductor runtime allocations and deallocations to compare against
expected behavior.
"""
def __init__(self) -> None:
self.tensor_tracker: dict[str, torch.storage.UntypedStorage] = (
weakref.WeakValueDictionary() # type: ignore[assignment]
)
self.died_since_last_step: OrderedSet[str] = OrderedSet()
self.added_since_last_step: OrderedSet[str] = OrderedSet()
self.error = (
torch._inductor.config.test_configs.track_memory_lifecycle == "assert"
)
def set_tensor(self, name: str, tensor: torch.Tensor) -> None:
storage = tensor.untyped_storage()
self.added_since_last_step.add(name)
self.tensor_tracker[name] = storage
def on_tensor_death() -> None:
self.died_since_last_step.add(name)
weakref.finalize(storage, on_tensor_death)
def advance_step(self) -> None:
self.died_since_last_step.clear()
self.added_since_last_step.clear()
def log_or_raise(self, msg: str) -> None:
if self.error:
raise RuntimeError(msg)
else:
log.info(msg)
def check_step_delta(
self,
expected_allocated: list[str],
expected_freed: list[str],
is_final_step: bool,
) -> None:
"""Check only the delta changes since last step"""
# Check expected deaths - we dont currently distinguish between nodes which die in last step
# and are returned as outputs, so skip if final_step.
if not is_final_step:
missing_deaths = OrderedSet(expected_freed) - self.died_since_last_step
if missing_deaths:
self.log_or_raise(
f"Expected tensors to die but still alive: {missing_deaths}"
)
# Check for unexpected deaths
unexpected_deaths = self.died_since_last_step - OrderedSet(expected_freed)
if unexpected_deaths:
self.log_or_raise(f"Unexpected tensor deaths: {unexpected_deaths}")
# Check newly alive tensors - separate messages like deaths
actual_allocated = self.added_since_last_step
expected_allocated_set = OrderedSet(expected_allocated)
extra_alive = actual_allocated - expected_allocated_set
if extra_alive:
self.log_or_raise(f"Unexpected allocated tensors: {extra_alive}")
missing_alive = expected_allocated_set - actual_allocated
if missing_alive:
self.log_or_raise(
f"Expected allocated tensors but missing: {missing_alive}"
)
# Reset for next step
self.advance_step()
if is_final_step:
local.memory_tracker = None
def get_mem_tracker() -> BufferMemoryTracker:
if local.memory_tracker is None:
local.memory_tracker = BufferMemoryTracker()
return local.memory_tracker
def track_tensor(tensor: torch.Tensor, name: str) -> None:
get_mem_tracker().set_tensor(name, tensor)
def tracked_empty_strided(
size: list[int],
stride: list[int],
*,
dtype: torch.dtype,
device: torch.device,
name: str,
) -> torch.Tensor:
o = torch.empty_strided(size, stride, dtype=dtype, device=device)
track_tensor(o, name)
return o
def check_memory_step(
allocated: list[str], freed: list[str], is_final_step: bool = False
) -> None:
tracker = get_mem_tracker()
tracker.check_step_delta(allocated, freed, is_final_step)
@functools.lru_cache(None)
def register_check_mem_op() -> None:
lib = torch.library.Library("_inductor_debug", "FRAGMENT") # noqa: TOR901
lib.define(
"check_memory_step(str[] allocated, str[] freed, bool is_final_step) -> ()"
)
lib.impl("check_memory_step", check_memory_step, "BackendSelect")
from torch._higher_order_ops.effects import _EffectType, _register_effectful_op
_register_effectful_op(
torch.ops._inductor_debug.check_memory_step.default,
_EffectType.ORDERED,
)
|
BufferMemoryTracker
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/triggers/test_bigquery.py
|
{
"start": 26493,
"end": 30746
}
|
class ____:
def test_bigquery_value_check_op_trigger_serialization(self, value_check_trigger):
"""Asserts that the BigQueryValueCheckTrigger correctly serializes its arguments and classpath."""
classpath, kwargs = value_check_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.bigquery.BigQueryValueCheckTrigger"
assert kwargs == {
"conn_id": TEST_CONN_ID,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
"pass_value": TEST_PASS_VALUE,
"job_id": TEST_JOB_ID,
"dataset_id": TEST_DATASET_ID,
"project_id": TEST_GCP_PROJECT_ID,
"location": None,
"sql": TEST_SQL_QUERY,
"table_id": TEST_TABLE_ID,
"tolerance": TEST_TOLERANCE,
"poll_interval": POLLING_PERIOD_SECONDS,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_records")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_output")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_value_check_op_trigger_success(
self, mock_job_status, get_job_output, get_records, value_check_trigger
):
"""
Tests BigQueryValueCheckTrigger only fires once the query execution reaches a successful state.
"""
mock_job_status.return_value = {"status": "success", "message": "Job completed"}
get_job_output.return_value = {}
get_records.return_value = [[2], [4]]
await value_check_trigger.run().__anext__()
await asyncio.sleep(0.5)
generator = value_check_trigger.run()
actual = await generator.asend(None)
assert actual == TriggerEvent({"status": "success", "message": "Job completed", "records": [4]})
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_value_check_op_trigger_pending(self, mock_job_status, caplog, value_check_trigger):
"""
Tests BigQueryValueCheckTrigger only fires once the query execution reaches a successful state.
"""
mock_job_status.return_value = {"status": "pending", "message": "Job pending"}
caplog.set_level(logging.INFO)
task = asyncio.create_task(value_check_trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was returned
assert task.done() is False
assert "Query is still running..." in caplog.text
assert f"Sleeping for {POLLING_PERIOD_SECONDS} seconds." in caplog.text
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_value_check_op_trigger_fail(self, mock_job_status, value_check_trigger):
"""
Tests BigQueryValueCheckTrigger only fires once the query execution reaches a successful state.
"""
mock_job_status.return_value = {"status": "error", "message": "dummy"}
generator = value_check_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "dummy", "records": None}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_value_check_trigger_exception(self, mock_job_status):
"""Tests the BigQueryValueCheckTrigger does not fire if there is an exception."""
mock_job_status.side_effect = Exception("Test exception")
trigger = BigQueryValueCheckTrigger(
conn_id=TEST_CONN_ID,
sql=TEST_SQL_QUERY,
pass_value=TEST_PASS_VALUE,
tolerance=1,
job_id=TEST_JOB_ID,
project_id=TEST_GCP_PROJECT_ID,
)
generator = trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "Test exception"}) == actual
|
TestBigQueryValueCheckTrigger
|
python
|
great-expectations__great_expectations
|
great_expectations/core/partitioners.py
|
{
"start": 2474,
"end": 2662
}
|
class ____(pydantic.BaseModel):
regex: re.Pattern
param_names: Tuple[Literal["year"], Literal["month"]] = ("year", "month")
sort_ascending: bool = True
|
FileNamePartitionerMonthly
|
python
|
django-haystack__django-haystack
|
test_haystack/test_loading.py
|
{
"start": 415,
"end": 3016
}
|
class ____(TestCase):
def test_init(self):
ch = loading.ConnectionHandler({})
self.assertEqual(ch.connections_info, {})
ch = loading.ConnectionHandler(
{
"default": {
"ENGINE": "haystack.backends.solr_backend.SolrEngine",
"URL": "http://localhost:9001/solr/test_default",
}
}
)
self.assertEqual(
ch.connections_info,
{
"default": {
"ENGINE": "haystack.backends.solr_backend.SolrEngine",
"URL": "http://localhost:9001/solr/test_default",
}
},
)
@unittest.skipIf(pysolr is False, "pysolr required")
def test_get_item(self):
ch = loading.ConnectionHandler({})
try:
empty_engine = ch["default"]
self.fail()
except ImproperlyConfigured:
pass
ch = loading.ConnectionHandler(
{
"default": {
"ENGINE": "haystack.backends.solr_backend.SolrEngine",
"URL": "http://localhost:9001/solr/test_default",
}
}
)
solr_engine = ch["default"]
backend_path, memory_address = (
repr(solr_engine).strip("<>").split(" object at ")
)
self.assertEqual(backend_path, "haystack.backends.solr_backend.SolrEngine")
solr_engine_2 = ch["default"]
backend_path_2, memory_address_2 = (
repr(solr_engine_2).strip("<>").split(" object at ")
)
self.assertEqual(backend_path_2, "haystack.backends.solr_backend.SolrEngine")
# Ensure we're loading out of the memorized connection.
self.assertEqual(memory_address_2, memory_address)
try:
empty_engine = ch["slave"]
self.fail()
except ImproperlyConfigured:
pass
def test_get_unified_index(self):
ch = loading.ConnectionHandler(
{"default": {"ENGINE": "haystack.backends.simple_backend.SimpleEngine"}}
)
ui = ch["default"].get_unified_index()
klass, address = repr(ui).strip("<>").split(" object at ")
self.assertEqual(str(klass), "haystack.utils.loading.UnifiedIndex")
ui_2 = ch["default"].get_unified_index()
klass_2, address_2 = repr(ui_2).strip("<>").split(" object at ")
self.assertEqual(str(klass_2), "haystack.utils.loading.UnifiedIndex")
self.assertEqual(address_2, address)
|
ConnectionHandlerTestCase
|
python
|
mlflow__mlflow
|
mlflow/webhooks/types.py
|
{
"start": 1099,
"end": 2289
}
|
class ____(TypedDict):
"""Payload sent when a new model version is created.
Example payload:
.. code-block:: python
{
"name": "example_model",
"version": "1",
"source": "models:/123",
"run_id": "abcd1234abcd5678",
"tags": {"example_key": "example_value"},
"description": "An example model version",
}
"""
name: str
"""The name of the registered model."""
version: str
"""The version of the model."""
source: str
"""The source URI of the model version."""
run_id: str | None
"""The run ID associated with the model version, if applicable."""
tags: dict[str, str]
"""Tags associated with the model version."""
description: str | None
"""Description of the model version."""
@classmethod
def example(cls) -> "ModelVersionCreatedPayload":
return cls(
name="example_model",
version="1",
source="models:/123",
run_id="abcd1234abcd5678",
tags={"example_key": "example_value"},
description="An example model version",
)
|
ModelVersionCreatedPayload
|
python
|
Pylons__pyramid
|
src/pyramid/i18n.py
|
{
"start": 13918,
"end": 14687
}
|
class ____:
@reify
def localizer(self):
"""Convenience property to return a localizer"""
registry = self.registry
current_locale_name = self.locale_name
localizer = registry.queryUtility(ILocalizer, name=current_locale_name)
if localizer is None:
# no localizer utility registered yet
tdirs = registry.queryUtility(ITranslationDirectories, default=[])
localizer = make_localizer(current_locale_name, tdirs)
registry.registerUtility(
localizer, ILocalizer, name=current_locale_name
)
return localizer
@reify
def locale_name(self):
locale_name = negotiate_locale_name(self)
return locale_name
|
LocalizerRequestMixin
|
python
|
ray-project__ray
|
python/ray/data/tests/unit/test_datatype.py
|
{
"start": 4474,
"end": 5603
}
|
class ____:
"""Test factory methods from external systems."""
@pytest.mark.parametrize(
"pa_type",
[
pa.int32(),
pa.string(),
pa.timestamp("s"),
pa.list_(pa.int32()),
pa.decimal128(10, 2),
],
)
def test_from_arrow(self, pa_type):
"""Test from_arrow factory method."""
dt = DataType.from_arrow(pa_type)
assert isinstance(dt, DataType)
assert dt.is_arrow_type()
assert dt._physical_dtype == pa_type
@pytest.mark.parametrize(
"numpy_input,expected_dtype",
[
(np.dtype("int32"), np.dtype("int32")),
(np.dtype("float64"), np.dtype("float64")),
("int64", np.dtype("int64")),
("float32", np.dtype("float32")),
],
)
def test_from_numpy(self, numpy_input, expected_dtype):
"""Test from_numpy factory method."""
dt = DataType.from_numpy(numpy_input)
assert isinstance(dt, DataType)
assert dt.is_numpy_type()
assert dt._physical_dtype == expected_dtype
|
TestDataTypeFactories
|
python
|
django__django
|
django/forms/widgets.py
|
{
"start": 12122,
"end": 12222
}
|
class ____(Input):
input_type = "url"
template_name = "django/forms/widgets/url.html"
|
URLInput
|
python
|
plotly__plotly.py
|
plotly/graph_objs/icicle/_leaf.py
|
{
"start": 233,
"end": 2322
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle"
_path_str = "icicle.leaf"
_valid_props = {"opacity"}
@property
def opacity(self):
"""
Sets the opacity of the leaves. With colorscale it is defaulted
to 1; otherwise it is defaulted to 0.7
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
opacity
Sets the opacity of the leaves. With colorscale it is
defaulted to 1; otherwise it is defaulted to 0.7
"""
def __init__(self, arg=None, opacity=None, **kwargs):
"""
Construct a new Leaf object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.icicle.Leaf`
opacity
Sets the opacity of the leaves. With colorscale it is
defaulted to 1; otherwise it is defaulted to 0.7
Returns
-------
Leaf
"""
super().__init__("leaf")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.Leaf
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Leaf`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Leaf
|
python
|
facelessuser__soupsieve
|
tests/test_level2/test_focus.py
|
{
"start": 49,
"end": 1508
}
|
class ____(util.TestCase):
"""Test focus selector."""
MARKUP = """
<form action="#">
<fieldset id='a' disabled>
<legend>
Simple fieldset <input type="radio" id="1" checked>
<fieldset id='b' disabled>
<legend>Simple fieldset <input type="radio" id="2" checked></legend>
<input type="radio" id="3" checked>
<label for="radio">radio</label>
</fieldset>
</legend>
<fieldset id='c' disabled>
<legend>Simple fieldset <input type="radio" id="4" checked></legend>
<input type="radio" id="5" checked>
<label for="radio">radio</label>
</fieldset>
<input type="radio" id="6" checked>
<label for="radio">radio</label>
</fieldset>
<optgroup id="opt-enable">
<option id="7" disabled>option</option>
</optgroup>
<optgroup id="8" disabled>
<option id="9">option</option>
</optgroup>
<a href="" id="link">text</a>
</form>
"""
def test_focus(self):
"""Test focus."""
self.assert_selector(
self.MARKUP,
"input:focus",
[],
flags=util.HTML
)
def test_not_focus(self):
"""Test not focus."""
self.assert_selector(
self.MARKUP,
"input:not(:focus)",
["1", "2", "3", "4", "5", "6"],
flags=util.HTML
)
|
TestFocus
|
python
|
python-excel__xlwt
|
xlwt/antlr.py
|
{
"start": 22100,
"end": 22739
}
|
class ____(object):
def __init__(self):
self.buffer = [] # empty list
def append(self,item):
self.buffer.append(item)
def elementAt(self,index):
return self.buffer[index]
def reset(self):
self.buffer = []
def removeFirst(self):
self.buffer.pop(0)
def length(self):
return len(self.buffer)
def __str__(self):
return str(self.buffer)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### InputBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
|
Queue
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{
"start": 218918,
"end": 223772
}
|
class ____(Request):
"""
Add or update task configuration
:param task: Task ID
:type task: str
:param configuration: Task configuration items. The new ones will be added and
the already existing ones will be updated
:type configuration: Sequence[ConfigurationItem]
:param replace_configuration: If set then the all the configuration items will
be replaced with the provided ones. Otherwise only the provided configuration
items will be updated or added
:type replace_configuration: bool
:param force: If set to True then both new and running task configuration can
be edited. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "edit_configuration"
_version = "2.13"
_schema = {
"definitions": {
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"configuration": {
"description": "Task configuration items. The new ones will be added and the already existing ones will be updated",
"items": {"$ref": "#/definitions/configuration_item"},
"type": "array",
},
"force": {
"description": "If set to True then both new and running task configuration can be edited. Otherwise only the new task ones. Default is False",
"type": "boolean",
},
"replace_configuration": {
"description": "If set then the all the configuration items will be replaced with the provided ones. Otherwise only the provided configuration items will be updated or added",
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "configuration"],
"type": "object",
}
def __init__(
self,
task: str,
configuration: List[Any],
replace_configuration: Optional[bool] = None,
force: Optional[bool] = None,
**kwargs: Any
) -> None:
super(EditConfigurationRequest, self).__init__(**kwargs)
self.task = task
self.configuration = configuration
self.replace_configuration = replace_configuration
self.force = force
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("configuration")
def configuration(self) -> List[Any]:
return self._property_configuration
@configuration.setter
def configuration(self, value: List[Any]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", (dict, ConfigurationItem), is_array=True)
value = [ConfigurationItem(**v) if isinstance(v, dict) else v for v in value]
self._property_configuration = value
@schema_property("replace_configuration")
def replace_configuration(self) -> Optional[bool]:
return self._property_replace_configuration
@replace_configuration.setter
def replace_configuration(self, value: Optional[bool]) -> None:
if value is None:
self._property_replace_configuration = None
return
self.assert_isinstance(value, "replace_configuration", (bool,))
self._property_replace_configuration = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
|
EditConfigurationRequest
|
python
|
celery__celery
|
t/integration/test_canvas.py
|
{
"start": 43442,
"end": 61587
}
|
class ____:
@flaky
def test_ready_with_exception(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
g = group([add.s(1, 2), raise_error.s()])
result = g.apply_async()
while not result.ready():
pass
@flaky
def test_empty_group_result(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
task = group([])
result = task.apply_async()
GroupResult.save(result)
task = GroupResult.restore(result.id)
assert task.results == []
@flaky
def test_parent_ids(self, manager):
assert_ping(manager)
g = (
ids.si(i=1) |
ids.si(i=2) |
group(ids.si(i=i) for i in range(2, 50))
)
res = g()
expected_root_id = res.parent.parent.id
expected_parent_id = res.parent.id
values = res.get(timeout=TIMEOUT)
for i, r in enumerate(values):
root_id, parent_id, value = r
assert root_id == expected_root_id
assert parent_id == expected_parent_id
assert value == i + 2
@flaky
def test_nested_group(self, manager):
assert_ping(manager)
c = group(
add.si(1, 10),
group(
add.si(1, 100),
group(
add.si(1, 1000),
add.si(1, 2000),
),
),
)
res = c()
assert res.get(timeout=TIMEOUT) == [11, 101, 1001, 2001]
@flaky
def test_large_group(self, manager):
assert_ping(manager)
c = group(identity.s(i) for i in range(1000))
res = c.delay()
assert res.get(timeout=TIMEOUT) == list(range(1000))
def test_group_lone(self, manager):
"""
Test that a simple group completes.
"""
sig = group(identity.s(42), identity.s(42)) # [42, 42]
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
def test_nested_group_group(self, manager):
"""
Confirm that groups nested inside groups get unrolled.
"""
sig = group(
group(identity.s(42), identity.s(42)), # [42, 42]
) # [42, 42] due to unrolling
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
def test_nested_group_chord_counting_simple(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
gchild_sig = identity.si(42)
child_chord = chord((gchild_sig,), identity.s())
group_sig = group((child_chord,))
res = group_sig.delay()
# Wait for the result to land and confirm its value is as expected
assert res.get(timeout=TIMEOUT) == [[42]]
def test_nested_group_chord_counting_chain(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
gchild_count = 42
gchild_sig = chain((identity.si(1337),) * gchild_count)
child_chord = chord((gchild_sig,), identity.s())
group_sig = group((child_chord,))
res = group_sig.delay()
# Wait for the result to land and confirm its value is as expected
assert res.get(timeout=TIMEOUT) == [[1337]]
def test_nested_group_chord_counting_group(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
gchild_count = 42
gchild_sig = group((identity.si(1337),) * gchild_count)
child_chord = chord((gchild_sig,), identity.s())
group_sig = group((child_chord,))
res = group_sig.delay()
# Wait for the result to land and confirm its value is as expected
assert res.get(timeout=TIMEOUT) == [[1337] * gchild_count]
def test_nested_group_chord_counting_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
gchild_count = 42
gchild_sig = chord(
(identity.si(1337),) * gchild_count, identity.si(31337),
)
child_chord = chord((gchild_sig,), identity.s())
group_sig = group((child_chord,))
res = group_sig.delay()
# Wait for the result to land and confirm its value is as expected
assert res.get(timeout=TIMEOUT) == [[31337]]
def test_nested_group_chord_counting_mixed(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
gchild_count = 42
child_chord = chord(
(
identity.si(42),
chain((identity.si(42),) * gchild_count),
group((identity.si(42),) * gchild_count),
chord((identity.si(42),) * gchild_count, identity.si(1337)),
),
identity.s(),
)
group_sig = group((child_chord,))
res = group_sig.delay()
# Wait for the result to land and confirm its value is as expected. The
# group result gets unrolled into the encapsulating chord, hence the
# weird unpacking below
assert res.get(timeout=TIMEOUT) == [
[42, 42, *((42,) * gchild_count), 1337]
]
@pytest.mark.xfail(raises=TimeoutError, reason="#6734")
def test_nested_group_chord_body_chain(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
child_chord = chord(identity.si(42), chain((identity.s(),)))
group_sig = group((child_chord,))
res = group_sig.delay()
# The result can be expected to timeout since it seems like its
# underlying promise might not be getting fulfilled (ref #6734). Pick a
# short timeout since we don't want to block for ages and this is a
# fairly simple signature which should run pretty quickly.
expected_result = [[42]]
with pytest.raises(TimeoutError) as expected_excinfo:
res.get(timeout=TIMEOUT / 10)
# Get the child `AsyncResult` manually so that we don't have to wait
# again for the `GroupResult`
assert res.children[0].get(timeout=TIMEOUT) == expected_result[0]
assert res.get(timeout=TIMEOUT) == expected_result
# Re-raise the expected exception so this test will XFAIL
raise expected_excinfo.value
def test_callback_called_by_group(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
callback_msg = str(uuid.uuid4()).encode()
redis_key = str(uuid.uuid4())
callback = redis_echo.si(callback_msg, redis_key=redis_key)
group_sig = group(identity.si(42), identity.si(1337))
group_sig.link(callback)
redis_connection.delete(redis_key)
with subtests.test(msg="Group result is returned"):
res = group_sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 1337]
with subtests.test(msg="Callback is called after group is completed"):
await_redis_echo({callback_msg, }, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_first(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
redis_key = str(uuid.uuid4())
errback = redis_echo.si(errback_msg, redis_key=redis_key)
group_sig = group(fail.s(), identity.si(42))
group_sig.link_error(errback)
redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
await_redis_echo({errback_msg, }, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_last(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
errback_msg = str(uuid.uuid4()).encode()
redis_key = str(uuid.uuid4())
errback = redis_echo.si(errback_msg, redis_key=redis_key)
group_sig = group(identity.si(42), fail.s())
group_sig.link_error(errback)
redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
await_redis_echo({errback_msg, }, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_errback_called_by_group_fail_multiple(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
expected_errback_count = 42
redis_key = str(uuid.uuid4())
errback = redis_count.si(redis_key=redis_key)
# Include a mix of passing and failing tasks
group_sig = group(
*(identity.si(42) for _ in range(24)), # arbitrary task count
*(fail.s() for _ in range(expected_errback_count)),
)
group_sig.link_error(errback)
redis_connection.delete(redis_key)
with subtests.test(msg="Error propagates from group"):
res = group_sig.delay()
with pytest.raises(ExpectedException):
res.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group task fails"):
await_redis_count(expected_errback_count, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_children_with_callbacks(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
callback = redis_count.si(redis_key=redis_key)
child_task_count = 42
child_sig = identity.si(1337)
child_sig.link(callback)
group_sig = group(child_sig for _ in range(child_task_count))
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = group_sig()
assert res_obj.get(timeout=TIMEOUT) == [1337] * child_task_count
with subtests.test(msg="Chain child task callbacks are called"):
await_redis_count(child_task_count, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_children_with_errbacks(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
errback = redis_count.si(redis_key=redis_key)
child_task_count = 42
child_sig = fail.si()
child_sig.link_error(errback)
group_sig = group(child_sig for _ in range(child_task_count))
redis_connection.delete(redis_key)
with subtests.test(msg="Chain fails due to a child task dying"):
res_obj = group_sig()
with pytest.raises(ExpectedException):
res_obj.get(timeout=TIMEOUT)
with subtests.test(msg="Chain child task errbacks are called"):
await_redis_count(child_task_count, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_with_callback_child_replaced(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
callback = redis_count.si(redis_key=redis_key)
group_sig = group(add_replaced.si(42, 1337), identity.si(31337))
group_sig.link(callback)
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = group_sig()
assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
with subtests.test(msg="Callback is called after group finishes"):
await_redis_count(1, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_with_errback_child_replaced(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
errback = redis_count.si(redis_key=redis_key)
group_sig = group(add_replaced.si(42, 1337), fail.s())
group_sig.link_error(errback)
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = group_sig()
with pytest.raises(ExpectedException):
res_obj.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group finishes"):
await_redis_count(1, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_child_with_callback_replaced(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
callback = redis_count.si(redis_key=redis_key)
child_sig = add_replaced.si(42, 1337)
child_sig.link(callback)
group_sig = group(child_sig, identity.si(31337))
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = group_sig()
assert res_obj.get(timeout=TIMEOUT) == [42 + 1337, 31337]
with subtests.test(msg="Callback is called after group finishes"):
await_redis_count(1, redis_key=redis_key)
redis_connection.delete(redis_key)
def test_group_child_with_errback_replaced(self, manager, subtests):
if not manager.app.conf.result_backend.startswith("redis"):
raise pytest.skip("Requires redis result backend.")
redis_connection = get_redis_connection()
redis_key = str(uuid.uuid4())
errback = redis_count.si(redis_key=redis_key)
child_sig = fail_replaced.si()
child_sig.link_error(errback)
group_sig = group(child_sig, identity.si(42))
redis_connection.delete(redis_key)
with subtests.test(msg="Chain executes as expected"):
res_obj = group_sig()
with pytest.raises(ExpectedException):
res_obj.get(timeout=TIMEOUT)
with subtests.test(msg="Errback is called after group finishes"):
await_redis_count(1, redis_key=redis_key)
redis_connection.delete(redis_key)
@pytest.mark.xfail(raises=TimeoutError,
reason="Task is timeout instead of returning exception on rpc backend",
strict=False)
def test_group_child_replaced_with_chain_first(self, manager):
orig_sig = group(replace_with_chain.si(42), identity.s(1337))
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
@pytest.mark.xfail(raises=TimeoutError,
reason="Task is timeout instead of returning exception on rpc backend",
strict=False)
def test_group_child_replaced_with_chain_middle(self, manager):
orig_sig = group(
identity.s(42), replace_with_chain.s(1337), identity.s(31337)
)
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == [42, 1337, 31337]
@pytest.mark.xfail(raises=TimeoutError,
reason="Task is timeout instead of returning exception on rpc backend",
strict=False)
def test_group_child_replaced_with_chain_last(self, manager):
orig_sig = group(identity.s(42), replace_with_chain.s(1337))
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == [42, 1337]
def test_task_replace_with_group_preserves_group_order(self, manager):
if manager.app.conf.result_backend.startswith("rpc"):
raise pytest.skip("RPC result backend does not support replacing with a group")
orig_sig = group([add_to_all.s([2, 1], 1), add_to_all.s([4, 3], 1)] * 10)
res_obj = orig_sig.delay()
assert res_obj.get(timeout=TIMEOUT) == [[3, 2], [5, 4]] * 10
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
assert expected_value == value
assert root_id == expected_root_id
assert parent_id == expected_parent_id
def assert_ping(manager):
ping_result = manager.inspect().ping()
assert ping_result
ping_val = list(ping_result.values())[0]
assert ping_val == {"ok": "pong"}
|
test_group
|
python
|
huggingface__transformers
|
src/transformers/models/x_clip/modeling_x_clip.py
|
{
"start": 47475,
"end": 48189
}
|
class ____(nn.Module):
"""This corresponds to the `VideoSpecificPrompt` class in the original implementation."""
def __init__(self, config):
super().__init__()
embed_dim = config.projection_dim
self.layernorm = nn.LayerNorm(embed_dim, eps=config.vision_config.layer_norm_eps)
self.decoder = nn.ModuleList([PromptGeneratorLayer(config) for _ in range(config.prompt_layers)])
self.alpha = nn.Parameter(torch.ones(embed_dim) * config.prompt_alpha)
def forward(self, text, visual):
visual = self.layernorm(visual)
for layer in self.decoder:
text = layer(text, visual)
return self.alpha * text
@auto_docstring
|
XCLIPPromptGenerator
|
python
|
great-expectations__great_expectations
|
docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py
|
{
"start": 985,
"end": 9565
}
|
class ____(QueryExpectation):
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py docstring">
"""Expect the frequency of occurrences of a specified value in a queried column to be at least <threshold> percent of values in that column."""
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py metric_dependencies">
metric_dependencies = ("query.column",)
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py query">
query: str = """
SELECT {col},
CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM {active_batch})
FROM {active_batch}
GROUP BY {col}
"""
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py success_keys">
success_keys = (
"column",
"value",
"threshold",
"query",
)
# </snippet>
domain_keys = ("batch_id", "row_condition", "condition_parser")
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
super().validate_configuration(configuration)
value = configuration["kwargs"].get("value")
threshold = configuration["kwargs"].get("threshold")
try:
assert value is not None, "'value' must be specified"
assert (isinstance(threshold, (int, float)) and 0 < threshold <= 1) or (
isinstance(threshold, list)
and all(isinstance(x, (int, float)) for x in threshold)
and all(0 < x <= 1 for x in threshold)
and 0 < sum(threshold) <= 1
), "'threshold' must be 1, a float between 0 and 1, or a list of floats whose sum is between 0 and 1"
if isinstance(threshold, list):
assert isinstance(value, list) and len(value) == len(
threshold
), "'value' and 'threshold' must contain the same number of arguments"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py _validate function">
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py _validate function signature">
def _validate(
self,
metrics: dict,
runtime_configuration: dict | None = None,
execution_engine: ExecutionEngine | None = None,
) -> Union[ExpectationValidationResult, dict]:
# </snippet>
metrics = convert_to_json_serializable(data=metrics)
query_result = metrics.get("query.column")
query_result = dict([element.values() for element in query_result])
configuration = self.configuration
value = configuration["kwargs"].get("value")
threshold = configuration["kwargs"].get("threshold")
if isinstance(value, list):
success = all(
query_result[value[i]] >= threshold[i] for i in range(len(value))
)
return {
"success": success,
"result": {
"observed_value": [
query_result[value[i]] for i in range(len(value))
]
},
}
success = query_result[value] >= threshold
return {
"success": success,
"result": {"observed_value": query_result[value]},
}
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py examples">
examples = [
{
"data": [
{
"data": {
"col1": [1, 2, 2, 3, 4],
"col2": ["a", "a", "b", "b", "a"],
},
},
],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": "a",
"threshold": 0.6,
},
"out": {"success": True},
"only_for": ["sqlite", "spark"],
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col1",
"value": 2,
"threshold": 1,
},
"out": {"success": False},
"only_for": ["sqlite", "spark"],
},
{
"title": "multi_value_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.6, 0.4],
},
"out": {"success": True},
"only_for": ["sqlite", "spark"],
},
{
"title": "multi_value_positive_test_static_data_asset",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.6, 0.4],
"query": """
SELECT {col},
CAST(COUNT({col}) AS float) / (SELECT COUNT({col}) FROM test)
FROM test
GROUP BY {col}
""",
},
"out": {"success": True},
"only_for": ["sqlite"],
},
{
"title": "multi_value_positive_test_row_condition",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col2",
"value": ["a", "b"],
"threshold": [0.6, 0.4],
"row_condition": 'col("col1")==2',
"condition_parser": "great_expectations",
},
"out": {"success": False},
"only_for": ["sqlite", "spark"],
},
],
},
]
# </snippet>
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py _validate function library_metadata">
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["query-based"],
"contributors": ["@joegargery"],
}
# </snippet>
if __name__ == "__main__":
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_queried_column_value_frequency_to_meet_threshold.py print_diagnostic_checklist()">
ExpectQueriedColumnValueFrequencyToMeetThreshold().print_diagnostic_checklist()
# </snippet>
# Note to users: code below this line is only for integration testing -- ignore!
diagnostics = ExpectQueriedColumnValueFrequencyToMeetThreshold().run_diagnostics()
for check in diagnostics["tests"]:
assert check["test_passed"] is True
assert check["error_diagnostics"] is None
for check in diagnostics["errors"]:
assert check is None
for check in diagnostics["maturity_checklist"]["experimental"]:
if check["message"] == "Passes all linting checks":
continue
assert check["passed"] is True
|
ExpectQueriedColumnValueFrequencyToMeetThreshold
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
|
{
"start": 987,
"end": 7193
}
|
class ____(str, Enum):
"""OpenAI embedding mode model."""
# davinci
TEXT_SIMILARITY_DAVINCI = "text-similarity-davinci-001"
TEXT_SEARCH_DAVINCI_QUERY = "text-search-davinci-query-001"
TEXT_SEARCH_DAVINCI_DOC = "text-search-davinci-doc-001"
# curie
TEXT_SIMILARITY_CURIE = "text-similarity-curie-001"
TEXT_SEARCH_CURIE_QUERY = "text-search-curie-query-001"
TEXT_SEARCH_CURIE_DOC = "text-search-curie-doc-001"
# babbage
TEXT_SIMILARITY_BABBAGE = "text-similarity-babbage-001"
TEXT_SEARCH_BABBAGE_QUERY = "text-search-babbage-query-001"
TEXT_SEARCH_BABBAGE_DOC = "text-search-babbage-doc-001"
# ada
TEXT_SIMILARITY_ADA = "text-similarity-ada-001"
TEXT_SEARCH_ADA_QUERY = "text-search-ada-query-001"
TEXT_SEARCH_ADA_DOC = "text-search-ada-doc-001"
# text-embedding-ada-002
TEXT_EMBED_ADA_002 = "text-embedding-ada-002"
# text-embedding-3-large
TEXT_EMBED_3_LARGE = "text-embedding-3-large"
# text-embedding-3-small
TEXT_EMBED_3_SMALL = "text-embedding-3-small"
# convenient shorthand
OAEM = OpenAIEmbeddingMode
OAEMT = OpenAIEmbeddingModelType
OAEMM = OpenAIEmbeddingModeModel
EMBED_MAX_TOKEN_LIMIT = 2048
_QUERY_MODE_MODEL_DICT = {
(OAEM.SIMILARITY_MODE, "davinci"): OAEMM.TEXT_SIMILARITY_DAVINCI,
(OAEM.SIMILARITY_MODE, "curie"): OAEMM.TEXT_SIMILARITY_CURIE,
(OAEM.SIMILARITY_MODE, "babbage"): OAEMM.TEXT_SIMILARITY_BABBAGE,
(OAEM.SIMILARITY_MODE, "ada"): OAEMM.TEXT_SIMILARITY_ADA,
(OAEM.SIMILARITY_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
(OAEM.SIMILARITY_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
(OAEM.SIMILARITY_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
(OAEM.TEXT_SEARCH_MODE, "davinci"): OAEMM.TEXT_SEARCH_DAVINCI_QUERY,
(OAEM.TEXT_SEARCH_MODE, "curie"): OAEMM.TEXT_SEARCH_CURIE_QUERY,
(OAEM.TEXT_SEARCH_MODE, "babbage"): OAEMM.TEXT_SEARCH_BABBAGE_QUERY,
(OAEM.TEXT_SEARCH_MODE, "ada"): OAEMM.TEXT_SEARCH_ADA_QUERY,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
}
_TEXT_MODE_MODEL_DICT = {
(OAEM.SIMILARITY_MODE, "davinci"): OAEMM.TEXT_SIMILARITY_DAVINCI,
(OAEM.SIMILARITY_MODE, "curie"): OAEMM.TEXT_SIMILARITY_CURIE,
(OAEM.SIMILARITY_MODE, "babbage"): OAEMM.TEXT_SIMILARITY_BABBAGE,
(OAEM.SIMILARITY_MODE, "ada"): OAEMM.TEXT_SIMILARITY_ADA,
(OAEM.SIMILARITY_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
(OAEM.SIMILARITY_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
(OAEM.SIMILARITY_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
(OAEM.TEXT_SEARCH_MODE, "davinci"): OAEMM.TEXT_SEARCH_DAVINCI_DOC,
(OAEM.TEXT_SEARCH_MODE, "curie"): OAEMM.TEXT_SEARCH_CURIE_DOC,
(OAEM.TEXT_SEARCH_MODE, "babbage"): OAEMM.TEXT_SEARCH_BABBAGE_DOC,
(OAEM.TEXT_SEARCH_MODE, "ada"): OAEMM.TEXT_SEARCH_ADA_DOC,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
(OAEM.TEXT_SEARCH_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
}
def get_embedding(client: OpenAI, text: str, engine: str, **kwargs: Any) -> List[float]:
"""
Get embedding.
NOTE: Copied from OpenAI's embedding utils:
https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py
Copied here to avoid importing unnecessary dependencies
like matplotlib, plotly, scipy, sklearn.
"""
text = text.replace("\n", " ")
return (
client.embeddings.create(input=[text], model=engine, **kwargs).data[0].embedding
)
async def aget_embedding(
aclient: AsyncOpenAI, text: str, engine: str, **kwargs: Any
) -> List[float]:
"""
Asynchronously get embedding.
NOTE: Copied from OpenAI's embedding utils:
https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py
Copied here to avoid importing unnecessary dependencies
like matplotlib, plotly, scipy, sklearn.
"""
text = text.replace("\n", " ")
return (
(await aclient.embeddings.create(input=[text], model=engine, **kwargs))
.data[0]
.embedding
)
def get_embeddings(
client: OpenAI, list_of_text: List[str], engine: str, **kwargs: Any
) -> List[List[float]]:
"""
Get embeddings.
NOTE: Copied from OpenAI's embedding utils:
https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py
Copied here to avoid importing unnecessary dependencies
like matplotlib, plotly, scipy, sklearn.
"""
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = client.embeddings.create(input=list_of_text, model=engine, **kwargs).data
return [d.embedding for d in data]
async def aget_embeddings(
aclient: AsyncOpenAI,
list_of_text: List[str],
engine: str,
**kwargs: Any,
) -> List[List[float]]:
"""
Asynchronously get embeddings.
NOTE: Copied from OpenAI's embedding utils:
https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py
Copied here to avoid importing unnecessary dependencies
like matplotlib, plotly, scipy, sklearn.
"""
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = (
await aclient.embeddings.create(input=list_of_text, model=engine, **kwargs)
).data
return [d.embedding for d in data]
def get_engine(
mode: str,
model: str,
mode_model_dict: Dict[Tuple[OpenAIEmbeddingMode, str], OpenAIEmbeddingModeModel],
) -> str:
"""Get engine."""
key = (OpenAIEmbeddingMode(mode), OpenAIEmbeddingModelType(model))
if key not in mode_model_dict:
raise ValueError(f"Invalid mode, model combination: {key}")
return mode_model_dict[key].value
|
OpenAIEmbeddingModeModel
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 7633,
"end": 7803
}
|
class ____(models.Model):
poll = CustomAttrNameForeignKey(Poll, models.CASCADE, attr_name="custom_poll")
history = HistoricalRecords()
|
ModelWithCustomAttrForeignKey
|
python
|
getsentry__sentry
|
tests/sentry/seer/autofix/test_issue_summary.py
|
{
"start": 1377,
"end": 31602
}
|
class ____(APITestCase, SnubaTestCase, OccurrenceTestMixin):
def setUp(self) -> None:
super().setUp()
self.group = self.create_group()
self.login_as(user=self.user)
def tearDown(self) -> None:
super().tearDown()
# Clear the cache after each test
cache.delete(f"ai-group-summary-v2:{self.group.id}")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary._call_seer")
def test_get_issue_summary_with_existing_summary(
self, mock_call_seer, mock_get_acknowledgement
):
mock_get_acknowledgement.return_value = True
existing_summary = {
"group_id": str(self.group.id),
"headline": "Existing headline",
"whats_wrong": "Existing whats wrong",
"trace": "Existing trace",
"possible_cause": "Existing possible cause",
"scores": {
"possible_cause_confidence": 0.9,
"possible_cause_novelty": 0.8,
},
}
# Set the cache with the existing summary
cache.set(
f"ai-group-summary-v2:{self.group.id}", existing_summary, timeout=60 * 60 * 24 * 7
)
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
assert summary_data == convert_dict_key_case(existing_summary, snake_to_camel_case)
mock_call_seer.assert_not_called()
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary._get_event")
def test_get_issue_summary_without_event(
self, mock_get_event: MagicMock, mock_get_acknowledgement: MagicMock
) -> None:
mock_get_acknowledgement.return_value = True
mock_get_event.return_value = [None, None]
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 400
assert summary_data == {"detail": "Could not find an event for the issue"}
assert cache.get(f"ai-group-summary-v2:{self.group.id}") is None
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary._get_trace_tree_for_event")
@patch("sentry.seer.autofix.issue_summary._call_seer")
@patch("sentry.seer.autofix.issue_summary._get_event")
def test_get_issue_summary_without_existing_summary(
self, mock_get_event, mock_call_seer, mock_get_trace_tree, mock_get_acknowledgement
):
mock_get_acknowledgement.return_value = True
event = Mock(
event_id="test_event_id",
data="test_event_data",
trace_id="test_trace",
datetime=datetime.datetime.now(),
)
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_summary = SummarizeIssueResponse(
group_id=str(self.group.id),
headline="Test headline",
whats_wrong="Test whats wrong",
trace="Test trace",
possible_cause="Test possible cause",
scores=SummarizeIssueScores(
possible_cause_confidence=0.0,
possible_cause_novelty=0.0,
),
)
mock_call_seer.return_value = mock_summary
mock_get_trace_tree.return_value = {"trace": "tree"}
expected_response_summary = mock_summary.dict()
expected_response_summary["event_id"] = event.event_id
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
assert summary_data == convert_dict_key_case(expected_response_summary, snake_to_camel_case)
mock_get_event.assert_called_once_with(self.group, self.user, provided_event_id=None)
mock_get_trace_tree.assert_called_once()
mock_call_seer.assert_called_once_with(self.group, serialized_event, {"trace": "tree"})
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
# Check if the cache was set correctly
cached_summary = cache.get(f"ai-group-summary-v2:{self.group.id}")
assert cached_summary == expected_response_summary
def test_get_issue_summary_without_ai_acknowledgement(self) -> None:
with patch(
"sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement"
) as mock_get_acknowledgement:
mock_get_acknowledgement.return_value = False
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 403
assert summary_data == {
"detail": "AI Autofix has not been acknowledged by the organization."
}
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary.requests.post")
@patch("sentry.seer.autofix.issue_summary._get_event")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
def test_call_seer_integration(
self, mock_get_acknowledgement: MagicMock, mock_get_event: MagicMock, mock_post: MagicMock
) -> None:
mock_get_acknowledgement.return_value = True
event = Mock(
event_id="test_event_id",
data="test_event_data",
trace_id=None,
datetime=datetime.datetime.now(),
)
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_response = Mock()
mock_response.json.return_value = {
"group_id": str(self.group.id),
"whats_wrong": "Test whats wrong",
"trace": "Test trace",
"possible_cause": "Test possible cause",
"headline": "Test headline",
"scores": {
"possible_cause_confidence": 0.9,
"possible_cause_novelty": 0.8,
"fixability_score": 0.5,
"is_fixable": True,
"fixability_score_version": 1,
},
}
mock_post.return_value = mock_response
expected_response_summary = mock_response.json.return_value
expected_response_summary["event_id"] = event.event_id
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
assert summary_data == convert_dict_key_case(expected_response_summary, snake_to_camel_case)
mock_post.assert_called_once()
payload = orjson.loads(mock_post.call_args_list[0].kwargs["data"])
assert payload["trace_tree"] is None
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
assert cache.get(f"ai-group-summary-v2:{self.group.id}") == expected_response_summary
@patch("sentry.seer.autofix.issue_summary.get_issue_summary")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
def test_get_issue_summary_cache_write_read(
self, mock_get_acknowledgement, mock_get_issue_summary
):
mock_get_acknowledgement.return_value = True
# First request to populate the cache
mock_get_event = Mock()
mock_call_seer = Mock()
event = Mock(
event_id="test_event_id",
data="test_event_data",
trace_id=None,
datetime=datetime.datetime.now(),
)
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_summary = SummarizeIssueResponse(
group_id=str(self.group.id),
whats_wrong="Test whats wrong",
trace="Test trace",
possible_cause="Test possible cause",
headline="Test headline",
)
mock_call_seer.return_value = mock_summary
# Set up the cache with the test data
expected_response_summary = mock_summary.dict()
expected_response_summary["event_id"] = event.event_id
cache.set(
f"ai-group-summary-v2:{self.group.id}",
expected_response_summary,
timeout=60 * 60 * 24 * 7,
)
# Test the cache hit
with (
patch("sentry.seer.autofix.issue_summary._get_event") as mock_get_event,
patch("sentry.seer.autofix.issue_summary._call_seer") as mock_call_seer,
):
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
assert summary_data == convert_dict_key_case(
expected_response_summary, snake_to_camel_case
)
# Verify that _get_event and _call_seer were not called due to cache hit
mock_get_event.assert_not_called()
mock_call_seer.assert_not_called()
mock_get_acknowledgement.assert_called_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary._generate_summary")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
def test_get_issue_summary_concurrent_wait_for_lock(
self, mock_get_acknowledgement, mock_generate_summary
):
"""Test that a second request waits for the lock and reads from cache."""
mock_get_acknowledgement.return_value = True
cache_key = f"ai-group-summary-v2:{self.group.id}"
# Mock summary generation to take time and cache the result
generated_summary = {"headline": "Generated Summary", "event_id": "gen_event"}
cache_key = f"ai-group-summary-v2:{self.group.id}"
def side_effect_generate(*args, **kwargs):
# Simulate work
time.sleep(0.3)
# Write to cache before returning (simulates behavior after lock release)
cache.set(cache_key, generated_summary, timeout=60)
return generated_summary, 200
mock_generate_summary.side_effect = side_effect_generate
results = {}
exceptions = {}
def target(req_id):
try:
summary_data, status_code = get_issue_summary(self.group, self.user)
results[req_id] = (summary_data, status_code)
except Exception as e:
exceptions[req_id] = e
# Start two threads concurrently
thread1 = threading.Thread(target=target, args=(1,))
thread2 = threading.Thread(target=target, args=(2,))
thread1.start()
# Give thread1 a slight head start, but the lock should handle the race
time.sleep(0.01)
thread2.start()
# Wait for both threads to complete
thread1.join(timeout=5)
thread2.join(timeout=5)
# Assertions
if exceptions:
raise AssertionError(f"Threads raised exceptions: {exceptions}")
assert 1 in results, "Thread 1 did not complete in time"
assert 2 in results, "Thread 2 did not complete in time"
# Both should succeed and get the same summary
assert results[1][1] == 200, f"Thread 1 failed with status {results[1][1]}"
assert results[2][1] == 200, f"Thread 2 failed with status {results[2][1]}"
expected_result = convert_dict_key_case(generated_summary, snake_to_camel_case)
assert results[1][0] == expected_result, "Thread 1 returned wrong summary"
assert results[2][0] == expected_result, "Thread 2 returned wrong summary"
# Check that _generate_summary was only called once
# (by the thread that acquired the lock)
mock_generate_summary.assert_called_once()
# Ensure the cache contains the final result
assert cache.get(cache_key) == generated_summary
@patch("sentry.seer.autofix.issue_summary._generate_summary")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
def test_get_issue_summary_concurrent_force_event_id_bypasses_lock(
self, mock_get_acknowledgement, mock_generate_summary
):
"""Test that force_event_id bypasses lock waiting."""
mock_get_acknowledgement.return_value = True
# Mock summary generation
forced_summary = {"headline": "Forced Summary", "event_id": "force_event"}
mock_generate_summary.return_value = (forced_summary, 200)
# Ensure cache is empty and lock *could* be acquired if attempted
cache_key = f"ai-group-summary-v2:{self.group.id}"
lock_key = f"ai-group-summary-v2-lock:{self.group.id}"
cache.delete(cache_key)
locks.get(lock_key, duration=1).release() # Ensure lock isn't held
# Call with force_event_id=True
summary_data, status_code = get_issue_summary(
self.group, self.user, force_event_id="some_event"
)
assert status_code == 200
assert summary_data == convert_dict_key_case(forced_summary, snake_to_camel_case)
# Ensure generation was called directly
mock_generate_summary.assert_called_once()
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary.sign_with_seer_secret", return_value={})
@patch("sentry.seer.autofix.issue_summary.requests.post")
def test_call_seer_routes_to_summarization_url(self, post: MagicMock, _sign: MagicMock) -> None:
resp = Mock()
resp.json.return_value = {
"group_id": str(self.group.id),
"whats_wrong": "w",
"trace": "t",
"possible_cause": "c",
"headline": "h",
"scores": {},
}
resp.raise_for_status = Mock()
post.return_value = resp
result = _call_seer(self.group, {"event_id": "e1"}, {"trace": "tree"})
assert result.group_id == str(self.group.id)
assert post.call_count == 1
assert (
post.call_args_list[0]
.args[0]
.startswith(f"{settings.SEER_SUMMARIZATION_URL}/v1/automation/summarize/issue")
)
payload = orjson.loads(post.call_args_list[0].kwargs["data"])
assert payload["trace_tree"] == {"trace": "tree"}
resp.raise_for_status.assert_called_once()
@patch("sentry.seer.autofix.issue_summary.sign_with_seer_secret", return_value={})
@patch(
"sentry.seer.autofix.issue_summary.requests.post", side_effect=Exception("primary error")
)
def test_call_seer_raises_exception_when_endpoint_fails(
self, post: MagicMock, sign: MagicMock
) -> None:
with pytest.raises(Exception):
_call_seer(self.group, {"event_id": "e1"}, None)
@patch("sentry.seer.autofix.issue_summary.cache.get")
@patch("sentry.seer.autofix.issue_summary._generate_summary")
@patch("sentry.utils.locking.lock.Lock.blocking_acquire")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
def test_get_issue_summary_lock_timeout(
self,
mock_get_acknowledgement,
mock_blocking_acquire,
mock_generate_summary_core,
mock_cache_get,
):
"""Test that a timeout waiting for the lock returns 503."""
mock_get_acknowledgement.return_value = True
# Simulate lock acquisition always failing with the specific exception
mock_blocking_acquire.side_effect = UnableToAcquireLock
# Simulate cache miss even after timeout
mock_cache_get.return_value = None
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 503
assert summary_data == {"detail": "Timeout waiting for summary generation lock"}
# Ensure lock acquisition was attempted
mock_blocking_acquire.assert_called_once()
# Ensure generation was NOT called
mock_generate_summary_core.assert_not_called()
# Ensure cache was checked three times (once initially, once after lock failure, and once for hideAiFeatures check)
assert mock_cache_get.call_count == 3
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
@patch("sentry.seer.autofix.issue_summary.eventstore.backend.get_event_by_id")
@patch("sentry.seer.autofix.issue_summary.serialize")
def test_get_event_no_recommended(
self, mock_serialize: MagicMock, mock_get_event_by_id: MagicMock
) -> None:
mock_group = Mock()
mock_event = Mock()
mock_user = Mock()
mock_event.event_id = "test_event_id"
mock_group.get_recommended_event_for_environments.return_value = None
mock_group.get_latest_event.return_value = mock_event
mock_group.project.id = "test_project_id"
mock_group.id = "test_group_id"
mock_ready_event = Mock()
mock_get_event_by_id.return_value = mock_ready_event
mock_serialized_event = {"serialized": "event"}
mock_serialize.return_value = mock_serialized_event
result = _get_event(mock_group, mock_user)
assert result == (mock_serialized_event, mock_event)
mock_group.get_recommended_event_for_environments.assert_called_once()
mock_group.get_latest_event.assert_called_once()
mock_get_event_by_id.assert_called_once_with(
"test_project_id", "test_event_id", group_id="test_group_id"
)
mock_serialize.assert_called_once()
@patch("sentry.seer.autofix.issue_summary.eventstore.backend.get_event_by_id")
def test_get_event_recommended_first(self, mock_get_event_by_id: MagicMock) -> None:
mock_group = Mock()
mock_event = Mock()
mock_user = Mock()
mock_event.event_id = "test_event_id"
mock_group.get_recommended_event_for_environments.return_value = mock_event
mock_group.project.id = "test_project_id"
mock_group.id = "test_group_id"
mock_get_event_by_id.return_value = None
result = _get_event(mock_group, mock_user)
assert result == (None, None)
mock_group.get_recommended_event_for_environments.assert_called_once()
mock_group.get_latest_event.assert_not_called()
mock_get_event_by_id.assert_called_once_with(
"test_project_id", "test_event_id", group_id="test_group_id"
)
@patch("sentry.seer.autofix.issue_summary.eventstore.backend.get_event_by_id")
def test_get_event_none_found(self, mock_get_event_by_id: MagicMock) -> None:
mock_group = Mock()
mock_user = Mock()
mock_group.get_recommended_event_for_environments.return_value = None
mock_group.get_latest_event.return_value = None
mock_group.project.id = "test_project_id"
mock_group.id = "test_group_id"
mock_get_event_by_id.return_value = None
result = _get_event(mock_group, mock_user)
assert result == (None, None)
mock_group.get_recommended_event_for_environments.assert_called_once()
mock_group.get_latest_event.assert_called_once()
mock_get_event_by_id.assert_not_called()
@patch("sentry.seer.autofix.issue_summary.eventstore.backend.get_event_by_id")
@patch("sentry.seer.autofix.issue_summary.serialize")
def test_get_event_provided(
self, mock_serialize: MagicMock, mock_get_event_by_id: MagicMock
) -> None:
mock_group = Mock()
mock_event = Mock()
mock_user = Mock()
mock_event.event_id = "test_event_id"
mock_group.project.id = "test_project_id"
mock_group.id = "test_group_id"
mock_get_event_by_id.return_value = mock_event
mock_serialized_event = {"serialized": "event"}
mock_serialize.return_value = mock_serialized_event
result = _get_event(mock_group, mock_user, provided_event_id="test_event_id")
assert result == (mock_serialized_event, mock_event)
mock_group.get_recommended_event_for_environments.assert_not_called()
mock_group.get_latest_event.assert_not_called()
mock_get_event_by_id.assert_has_calls(
[
call(
"test_project_id",
"test_event_id",
group_id="test_group_id",
),
call(
"test_project_id",
"test_event_id",
group_id="test_group_id",
),
]
)
mock_serialize.assert_called_once()
@patch("sentry.seer.autofix.issue_summary._trigger_autofix_task.delay")
@patch("sentry.seer.autofix.issue_summary.get_autofix_state")
@patch("sentry.seer.autofix.issue_summary._generate_fixability_score")
@patch("sentry.quotas.backend.record_seer_run")
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary._get_trace_tree_for_event")
@patch("sentry.seer.autofix.issue_summary._call_seer")
@patch("sentry.seer.autofix.issue_summary._get_event")
def test_get_issue_summary_with_web_vitals_issue(
self,
mock_get_event,
mock_call_seer,
mock_get_trace_tree,
mock_get_acknowledgement,
mock_record_seer_run,
mock_generate_fixability_score,
mock_get_autofix_state,
mock_trigger_autofix_task,
):
mock_get_acknowledgement.return_value = True
mock_get_autofix_state.return_value = None
mock_fixability_response = SummarizeIssueResponse(
group_id=str(self.group.id),
headline="some headline",
whats_wrong="some whats wrong",
trace="some trace",
possible_cause="some possible cause",
scores=SummarizeIssueScores(
fixability_score=0.5,
is_fixable=True,
),
)
mock_generate_fixability_score.return_value = mock_fixability_response
event = Mock(
event_id="test_event_id",
data="test_event_data",
trace_id="test_trace",
datetime=datetime.datetime.now(),
)
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_summary = SummarizeIssueResponse(
group_id=str(self.group.id),
headline="Test headline",
whats_wrong="Test whats wrong",
trace="Test trace",
possible_cause="Test possible cause",
scores=SummarizeIssueScores(
possible_cause_confidence=0.0,
possible_cause_novelty=0.0,
),
)
mock_call_seer.return_value = mock_summary
mock_get_trace_tree.return_value = {"trace": "tree"}
# Create an event
data = load_data("javascript", timestamp=before_now(minutes=1))
event = self.store_event(data=data, project_id=self.project.id)
# Create an occurrence to obtain a WebVitalsGroup group
occurrence_data = self.build_occurrence_data(
event_id=event.event_id,
project_id=self.project.id,
type=WebVitalsGroup.type_id,
issue_title="LCP score needs improvement",
subtitle="/test-transaction has an LCP score of 75",
culprit="/test-transaction",
evidence_data={
"transaction": "/test-transaction",
"vital": "lcp",
"score": 75,
"trace_id": "1234567890",
},
level="info",
)
_, group_info = save_issue_occurrence(occurrence_data, event)
assert group_info is not None
self.group = group_info.group
summary_data, status_code = get_issue_summary(
self.group, self.user, source=SeerAutomationSource.POST_PROCESS
)
assert status_code == 200
mock_record_seer_run.assert_called_once()
mock_trigger_autofix_task.assert_called_once()
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary.run_automation")
@patch("sentry.seer.autofix.issue_summary._get_trace_tree_for_event")
@patch("sentry.seer.autofix.issue_summary._call_seer")
@patch("sentry.seer.autofix.issue_summary._get_event")
def test_get_issue_summary_continues_when_automation_fails(
self,
mock_get_event,
mock_call_seer,
mock_get_trace_tree,
mock_run_automation,
mock_get_acknowledgement,
):
"""Test that issue summary is still returned when run_automation throws an exception."""
mock_get_acknowledgement.return_value = True
# Set up event and seer response
event = Mock(event_id="test_event_id", datetime=datetime.datetime.now())
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_get_trace_tree.return_value = None
mock_summary = SummarizeIssueResponse(
group_id=str(self.group.id),
headline="Test headline",
whats_wrong="Test whats wrong",
trace="Test trace",
possible_cause="Test possible cause",
)
mock_call_seer.return_value = mock_summary
# Make run_automation raise an exception
mock_run_automation.side_effect = Exception("Automation failed")
# Call get_issue_summary and verify it still returns successfully
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
expected_response = mock_summary.dict()
expected_response["event_id"] = event.event_id
assert summary_data == convert_dict_key_case(expected_response, snake_to_camel_case)
# Verify run_automation was called and failed
mock_run_automation.assert_called_once()
mock_call_seer.assert_called_once()
@patch("sentry.seer.autofix.issue_summary._get_trace_tree_for_event")
def test_get_issue_summary_handles_trace_tree_errors(
self,
mock_get_trace_tree,
):
mock_get_trace_tree.side_effect = Exception("boom")
event = Mock(event_id="test_event_id", datetime=datetime.datetime.now())
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
with (
patch(
"sentry.seer.autofix.issue_summary._get_event",
return_value=[serialized_event, event],
),
patch(
"sentry.seer.autofix.issue_summary._call_seer",
return_value=SummarizeIssueResponse(
group_id=str(self.group.id),
headline="headline",
whats_wrong="what",
trace="trace",
possible_cause="cause",
),
) as mock_call_seer,
patch("sentry.seer.autofix.issue_summary.run_automation"),
patch(
"sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement",
return_value=True,
),
):
summary_data, status_code = get_issue_summary(self.group, self.user)
assert status_code == 200
mock_call_seer.assert_called_once_with(self.group, serialized_event, None)
@patch("sentry.seer.autofix.issue_summary.get_seer_org_acknowledgement")
@patch("sentry.seer.autofix.issue_summary.run_automation")
@patch("sentry.seer.autofix.issue_summary._get_trace_tree_for_event")
@patch("sentry.seer.autofix.issue_summary._call_seer")
@patch("sentry.seer.autofix.issue_summary._get_event")
def test_get_issue_summary_with_should_run_automation_false(
self,
mock_get_event,
mock_call_seer,
mock_get_trace_tree,
mock_run_automation,
mock_get_acknowledgement,
):
"""Test that should_run_automation=False prevents run_automation from being called."""
mock_get_acknowledgement.return_value = True
event = Mock(
event_id="test_event_id",
data="test_event_data",
trace_id="test_trace",
datetime=datetime.datetime.now(),
)
serialized_event = {"event_id": "test_event_id", "data": "test_event_data"}
mock_get_event.return_value = [serialized_event, event]
mock_summary = SummarizeIssueResponse(
group_id=str(self.group.id),
headline="Test headline",
whats_wrong="Test whats wrong",
trace="Test trace",
possible_cause="Test possible cause",
scores=SummarizeIssueScores(
possible_cause_confidence=0.0,
possible_cause_novelty=0.0,
),
)
mock_call_seer.return_value = mock_summary
mock_get_trace_tree.return_value = {"trace": "tree"}
expected_response_summary = mock_summary.dict()
expected_response_summary["event_id"] = event.event_id
summary_data, status_code = get_issue_summary(
self.group, self.user, should_run_automation=False
)
assert status_code == 200
assert summary_data == convert_dict_key_case(expected_response_summary, snake_to_camel_case)
mock_get_event.assert_called_once_with(self.group, self.user, provided_event_id=None)
mock_get_trace_tree.assert_called_once()
mock_call_seer.assert_called_once_with(self.group, serialized_event, {"trace": "tree"})
mock_get_acknowledgement.assert_called_once_with(self.group.organization)
# Verify that run_automation was NOT called
mock_run_automation.assert_not_called()
# Check if the cache was set correctly
cached_summary = cache.get(f"ai-group-summary-v2:{self.group.id}")
assert cached_summary == expected_response_summary
|
IssueSummaryTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/the-number-of-ways-to-make-the-sum.py
|
{
"start": 4276,
"end": 4710
}
|
class ____(object):
def numberOfWays(self, n):
"""
:type n: int
:rtype: int
"""
MOD = 10**9+7
def count_1_2(n):
return n//2+1
def count_1_2_6(n):
return sum(count_1_2(n-6*i) for i in xrange((n//6)+1))
return reduce(lambda x, y: (x+count_1_2_6(n-4*y))%MOD, (i for i in xrange(min(n//4, 2)+1)), 0)
# Time: O(n)
# Space: O(n)
# dp
|
Solution3
|
python
|
psf__black
|
tests/data/cases/preview_long_strings__regression.py
|
{
"start": 33224,
"end": 34216
}
|
class ____:
class B:
def foo():
st_error = STError(
f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
" no parent)."
)
def foo():
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE,
)
def foo():
user_regex = _lazy_re_compile(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # dot-atom
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", # quoted-string
xyz,
)
def foo():
user_regex = _lazy_re_compile(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # dot-atom
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", # quoted-string
xyz,
)
|
A
|
python
|
numba__numba
|
numba/core/types/containers.py
|
{
"start": 10113,
"end": 10624
}
|
class ____(_HomogeneousTuple, BaseNamedTuple):
def __init__(self, dtype, count, cls):
self.dtype = dtype
self.count = count
self.fields = tuple(cls._fields)
self.instance_class = cls
name = "%s(%s x %d)" % (cls.__name__, dtype, count)
super(NamedUniTuple, self).__init__(name)
@property
def iterator_type(self):
return UniTupleIter(self)
@property
def key(self):
return self.instance_class, self.dtype, self.count
|
NamedUniTuple
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/distlib/database.py
|
{
"start": 41291,
"end": 51160
}
|
class ____(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
# self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only', req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
in finding the dependencies.
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = set() # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
seen = set(t[0] for t in todo) # already added to todo
while todo:
d = todo.pop()[0]
req.add(d)
pred_list = graph.adjacency_list[d]
for pred in pred_list:
d = pred[0]
if d not in req and d not in seen:
seen.add(d)
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
|
DependencyGraph
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/counting.py
|
{
"start": 7147,
"end": 12060
}
|
class ____(AutoEnum):
day = AutoEnum.auto() # `day` will be translated into an equivalent `time`
time = AutoEnum.auto()
event = AutoEnum.auto()
resource = AutoEnum.auto()
# Implementations for storage backend
def get_database_query(
self,
filter: "EventFilter",
time_unit: TimeUnit,
time_interval: float,
) -> Select[tuple[str, str, DateTime, DateTime, int]]:
db = provide_database_interface()
# The innermost SELECT pulls the matching events and groups them up by their
# buckets. At this point, there may be duplicate buckets for each value, since
# the label of the thing referred to might have changed
fundamental_counts = (
sa.select(
(
self._database_value_expression(
db,
time_unit=time_unit,
time_interval=time_interval,
).label("value")
),
(
self._database_label_expression(
db,
time_unit=time_unit,
time_interval=time_interval,
).label("label")
),
sa.func.max(db.Event.occurred).label("latest"),
sa.func.min(db.Event.occurred).label("oldest"),
sa.func.count().label("count"),
)
.where(sa.and_(*filter.build_where_clauses()))
.group_by("value", "label")
)
fundamental_counts = fundamental_counts.select_from(db.Event)
# An intermediate SELECT takes the fundamental counts and reprojects it with the
# most recent value for the labels of that bucket.
fundamental = fundamental_counts.subquery("fundamental_counts")
with_latest_labels = (
sa.select(
fundamental.c.value,
(
sa.func.first_value(fundamental.c.label).over(
partition_by=fundamental.c.value,
order_by=sa.desc(fundamental.c.latest),
)
).label("label"),
fundamental.c.latest,
fundamental.c.oldest,
fundamental.c.count,
)
.select_from(fundamental)
.subquery("with_latest_labels")
)
# The final SELECT re-sums with the latest labels, ensuring that we get one
# row back for each value. This handles the final ordering as well.
count = sa.func.sum(with_latest_labels.c.count).label("count")
reaggregated = (
sa.select(
with_latest_labels.c.value.label("value"),
with_latest_labels.c.label.label("label"),
count,
sa.func.min(with_latest_labels.c.oldest).label("start_time"),
sa.func.max(with_latest_labels.c.latest).label("end_time"),
)
.select_from(with_latest_labels)
.group_by(with_latest_labels.c.value, with_latest_labels.c.label)
)
if self in (self.day, self.time):
reaggregated = reaggregated.order_by(
sa.asc("start_time"),
)
else:
reaggregated = reaggregated.order_by(
sa.desc(count),
sa.asc(with_latest_labels.c.label),
)
return reaggregated
def _database_value_expression(
self,
db: PrefectDBInterface,
time_unit: TimeUnit,
time_interval: float,
):
if self == self.day:
# The legacy `day` Countable is just a special case of the `time` one
return TimeUnit.day.database_value_expression(1)
elif self == self.time:
return time_unit.database_value_expression(time_interval)
elif self == self.event:
return db.Event.event
elif self == self.resource:
return db.Event.resource_id
else:
raise NotImplementedError()
def _database_label_expression(
self,
db: PrefectDBInterface,
time_unit: TimeUnit,
time_interval: float,
):
if self == self.day:
# The legacy `day` Countable is just a special case of the `time` one
return TimeUnit.day.database_label_expression(db, 1)
elif self == self.time:
return time_unit.database_label_expression(db, time_interval)
elif self == self.event:
return db.Event.event
elif self == self.resource:
return sa.func.coalesce(
db.Event.resource["prefect.resource.name"].astext,
db.Event.resource["prefect.name"].astext,
db.Event.resource_id,
)
else:
raise NotImplementedError()
|
Countable
|
python
|
huggingface__transformers
|
utils/test_module/custom_modeling.py
|
{
"start": 105,
"end": 450
}
|
class ____(PreTrainedModel):
config_class = CustomConfig
def __init__(self, config):
super().__init__(config)
self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.post_init()
def forward(self, x):
return self.linear(x)
def _init_weights(self, module):
pass
|
CustomModel
|
python
|
rapidsai__cudf
|
python/cudf/cudf/pandas/_wrappers/pandas.py
|
{
"start": 4127,
"end": 69363
}
|
class ____:
"""
Descriptor that ensures that accessors like `.dt` and `.str`
return the corresponding accessor types when accessed on `Series`
and `Index` _types_ (not instances).n
Attribute access for _instances_ uses the regular fast-then-slow
lookup defined in `__getattr__`.
"""
def __init__(self, typ):
self._typ = typ
def __set_name__(self, owner, name):
self._name = name
def __get__(self, obj, cls=None):
if obj is None:
return self._typ
else:
return _FastSlowAttribute(self._name).__get__(obj, type(obj))
def Timestamp_Timedelta__new__(cls, *args, **kwargs):
# Call fast/slow constructor
# This takes care of running __init__ as well, but must be paired
# with a removal of the defaulted __init__ that
# make_final_proxy_type provides.
# Timestamp & Timedelta don't always return same types as self,
# hence this method is needed.
self, _ = _fast_slow_function_call(
lambda cls, args, kwargs: cls(*args, **kwargs),
None,
cls,
args,
kwargs,
)
return self
Timedelta = make_final_proxy_type(
"Timedelta",
_Unusable,
pd.Timedelta,
bases=(datetime.timedelta,),
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
"__new__": Timestamp_Timedelta__new__,
"__init__": _DELETE,
},
)
Timestamp = make_final_proxy_type(
"Timestamp",
_Unusable,
pd.Timestamp,
bases=(datetime.datetime,),
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
"__new__": Timestamp_Timedelta__new__,
"__init__": _DELETE,
},
)
DatetimeProperties = make_intermediate_proxy_type(
"DatetimeProperties",
cudf.core.series.DatetimeProperties,
pd.core.indexes.accessors.DatetimeProperties,
)
TimedeltaProperties = make_intermediate_proxy_type(
"TimedeltaProperties",
cudf.core.series.TimedeltaProperties,
pd.core.indexes.accessors.TimedeltaProperties,
)
CombinedDatetimelikeProperties = make_intermediate_proxy_type(
"CombinedDatetimelikeProperties",
cudf.core.series.DatetimeProperties,
pd.core.indexes.accessors.CombinedDatetimelikeProperties,
)
StringMethods = make_intermediate_proxy_type(
"StringMethods",
cudf.core.accessors.string.StringMethods,
pd.core.strings.accessor.StringMethods,
)
ListMethods = make_intermediate_proxy_type(
"ListMethods",
cudf.core.accessors.lists.ListMethods,
pd_ListAccessor,
)
SparseAccessor = make_intermediate_proxy_type(
"SparseAccessor",
_Unusable,
pd.core.arrays.sparse.accessor.SparseAccessor,
)
StructAccessor = make_intermediate_proxy_type(
"StructAccessor",
cudf.core.accessors.struct.StructMethods,
pd_StructAccessor,
)
_CategoricalAccessor = make_intermediate_proxy_type(
"CategoricalAccessor",
cudf.core.accessors.categorical.CategoricalAccessor,
pd.core.arrays.categorical.CategoricalAccessor,
)
def _DataFrame__dir__(self):
# Column names that are string identifiers are added to the dir of the
# DataFrame
# See https://github.com/pandas-dev/pandas/blob/43691a2f5d235b08f0f3aa813d8fdcb7c4ce1e47/pandas/core/indexes/base.py#L878
_pd_df_dir = dir(pd.DataFrame)
return _pd_df_dir + [
colname
for colname in self.columns
if isinstance(colname, str) and colname.isidentifier()
]
def ignore_ipython_canary_check(self, **kwargs):
raise AttributeError(
"_ipython_canary_method_should_not_exist_ doesn't exist"
)
def _DataFrame_dtypes_apply_func(value):
if isinstance(value, (cudf.CategoricalDtype, cudf.IntervalDtype)):
return value.to_pandas()
return value
def _DataFrame__dtypes(self):
result = _fast_slow_function_call(
lambda self: self.dtypes,
None,
self,
)[0]
result = _maybe_wrap_result(
result._fsproxy_slow.apply(_DataFrame_dtypes_apply_func), None
)
result.force_state(_State.SLOW)
return result
@functools.wraps(pd.DataFrame.columns)
def _DataFrame_columns(self):
result = _fast_slow_function_call(
lambda self: self.columns,
None,
self,
)[0]
result.force_state(_State.SLOW)
return result
DataFrame = make_final_proxy_type(
"DataFrame",
cudf.DataFrame,
pd.DataFrame,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__dir__": _DataFrame__dir__,
"__arrow_c_stream__": _FastSlowAttribute("__arrow_c_stream__"),
"_constructor": _FastSlowAttribute("_constructor"),
"_constructor_sliced": _FastSlowAttribute("_constructor_sliced"),
"_accessors": set(),
"_ipython_canary_method_should_not_exist_": ignore_ipython_canary_check,
"columns": property(_DataFrame_columns),
"dtypes": property(_DataFrame__dtypes),
"__iter__": custom_iter,
"attrs": _FastSlowAttribute("attrs"),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"style": _FastSlowAttribute("style", private=True),
"_mgr": _FastSlowAttribute("_mgr", private=True),
"plot": _FastSlowAttribute("plot", private=True),
"sparse": _FastSlowAttribute("sparse", private=True),
"expanding": _FastSlowAttribute("expanding", private=True),
"_AXIS_LEN": _FastSlowAttribute("_AXIS_LEN", private=True),
"_AXIS_TO_AXIS_NUMBER": _FastSlowAttribute(
"_AXIS_TO_AXIS_NUMBER", private=True
),
"_AXIS_ORDERS": _FastSlowAttribute("_AXIS_ORDERS", private=True),
"flags": _FastSlowAttribute("flags", private=True),
"memory_usage": _FastSlowAttribute("memory_usage"),
"__sizeof__": _FastSlowAttribute("__sizeof__"),
},
)
def custom_repr_html(obj):
# This custom method is need to register a html format
# for ipython
return _fast_slow_function_call(
lambda obj: obj._repr_html_(),
None,
obj,
)[0]
if ipython_shell:
# See: https://ipython.readthedocs.io/en/stable/config/integrating.html#formatters-for-third-party-types
html_formatter = ipython_shell.display_formatter.formatters["text/html"]
html_formatter.for_type(DataFrame, custom_repr_html)
def _Series_dtype(self):
# Fast-path to extract dtype from the current
# object without round-tripping through the slow<->fast
return _maybe_wrap_result(self._fsproxy_wrapped.dtype, None)
_SeriesAtIndexer = make_intermediate_proxy_type(
"_SeriesAtIndexer",
cudf.core.series._SeriesAtIndexer,
pd.core.indexing._AtIndexer,
)
_SeriesiAtIndexer = make_intermediate_proxy_type(
"_SeriesiAtIndexer",
cudf.core.series._SeriesiAtIndexer,
pd.core.indexing._iAtIndexer,
)
def _argsort(self, *args, **kwargs):
return _maybe_wrap_result(
self._fsproxy_wrapped.argsort(*args, **kwargs).astype(np.intp), self
)
Series = make_final_proxy_type(
"Series",
cudf.Series,
pd.Series,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__array_function__": array_function_method,
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"__arrow_array__": arrow_array_method,
"__cuda_array_interface__": cuda_array_interface,
"__iter__": custom_iter,
"memory_usage": _FastSlowAttribute("memory_usage"),
"__sizeof__": _FastSlowAttribute("__sizeof__"),
"dt": _AccessorAttr(CombinedDatetimelikeProperties),
"at": _FastSlowAttribute("at"),
"iat": _FastSlowAttribute("iat"),
"str": _AccessorAttr(StringMethods),
"list": _AccessorAttr(ListMethods),
"struct": _AccessorAttr(StructAccessor),
"cat": _AccessorAttr(_CategoricalAccessor),
"_constructor": _FastSlowAttribute("_constructor"),
"_constructor_expanddim": _FastSlowAttribute("_constructor_expanddim"),
"_accessors": set(),
"dtype": property(_Series_dtype),
"argsort": _argsort,
"attrs": _FastSlowAttribute("attrs"),
"_mgr": _FastSlowAttribute("_mgr", private=True),
"array": _FastSlowAttribute("array", private=True),
"sparse": _FastSlowAttribute("sparse", private=True),
"_AXIS_LEN": _FastSlowAttribute("_AXIS_LEN", private=True),
"_AXIS_TO_AXIS_NUMBER": _FastSlowAttribute(
"_AXIS_TO_AXIS_NUMBER", private=True
),
"_AXIS_ORDERS": _FastSlowAttribute("_AXIS_ORDERS", private=True),
"flags": _FastSlowAttribute("flags", private=True),
},
)
def Index__new__(cls, *args, **kwargs):
# Call fast/slow constructor
# This takes care of running __init__ as well, but must be paired
# with a removal of the defaulted __init__ that
# make_final_proxy_type provides.
self, _ = _fast_slow_function_call(
lambda cls, args, kwargs: cls(*args, **kwargs),
None,
cls,
args,
kwargs,
)
return self
def Index__setattr__(self, name, value):
if name.startswith("_"):
object.__setattr__(self, name, value)
return
if name == "name":
setattr(self._fsproxy_wrapped, "name", value)
if name == "names":
setattr(self._fsproxy_wrapped, "names", value)
return _FastSlowAttribute("__setattr__").__get__(self, type(self))(
name, value
)
Index = make_final_proxy_type(
"Index",
cudf.Index,
pd.Index,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__array_function__": array_function_method,
"__arrow_array__": arrow_array_method,
"__cuda_array_interface__": cuda_array_interface,
"dt": _AccessorAttr(CombinedDatetimelikeProperties),
"str": _AccessorAttr(StringMethods),
"cat": _AccessorAttr(_CategoricalAccessor),
"__iter__": custom_iter,
"memory_usage": _FastSlowAttribute("memory_usage"),
"__sizeof__": _FastSlowAttribute("__sizeof__"),
"__init__": _DELETE,
"__new__": Index__new__,
"__setattr__": Index__setattr__,
"_constructor": _FastSlowAttribute("_constructor"),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"_accessors": set(),
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
"name": _FastSlowAttribute("name"),
"nbytes": _FastSlowAttribute("nbytes", private=True),
"array": _FastSlowAttribute("array", private=True),
# TODO: Handle special cases like mergesort being unsupported
# and raising for certain types like Categorical and RangeIndex
"argsort": _argsort,
},
)
RangeIndex = make_final_proxy_type(
"RangeIndex",
cudf.RangeIndex,
pd.RangeIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"name": _FastSlowAttribute("name"),
"nbytes": _FastSlowAttribute("nbytes", private=True),
"array": _FastSlowAttribute("array", private=True),
"_range": _FastSlowAttribute("_range"),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
},
)
SparseDtype = make_final_proxy_type(
"SparseDtype",
_Unusable,
pd.SparseDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
# Special caseing `ArrowDtype` as it is not yet added to `cudf` namespace
# both fast and slow paths are `pd.ArrowDtype`
ArrowDtype = make_final_proxy_type(
"ArrowDtype",
pd.ArrowDtype,
pd.ArrowDtype,
bases=(pd.api.extensions.ExtensionDtype,),
fast_to_slow=lambda fast: fast,
slow_to_fast=lambda slow: slow,
additional_attributes={
"__from_arrow__": _FastSlowAttribute("__from_arrow__"),
"__hash__": _FastSlowAttribute("__hash__"),
"pyarrow_dtype": _FastSlowAttribute("pyarrow_dtype"),
},
)
SparseArray = make_final_proxy_type(
"SparseDtype",
_Unusable,
pd.arrays.SparseArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
CategoricalIndex = make_final_proxy_type(
"CategoricalIndex",
cudf.CategoricalIndex,
pd.CategoricalIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"name": _FastSlowAttribute("name"),
"nbytes": _FastSlowAttribute("nbytes", private=True),
"array": _FastSlowAttribute("array", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
},
)
Categorical = make_final_proxy_type(
"Categorical",
_Unusable,
pd.Categorical,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
CategoricalDtype = make_final_proxy_type(
"CategoricalDtype",
cudf.CategoricalDtype,
pd.CategoricalDtype,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
DatetimeIndex = make_final_proxy_type(
"DatetimeIndex",
cudf.DatetimeIndex,
pd.DatetimeIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
"name": _FastSlowAttribute("name"),
"nbytes": _FastSlowAttribute("nbytes", private=True),
"array": _FastSlowAttribute("array", private=True),
},
)
DatetimeArray = make_final_proxy_type(
"DatetimeArray",
_Unusable,
pd.arrays.DatetimeArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
},
)
DatetimeTZDtype = make_final_proxy_type(
"DatetimeTZDtype",
_Unusable,
pd.DatetimeTZDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
TimedeltaIndex = make_final_proxy_type(
"TimedeltaIndex",
cudf.TimedeltaIndex,
pd.TimedeltaIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
"name": _FastSlowAttribute("name"),
"nbytes": _FastSlowAttribute("nbytes", private=True),
"array": _FastSlowAttribute("array", private=True),
},
)
# pandas 2.x renamed ``PandasArray`` -> ``NumpyExtensionArray``; proxy
# whichever name the installed pandas release provides.
try:
    from pandas.arrays import NumpyExtensionArray as pd_NumpyExtensionArray
    NumpyExtensionArray = make_final_proxy_type(
        "NumpyExtensionArray",
        _Unusable,
        pd_NumpyExtensionArray,
        fast_to_slow=_Unusable(),
        slow_to_fast=_Unusable(),
        additional_attributes={
            "_ndarray": _FastSlowAttribute("_ndarray"),
            "_dtype": _FastSlowAttribute("_dtype"),
        },
    )
except ImportError:
    # Older pandas: fall back to the legacy class name.
    from pandas.arrays import PandasArray as pd_PandasArray
    PandasArray = make_final_proxy_type(
        "PandasArray",
        _Unusable,
        pd_PandasArray,
        fast_to_slow=_Unusable(),
        slow_to_fast=_Unusable(),
        additional_attributes={
            "_ndarray": _FastSlowAttribute("_ndarray"),
            "_dtype": _FastSlowAttribute("_dtype"),
        },
    )
TimedeltaArray = make_final_proxy_type(
"TimedeltaArray",
_Unusable,
pd.arrays.TimedeltaArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
},
)
PeriodIndex = make_final_proxy_type(
"PeriodIndex",
_Unusable,
pd.PeriodIndex,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
"name": _FastSlowAttribute("name"),
},
)
PeriodArray = make_final_proxy_type(
"PeriodArray",
_Unusable,
pd.arrays.PeriodArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
},
)
PeriodDtype = make_final_proxy_type(
"PeriodDtype",
_Unusable,
pd.PeriodDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Period = make_final_proxy_type(
"Period",
_Unusable,
pd.Period,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
MultiIndex = make_final_proxy_type(
"MultiIndex",
cudf.MultiIndex,
pd.MultiIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"names": _FastSlowAttribute("names"),
"_lexsort_depth": _FastSlowAttribute("_lexsort_depth", private=True),
"_should_fallback_to_positional": _FastSlowAttribute(
"_should_fallback_to_positional", private=True
),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
},
)
TimeGrouper = make_intermediate_proxy_type(
"TimeGrouper",
_Unusable,
pd_TimeGrouper,
)
Grouper = make_final_proxy_type(
"Grouper",
cudf.Grouper,
pd.Grouper,
fast_to_slow=lambda fast: pd.Grouper(
**{
k: getattr(fast, k)
for k in {"key", "level", "freq", "closed", "label"}
if getattr(fast, k, None) is not None
}
),
slow_to_fast=lambda slow: cudf.Grouper(
**{
k: getattr(slow, k)
for k in {"key", "level", "freq", "closed", "label"}
if getattr(slow, k, None) is not None
}
),
)
StringArray = make_final_proxy_type(
"StringArray",
_Unusable,
pd.arrays.StringArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"__array__": _FastSlowAttribute("__array__"),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
},
)
# Version-gated proxies: these string-array classes only exist in newer
# pandas releases, so guard their registration on the detected version.
if cudf.core._compat.PANDAS_GE_210:
    ArrowStringArrayNumpySemantics = make_final_proxy_type(
        "ArrowStringArrayNumpySemantics",
        _Unusable,
        pd.core.arrays.string_arrow.ArrowStringArrayNumpySemantics,
        fast_to_slow=_Unusable(),
        slow_to_fast=_Unusable(),
        additional_attributes={
            "_pa_array": _FastSlowAttribute("_pa_array", private=True),
            "__array__": _FastSlowAttribute("__array__", private=True),
        },
    )
if cudf.core._compat.PANDAS_GE_230:
    StringArrayNumpySemantics = make_final_proxy_type(
        "StringArrayNumpySemantics",
        _Unusable,
        pd.core.arrays.string_.StringArrayNumpySemantics,
        bases=(StringArray,),
        fast_to_slow=_Unusable(),
        slow_to_fast=_Unusable(),
    )
ArrowStringArray = make_final_proxy_type(
"ArrowStringArray",
_Unusable,
pd.core.arrays.string_arrow.ArrowStringArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_pa_array": _FastSlowAttribute("_pa_array", private=True),
"__array__": _FastSlowAttribute("__array__", private=True),
"__invert__": _FastSlowAttribute("__invert__"),
"__neg__": _FastSlowAttribute("__neg__"),
"__pos__": _FastSlowAttribute("__pos__", private=True),
"__abs__": _FastSlowAttribute("__abs__"),
"__contains__": _FastSlowAttribute("__contains__"),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
},
)
StorageExtensionDtype = make_final_proxy_type(
"StorageExtensionDtype",
_Unusable,
pd.core.dtypes.base.StorageExtensionDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
StringDtype = make_final_proxy_type(
"StringDtype",
_Unusable,
pd.StringDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
"storage": _FastSlowAttribute("storage"),
},
)
BooleanArray = make_final_proxy_type(
"BooleanArray",
_Unusable,
pd.arrays.BooleanArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
},
)
BooleanDtype = make_final_proxy_type(
"BooleanDtype",
_Unusable,
pd.BooleanDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
IntegerArray = make_final_proxy_type(
"IntegerArray",
_Unusable,
pd.arrays.IntegerArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
},
)
Int8Dtype = make_final_proxy_type(
"Int8Dtype",
_Unusable,
pd.Int8Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Int16Dtype = make_final_proxy_type(
"Int16Dtype",
_Unusable,
pd.Int16Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Int32Dtype = make_final_proxy_type(
"Int32Dtype",
_Unusable,
pd.Int32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Int64Dtype = make_final_proxy_type(
"Int64Dtype",
_Unusable,
pd.Int64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
UInt8Dtype = make_final_proxy_type(
"UInt8Dtype",
_Unusable,
pd.UInt8Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
UInt16Dtype = make_final_proxy_type(
"UInt16Dtype",
_Unusable,
pd.UInt16Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
UInt32Dtype = make_final_proxy_type(
"UInt32Dtype",
_Unusable,
pd.UInt32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
UInt64Dtype = make_final_proxy_type(
"UInt64Dtype",
_Unusable,
pd.UInt64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
IntervalIndex = make_final_proxy_type(
"IntervalIndex",
cudf.IntervalIndex,
pd.IntervalIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={
"__init__": _DELETE,
"__setattr__": Index__setattr__,
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
"_engine": _FastSlowAttribute("_engine", private=True),
"_cache": _FastSlowAttribute("_cache", private=True),
"name": _FastSlowAttribute("name"),
},
)
IntervalArray = make_final_proxy_type(
"IntervalArray",
_Unusable,
pd.arrays.IntervalArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
},
)
IntervalDtype = make_final_proxy_type(
"IntervalDtype",
cudf.IntervalDtype,
pd.IntervalDtype,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Interval = make_final_proxy_type(
"Interval",
_Unusable,
pd.Interval,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
FloatingArray = make_final_proxy_type(
"FloatingArray",
_Unusable,
pd.arrays.FloatingArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"_data": _FastSlowAttribute("_data", private=True),
"_mask": _FastSlowAttribute("_mask", private=True),
},
)
Float32Dtype = make_final_proxy_type(
"Float32Dtype",
_Unusable,
pd.Float32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Float64Dtype = make_final_proxy_type(
"Float64Dtype",
_Unusable,
pd.Float64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
SeriesGroupBy = make_intermediate_proxy_type(
"SeriesGroupBy",
cudf.core.groupby.groupby.SeriesGroupBy,
pd.core.groupby.SeriesGroupBy,
)
DataFrameGroupBy = make_intermediate_proxy_type(
"DataFrameGroupBy",
cudf.core.groupby.groupby.DataFrameGroupBy,
pd.core.groupby.DataFrameGroupBy,
additional_attributes={
"_grouper": _FastSlowAttribute("_grouper", private=True),
},
)
RollingGroupBy = make_intermediate_proxy_type(
"RollingGroupBy",
cudf.core.window.rolling.RollingGroupby,
pd.core.window.rolling.RollingGroupby,
)
# Positional (iloc) indexer for Series: an intermediate proxy created
# transiently by ``Series.iloc``; never constructed directly by users.
_SeriesIlocIndexer = make_intermediate_proxy_type(
    "_SeriesIlocIndexer",
    cudf.core.series._SeriesIlocIndexer,
    pd.core.indexing._iLocIndexer,
)
# Positional (iloc) indexer for DataFrame.  The exposed proxy name must
# match the type it wraps (it previously read "_SeriesIlocIndexer", a
# copy-paste slip, which made reprs/debug output identify the wrong
# indexer type).
_DataFrameIlocIndexer = make_intermediate_proxy_type(
    "_DataFrameIlocIndexer",
    cudf.core.dataframe._DataFrameIlocIndexer,
    pd.core.indexing._iLocIndexer,
)
# Label-based (loc) and scalar (at/iat) indexers: intermediate proxies
# created transiently by the corresponding accessor properties.
_SeriesLocIndexer = make_intermediate_proxy_type(
    "_SeriesLocIndexer",
    cudf.core.series._SeriesLocIndexer,
    pd.core.indexing._LocIndexer,
)
_DataFrameLocIndexer = make_intermediate_proxy_type(
    "_DataFrameLocIndexer",
    cudf.core.dataframe._DataFrameLocIndexer,
    pd.core.indexing._LocIndexer,
)
# NOTE(review): _LocIndexer wraps the exact same fast/slow pair as
# _DataFrameLocIndexer under a second name — presumably kept for
# backwards compatibility; confirm before removing.
_LocIndexer = make_intermediate_proxy_type(
    "_LocIndexer",
    cudf.core.dataframe._DataFrameLocIndexer,
    pd.core.indexing._LocIndexer,
)
_AtIndexer = make_intermediate_proxy_type(
    "_AtIndexer",
    cudf.core.dataframe._DataFrameAtIndexer,
    pd.core.indexing._AtIndexer,
)
_iAtIndexer = make_intermediate_proxy_type(
    "_iAtIndexer",
    cudf.core.dataframe._DataFrameiAtIndexer,
    pd.core.indexing._iAtIndexer,
)
FixedForwardWindowIndexer = make_final_proxy_type(
"FixedForwardWindowIndexer",
_Unusable,
pd.api.indexers.FixedForwardWindowIndexer,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
VariableOffsetWindowIndexer = make_final_proxy_type(
"VariableOffsetWindowIndexer",
_Unusable,
pd.api.indexers.VariableOffsetWindowIndexer,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
Window = make_intermediate_proxy_type(
"Window",
_Unusable,
pd.core.window.rolling.Window,
)
Rolling = make_intermediate_proxy_type(
"Rolling",
cudf.core.window.Rolling,
pd.core.window.Rolling,
)
ExponentialMovingWindow = make_intermediate_proxy_type(
"ExponentialMovingWindow",
cudf.core.window.ewm.ExponentialMovingWindow,
pd.core.window.ewm.ExponentialMovingWindow,
)
ExponentialMovingWindowGroupby = make_intermediate_proxy_type(
"ExponentialMovingWindowGroupby",
_Unusable,
pd.core.window.ewm.ExponentialMovingWindowGroupby,
)
EWMMeanState = make_intermediate_proxy_type(
"EWMMeanState",
_Unusable,
pd.core.window.online.EWMMeanState,
)
Expanding = make_intermediate_proxy_type(
"Expanding",
_Unusable,
pd.core.window.expanding.Expanding,
)
ExpandingGroupby = make_intermediate_proxy_type(
"ExpandingGroupby",
_Unusable,
pd.core.window.expanding.ExpandingGroupby,
)
Resampler = make_intermediate_proxy_type(
"Resampler", cudf.core.resample._Resampler, pd_Resampler
)
DataFrameResampler = make_intermediate_proxy_type(
"DataFrameResampler", cudf.core.resample.DataFrameResampler, pd_Resampler
)
SeriesResampler = make_intermediate_proxy_type(
"SeriesResampler", cudf.core.resample.SeriesResampler, pd_Resampler
)
DatetimeIndexResampler = make_intermediate_proxy_type(
"DatetimeIndexResampler", _Unusable, pd_DatetimeIndexResampler
)
StataReader = make_intermediate_proxy_type(
"StataReader",
_Unusable,
pd.io.stata.StataReader,
)
HDFStore = make_final_proxy_type(
"HDFStore",
_Unusable,
pd.HDFStore,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
ExcelFile = make_final_proxy_type(
"ExcelFile",
_Unusable,
pd.ExcelFile,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
ExcelWriter = make_final_proxy_type(
"ExcelWriter",
_Unusable,
pd.ExcelWriter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
"__fspath__": _FastSlowAttribute("__fspath__"),
},
bases=(os.PathLike,),
metaclasses=(abc.ABCMeta,),
)
try:
from pandas.io.formats.style import Styler as pd_Styler # isort: skip
from pandas.io.formats.style import StylerRenderer as pd_StylerRenderer
StylerRenderer = make_final_proxy_type(
"StylerRenderer",
_Unusable,
pd_StylerRenderer,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
Styler = make_final_proxy_type(
"Styler",
_Unusable,
pd_Styler,
bases=(StylerRenderer,),
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"css": _FastSlowAttribute("css"),
"ctx": _FastSlowAttribute("ctx"),
"index": _FastSlowAttribute("index"),
"data": _FastSlowAttribute("data"),
"_display_funcs": _FastSlowAttribute(
"_display_funcs", private=True
),
"table_styles": _FastSlowAttribute("table_styles"),
"columns": _FastSlowAttribute("columns"),
"caption": _FastSlowAttribute("caption"),
"_todo": _FastSlowAttribute("_todo", private=True),
"ctx_columns": _FastSlowAttribute("ctx_columns"),
"ctx_index": _FastSlowAttribute("ctx_index"),
"_display_funcs_index": _FastSlowAttribute(
"_display_funcs_index", private=True
),
"uuid": _FastSlowAttribute("uuid"),
"hide_index_": _FastSlowAttribute("hide_index_"),
"hide_index_names": _FastSlowAttribute("hide_index_names"),
"hide_columns_": _FastSlowAttribute("hide_columns_"),
"hide_column_names": _FastSlowAttribute("hide_column_names"),
"table_attributes": _FastSlowAttribute("table_attributes"),
},
)
except ImportError:
# Styler requires Jinja to be installed
pass
def _find_user_frame():
    """Return the first stack frame that belongs to user code.

    Walks outward from the current frame and stops at the first frame
    whose module is ``__main__`` or is outside cudf/nvtx internals.

    Raises
    ------
    RuntimeError
        If every frame on the stack belongs to cudf/nvtx internals.
    """
    # TODO: Remove "nvtx." entry once we cross nvtx-0.2.11 as minimum version
    internal_prefixes = ("cudf.", "nvtx.")
    frame = inspect.currentframe()
    while frame is not None:
        name = frame.f_globals.get("__name__", "")
        if name == "__main__" or not name.startswith(internal_prefixes):
            return frame
        frame = frame.f_back
    raise RuntimeError("Could not find the user's frame.")
# Free-function proxies: route these pandas module-level functions through
# the fast-slow machinery.  ``_eval_func`` is kept in a module variable so
# the custom ``_eval`` wrapper below can delegate to it.
_eval_func = _FunctionProxy(_Unusable(), pd.eval)
register_proxy_func(pd.read_pickle)(
    _FunctionProxy(_Unusable(), pd.read_pickle)
)
register_proxy_func(pd.to_pickle)(_FunctionProxy(_Unusable(), pd.to_pickle))
register_proxy_func(pd.api.types.is_list_like)(  # noqa: TID251
    _FunctionProxy(_Unusable(), pd.api.types.is_list_like)  # noqa: TID251
)
# The indexing helpers are registered as their own proxies (identity
# registration) so lookups on them resolve through the proxy table.
register_proxy_func(loc)(loc)
register_proxy_func(iloc)(iloc)
register_proxy_func(at)(at)
register_proxy_func(iat)(iat)
register_proxy_func(setitem)(setitem)
register_proxy_func(getitem)(getitem)
def _get_eval_locals_and_globals(level, local_dict=None, global_dict=None):
    """Resolve the locals/globals mappings to pass to ``pd.eval``.

    Parameters
    ----------
    level : int
        Kept for API compatibility with pandas' ``eval``; the user frame
        is located via ``_find_user_frame`` rather than by walking
        ``level`` frames.
    local_dict, global_dict : dict or None
        Explicit namespaces.  When either is ``None``, it is snapshotted
        from the first stack frame outside cudf/nvtx internals.

    Returns
    -------
    tuple[dict, dict]
        ``(local_dict, global_dict)``.
    """
    # Only pay for (and risk the RuntimeError of) the stack walk when a
    # namespace actually has to be inferred from the caller's frame.
    if local_dict is None or global_dict is None:
        frame = _find_user_frame()
        if local_dict is None:
            local_dict = dict(frame.f_locals)
        if global_dict is None:
            global_dict = dict(frame.f_globals)
    return local_dict, global_dict
@register_proxy_func(pd.core.computation.eval.eval)
@nvtx.annotate(
    "CUDF_PANDAS_EVAL",
    color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
    domain="cudf_pandas",
)
def _eval(
    *args,
    parser="pandas",
    engine=None,
    local_dict=None,
    global_dict=None,
    **kwargs,
):
    """Proxy for ``pd.eval`` that snapshots the *user's* locals/globals
    before delegating to the wrapped implementation, so names in the
    expression resolve against the caller's frame rather than cudf
    internals.
    """
    # Pre-process globals and locals before calling pd.eval.
    level = kwargs.get("level", 0)
    local_dict, global_dict = _get_eval_locals_and_globals(
        level, local_dict, global_dict
    )
    return _eval_func(
        *args,
        parser=parser,
        engine=engine,
        local_dict=local_dict,
        global_dict=global_dict,
        **kwargs,
    )
# Keep a reference to the original (proxied) DataFrame.eval before it is
# monkeypatched below.
_orig_df_eval_method = DataFrame.eval
@register_proxy_func(pd.core.accessor.register_dataframe_accessor)
def _register_dataframe_accessor(name):
    """Register a custom accessor on the proxy DataFrame type."""
    return pd.core.accessor._register_accessor(name, DataFrame)
@register_proxy_func(pd.core.accessor.register_series_accessor)
def _register_series_accessor(name):
    """Register a custom accessor on the proxy Series type."""
    return pd.core.accessor._register_accessor(name, Series)
@register_proxy_func(pd.core.accessor.register_index_accessor)
def _register_index_accessor(name):
    """Register a custom accessor on the proxy Index type."""
    return pd.core.accessor._register_accessor(name, Index)
@nvtx.annotate(
    "CUDF_PANDAS_DATAFRAME_EVAL",
    color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
    domain="cudf_pandas",
)
def _df_eval_method(self, *args, local_dict=None, global_dict=None, **kwargs):
    """Replacement for ``DataFrame.eval`` that resolves locals/globals
    from the user's stack frame (see ``_get_eval_locals_and_globals``).
    """
    level = kwargs.get("level", 0)
    local_dict, global_dict = _get_eval_locals_and_globals(
        level, local_dict, global_dict
    )
    return _orig_df_eval_method(
        self, *args, local_dict=local_dict, global_dict=global_dict, **kwargs
    )
# Keep a reference to the original (proxied) DataFrame.query before it is
# monkeypatched below.
_orig_query_eval_method = DataFrame.query
@nvtx.annotate(
    "CUDF_PANDAS_DATAFRAME_QUERY",
    color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
    domain="cudf_pandas",
)
def _df_query_method(self, *args, local_dict=None, global_dict=None, **kwargs):
    """Replacement for ``DataFrame.query`` that resolves locals/globals
    from the user's stack frame.
    """
    # `query` API internally calls `eval`, hence we reuse `eval`'s helper
    # to populate the locals and globals dicts.
    level = kwargs.get("level", 0)
    local_dict, global_dict = _get_eval_locals_and_globals(
        level, local_dict, global_dict
    )
    return _orig_query_eval_method(
        self, *args, local_dict=local_dict, global_dict=global_dict, **kwargs
    )
# Install the frame-aware replacements defined above.
DataFrame.eval = _df_eval_method
DataFrame.query = _df_query_method
# I/O reader objects returned by pandas readers in iterator/chunked mode;
# these are intermediate proxies created by read_json/read_csv/etc.
_JsonReader = make_intermediate_proxy_type(
    "_JsonReader",
    _Unusable,
    pd.io.json._json.JsonReader,
)
_TextFileReader = make_intermediate_proxy_type(
    "_TextFileReader", _Unusable, pd.io.parsers.readers.TextFileReader
)
_XportReader = make_intermediate_proxy_type(
    "_XportReader", _Unusable, pd_XportReader
)
_SAS7BDATReader = make_intermediate_proxy_type(
    "_SAS7BDATReader", _Unusable, pd_SAS7BDATReader
)
# Holiday-calendar support: pandas builds calendar classes via a metaclass,
# so both the metaclass and the concrete calendars get final proxies.
USFederalHolidayCalendar = make_final_proxy_type(
    "USFederalHolidayCalendar",
    _Unusable,
    pd_USFederalHolidayCalendar,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__hash__": _FastSlowAttribute("__hash__"),
    },
)
HolidayCalendarMetaClass = make_final_proxy_type(
    "HolidayCalendarMetaClass",
    _Unusable,
    pd_HolidayCalendarMetaClass,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__hash__": _FastSlowAttribute("__hash__"),
    },
)
@register_proxy_func(pd_HolidayCalendarFactory)
def holiday_calendar_factory_wrapper(*args, **kwargs):
    """Wrap ``HolidayCalendarFactory``, returning the unproxied slow class.

    The factory produces new calendar *types*; returning the slow class
    keeps pandas' metaclass machinery working on the result.
    """
    # Call the original HolidayCalendarFactory
    result = _FunctionProxy(_Unusable(), pd_HolidayCalendarFactory)(
        *args, **kwargs
    )
    # Return the slow proxy of the result
    return result._fsproxy_slow
AbstractHolidayCalendar = make_final_proxy_type(
"AbstractHolidayCalendar",
_Unusable,
pd_AbstractHolidayCalendar,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
metaclasses=(pd_HolidayCalendarMetaClass,),
)
Holiday = make_final_proxy_type(
"Holiday",
_Unusable,
pd_Holiday,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USThanksgivingDay = make_final_proxy_type(
"USThanksgivingDay",
_Unusable,
pd_USThanksgivingDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USColumbusDay = make_final_proxy_type(
"USColumbusDay",
_Unusable,
pd_USColumbusDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USLaborDay = make_final_proxy_type(
"USLaborDay",
_Unusable,
pd_USLaborDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USMemorialDay = make_final_proxy_type(
"USMemorialDay",
_Unusable,
pd_USMemorialDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USMartinLutherKingJr = make_final_proxy_type(
"USMartinLutherKingJr",
_Unusable,
pd_USMartinLutherKingJr,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
USPresidentsDay = make_final_proxy_type(
"USPresidentsDay",
_Unusable,
pd_USPresidentsDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
GoodFriday = make_final_proxy_type(
"GoodFriday",
_Unusable,
pd_GoodFriday,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
EasterMonday = make_final_proxy_type(
"EasterMonday",
_Unusable,
pd_EasterMonday,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
FY5253 = make_final_proxy_type(
"FY5253",
_Unusable,
pd.offsets.FY5253,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BDay = make_final_proxy_type(
"BDay",
_Unusable,
pd.offsets.BDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BMonthBegin = make_final_proxy_type(
"BMonthBegin",
_Unusable,
pd.offsets.BMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BMonthEnd = make_final_proxy_type(
"BMonthEnd",
_Unusable,
pd.offsets.BMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BQuarterBegin = make_final_proxy_type(
"BQuarterBegin",
_Unusable,
pd.offsets.BQuarterBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BQuarterEnd = make_final_proxy_type(
"BQuarterEnd",
_Unusable,
pd.offsets.BQuarterEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BusinessDay = make_final_proxy_type(
"BusinessDay",
_Unusable,
pd.offsets.BusinessDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BusinessHour = make_final_proxy_type(
"BusinessHour",
_Unusable,
pd.offsets.BusinessHour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BusinessMonthBegin = make_final_proxy_type(
"BusinessMonthBegin",
_Unusable,
pd.offsets.BusinessMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BusinessMonthEnd = make_final_proxy_type(
"BusinessMonthEnd",
_Unusable,
pd.offsets.BusinessMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BYearBegin = make_final_proxy_type(
"BYearBegin",
_Unusable,
pd.offsets.BYearBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BYearEnd = make_final_proxy_type(
"BYearEnd",
_Unusable,
pd.offsets.BYearEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CBMonthBegin = make_final_proxy_type(
"CBMonthBegin",
_Unusable,
pd.offsets.CBMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CBMonthEnd = make_final_proxy_type(
"CBMonthEnd",
_Unusable,
pd.offsets.CBMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CDay = make_final_proxy_type(
"CDay",
_Unusable,
pd.offsets.CDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CustomBusinessDay = make_final_proxy_type(
"CustomBusinessDay",
_Unusable,
pd.offsets.CustomBusinessDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CustomBusinessHour = make_final_proxy_type(
"CustomBusinessHour",
_Unusable,
pd.offsets.CustomBusinessHour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CustomBusinessMonthBegin = make_final_proxy_type(
"CustomBusinessMonthBegin",
_Unusable,
pd.offsets.CustomBusinessMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
CustomBusinessMonthEnd = make_final_proxy_type(
"CustomBusinessMonthEnd",
_Unusable,
pd.offsets.CustomBusinessMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
DateOffset = make_final_proxy_type(
"DateOffset",
_Unusable,
pd.offsets.DateOffset,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
BaseOffset = make_final_proxy_type(
"BaseOffset",
_Unusable,
pd.offsets.BaseOffset,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Day = make_final_proxy_type(
"Day",
_Unusable,
pd.offsets.Day,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Easter = make_final_proxy_type(
"Easter",
_Unusable,
pd.offsets.Easter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
FY5253Quarter = make_final_proxy_type(
"FY5253Quarter",
_Unusable,
pd.offsets.FY5253Quarter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Hour = make_final_proxy_type(
"Hour",
_Unusable,
pd.offsets.Hour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
LastWeekOfMonth = make_final_proxy_type(
"LastWeekOfMonth",
_Unusable,
pd.offsets.LastWeekOfMonth,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Micro = make_final_proxy_type(
"Micro",
_Unusable,
pd.offsets.Micro,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Milli = make_final_proxy_type(
"Milli",
_Unusable,
pd.offsets.Milli,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Minute = make_final_proxy_type(
"Minute",
_Unusable,
pd.offsets.Minute,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
MonthBegin = make_final_proxy_type(
"MonthBegin",
_Unusable,
pd.offsets.MonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
MonthEnd = make_final_proxy_type(
"MonthEnd",
_Unusable,
pd.offsets.MonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Nano = make_final_proxy_type(
"Nano",
_Unusable,
pd.offsets.Nano,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
QuarterBegin = make_final_proxy_type(
"QuarterBegin",
_Unusable,
pd.offsets.QuarterBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
QuarterEnd = make_final_proxy_type(
"QuarterEnd",
_Unusable,
pd.offsets.QuarterEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Second = make_final_proxy_type(
"Second",
_Unusable,
pd.offsets.Second,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
SemiMonthBegin = make_final_proxy_type(
"SemiMonthBegin",
_Unusable,
pd.offsets.SemiMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
SemiMonthEnd = make_final_proxy_type(
"SemiMonthEnd",
_Unusable,
pd.offsets.SemiMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Tick = make_final_proxy_type(
"Tick",
_Unusable,
pd.offsets.Tick,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Week = make_final_proxy_type(
"Week",
_Unusable,
pd.offsets.Week,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
WeekOfMonth = make_final_proxy_type(
"WeekOfMonth",
_Unusable,
pd.offsets.WeekOfMonth,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
YearBegin = make_final_proxy_type(
"YearBegin",
_Unusable,
pd.offsets.YearBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
YearEnd = make_final_proxy_type(
"YearEnd",
_Unusable,
pd.offsets.YearEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
Flags = make_final_proxy_type(
"Flags",
_Unusable,
pd.Flags,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
NamedAgg = make_final_proxy_type(
"NamedAgg",
_Unusable,
pd.NamedAgg,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__hash__": _FastSlowAttribute("__hash__"),
},
)
def _arrow_extension_array_astype(self, dtype=None, copy: bool = False):
    """``astype`` override used by the ArrowExtensionArray proxy.

    When ``copy`` is exactly ``False`` and the requested dtype already
    matches, the proxy itself is returned unchanged; otherwise the cast is
    delegated to the wrapped pandas array and the result re-wrapped.
    """
    # Identity test on False deliberately mirrors pandas' copy semantics;
    # dtype comparison is only attempted on that no-copy fast path.
    if copy is False:
        if cudf.api.types.is_dtype_equal(self.dtype, dtype):
            return self
    converted = self._fsproxy_wrapped.astype(dtype=dtype, copy=copy)
    return _maybe_wrap_result(converted, self)
# Proxy for pandas' pyarrow-backed extension array. Several dunders and the
# private "_pa_array" backing attribute are forwarded explicitly; astype is
# replaced by _arrow_extension_array_astype to short-circuit no-op casts.
ArrowExtensionArray = make_final_proxy_type(
    "ArrowExtensionArray",
    _Unusable,
    pd.arrays.ArrowExtensionArray,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "_pa_array": _FastSlowAttribute("_pa_array", private=True),
        "__array__": _FastSlowAttribute("__array__", private=True),
        "__invert__": _FastSlowAttribute("__invert__"),
        "__neg__": _FastSlowAttribute("__neg__"),
        "__pos__": _FastSlowAttribute("__pos__", private=True),
        "__abs__": _FastSlowAttribute("__abs__"),
        "__contains__": _FastSlowAttribute("__contains__"),
        "__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
        "__arrow_array__": arrow_array_method,
        "astype": _arrow_extension_array_astype,
    },
)
# FrozenList lives in a private pandas module but appears in public return
# values (e.g. MultiIndex.names), so it needs a proxy too.
FrozenList = make_final_proxy_type(
    "FrozenList",
    _Unusable,
    pd.core.indexes.frozen.FrozenList,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__hash__": _FastSlowAttribute("__hash__"),
    },
)
# The following are subclasses of `pandas.core.base.PandasObj`,
# excluding subclasses defined in `pandas.core.internals`. These are
# not strictly part of the Pandas public API, but they do appear as
# return types.
# NOTE(review): these reach into private pandas modules
# (pd.core.generic / pd.core.indexes.*) and may need updating when the
# pinned pandas version changes.
NDFrame = make_final_proxy_type(
    "NDFrame",
    _Unusable,
    pd.core.generic.NDFrame,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__array__": array_method,
        "__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
    },
)
DatetimeTimedeltaMixin = make_final_proxy_type(
    "DatetimeTimedeltaMixin",
    _Unusable,
    pd.core.indexes.datetimelike.DatetimeTimedeltaMixin,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__array__": array_method,
        "__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
    },
)
DatetimeIndexOpsMixin = make_final_proxy_type(
    "DatetimeIndexOpsMixin",
    _Unusable,
    pd.core.indexes.datetimelike.DatetimeIndexOpsMixin,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "__array__": array_method,
        "__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
    },
)
# This index subclass additionally forwards its private caches so that
# attribute access on the proxy hits the wrapped object's state.
NDArrayBackedExtensionIndex = make_final_proxy_type(
    "NDArrayBackedExtensionIndex",
    _Unusable,
    pd.core.indexes.extension.NDArrayBackedExtensionIndex,
    fast_to_slow=_Unusable(),
    slow_to_fast=_Unusable(),
    additional_attributes={
        "_cache": _FastSlowAttribute("_cache", private=True),
        "_engine": _FastSlowAttribute("_engine", private=True),
        "__array__": array_method,
        "__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
    },
)
# Remaining PandasObject subclasses that show up as return types. Proxies
# for them are generated mechanically below and injected into this
# module's namespace via globals().
_PANDAS_OBJ_FINAL_TYPES = [
    pd.core.indexes.accessors.PeriodProperties,
    pd.core.indexes.accessors.Properties,
    pd.plotting._core.PlotAccessor,
    pd.io.sql.SQLiteTable,
    pd.io.sql.SQLTable,
    pd.io.sql.SQLDatabase,
    pd.io.sql.SQLiteDatabase,
    pd.io.sql.PandasSQL,
]
# These act as bases/intermediaries rather than concrete results, so they
# get intermediate (non-final) proxies.
_PANDAS_OBJ_INTERMEDIATE_TYPES = [
    pd.core.groupby.groupby.GroupByPlot,
    pd.core.groupby.groupby.GroupBy,
    pd.core.groupby.groupby.BaseGroupBy,
]
for typ in _PANDAS_OBJ_FINAL_TYPES:
    if typ.__name__ in globals():
        # if we already defined a proxy type
        # corresponding to this type, use that.
        continue
    globals()[typ.__name__] = make_final_proxy_type(
        typ.__name__,
        _Unusable,
        typ,
        fast_to_slow=_Unusable(),
        slow_to_fast=_Unusable(),
        additional_attributes={
            "__hash__": _FastSlowAttribute("__hash__"),
        },
    )
for typ in _PANDAS_OBJ_INTERMEDIATE_TYPES:
    if typ.__name__ in globals():
        # if we already defined a proxy type
        # corresponding to this type, use that.
        continue
    globals()[typ.__name__] = make_intermediate_proxy_type(
        typ.__name__,
        _Unusable,
        typ,
    )
# Save the original __init__ methods
# These references are captured before initial_setup() monkeypatches the
# corresponding cudf entry points, so the wrappers below can delegate to
# the unpatched behavior.
_original_Series_init = cudf.Series.__init__
_original_DataFrame_init = cudf.DataFrame.__init__
_original_Index_init = cudf.Index.__init__
_original_from_pandas = cudf.from_pandas
_original_DataFrame_from_pandas = cudf.DataFrame.from_pandas
_original_Series_from_pandas = cudf.Series.from_pandas
_original_Index_from_pandas = cudf.Index.from_pandas
_original_MultiIndex_from_pandas = cudf.MultiIndex.from_pandas
def wrap_init(original_init):
    """Return a constructor wrapper that unwraps fast/slow proxy inputs.

    The wrapped ``__init__`` converts a proxy ``data`` argument to its GPU
    object first, and short-circuits entirely when ``data`` is already an
    instance of the class being constructed and no other arguments were
    given.
    """

    @functools.wraps(original_init)
    def wrapped_init(self, data=None, *args, **kwargs):
        if is_proxy_object(data):
            data = data.as_gpu_object()
        trivial_copy = (
            isinstance(data, type(self)) and not args and not kwargs
        )
        if trivial_copy:
            # Short-circuit: adopt the existing object's state instead of
            # re-running the constructor. Common in `cuml` and `xgboost`.
            # For perf impact see:
            # https://github.com/rapidsai/cudf/pull/17878/files#r1936469215
            self.__dict__.update(data.__dict__)
        else:
            original_init(self, data, *args, **kwargs)

    return wrapped_init
def wrap_call(original_call):
    """Return a wrapper that unwraps a proxy ``data`` before delegating."""

    @functools.wraps(original_call)
    def wrapped_call(cls, data, *args, **kwargs):
        payload = data.as_gpu_object() if is_proxy_object(data) else data
        return original_call(cls, payload, *args, **kwargs)

    return wrapped_call
def wrap_from_pandas(original_call):
    """Wrap ``cudf.from_pandas`` so proxy inputs short-circuit.

    A proxy object is already backed by a GPU object, so it is unwrapped
    and returned directly; anything else goes through the original
    conversion.
    """

    @functools.wraps(original_call)
    def wrapped_from_pandas(obj, *args, **kwargs):
        if not is_proxy_object(obj):
            return original_call(obj, *args, **kwargs)
        return obj.as_gpu_object()

    return wrapped_from_pandas
def wrap_from_pandas_dataframe(original_call):
    """Wrap ``DataFrame.from_pandas`` to accept proxy/GPU inputs.

    Proxies are unwrapped first; an input that is already a
    ``cudf.DataFrame`` is returned as-is instead of being converted again.
    """

    @functools.wraps(original_call)
    def wrapped_from_pandas_dataframe(dataframe, *args, **kwargs):
        target = (
            dataframe.as_gpu_object()
            if is_proxy_object(dataframe)
            else dataframe
        )
        if isinstance(target, cudf.DataFrame):
            return target
        return original_call(target, *args, **kwargs)

    return wrapped_from_pandas_dataframe
def wrap_from_pandas_series(original_call):
    """Wrap ``Series.from_pandas`` to accept proxy/GPU inputs.

    Proxies are unwrapped first; an input that is already a
    ``cudf.Series`` is returned as-is instead of being converted again.
    """

    @functools.wraps(original_call)
    def wrapped_from_pandas_series(s, *args, **kwargs):
        target = s.as_gpu_object() if is_proxy_object(s) else s
        if isinstance(target, cudf.Series):
            return target
        return original_call(target, *args, **kwargs)

    return wrapped_from_pandas_series
def wrap_from_pandas_index(original_call):
    """Wrap ``Index.from_pandas`` to accept proxy/GPU inputs.

    Proxies are unwrapped first; an input that is already a
    ``cudf.Index`` is returned as-is instead of being converted again.
    """

    @functools.wraps(original_call)
    def wrapped_from_pandas_index(index, *args, **kwargs):
        target = index.as_gpu_object() if is_proxy_object(index) else index
        if isinstance(target, cudf.Index):
            return target
        return original_call(target, *args, **kwargs)

    return wrapped_from_pandas_index
def wrap_from_pandas_multiindex(original_call):
    """Wrap ``MultiIndex.from_pandas`` to accept proxy/GPU inputs.

    Proxies are unwrapped first; an input that is already a
    ``cudf.MultiIndex`` is returned as-is instead of being converted again.
    """

    @functools.wraps(original_call)
    def wrapped_from_pandas_multiindex(multiindex, *args, **kwargs):
        target = (
            multiindex.as_gpu_object()
            if is_proxy_object(multiindex)
            else multiindex
        )
        if isinstance(target, cudf.MultiIndex):
            return target
        return original_call(target, *args, **kwargs)

    return wrapped_from_pandas_multiindex
@functools.wraps(_original_DataFrame_init)
def DataFrame_init_(
    self, data=None, index=None, columns=None, *args, **kwargs
):
    """Replacement ``cudf.DataFrame.__init__`` that unwraps proxy inputs.

    ``data`` and ``index`` proxies are converted to their GPU objects;
    ``columns`` proxies are converted to CPU objects (column labels stay
    host-side). When ``data`` was a proxy of the same DataFrame type and
    nothing else was passed, the existing object's state is adopted
    directly instead of re-running the constructor.
    """
    was_proxy = is_proxy_object(data)
    if was_proxy:
        data = data.as_gpu_object()
    if is_proxy_object(index):
        index = index.as_gpu_object()
    if is_proxy_object(columns):
        columns = columns.as_cpu_object()
    fast_path = (
        was_proxy
        and isinstance(data, type(self))
        and index is None
        and columns is None
        and not args
        and not kwargs
    )
    if fast_path:
        self.__dict__.update(data.__dict__)
        return
    _original_DataFrame_init(self, data, index, columns, *args, **kwargs)
def initial_setup():
    """
    This is a one-time setup function that can contain
    any initialization code that needs to be run once
    when the module is imported. Currently, it is used
    to wrap the __init__ methods and enable pandas compatibility mode.
    """
    # Monkeypatch cudf constructors so they transparently accept fast/slow
    # proxy objects (see wrap_init / DataFrame_init_ above).
    cudf.Series.__init__ = wrap_init(_original_Series_init)
    cudf.Index.__init__ = wrap_init(_original_Index_init)
    cudf.DataFrame.__init__ = DataFrame_init_
    # Likewise for every from_pandas entry point.
    cudf.from_pandas = wrap_from_pandas(_original_from_pandas)
    cudf.DataFrame.from_pandas = wrap_from_pandas_dataframe(
        _original_DataFrame_from_pandas
    )
    cudf.Series.from_pandas = wrap_from_pandas_series(
        _original_Series_from_pandas
    )
    cudf.Index.from_pandas = wrap_from_pandas_index(
        _original_Index_from_pandas
    )
    cudf.MultiIndex.from_pandas = wrap_from_pandas_multiindex(
        _original_MultiIndex_from_pandas
    )
    # cudf.pandas always runs with pandas-compatible semantics enabled.
    cudf.set_option("mode.pandas_compatible", True)
def _unpickle_obj(pickled_args):
    """Rebuild an object from a pickled ``(reconstructor, args)`` pair.

    Unpickling happens with the module accelerator disabled so the real
    pandas (not the proxy layer) reconstructs the object.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        reconstructor, rebuild_args = pickle.loads(pickled_args)
        result = reconstructor(*rebuild_args)
    return result
def _reduce_proxied_td_obj(obj):
    """Pickle reducer for proxied Timestamp/Timedelta objects.

    Serializes the wrapped (real pandas) object's ``__reduce__`` payload
    with the accelerator disabled, and pairs it with ``_unpickle_obj``.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        payload = pickle.dumps(obj._fsproxy_wrapped.__reduce__())
    return _unpickle_obj, (payload,)
def _reduce_obj(obj):
    """Pickle reducer for plain pandas objects.

    Serializes the object's own ``__reduce__`` payload with the
    accelerator disabled, and pairs it with ``_unpickle_obj``.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        payload = pickle.dumps(obj.__reduce__())
    return _unpickle_obj, (payload,)
def _generic_reduce_obj(obj, unpickle_func):
    """Pickle reducer parameterized by its unpickling counterpart.

    Like ``_reduce_obj`` but the caller chooses which unpickler
    (e.g. ``_frame_unpickle_obj`` or ``_index_unpickle_obj``) will
    reconstruct the object.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        payload = pickle.dumps(obj.__reduce__())
    return unpickle_func, (payload,)
def _frame_unpickle_obj(pickled_args):
    """Rebuild a Series/DataFrame from a pickled ``__reduce__`` payload.

    The payload is ``(reconstructor, args, state)``; the object is built
    from the first two parts and then restored via ``__setstate__``, all
    with the module accelerator disabled.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        intermediate = pickle.loads(pickled_args)
        rebuild = intermediate[0]
        frame = rebuild(*intermediate[1])
        frame.__setstate__(intermediate[2])
    return frame
def _index_unpickle_obj(pickled_args):
    """Rebuild an Index from a pickled ``__reduce__`` payload.

    Unlike frames, indexes are fully constructed by the reconstructor and
    its arguments; no ``__setstate__`` step is needed.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        intermediate = pickle.loads(pickled_args)
        rebuild = intermediate[0]
        index = rebuild(*intermediate[1])
    return index
def _reduce_offset_obj(obj):
    """Pickle reducer for DateOffset objects.

    Serializes the offset's ``__getstate__`` dict (rather than
    ``__reduce__``) and pairs it with ``_unpickle_offset_obj``.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        payload = pickle.dumps(obj.__getstate__())
    return _unpickle_offset_obj, (payload,)
def _unpickle_offset_obj(pickled_args):
    """Rebuild a DateOffset from its pickled ``__getstate__`` dict.

    Internal bookkeeping keys are stripped before the remaining state is
    fed back to the ``DateOffset`` constructor as keyword arguments.
    """
    from cudf.pandas.module_accelerator import disable_module_accelerator

    with disable_module_accelerator():
        state = pickle.loads(pickled_args)
        for internal_key in ("_offset", "_use_relativedelta"):
            state.pop(internal_key)
        offset = pd._libs.tslibs.offsets.DateOffset(**state)
    return offset
# Global copyreg registrations: proxied Timestamp/Timedelta use the
# proxy-aware reducer, while the real pandas classes use the plain one.
copyreg.dispatch_table[Timestamp] = _reduce_proxied_td_obj
copyreg.dispatch_table[pd.Timestamp] = _reduce_obj
# same reducer/unpickler can be used for Timedelta:
copyreg.dispatch_table[Timedelta] = _reduce_proxied_td_obj
copyreg.dispatch_table[pd.Timedelta] = _reduce_obj
# TODO: Need to find a way to unpickle cross-version(old) pickled objects.
# Register custom reducer/unpickler functions for pandas objects
# so that they can be pickled/unpickled correctly:
# Frames need the __setstate__-based unpickler; every Index flavor uses
# the reconstructor-only one.
copyreg.dispatch_table[pd.Series] = lambda obj: _generic_reduce_obj(
    obj, _frame_unpickle_obj
)
copyreg.dispatch_table[pd.DataFrame] = lambda obj: _generic_reduce_obj(
    obj, _frame_unpickle_obj
)
copyreg.dispatch_table[pd.Index] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
copyreg.dispatch_table[pd.RangeIndex] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
copyreg.dispatch_table[pd.DatetimeIndex] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
copyreg.dispatch_table[pd.TimedeltaIndex] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
copyreg.dispatch_table[pd.CategoricalIndex] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
copyreg.dispatch_table[pd.MultiIndex] = lambda obj: _generic_reduce_obj(
    obj, _index_unpickle_obj
)
# DateOffset state round-trips through __getstate__ (see _reduce_offset_obj).
copyreg.dispatch_table[pd._libs.tslibs.offsets.DateOffset] = _reduce_offset_obj
|
_AccessorAttr
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/embed/test_util__embed.py
|
{
"start": 3200,
"end": 5089
}
|
class ____:
def test_error_on_empty_list(self) -> None:
with pytest.raises(ValueError) as e:
with beu.OutputDocumentFor([]):
pass
assert str(e.value).endswith(_ODFERR)
def test_error_on_mixed_list(self) -> None:
p = SomeModel()
d = Document()
orig_theme = d.theme
with pytest.raises(ValueError) as e:
with beu.OutputDocumentFor([p, d]):
pass
assert str(e.value).endswith(_ODFERR)
assert d.theme is orig_theme
@pytest.mark.parametrize('v', [10, -0,3, "foo", True])
def test_error_on_wrong_types(self, v) -> None:
with pytest.raises(ValueError) as e:
with beu.OutputDocumentFor(v):
pass
assert str(e.value).endswith(_ODFERR)
def test_with_doc_in_child_raises_error(self) -> None:
doc = Document()
p1 = SomeModel()
p2 = OtherModel(child=SomeModel())
doc.add_root(p2.child)
assert p1.document is None
assert p2.document is None
assert p2.child.document is doc
with pytest.raises(RuntimeError) as e:
with beu.OutputDocumentFor([p1, p2]):
pass
assert "already in a doc" in str(e.value)
@patch('bokeh.document.document.check_integrity')
def test_validates_document_by_default(self, check_integrity, test_plot) -> None:
with beu.OutputDocumentFor([test_plot]):
pass
assert check_integrity.called
@patch('bokeh.document.document.check_integrity')
def test_doesnt_validate_doc_due_to_env_var(self, check_integrity, monkeypatch: pytest.MonkeyPatch, test_plot) -> None:
monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
with beu.OutputDocumentFor([test_plot]):
pass
assert not check_integrity.called
|
Test_OutputDocumentFor_general
|
python
|
pikepdf__pikepdf
|
src/pikepdf/form.py
|
{
"start": 20723,
"end": 22371
}
|
class ____:
"""Represents a single option for a choice field."""
def __init__(self, field: ChoiceField, opt: String | Array, index: int | None):
"""Create a new option for a choice field."""
self._field = field
self._opt = opt
self._index = index
@property
def display_value(self):
"""The value that will be displayed on-screen to the user in a PDF reader."""
if isinstance(self._opt, Array):
return self._opt[1]
else:
return self._opt
@property
def export_value(self):
"""The value that will be used when exporting data from this form."""
if isinstance(self._opt, Array):
return self._opt[0]
else:
return self._opt
@property
def is_hidden(self) -> bool:
"""Is this option hidden?
Hidden options are still settable via code, but are not shown to users in PDF
reader applications.
"""
return self._index is not None and self._index < self._field._field.obj.get(
Name.TI, 0
)
@property
def is_preset(self) -> bool:
"""Is this option one of the field's preset options?
If false, this is a manually entered value typed by the user in an editable
choice field.
"""
return self._index is not None
def select(self):
"""Set this option as the selected option."""
self._field.selected = self
@property
def selected(self) -> bool:
"""Is this the currently selected option?"""
return self._field.value == self.export_value
|
ChoiceFieldOption
|
python
|
celery__celery
|
t/integration/test_canvas.py
|
{
"start": 124964,
"end": 128215
}
|
class ____:
"""
Confirm nested signatures can be rebuilt after passing through a backend.
These tests are expected to finish and return `None` or raise an exception
in the error case. The exception indicates that some element of a nested
signature object was not properly deserialized from its dictionary
representation, and would explode later on if it were used as a signature.
"""
def test_rebuild_nested_chain_chain(self, manager):
sig = chain(
tasks.return_nested_signature_chain_chain.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_chain_group(self, manager):
sig = chain(
tasks.return_nested_signature_chain_group.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_chain_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
tasks.return_nested_signature_chain_chord.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_group_chain(self, manager):
sig = chain(
tasks.return_nested_signature_group_chain.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_group_group(self, manager):
sig = chain(
tasks.return_nested_signature_group_group.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_group_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
tasks.return_nested_signature_group_chord.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_chord_chain(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
tasks.return_nested_signature_chord_chain.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_chord_group(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
tasks.return_nested_signature_chord_group.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
def test_rebuild_nested_chord_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
except NotImplementedError as e:
raise pytest.skip(e.args[0])
sig = chain(
tasks.return_nested_signature_chord_chord.s(),
tasks.rebuild_signature.s()
)
sig.delay().get(timeout=TIMEOUT)
|
test_signature_serialization
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 48511,
"end": 48664
}
|
class ____(CombineSeries):
_parameters = CombineSeries._parameters + ["overwrite"]
_defaults = {"fill_value": None, "overwrite": True}
|
CombineFrame
|
python
|
scikit-learn__scikit-learn
|
sklearn/tests/test_base.py
|
{
"start": 1232,
"end": 1358
}
|
class ____(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
|
MyEstimator
|
python
|
numpy__numpy
|
numpy/_core/_internal.py
|
{
"start": 6325,
"end": 7214
}
|
class ____:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('n').char
if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
|
dummy_ctype
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 105733,
"end": 106195
}
|
class ____(str, Enum):
"""
State of the single shard within a replica set.
"""
def __str__(self) -> str:
return str(self.value)
ACTIVE = "Active"
DEAD = "Dead"
PARTIAL = "Partial"
INITIALIZING = "Initializing"
LISTENER = "Listener"
PARTIALSNAPSHOT = "PartialSnapshot"
RECOVERY = "Recovery"
RESHARDING = "Resharding"
RESHARDINGSCALEDOWN = "ReshardingScaleDown"
ACTIVEREAD = "ActiveRead"
|
ReplicaState
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_volume_projection.py
|
{
"start": 383,
"end": 7907
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'cluster_trust_bundle': 'V1ClusterTrustBundleProjection',
'config_map': 'V1ConfigMapProjection',
'downward_api': 'V1DownwardAPIProjection',
'pod_certificate': 'V1PodCertificateProjection',
'secret': 'V1SecretProjection',
'service_account_token': 'V1ServiceAccountTokenProjection'
}
attribute_map = {
'cluster_trust_bundle': 'clusterTrustBundle',
'config_map': 'configMap',
'downward_api': 'downwardAPI',
'pod_certificate': 'podCertificate',
'secret': 'secret',
'service_account_token': 'serviceAccountToken'
}
def __init__(self, cluster_trust_bundle=None, config_map=None, downward_api=None, pod_certificate=None, secret=None, service_account_token=None, local_vars_configuration=None): # noqa: E501
"""V1VolumeProjection - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._cluster_trust_bundle = None
self._config_map = None
self._downward_api = None
self._pod_certificate = None
self._secret = None
self._service_account_token = None
self.discriminator = None
if cluster_trust_bundle is not None:
self.cluster_trust_bundle = cluster_trust_bundle
if config_map is not None:
self.config_map = config_map
if downward_api is not None:
self.downward_api = downward_api
if pod_certificate is not None:
self.pod_certificate = pod_certificate
if secret is not None:
self.secret = secret
if service_account_token is not None:
self.service_account_token = service_account_token
@property
def cluster_trust_bundle(self):
"""Gets the cluster_trust_bundle of this V1VolumeProjection. # noqa: E501
:return: The cluster_trust_bundle of this V1VolumeProjection. # noqa: E501
:rtype: V1ClusterTrustBundleProjection
"""
return self._cluster_trust_bundle
@cluster_trust_bundle.setter
def cluster_trust_bundle(self, cluster_trust_bundle):
"""Sets the cluster_trust_bundle of this V1VolumeProjection.
:param cluster_trust_bundle: The cluster_trust_bundle of this V1VolumeProjection. # noqa: E501
:type: V1ClusterTrustBundleProjection
"""
self._cluster_trust_bundle = cluster_trust_bundle
@property
def config_map(self):
"""Gets the config_map of this V1VolumeProjection. # noqa: E501
:return: The config_map of this V1VolumeProjection. # noqa: E501
:rtype: V1ConfigMapProjection
"""
return self._config_map
@config_map.setter
def config_map(self, config_map):
"""Sets the config_map of this V1VolumeProjection.
:param config_map: The config_map of this V1VolumeProjection. # noqa: E501
:type: V1ConfigMapProjection
"""
self._config_map = config_map
@property
def downward_api(self):
"""Gets the downward_api of this V1VolumeProjection. # noqa: E501
:return: The downward_api of this V1VolumeProjection. # noqa: E501
:rtype: V1DownwardAPIProjection
"""
return self._downward_api
@downward_api.setter
def downward_api(self, downward_api):
"""Sets the downward_api of this V1VolumeProjection.
:param downward_api: The downward_api of this V1VolumeProjection. # noqa: E501
:type: V1DownwardAPIProjection
"""
self._downward_api = downward_api
@property
def pod_certificate(self):
"""Gets the pod_certificate of this V1VolumeProjection. # noqa: E501
:return: The pod_certificate of this V1VolumeProjection. # noqa: E501
:rtype: V1PodCertificateProjection
"""
return self._pod_certificate
@pod_certificate.setter
def pod_certificate(self, pod_certificate):
"""Sets the pod_certificate of this V1VolumeProjection.
:param pod_certificate: The pod_certificate of this V1VolumeProjection. # noqa: E501
:type: V1PodCertificateProjection
"""
self._pod_certificate = pod_certificate
@property
def secret(self):
"""Gets the secret of this V1VolumeProjection. # noqa: E501
:return: The secret of this V1VolumeProjection. # noqa: E501
:rtype: V1SecretProjection
"""
return self._secret
@secret.setter
def secret(self, secret):
"""Sets the secret of this V1VolumeProjection.
:param secret: The secret of this V1VolumeProjection. # noqa: E501
:type: V1SecretProjection
"""
self._secret = secret
@property
def service_account_token(self):
"""Gets the service_account_token of this V1VolumeProjection. # noqa: E501
:return: The service_account_token of this V1VolumeProjection. # noqa: E501
:rtype: V1ServiceAccountTokenProjection
"""
return self._service_account_token
@service_account_token.setter
def service_account_token(self, service_account_token):
"""Sets the service_account_token of this V1VolumeProjection.
:param service_account_token: The service_account_token of this V1VolumeProjection. # noqa: E501
:type: V1ServiceAccountTokenProjection
"""
self._service_account_token = service_account_token
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumeProjection):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumeProjection):
return True
return self.to_dict() != other.to_dict()
|
V1VolumeProjection
|
python
|
yaml__pyyaml
|
lib/yaml/nodes.py
|
{
"start": 1328,
"end": 1385
}
|
class ____(CollectionNode):
id = 'sequence'
|
SequenceNode
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 90760,
"end": 90837
}
|
class ____(BinOpSeries):
operation = M.lt
_operator_repr = "<"
|
LTSeries
|
python
|
pytorch__pytorch
|
test/test_cuda.py
|
{
"start": 207988,
"end": 227582
}
|
class ____(TestCase):
def _setup_mempool_limited_memory_test(self, additional_allowed_memory_in_mb):
device = torch.device("cuda:0")
self.init_fraction = torch.cuda.get_per_process_memory_fraction()
torch.cuda.memory.empty_cache()
mb = 1024 * 1024
_, all_memory = torch.cuda.memory.mem_get_info(device)
pre_reserved = torch.cuda.memory_reserved(device)
total_allowed = additional_allowed_memory_in_mb * mb + pre_reserved
fraction_allowed = total_allowed / all_memory
torch.cuda.memory.set_per_process_memory_fraction(fraction_allowed, device)
dtype = torch.int8
return device, dtype
def _teardown_mempool_limited_memory_test(self):
torch.cuda.memory.empty_cache()
torch.cuda.memory.set_per_process_memory_fraction(self.init_fraction)
def test_mempool_id(self):
pool1 = torch.cuda.graph_pool_handle()
pool2 = torch.cuda.MemPool().id
# first value of id in a user created pool is always zero
self.assertEqual(pool1[0] == 0, pool2[0] == 0)
# each call to torch.cuda.graph_pool_handle() or torch.cuda.MemPool()
# increments the id
self.assertTrue(abs(pool2[1] - pool1[1]) > 0)
def get_dummy_allocator(self, check_vars):
dummy_allocator_source_vars = """
#include <torch/extension.h>
#include <ATen/cuda/Exceptions.h>
#include <cuda_runtime_api.h>
extern "C" {
C10_EXPORT int called_dummy_alloc = 0;
C10_EXPORT int called_dummy_free = 0;
// Note that windows needs __declspec(dllexport): https://stackoverflow.com/a/24575865
C10_EXPORT void* dummy_alloc(size_t size, int device, void* stream) {
called_dummy_alloc = 123;
void* ptr;
C10_CUDA_CHECK(cudaMallocManaged(&ptr, size));
return ptr;
}
C10_EXPORT void dummy_free(void* ptr, size_t size, int device, void* stream) {
called_dummy_free = 321;
C10_CUDA_CHECK(cudaFree(ptr));
}
}
"""
dummy_allocator_source_no_vars = """
#include <torch/extension.h>
#include <ATen/cuda/Exceptions.h>
#include <cuda_runtime_api.h>
extern "C" {
// Note that windows needs __declspec(dllexport): https://stackoverflow.com/a/24575865
C10_EXPORT void* dummy_alloc(size_t size, int device, void* stream) {
void* ptr;
C10_CUDA_CHECK(cudaMallocManaged(&ptr, size));
return ptr;
}
C10_EXPORT void dummy_free(void* ptr, size_t size, int device, void* stream) {
C10_CUDA_CHECK(cudaFree(ptr));
}
}
"""
from torch.utils.cpp_extension import load_inline
dummy_allocator_libname = "dummy_allocator"
dummy_allocator = load_inline(
name=dummy_allocator_libname,
cpp_sources=dummy_allocator_source_vars
if check_vars
else dummy_allocator_source_no_vars,
is_python_module=False,
keep_intermediates=False,
verbose=True,
with_cuda=True,
)
allocator = torch.cuda.memory.CUDAPluggableAllocator(
dummy_allocator,
"dummy_alloc",
"dummy_free",
)
return allocator, dummy_allocator
def test_mempool_empty_cache(self):
torch.cuda.empty_cache()
pool = torch.cuda.MemPool()
x = torch.empty(1024, 1024, device="cuda")
with torch.cuda.use_mem_pool(pool):
y = torch.empty(1024, 1024, device="cuda")
del y
del x
del pool
segments = torch.cuda.memory._snapshot()["segments"]
self.assertTrue(len(segments) > 0, "expected more than one segment")
@serialTest()
def test_mempool_empty_cache_inactive(self):
torch.cuda.empty_cache()
allocator, dummy_allocator = self.get_dummy_allocator(check_vars=True)
alloc_lib = ctypes.CDLL(dummy_allocator)
called_dummy_alloc = ctypes.c_int.in_dll(alloc_lib, "called_dummy_alloc")
called_dummy_free = ctypes.c_int.in_dll(alloc_lib, "called_dummy_free")
self.assertEqual(called_dummy_alloc.value, 0)
self.assertEqual(called_dummy_free.value, 0)
def f():
pool = torch.cuda.MemPool(allocator.allocator())
# allocate memory with ncclMemAlloc
with torch.cuda.use_mem_pool(pool):
x = torch.arange(1024 * 1024 * 2, device="cuda")
# Note: pool will be destroyed upon function return, but x, which
# was allocated via the pool is still alive.
return x
x = f()
self.assertEqual(called_dummy_alloc.value, 123)
self.assertEqual(called_dummy_free.value, 0)
del x
torch.cuda.empty_cache()
self.assertEqual(called_dummy_free.value, 321)
def test_mempool_with_allocator(self):
pool = torch.cuda.MemPool()
# MemPool doesn't have an allocator by default
self.assertEqual(pool.allocator, None)
allocator, dummy_allocator = self.get_dummy_allocator(check_vars=True)
pool = torch.cuda.MemPool(allocator.allocator())
# pool should point to the same allocator as the one passed into it
self.assertEqual(allocator.allocator(), pool.allocator)
# pool's use count should be 1 at this point as MemPool object
# holds a reference
self.assertEqual(pool.use_count(), 1)
# no allocations happened yet, so called_dummy_alloc and
# called_dummy_free should be 0
alloc_lib = ctypes.CDLL(dummy_allocator)
called_dummy_alloc = ctypes.c_int.in_dll(alloc_lib, "called_dummy_alloc")
called_dummy_free = ctypes.c_int.in_dll(alloc_lib, "called_dummy_free")
self.assertEqual(called_dummy_alloc.value, 0)
self.assertEqual(called_dummy_free.value, 0)
nelem_1mb = 1024 * 1024 // 4
with torch.cuda.use_mem_pool(pool):
out_0 = torch.randn(nelem_1mb, device="cuda")
# pool's use count should be 2 at this point as use_mem_pool
# holds a reference
self.assertEqual(pool.use_count(), 2)
# pool's use count should be back to 1 at this point as use_mem_pool
# released its reference
self.assertEqual(pool.use_count(), 1)
# called_dummy_alloc should be 123 if dummy_alloc was used to allocate
# out tensor
self.assertEqual(called_dummy_alloc.value, 123)
out_non_pool = torch.empty(nelem_1mb, device="cuda")
with torch.cuda.use_mem_pool(pool):
# pool should have 1 segment since we made a small allocation (1 MB)
# above and so the CUDACachingAllocator packed it into a 2 MB buffer
self.assertEqual(len(pool.snapshot()), 1)
out_1 = torch.randn(nelem_1mb, device="cuda")
# pool should still have 1 segment since we made another small allocation
# (1 MB) that got packed into the existing 2 MB buffer
self.assertEqual(len(pool.snapshot()), 1)
out_2 = torch.randn(nelem_1mb, device="cuda")
# pool now should have 2 segments since the CUDACachingAllocator had
# to make a new 2 MB buffer to accommodate out_2
self.assertEqual(len(pool.snapshot()), 2)
self.assertEqual(len(pool.snapshot()), 2)
del out_0, out_1, out_2
# pool's destructor calls emptyCache()
del pool
# called_dummy_free should be 321 if dummy_free was used to deallocate
# out tensor
self.assertEqual(called_dummy_free.value, 321)
@serialTest()
def test_mempool_limited_memory_with_allocator(self):
allocator, _ = self.get_dummy_allocator(check_vars=False)
pool_do_not_use = torch.cuda.MemPool(allocator.allocator())
pool_use = torch.cuda.MemPool(allocator.allocator(), use_on_oom=True)
nelem_1mb = 1024 * 1024 // 4
self._setup_mempool_limited_memory_test(80)
# remaining free mem: 80 mb
# mempool_use [] 0 mb
# mempool_do_not_use [] 0 mb
# default pool [] 0 mb
with torch.cuda.use_mem_pool(pool_do_not_use):
a = torch.randn(40 * nelem_1mb, device="cuda")
with torch.cuda.use_mem_pool(pool_use):
b = torch.randn(40 * nelem_1mb, device="cuda")
a_dataptr = a.data_ptr()
b_dataptr = b.data_ptr()
# remaining free mem: 0 mb
# mempool_do_not_use [aaaa] 40 mb
# mempool_use [bbbb] 40 mb
# default pool [] 0 mb
with self.assertRaises(torch.OutOfMemoryError):
# out of memory
c = torch.randn(40 * nelem_1mb, device="cuda")
del a, b
# remaining free mem: 0 mb
# mempool_do_not_use [____] 40 mb
# mempool_use [____] 40 mb
# default pool [] 0 mb
# c should not oom and instead can use mempool_use as fallback
c = torch.randn(30 * nelem_1mb, device="cuda")
c_dataptr = c.data_ptr()
# remaining free mem: 0 mb
# mempool_do_not_use [____] 40 mb
# mempool_use [ccc_] 40 mb
# default pool [] 0 mb
with self.assertRaises(torch.OutOfMemoryError):
# out of memory since can't use mempool_do_not_use
d = torch.randn(30 * nelem_1mb, device="cuda")
del c
# remaining free mem: 0 mb
# mempool_do_not_use [____] 40 mb
# mempool_use [____] 40 mb
# default pool [] 0 mb
# expect that we used same memory address for both a and c
self.assertEqual(b_dataptr, c_dataptr)
# make sure we can still use mempool_use as intended after c is deleted
with torch.cuda.use_mem_pool(pool_use):
e = torch.randn(20 * nelem_1mb, device="cuda")
# remaining free mem: 0 mb
# mempool_do_not_use [____] 40 mb
# mempool_use [ee__] 40 mb
# default pool [] 0 mb
e_dataptr = e.data_ptr()
del e
self.assertEqual(e_dataptr, c_dataptr)
# pool's destructor calls emptyCache()
del pool_use, pool_do_not_use
self._teardown_mempool_limited_memory_test()
def test_mempool_multithread(self):
pool_ids = []
def create_mempool_and_make_active():
pool = torch.cuda.MemPool()
pool_ids.extend([pool.id])
num_threads = 4
threads = [
threading.Thread(target=create_mempool_and_make_active)
for t in range(num_threads)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# each thread should create a unique mempool, since
# mempool id creation is atomic
self.assertEqual(len(set(pool_ids)), 4)
def test_mempool_emptycache_multithread(self):
num_threads = 4
def my_function(pool):
with torch.cuda.use_mem_pool(pool):
x = torch.randn(4, device="cuda")
del x
torch.cuda.empty_cache()
pools = [torch.cuda.MemPool() for _ in range(num_threads)]
threads = [
threading.Thread(target=my_function, args=(pools[i],))
for i in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
# empty_cache should have done nothing under mempool context
for p in pools:
s = p.snapshot()
self.assertEqual(len(s), 1, "Expected to have a single segment")
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graph_capture_reclaim_2_streams(self):
torch.cuda.memory._set_allocator_settings(
"graph_capture_record_stream_reuse:True"
)
torch.cuda.empty_cache()
s1, s2 = torch.cuda.Stream(), torch.cuda.Stream()
g = torch.cuda.CUDAGraph(keep_graph=True)
torch.cuda.synchronize()
with torch.cuda.stream(s1):
g.capture_begin()
# A sink node allocated up-front so it doesn't steal data1's block later.
sink1 = torch.empty(8, device="cuda")
# Source tensor on s1; this block is the reuse candidate.
data1 = torch.empty(8, device="cuda")
data1_ptr = data1.data_ptr()
# Fork: do real work on s2 that READS data1 and writes to its own buffer.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
buf2 = torch.empty_like(data1)
torch.add(data1, 2.0, out=buf2)
data1.record_stream(s2)
del data1
# BEFORE JOIN: must NOT reuse
data2 = torch.empty(8, device="cuda")
data2_ptr = data2.data_ptr()
# Join s2 -> s1 and add a sink node on s1.
s1.wait_stream(s2)
sink1.fill_(1.0)
# AFTER JOIN: now reuse is allowed
data3 = torch.empty(8, device="cuda")
data3_ptr = data3.data_ptr()
g.capture_end()
torch.cuda.synchronize()
# No reuse before join; reuse after join.
self.assertNotEqual(data1_ptr, data2_ptr)
self.assertEqual(data1_ptr, data3_ptr)
torch.cuda.memory._set_allocator_settings(
"graph_capture_record_stream_reuse:False"
)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graph_capture_reclaim_4_streams(self):
torch.cuda.memory._set_allocator_settings(
"graph_capture_record_stream_reuse:True"
)
torch.cuda.empty_cache()
s1, s2, s3, s4 = (
torch.cuda.Stream(),
torch.cuda.Stream(),
torch.cuda.Stream(),
torch.cuda.Stream(),
)
g = torch.cuda.CUDAGraph(keep_graph=True)
torch.cuda.synchronize()
with torch.cuda.stream(s1):
g.capture_begin()
# Source tensor allocated on s1. This block is the candidate for reuse.
data1 = torch.ones(8, device="cuda")
data1_ptr = data1.data_ptr()
sink1 = torch.empty_like(data1)
sink3 = torch.empty_like(data1)
s2.wait_stream(s1)
with torch.cuda.stream(s2):
buf2 = torch.empty_like(data1)
torch.add(data1, 2.0, out=buf2)
data1.record_stream(s2)
s3.wait_stream(s1)
with torch.cuda.stream(s3):
buf3 = torch.empty_like(data1)
torch.add(data1, 3.0, out=buf3)
data1.record_stream(s3)
s4.wait_stream(s1)
with torch.cuda.stream(s4):
buf4 = torch.empty_like(data1)
torch.add(data1, 4.0, out=buf4)
data1.record_stream(s4)
# Free data1 inside capture; allocator may reuse later when it's safe.
del data1
# PARTIAL JOINS: should NOT allow reuse yet
# Join s2 -> s1 and add a sink node on s1.
s1.wait_stream(s2)
sink1.fill_(1.0)
# Join s4 -> s3 and add a sink node on s3.
s3.wait_stream(s4)
with torch.cuda.stream(s3):
sink3.fill_(3.0)
sink3.record_stream(s3)
# At this point, s1 and s3 subgraphs are NOT yet joined together.
# Allocating data2 here must NOT reuse data1's block.
data2 = torch.empty(8, device="cuda")
data2_ptr = data2.data_ptr()
# FINAL JOIN: now reuse is allowed
# Join s3 -> s1 and add a sink node on s1.
s1.wait_stream(s3)
sink1.add_(sink3)
# Now allocator should safely reuse data1's block.
data3 = torch.empty(8, device="cuda")
data3_ptr = data3.data_ptr()
g.capture_end()
torch.cuda.synchronize()
# No reuse before full join; reuse after full join.
self.assertNotEqual(data1_ptr, data2_ptr)
self.assertEqual(data1_ptr, data3_ptr)
torch.cuda.memory._set_allocator_settings(
"graph_capture_record_stream_reuse:False"
)
@skipIfRocm(msg="expandable_segments mode is not supported on ROCm")
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Load_inline doesn't work in fbcode")
def test_mempool_expandable(self):
torch.cuda.memory._set_allocator_settings("expandable_segments:True")
allocator, _ = self.get_dummy_allocator(check_vars=False)
pool = torch.cuda.MemPool(allocator.allocator())
# torch.cuda.MemPool doesn't work with expandable segments
with self.assertRaises(RuntimeError):
nelem_1mb = 1024 * 1024 // 4
with torch.cuda.use_mem_pool(pool):
out_0 = torch.randn(nelem_1mb, device="cuda")
torch.cuda.memory._set_allocator_settings("expandable_segments:False")
@serialTest()
def test_mempool_ctx_multithread(self):
torch.cuda.empty_cache()
segments = torch.cuda.memory._snapshot()["segments"]
self.assertEqual(len(segments), 0, "Expected empty pool in the beginning")
nelem = 1024 * 1024
trigger_alloc = threading.Event()
done_allocation = threading.Event()
def main_thread_fn():
pool = torch.cuda.MemPool()
out1 = torch.empty(nelem, dtype=torch.int8, device="cuda")
with torch.cuda.use_mem_pool(pool):
out = torch.empty(nelem, dtype=torch.int8, device="cuda")
del out
trigger_alloc.set()
done_allocation.wait()
def side_thread_fn(segments):
trigger_alloc.wait()
out = torch.empty(nelem, dtype=torch.int8, device="cuda")
s = torch.cuda.memory._snapshot()["segments"]
segments.append(s)
done_allocation.set()
segments = []
main_thread = threading.Thread(target=main_thread_fn)
side_thread = threading.Thread(target=side_thread_fn, args=(segments,))
main_thread.start()
side_thread.start()
main_thread.join(timeout=10)
side_thread.join(timeout=10)
if main_thread.is_alive() or side_thread.is_alive():
# release threads so that they don't hang forever
trigger_alloc.set()
done_allocation.set()
self.fail(
"Test timed out - threads did not complete within the allowed time"
)
self.assertEqual(len(segments), 1, "Expected to have memory snapshot")
self.assertEqual(len(segments[0]), 2, "Expected to have 2 segments allocated")
active = defaultdict(int)
for s in segments[0]:
active[s["segment_pool_id"]] += s["active_size"]
for k, v in active.items():
if k == (0, 0):
self.assertEqual(
v, 2097152, "Expected to have 2MB allocated in the default pool"
)
else:
self.assertEqual(
v, 0, "Expected to have 0 bytes allocated in the custom pool"
)
@unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
@torch.testing._internal.common_utils.markDynamoStrictTest
|
TestMemPool
|
python
|
Netflix__metaflow
|
metaflow/plugins/cards/card_modules/basic.py
|
{
"start": 9386,
"end": 9620
}
|
class ____(ErrorComponent):
def __init__(self, component_name, error_message):
headline = "Render failed of component named `%s`" % component_name
super().__init__(headline, error_message)
|
SerializationErrorComponent
|
python
|
spyder-ide__spyder
|
spyder/plugins/explorer/widgets/remote_explorer.py
|
{
"start": 2062,
"end": 2864
}
|
class ____(QSortFilterProxyModel):
def lessThan(self, left, right):
right_data = self.sourceModel().data(
self.sourceModel().index(right.row(), 0), Qt.UserRole + 1
)
if right_data["type"] == "ACTION":
return self.sortOrder() == Qt.AscendingOrder
left_data = self.sourceModel().data(
self.sourceModel().index(left.row(), 0), Qt.UserRole + 1
)
if left_data["type"] == "ACTION":
return self.sortOrder() == Qt.DescendingOrder
if left_data["type"] == "directory" and right_data["type"] == "file":
return True
if left_data["type"] == "file" and right_data["type"] == "directory":
return False
return super().lessThan(left, right)
|
RemoteQSortFilterProxyModel
|
python
|
django-debug-toolbar__django-debug-toolbar
|
tests/panels/test_custom.py
|
{
"start": 124,
"end": 322
}
|
class ____(Panel):
def title(self):
return "Title with special chars &\"'<>"
@override_settings(
DEBUG=True, DEBUG_TOOLBAR_PANELS=["tests.panels.test_custom.CustomPanel"]
)
|
CustomPanel
|
python
|
neetcode-gh__leetcode
|
python/1958-check-if-move-is-legal.py
|
{
"start": 0,
"end": 872
}
|
class ____:
def checkMove(self, board: List[List[str]], rMove: int, cMove: int, color: str) -> bool:
ROWS, COLS = len(board), len(board[0])
direction = [[1, 0], [-1, 0], [0, 1], [0, -1],
[1, 1], [-1, -1], [1, -1], [-1, 1]]
board[rMove][cMove] = color
def legal(row, col, color, direc):
dr, dc = direc
row, col = row + dr, col + dc
length = 1
while(0 <= row < ROWS and 0 <= col < COLS):
length += 1
if board[row][col] == '.': return False
if board[row][col] == color:
return length >= 3
row, col = row + dr, col + dc
return False
for d in direction:
if legal(rMove, cMove, color, d): return True
return False
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/scalar/timestamp/test_timestamp.py
|
{
"start": 16305,
"end": 17537
}
|
class ____:
def test_conversion(self):
# GH#9255
ts = Timestamp("2000-01-01").as_unit("ns")
result = ts.to_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.to_datetime64()
expected = np.datetime64(ts._value, "ns")
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_to_period_tz_warning(self):
# GH#21333 make sure a warning is issued when timezone
# info is lost
ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
with tm.assert_produces_warning(UserWarning, match="drop timezone information"):
ts.to_period("D")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
# GH#44460
msg = "dtype and copy arguments are ignored"
with pytest.raises(ValueError, match=msg):
ts.to_numpy("M8[s]")
with pytest.raises(ValueError, match=msg):
ts.to_numpy(copy=True)
|
TestTimestampConversion
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.