language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
walkccc__LeetCode
|
solutions/1536. Minimum Swaps to Arrange a Binary Grid/1536.py
|
{
"start": 0,
"end": 647
}
|
class ____:
def minSwaps(self, grid: list[list[int]]) -> int:
n = len(grid)
ans = 0
# suffixZeros[i] := the number of suffix zeros in the i-th row
suffixZeros = [n if 1 not in row else row[::-1].index(1) for row in grid]
for i in range(n):
neededZeros = n - 1 - i
# Get the first row with suffix zeros >= `neededZeros` in suffixZeros[i:..n).
j = next((j for j in range(i, n) if suffixZeros[j] >= neededZeros), -1)
if j == -1:
return -1
# Move the rows[j] to the rows[i].
for k in range(j, i, -1):
suffixZeros[k] = suffixZeros[k - 1]
ans += j - i
return ans
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/responses/tool_param.py
|
{
"start": 6293,
"end": 8151
}
|
class ____(TypedDict, total=False):
type: Required[Literal["image_generation"]]
"""The type of the image generation tool. Always `image_generation`."""
background: Literal["transparent", "opaque", "auto"]
"""Background type for the generated image.
One of `transparent`, `opaque`, or `auto`. Default: `auto`.
"""
input_fidelity: Optional[Literal["high", "low"]]
"""
Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
`low`. Defaults to `low`.
"""
input_image_mask: ImageGenerationInputImageMask
"""Optional mask for inpainting.
Contains `image_url` (string, optional) and `file_id` (string, optional).
"""
model: Literal["gpt-image-1", "gpt-image-1-mini"]
"""The image generation model to use. Default: `gpt-image-1`."""
moderation: Literal["auto", "low"]
"""Moderation level for the generated image. Default: `auto`."""
output_compression: int
"""Compression level for the output image. Default: 100."""
output_format: Literal["png", "webp", "jpeg"]
"""The output format of the generated image.
One of `png`, `webp`, or `jpeg`. Default: `png`.
"""
partial_images: int
"""
Number of partial images to generate in streaming mode, from 0 (default value)
to 3.
"""
quality: Literal["low", "medium", "high", "auto"]
"""The quality of the generated image.
One of `low`, `medium`, `high`, or `auto`. Default: `auto`.
"""
size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
"""The size of the generated image.
One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`.
"""
|
ImageGeneration
|
python
|
astropy__astropy
|
astropy/table/bst.py
|
{
"start": 3452,
"end": 14710
}
|
class ____:
"""
A basic binary search tree in pure Python, used
as an engine for indexing.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
"""
NodeClass = Node
def __init__(self, data, row_index, unique=False):
self.root = None
self.size = 0
self.unique = unique
for key, row in zip(data, row_index):
self.add(tuple(key), row)
def add(self, key: tuple, data: int | None = None) -> None:
"""
Add a key, data pair.
"""
if data is None:
# nothing about this branch conforms to the IndexEngine protocol
data = key
self.size += 1
node = self.NodeClass(key, data)
curr_node = self.root
if curr_node is None:
self.root = node
return
while True:
if node < curr_node:
if curr_node.left is None:
curr_node.left = node
break
curr_node = curr_node.left
elif node > curr_node:
if curr_node.right is None:
curr_node.right = node
break
curr_node = curr_node.right
elif self.unique:
raise ValueError("Cannot insert non-unique value")
else: # add data to node
curr_node.data.extend(node.data)
curr_node.data = sorted(curr_node.data)
return
def find(self, key: tuple) -> Sequence[Integral]:
"""
Return all data values corresponding to a given key.
Parameters
----------
key : tuple
Input key
Returns
-------
data_vals : list
List of rows corresponding to the input key
"""
node, parent = self.find_node(key)
return node.data if node is not None else []
def find_node(self, key):
"""
Find the node associated with the given key.
"""
if self.root is None:
return (None, None)
return self._find_recursive(key, self.root, None)
def shift_left(self, row: int) -> None:
"""
Decrement all rows larger than the given row.
"""
for node in self.traverse():
node.data = [x - 1 if x > row else x for x in node.data]
def shift_right(self, row: int) -> None:
"""
Increment all rows greater than or equal to the given row.
"""
for node in self.traverse():
node.data = [x + 1 if x >= row else x for x in node.data]
def _find_recursive(self, key, node, parent):
try:
if key == node.key:
return (node, parent)
elif key > node.key:
if node.right is None:
return (None, None)
return self._find_recursive(key, node.right, node)
else:
if node.left is None:
return (None, None)
return self._find_recursive(key, node.left, node)
except TypeError: # wrong key type
return (None, None)
def traverse(self, order="inorder"):
"""
Return nodes of the BST in the given order.
Parameters
----------
order : str
The order in which to recursively search the BST.
Possible values are:
"preorder": current node, left subtree, right subtree
"inorder": left subtree, current node, right subtree
"postorder": left subtree, right subtree, current node
"""
if order == "preorder":
return self._preorder(self.root, [])
elif order == "inorder":
return self._inorder(self.root, [])
elif order == "postorder":
return self._postorder(self.root, [])
raise ValueError(f'Invalid traversal method: "{order}"')
def items(self) -> list[tuple[Hashable, list[Integral]]]:
"""
Return BST items in order as (key, data) pairs.
"""
return [(x.key, x.data) for x in self.traverse()]
def sort(self) -> None:
"""
Make row order align with key order.
"""
i = 0
for node in self.traverse():
num_rows = len(node.data)
node.data = list(range(i, i + num_rows))
i += num_rows
def sorted_data(self) -> None:
"""
Return BST rows sorted by key values.
"""
return [x for node in self.traverse() for x in node.data]
def _preorder(self, node, lst):
if node is None:
return lst
lst.append(node)
self._preorder(node.left, lst)
self._preorder(node.right, lst)
return lst
def _inorder(self, node, lst):
if node is None:
return lst
self._inorder(node.left, lst)
lst.append(node)
self._inorder(node.right, lst)
return lst
def _postorder(self, node, lst):
if node is None:
return lst
self._postorder(node.left, lst)
self._postorder(node.right, lst)
lst.append(node)
return lst
def _substitute(self, node, parent, new_node):
if node is self.root:
self.root = new_node
else:
parent.replace(node, new_node)
def remove(self, key: tuple, data: int | None = None) -> bool:
"""
Remove data corresponding to the given key.
Parameters
----------
key : tuple
The key to remove
data : int or None
If None, remove the node corresponding to the given key.
If not None, remove only the given data value from the node.
Returns
-------
successful : bool
True if removal was successful, false otherwise
"""
node, parent = self.find_node(key)
if node is None:
return False
if data is not None:
if data not in node.data:
raise ValueError("Data does not belong to correct node")
elif len(node.data) > 1:
node.data.remove(data)
return True
if node.left is None and node.right is None:
self._substitute(node, parent, None)
elif node.left is None and node.right is not None:
self._substitute(node, parent, node.right)
elif node.right is None and node.left is not None:
self._substitute(node, parent, node.left)
else:
# find largest element of left subtree
curr_node = node.left
parent = node
while curr_node.right is not None:
parent = curr_node
curr_node = curr_node.right
self._substitute(curr_node, parent, curr_node.left)
node.set(curr_node)
self.size -= 1
return True
def is_valid(self) -> bool:
"""
Returns whether this is a valid BST.
"""
return self._is_valid(self.root)
def _is_valid(self, node) -> bool:
if node is None:
return True
return (
(node.left is None or node.left <= node)
and (node.right is None or node.right >= node)
and self._is_valid(node.left)
and self._is_valid(node.right)
)
def range(
self,
lower: tuple[Hashable, ...] | None,
upper: tuple[Hashable, ...] | None,
bounds: tuple[bool, bool] = (True, True),
) -> list[int]:
"""
Return all nodes with keys in the given range.
Parameters
----------
lower : tuple, None
Lower bound (no lower bound if None)
upper : tuple, None
Upper bound (no upper bound if None)
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
"""
if lower is None:
lower = (MinValue(),)
if upper is None:
upper = (MaxValue(),)
nodes = self.range_nodes(lower, upper, bounds)
return [x for node in nodes for x in node.data]
def range_nodes(self, lower, upper, bounds=(True, True)):
"""
Return nodes in the given range.
"""
if self.root is None:
return []
# op1 is <= or <, op2 is >= or >
op1 = operator.le if bounds[0] else operator.lt
op2 = operator.ge if bounds[1] else operator.gt
return self._range(lower, upper, op1, op2, self.root, [])
def same_prefix(self, val):
"""
Assuming the given value has smaller length than keys, return
nodes whose keys have this value as a prefix.
"""
if self.root is None:
return []
nodes = self._same_prefix(val, self.root, [])
return [x for node in nodes for x in node.data]
def _range(self, lower, upper, op1, op2, node, lst):
if op1(lower, node.key) and op2(upper, node.key):
lst.append(node)
if upper > node.key and node.right is not None:
self._range(lower, upper, op1, op2, node.right, lst)
if lower < node.key and node.left is not None:
self._range(lower, upper, op1, op2, node.left, lst)
return lst
def _same_prefix(self, val, node, lst):
prefix = node.key[: len(val)]
if prefix == val:
lst.append(node)
if prefix <= val and node.right is not None:
self._same_prefix(val, node.right, lst)
if prefix >= val and node.left is not None:
self._same_prefix(val, node.left, lst)
return lst
def __repr__(self):
return f"<{self.__class__.__name__}>"
def _print(self, node, level):
line = "\t" * level + str(node) + "\n"
if node.left is not None:
line += self._print(node.left, level + 1)
if node.right is not None:
line += self._print(node.right, level + 1)
return line
@property
def height(self):
"""
Return the BST height.
"""
return self._height(self.root)
def _height(self, node):
if node is None:
return -1
return max(self._height(node.left), self._height(node.right)) + 1
def replace_rows(self, row_map: "Mapping[int, int]") -> None:
"""
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their nodes deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
"""
for _, data in self.items(): # noqa: PERF102
data[:] = [row_map[x] for x in data if x in row_map]
|
BST
|
python
|
tiangolo__fastapi
|
tests/test_get_model_definitions_formfeed_escape.py
|
{
"start": 489,
"end": 4428
}
|
class ____(BaseModel):
id: str
address: Address
app = FastAPI()
client = TestClient(app)
@app.get("/facilities/{facility_id}")
def get_facility(facility_id: str) -> Facility: ...
openapi_schema = {
"components": {
"schemas": {
"Address": {
# NOTE: the description of this model shows only the public-facing text, before the `\f` in docstring
"description": "This is a public description of an Address\n",
"properties": {
"city": {"title": "City", "type": "string"},
"line_1": {"title": "Line 1", "type": "string"},
"state_province": {"title": "State Province", "type": "string"},
},
"required": ["line_1", "city", "state_province"],
"title": "Address",
"type": "object",
},
"Facility": {
"properties": {
"address": {"$ref": "#/components/schemas/Address"},
"id": {"title": "Id", "type": "string"},
},
"required": ["id", "address"],
"title": "Facility",
"type": "object",
},
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"title": "Detail",
"type": "array",
}
},
"title": "HTTPValidationError",
"type": "object",
},
"ValidationError": {
"properties": {
"loc": {
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
"title": "Location",
"type": "array",
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
"required": ["loc", "msg", "type"],
"title": "ValidationError",
"type": "object",
},
}
},
"info": {"title": "FastAPI", "version": "0.1.0"},
"openapi": "3.1.0",
"paths": {
"/facilities/{facility_id}": {
"get": {
"operationId": "get_facility_facilities__facility_id__get",
"parameters": [
{
"in": "path",
"name": "facility_id",
"required": True,
"schema": {"title": "Facility Id", "type": "string"},
}
],
"responses": {
"200": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Facility"}
}
},
"description": "Successful Response",
},
"422": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
"description": "Validation Error",
},
},
"summary": "Get Facility",
}
}
},
}
def test_openapi_schema():
"""
Sanity check to ensure our app's openapi schema renders as we expect
"""
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
|
Facility
|
python
|
Netflix__metaflow
|
test/extensions/packages/card_via_extinit/metaflow_extensions/card_via_extinit/plugins/cards/card_b/__init__.py
|
{
"start": 42,
"end": 359
}
|
class ____(MetaflowCard):
type = "card_ext_init_b"
def __init__(self, options={"key": "task"}, **kwargs):
self._key = options["key"] if "key" in options else "task"
def render(self, task):
task_data = task[self._key].data
return "%s" % task_data
CARDS = [TestMockCard]
|
TestMockCard
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/concepts/types/object_type.py
|
{
"start": 70,
"end": 433
}
|
class ____:
def __init__(self, num):
assert num % 2 == 0
self.num = num
EvenDagsterType = PythonObjectDagsterType(EvenType, name="EvenDagsterType")
# end_object_type
# start_use_object_type
@op
def double_even(even_num: EvenDagsterType) -> EvenDagsterType: # type: ignore
return EvenType(even_num.num * 2)
# end_use_object_type
|
EvenType
|
python
|
getsentry__sentry
|
src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py
|
{
"start": 12854,
"end": 13007
}
|
class ____(StaticStringIndexer):
def __init__(self) -> None:
super().__init__(CachingIndexer(indexer_cache, PGStringIndexerV2()))
|
PostgresIndexer
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/interpreter_test.py
|
{
"start": 20988,
"end": 26413
}
|
class ____(test_util.TensorFlowTestCase):
def setUp(self):
super(InterpreterDelegateTest, self).setUp()
self._delegate_file = resource_loader.get_path_to_datafile(
'testdata/test_delegate.so')
self._model_file = resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite')
# Load the library to reset the counters.
library = ctypes.pydll.LoadLibrary(self._delegate_file)
library.initialize_counters()
def _TestInterpreter(self, model_path, options=None):
"""Test wrapper function that creates an interpreter with the delegate."""
delegate = interpreter_wrapper.load_delegate(self._delegate_file, options)
return interpreter_wrapper.Interpreter(
model_path=model_path, experimental_delegates=[delegate])
def testDelegate(self):
"""Tests the delegate creation and destruction."""
interpreter = self._TestInterpreter(model_path=self._model_file)
lib = interpreter._delegates[0]._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
del interpreter
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
def testMultipleInterpreters(self):
delegate = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
interpreter_a = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 1)
interpreter_b = interpreter_wrapper.Interpreter(
model_path=self._model_file, experimental_delegates=[delegate])
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del delegate
del interpreter_a
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
del interpreter_b
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 1)
self.assertEqual(lib.get_num_delegates_invoked(), 2)
def testDestructionOrder(self):
"""Make sure internal _interpreter object is destroyed before delegate."""
self.skipTest('TODO(b/142136355): fix flakiness and re-enable')
# Track which order destructions were doned in
destructions = []
def register_destruction(x):
destructions.append(x if isinstance(x, str) else x.decode('utf-8'))
return 0
# Make a wrapper for the callback so we can send this to ctypes
delegate = interpreter_wrapper.load_delegate(self._delegate_file)
# Make an interpreter with the delegate
interpreter = interpreter_wrapper.Interpreter(
model_path=resource_loader.get_path_to_datafile(
'testdata/permute_float.tflite'),
experimental_delegates=[delegate])
class InterpreterDestroyCallback:
def __del__(self):
register_destruction('interpreter')
interpreter._interpreter.stuff = InterpreterDestroyCallback()
# Destroy both delegate and interpreter
library = delegate._library
prototype = ctypes.CFUNCTYPE(ctypes.c_int, (ctypes.c_char_p))
library.set_destroy_callback(prototype(register_destruction))
del delegate
del interpreter
library.set_destroy_callback(None)
# check the interpreter was destroyed before the delegate
self.assertEqual(destructions, ['interpreter', 'test_delegate'])
def testOptions(self):
delegate_a = interpreter_wrapper.load_delegate(self._delegate_file)
lib = delegate_a._library
self.assertEqual(lib.get_num_delegates_created(), 1)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 0)
delegate_b = interpreter_wrapper.load_delegate(
self._delegate_file, options={
'unused': False,
'options_counter': 2
})
lib = delegate_b._library
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 0)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
del delegate_a
del delegate_b
self.assertEqual(lib.get_num_delegates_created(), 2)
self.assertEqual(lib.get_num_delegates_destroyed(), 2)
self.assertEqual(lib.get_num_delegates_invoked(), 0)
self.assertEqual(lib.get_options_counter(), 2)
def testFail(self):
with self.assertRaisesRegex(
# Due to exception chaining in PY3, we can't be more specific here and
# check that the phrase 'Fail argument sent' is present.
ValueError, 'Failed to load delegate from'):
interpreter_wrapper.load_delegate(
self._delegate_file, options={'fail': 'fail'})
|
InterpreterDelegateTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_resolver.py
|
{
"start": 823,
"end": 2253
}
|
class ____(TestCase):
def setUp(self):
self.owner = create_user(username="owner", password="test")
self.tester = create_user(username="tester", password="test")
self.pip = fixture.get(
Project,
slug="pip",
users=[self.owner],
main_language_project=None,
)
self.version = self.pip.versions.first()
self.subproject = fixture.get(
Project,
slug="sub",
language="ja",
users=[self.owner],
main_language_project=None,
)
self.subproject_version = self.subproject.versions.first()
self.translation = fixture.get(
Project,
slug="trans",
language="ja",
users=[self.owner],
main_language_project=None,
)
self.translation_version = self.translation.versions.first()
self.pip.add_subproject(self.subproject)
self.pip.translations.add(self.translation)
self.subproject_translation = fixture.get(
Project,
slug="subproject-translation",
language="es",
users=[self.owner],
)
self.subproject_translation_version = (
self.subproject_translation.versions.first()
)
self.subproject.translations.add(self.subproject_translation)
self.resolver = Resolver()
|
ResolverBase
|
python
|
doocs__leetcode
|
solution/1000-1099/1019.Next Greater Node In Linked List/Solution.py
|
{
"start": 151,
"end": 614
}
|
class ____:
def nextLargerNodes(self, head: Optional[ListNode]) -> List[int]:
nums = []
while head:
nums.append(head.val)
head = head.next
stk = []
n = len(nums)
ans = [0] * n
for i in range(n - 1, -1, -1):
while stk and stk[-1] <= nums[i]:
stk.pop()
if stk:
ans[i] = stk[-1]
stk.append(nums[i])
return ans
|
Solution
|
python
|
Netflix__metaflow
|
metaflow/plugins/aws/secrets_manager/aws_secrets_manager_secrets_provider.py
|
{
"start": 444,
"end": 594
}
|
class ____(MetaflowException):
"""Raised when the response from AWS Secrets Manager contains duplicate keys"""
|
MetaflowAWSSecretsManagerDuplicateKey
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 73428,
"end": 73953
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, since: str, api_key: str):
"""Airbyte Source for Delighted.
Args:
name (str): The name of the destination.
since (str): The date from which you'd like to replicate the data
api_key (str): A Delighted API key.
"""
self.since = check.str_param(since, "since")
self.api_key = check.str_param(api_key, "api_key")
super().__init__("Delighted", name)
|
DelightedSource
|
python
|
getsentry__sentry
|
src/sentry/status_checks/warnings.py
|
{
"start": 190,
"end": 1170
}
|
class ____(StatusCheck):
def __init__(self, warning_set: WarningSet) -> None:
self.__warning_set = warning_set
def check(self) -> list[Problem]:
if self.__warning_set:
return [
Problem(
"There {} {} {} with your system configuration.".format(
"are" if len(self.__warning_set) > 1 else "is",
len(self.__warning_set),
"issues" if len(self.__warning_set) > 1 else "issue",
),
severity=Problem.SEVERITY_WARNING,
# We need this manual URL building as this page is moved to react
# and only the top-level entrypoint is defined and addressable in
# our backend Django app.
url=urljoin(reverse("sentry-admin-overview"), "status/warnings/"),
)
]
else:
return []
|
WarningStatusCheck
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 109036,
"end": 111874
}
|
class ____(Expr):
_projection_passthrough = False
_expr_cls: AnyType | None = None
def _divisions(self):
if {df.npartitions for df in self.args} == {1}:
divs = []
for df in self.args:
divs.extend(list(df.divisions))
try:
return min(divs), max(divs)
except TypeError:
# either unknown divisions or int-str mix
return None, None
return calc_divisions_for_align(*self.args)
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection) and self._projection_passthrough:
return plain_column_projection(self, parent, dependents)
@functools.cached_property
def args(self):
dfs = [op for op in self.operands if isinstance(op, Expr)]
return [op for op in dfs if not is_broadcastable(dfs, op)]
def _lower(self):
# This can be expensive when something that has expensive division
# calculation is in the Expression
dfs = self.args
if (
len(dfs) == 1
or all(
dfs[0].divisions == df.divisions and df.known_divisions for df in dfs
)
or len(self.divisions) == 2
and max(map(lambda x: len(x.divisions), dfs)) == 2
):
return self._expr_cls(*self.operands)
elif self.divisions[0] is None:
# We have to shuffle
npartitions = max(df.npartitions for df in dfs)
dtypes = {df._meta.index.dtype for df in dfs}
if not _are_dtypes_shuffle_compatible(dtypes):
raise TypeError(
"DataFrames are not aligned. We need to shuffle to align partitions "
"with each other. This is not possible because the indexes of the "
f"DataFrames have differing dtypes={dtypes}. Please ensure that "
"all Indexes have the same dtype or align manually for this to "
"work."
)
from dask.dataframe.dask_expr._shuffle import RearrangeByColumn
args = [
(
RearrangeByColumn(df, None, npartitions, index_shuffle=True)
if isinstance(df, Expr)
else df
)
for df in self.operands
]
return self._expr_cls(*args)
args = maybe_align_partitions(*self.operands, divisions=self.divisions)
return self._expr_cls(*args)
@functools.cached_property
def _meta(self):
return self._expr_cls(*self.operands)._meta
def _are_dtypes_shuffle_compatible(dtypes):
if len(dtypes) == 1:
return True
return all(pd.api.types.is_numeric_dtype(d) for d in dtypes)
|
MaybeAlignPartitions
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity_ufuncs.py
|
{
"start": 50085,
"end": 51549
}
|
class ____:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
@dataclasses.dataclass
|
TestUfuncOuter
|
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 8230,
"end": 8273
}
|
class ____(XsdInt):
pass
|
ST_DecimalNumber
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-xor-of-two-non-overlapping-subtrees.py
|
{
"start": 81,
"end": 837
}
|
class ____(object):
def __init__(self, bit_length):
self.__root = {}
self.__bit_length = bit_length
def insert(self, num):
node = self.__root
for i in reversed(xrange(self.__bit_length)):
curr = (num>>i) & 1
if curr not in node:
node[curr] = {}
node = node[curr]
def query(self, num):
if not self.__root:
return -1
node, result = self.__root, 0
for i in reversed(xrange(self.__bit_length)):
curr = (num>>i) & 1
if 1^curr in node:
node = node[1^curr]
result |= 1<<i
else:
node = node[curr]
return result
|
Trie
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_write_sheet_pr.py
|
{
"start": 301,
"end": 1561
}
|
class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_pr() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_pr_fit_to_page(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.fit_to_pages(1, 1)
self.worksheet._write_sheet_pr()
exp = """<sheetPr><pageSetUpPr fitToPage="1"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_pr_tab_color(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.set_tab_color("red")
self.worksheet._write_sheet_pr()
exp = """<sheetPr><tabColor rgb="FFFF0000"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_pr_both(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.set_tab_color("red")
self.worksheet.fit_to_pages(1, 1)
self.worksheet._write_sheet_pr()
exp = """<sheetPr><tabColor rgb="FFFF0000"/><pageSetUpPr fitToPage="1"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteSheetPr
|
python
|
ray-project__ray
|
python/ray/train/tests/lightning_test_utils.py
|
{
"start": 3677,
"end": 5743
}
|
class ____(pl.LightningModule):
def __init__(self, lr: float, layer_1: int, layer_2: int):
super(LightningMNISTClassifier, self).__init__()
self.lr = lr
# mnist images are (1, 28, 28) (channels, width, height)
self.layer_1 = torch.nn.Linear(28 * 28, layer_1)
self.layer_2 = torch.nn.Linear(layer_1, layer_2)
self.layer_3 = torch.nn.Linear(layer_2, 10)
self.accuracy = Accuracy(task="multiclass", num_classes=10, top_k=1)
self.val_acc_list = []
self.val_loss_list = []
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = torch.relu(x)
x = self.layer_2(x)
x = torch.relu(x)
x = self.layer_3(x)
x = torch.log_softmax(x, dim=1)
return x
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.lr)
def training_step(self, train_batch, batch_idx):
x, y = train_batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
acc = self.accuracy(logits, y)
self.log("ptl/train_loss", loss)
self.log("ptl/train_accuracy", acc)
return loss
def validation_step(self, val_batch, batch_idx):
x, y = val_batch
logits = self.forward(x)
loss = F.nll_loss(logits, y)
acc = self.accuracy(logits, y)
self.val_acc_list.append(acc)
self.val_loss_list.append(loss)
return {"val_loss": loss, "val_accuracy": acc}
def on_validation_epoch_end(self):
avg_loss = torch.stack(self.val_loss_list).mean()
avg_acc = torch.stack(self.val_acc_list).mean()
self.log("ptl/val_loss", avg_loss)
self.log("ptl/val_accuracy", avg_acc)
self.val_acc_list.clear()
self.val_loss_list.clear()
def predict_step(self, batch, batch_idx, dataloader_idx=None):
x = batch
logits = self.forward(x)
return torch.argmax(logits, dim=-1)
|
LightningMNISTClassifier
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/metadata.py
|
{
"start": 3778,
"end": 3992
}
|
class ____(graphene.ObjectType):
runId = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "PipelineRunMetadataEntry"
|
GraphenePipelineRunMetadataEntry
|
python
|
google__pytype
|
pytype/errors/error_types.py
|
{
"start": 3553,
"end": 3832
}
|
class ____(InvalidParameters):
"""E.g. an arg "x" is passed to a function that doesn't have an "x" param."""
def __init__(self, sig, passed_args, ctx, extra_keywords):
super().__init__(sig, passed_args, ctx)
self.extra_keywords = tuple(extra_keywords)
|
WrongKeywordArgs
|
python
|
dask__dask
|
dask/cache.py
|
{
"start": 206,
"end": 1980
}
|
class ____(Callback):
"""Use cache for computation
Examples
--------
>>> cache = Cache(1e9) # doctest: +SKIP
The cache can be used locally as a context manager around ``compute`` or
``get`` calls:
>>> with cache: # doctest: +SKIP
... result = x.compute()
You can also register a cache globally, so that it works for all
computations:
>>> cache.register() # doctest: +SKIP
>>> cache.unregister() # doctest: +SKIP
"""
def __init__(self, cache, *args, **kwargs):
try:
import cachey
except ImportError as ex:
raise ImportError(
f'Cache requires cachey, "{ex}" problem importing'
) from ex
self._nbytes = cachey.nbytes
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state["dependencies"][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
|
Cache
|
python
|
jazzband__django-simple-history
|
simple_history/tests/external/models.py
|
{
"start": 510,
"end": 740
}
|
class ____(models.Model):
history = HistoricalRecords(
inherit=True, app="external", custom_model_name=lambda x: f"Audit{x}"
)
class Meta:
abstract = True
app_label = "external"
|
AbstractExternal3
|
python
|
PyCQA__pyflakes
|
pyflakes/checker.py
|
{
"start": 7048,
"end": 7369
}
|
class ____(Definition):
"""A definition created for all Python builtins."""
def __init__(self, name):
super().__init__(name, None)
def __repr__(self):
return '<{} object {!r} at 0x{:x}>'.format(
self.__class__.__name__,
self.name,
id(self)
)
|
Builtin
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/dagster/subschema/rabbitmq.py
|
{
"start": 336,
"end": 504
}
|
class ____(BaseModel):
enabled: bool
image: ExternalImage
rabbitmq: RabbitMQConfiguration
service: Service
volumePermissions: VolumePermissions
|
RabbitMQ
|
python
|
catalyst-team__catalyst
|
examples/self_supervised/src/runner.py
|
{
"start": 766,
"end": 8956
}
|
class ____(IRunner):
"""IRunner for experiments with contrastive model.
Args:
input_key: key in ``runner.batch`` dict mapping for model input
target_key: key in ``runner.batch`` dict mapping for target
loss_key: key for ``runner.batch_metrics`` to store criterion loss output
augemention_prefix: key for ``runner.batch`` to sample augumentions
projection_prefix: key for ``runner.batch`` to store model projection
embedding_prefix: key for `runner.batch`` to store model embeddings
Abstraction, please check out implementations for more details:
- :py:mod:`catalyst.runners.contrastive.ContrastiveRunner`
.. note::
ISelfSupervisedRunner contains only the logic with batch handling.
ISelfSupervisedRunner logic pseudocode:
.. code-block:: python
batch = {"aug1": tensor, "aug2": tensor, ...}
_, proj1 = model(batch["aug1"])
_, proj2 = model(batch["aug2"])
loss = criterion(proj1, proj2)
batch_metrics["loss_key"] = loss
Examples:
.. code-block:: python
# 1. loader and transforms
transforms = Compose(
[
ToTensor(),
Normalize((0.1307,), (0.3081,)),
torchvision.transforms.RandomCrop((28, 28)),
torchvision.transforms.RandomVerticalFlip(),
torchvision.transforms.RandomHorizontalFlip(),
]
)
mnist = MNIST("./logdir", train=True, download=True, transform=None)
contrastive_mnist = ContrastiveDataset(mnist, transforms=transforms)
train_loader = torch.utils.data.DataLoader(contrastive_mnist, batch_size=BATCH_SIZE)
# 2. model and optimizer
encoder = MnistSimpleNet(out_features=16)
projection_head = nn.Sequential(
nn.Linear(16, 16, bias=False), nn.ReLU(inplace=True), nn.Linear(16, 16, bias=True)
)
class ContrastiveModel(torch.nn.Module):
def __init__(self, model, encoder):
super(ContrastiveModel, self).__init__()
self.model = model
self.encoder = encoder
def forward(self, x):
emb = self.encoder(x)
projection = self.model(emb)
return emb, projection
model = ContrastiveModel(model=projection_head, encoder=encoder)
optimizer = Adam(model.parameters(), lr=LR)
# 3. criterion with triplets sampling
criterion = NTXentLoss(tau=0.1)
callbacks = [
dl.ControlFlowCallback(
dl.CriterionCallback(
input_key="projection_left",
target_key="projection_right",
metric_key="loss"
),
loaders="train",
),
dl.SklearnModelCallback(
feature_key="embedding_left",
target_key="target",
train_loader="train",
valid_loaders="valid",
model_fn=RandomForestClassifier,
predict_method="predict_proba",
predict_key="sklearn_predict",
random_state=RANDOM_STATE,
n_estimators=10,
),
dl.ControlFlowCallback(
dl.AccuracyCallback(
target_key="target", input_key="sklearn_predict", topk=(1, 3)
),
loaders="valid",
),
]
runner = dl.ContrastiveRunner()
logdir = "./logdir"
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
callbacks=callbacks,
loaders={"train": train_loader, "valid": train_loader},
verbose=True,
logdir=logdir,
valid_loader="train",
valid_metric="loss",
minimize_valid_metric=True,
num_epochs=10,
)
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str = "features",
target_key: str = "target",
loss_key: str = "loss",
augemention_prefix: str = "augment",
projection_prefix: str = "projection",
embedding_prefix: str = "embedding",
):
"""Init."""
IRunner.__init__(self)
self._target_key = target_key
self._loss_key = loss_key
self._projection_prefix = projection_prefix
self._augemention_prefix = augemention_prefix
self._embedding_prefix = embedding_prefix
self._input_key = input_key
def _process_batch(self, batch):
if isinstance(batch, (tuple, list)):
assert len(batch) in [3, 4]
if len(batch) == 4:
batch = {
self._input_key: batch[0],
f"{self._augemention_prefix}_left": batch[1],
f"{self._augemention_prefix}_right": batch[2],
self._target_key: batch[3],
}
elif len(batch) == 3:
batch = {
self._input_key: batch[0],
f"{self._augemention_prefix}_left": batch[1],
f"{self._augemention_prefix}_right": batch[2],
}
return batch
def on_experiment_start(self, runner: "IRunner"):
"""on_experiment_start event handler."""
super().on_experiment_start(runner)
self.is_kv_model = False
if isinstance(self.model, (Mapping, nn.ModuleDict)):
self.is_kv_model = True
def _process_input(self, batch: Mapping[str, Any], **kwargs):
if self.is_kv_model:
encoders = [
(encoder_name, self.model[encoder_name]) for encoder_name in self.model
]
else:
encoders = [("", self.model)]
for (encoder_name, encoder) in encoders:
embedding1, projection1 = encoder(
batch[f"{self._augemention_prefix}_left"], **kwargs
)
embedding2, projection2 = encoder(
batch[f"{self._augemention_prefix}_right"], **kwargs
)
origin_embeddings, projection_origin = encoder(
batch[self._input_key], **kwargs
)
prefix = f"{encoder_name}_" if encoder_name else ""
batch = {
**batch,
f"{prefix}{self._projection_prefix}_left": projection1,
f"{prefix}{self._projection_prefix}_right": projection2,
f"{prefix}{self._projection_prefix}_origin": projection_origin,
f"{prefix}{self._embedding_prefix}_left": embedding1,
f"{prefix}{self._embedding_prefix}_right": embedding2,
f"{prefix}{self._embedding_prefix}_origin": origin_embeddings,
}
return batch
def on_batch_start(self, runner: "IRunner"):
"""Event handler."""
self.batch = self._process_batch(self.batch)
super().on_batch_start(runner)
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Forward method for your Runner.
Should not be called directly outside of runner.
If your model has specific interface, override this method to use it
Args:
batch (Mapping[str, Any]): dictionary with data batches
from DataLoaders.
**kwargs: additional parameters to pass to the model
Returns:
dict with model output batch
"""
return self._process_input(batch, **kwargs)
def handle_batch(self, batch: Mapping[str, Any]) -> None:
"""
Inner method to handle specified data batch.
Used to make a train/valid/infer stage during Experiment run.
Args:
batch: dictionary with data batches from DataLoader.
"""
self.batch = {**batch, **self.forward(batch)}
|
ISelfSupervisedRunner
|
python
|
Pylons__pyramid
|
tests/test_path.py
|
{
"start": 2109,
"end": 2900
}
|
class ____(unittest.TestCase):
def _callFUT(self, *arg, **kw):
from pyramid.path import caller_package
return caller_package(*arg, **kw)
def test_it_level_1(self):
import tests
result = self._callFUT(1)
self.assertEqual(result, tests)
def test_it_level_2(self):
import tests
result = self._callFUT(2)
self.assertEqual(result, tests)
def test_it_level_3(self):
import unittest
result = self._callFUT(3)
self.assertEqual(result, unittest)
def test_it_package(self):
import tests
def dummy_caller_module(*arg):
return tests
result = self._callFUT(1, caller_module=dummy_caller_module)
self.assertEqual(result, tests)
|
TestCallerPackage
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py
|
{
"start": 26117,
"end": 27290
}
|
class ____(Benchmark):
r"""
Shubert 1 objective function.
This class defines the Shubert 1 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Shubert01}}(x) = \prod_{i=1}^{n}\left(\sum_{j=1}^{5}
cos(j+1)x_i+j \right )
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -186.7309` for
:math:`x = [-7.0835, 4.8580]` (and many others).
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil#133 is missing a prefactor of j before the cos function.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-7.0835, 4.8580]]
self.fglob = -186.7309
def fun(self, x, *args):
self.nfev += 1
j = atleast_2d(arange(1, 6)).T
y = j * cos((j + 1) * x + j)
return prod(sum(y, axis=0))
|
Shubert01
|
python
|
pypa__pip
|
src/pip/_vendor/rich/prompt.py
|
{
"start": 246,
"end": 336
}
|
class ____(Exception):
"""Exception base class for prompt related errors."""
|
PromptError
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 22834,
"end": 23020
}
|
class ____(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
|
UndeletableObject
|
python
|
tox-dev__tox
|
src/tox/config/loader/ini/replace.py
|
{
"start": 582,
"end": 3911
}
|
class ____(ReplaceReference):
def __init__(self, conf: Config, loader: IniLoader) -> None:
self.conf = conf
self.loader = loader
def __call__(self, value: str, conf_args: ConfigLoadArgs) -> str | None: # noqa: C901
# a return value of None indicates could not replace
pattern = _replace_ref(self.loader.section.prefix or self.loader.section.name)
match = pattern.match(value)
if match:
settings = match.groupdict()
key = settings["key"]
if settings["section"] is None and settings["full_env"]:
settings["section"] = settings["full_env"]
exception: Exception | None = None
try:
for src in self._config_value_sources(settings["env"], settings["section"], conf_args.env_name):
try:
if isinstance(src, SectionProxy):
return self.loader.process_raw(self.conf, conf_args.env_name, src[key])
value = src.load(key, conf_args.chain)
except KeyError as exc: # if fails, keep trying maybe another source can satisfy # noqa: PERF203
exception = exc
else:
as_str, _ = stringify(value)
return as_str.replace("#", r"\#") # escape comment characters as these will be stripped
except Exception as exc: # noqa: BLE001
exception = exc
if exception is not None:
if isinstance(exception, KeyError): # if the lookup failed replace - else keep
default = settings["default"]
if default is not None:
return default
# we cannot raise here as that would mean users could not write factorials:
# depends = {py39,py38}-{,b}
else:
raise exception
return None
def _config_value_sources(
self, env: str | None, section: str | None, current_env: str | None
) -> Iterator[SectionProxy | ConfigSet]:
# if we have an env name specified take only from there
if env is not None and env in self.conf:
yield self.conf.get_env(env)
if section is None:
# if no section specified perhaps it's an unregistered config:
# 1. try first from core conf
yield self.conf.core
# 2. and then fallback to our own environment
if current_env is not None:
yield self.conf.get_env(current_env)
return
# if there's a section, special handle the core section
if section == self.loader.core_section.name:
yield self.conf.core # try via registered configs
value = self.loader.get_section(section) # fallback to section
if value is not None:
yield value
@cache
def _replace_ref(env: str | None) -> Pattern[str]:
return re.compile(
rf"""
(\[(?P<full_env>{re.escape(env or ".*")}(:(?P<env>[^]]+))?|(?P<section>[-\w]+))])? # env/section
(?P<key>[-a-zA-Z0-9_]+) # key
(:(?P<default>.*))? # default value
$
""",
re.VERBOSE,
)
__all__ = [
"ReplaceReferenceIni",
]
|
ReplaceReferenceIni
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/extension1/package.py
|
{
"start": 228,
"end": 745
}
|
class ____(Package):
"""A package which extends another package"""
homepage = "http://www.example.com"
url = "http://www.example.com/extension1-1.0.tar.gz"
extends("extendee")
version("1.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="abcdef0123456789abcdef0123456789")
def install(self, spec, prefix):
mkdirp(prefix.bin)
with open(os.path.join(prefix.bin, "extension1"), "w+", encoding="utf-8") as fout:
fout.write(str(spec.version))
|
Extension1
|
python
|
pytorch__pytorch
|
torch/distributed/_tools/fsdp2_mem_tracker.py
|
{
"start": 3977,
"end": 25408
}
|
class ____(MemTracker):
"""
A ``TorchDispatchMode`` based context manager that extends ``torch.distributed._tools.mem_tracker.MemTracker`` to track
and categorize the peak memory and module-wise memory usage of FSDP modules.
It tracks the peak memory usage across all the devices of all the FSDP modules in the module tree and categorizes
the tensor memory usage as defined by ``_FSDPRefType``. Further, it captures memory `snapshots` at different stages of
the module execution defined by ``_FSDPModState``.
Attributes:
memory_tracking: A weakref key dictionary to store the memory statistics of each module. Each key is a reference
to a module, and each value is a ``_FSDPModMemStats`` object that stores the memory statistics of the module.
Args:
mod (torch.nn.Module): The root FSDP module to be tracked.
optm (torch.optim.Optimizer, optional): The optimizer to be tracked.
Note: Please refer to ``torch.distributed._tools.mem_tracker.MemTracker`` to learn about the limitations.
Example usage
.. code-block:: python
module = ...
optimizer = ...
inp = ...
fmt = FSDPMemTracker(module, optimizer)
fmt.track_inputs((inp,))
with fmt:
optimizer.zero_grad()
loss = module(inp)
print("After Forward:")
fmt.display_snapshot("current")
loss.backward()
optimizer.step()
fmt.display_snapshot("peak")
fmt.display_modulewise_snapshots(depth=3, units="MB")
"""
def __init__(
self,
mod: torch.nn.Module,
optm: Optional[torch.optim.Optimizer] = None,
) -> None:
super().__init__()
assert isinstance(mod, FSDPModule), "FSDPMemTracker only supports FSDP modules"
self._root_mod = mod
self._optm = optm
self._fsdp_mod_to_saved_methods: WeakIdKeyDictionary = WeakIdKeyDictionary()
self._fsdp_state: _FSDPState = _FSDPState.PRE_FW
self._ref_class: type[_RefType] = _FSDPRefType
def _instrument_fsdp_sharded_params_grads(
self, fsdp_param_group: FSDPParamGroup
) -> None:
# Track sharded params and grads after initialization
for fsdp_param in fsdp_param_group.fsdp_params:
self._update_and_maybe_create_winfos(
fsdp_param.sharded_param,
_FSDPRefType.SHARDED_PARAM,
)
sharded_grad = fsdp_param.sharded_param.grad
if sharded_grad is not None:
self._update_and_maybe_create_winfos(
sharded_grad,
_FSDPRefType.SHARDED_GRAD,
)
def _fsdp_state_pre_forward(
self,
fsdp_mod: FSDPModule,
orig_fsdp_state_pre_fw: Callable[_P, tuple[tuple[Unpack[_Ts]], dict[str, Any]]],
) -> Callable[_P, tuple[tuple[Unpack[_Ts]], dict[str, Any]]]:
# We capture memory snapshots before and after ``FSDPState._pre_forward`` to attribute the `unsharded` params
# and `all_gather` buffers. There are three cases:
# Case 1: If the module is not in the ``memory_tracking`` dictionary, create a new ``_FSDPModMemStats``
# instance for the module and add it to the ``memory_tracking`` dictionary.
# Case 2: If the module is already in the ``memory_tracking`` dictionary and we are in backward, this means
# we are in the AC region. We check if this is the top most module in the AC region. If it is,
# we store a weak reference and set the flag ``_in_ac`` to True.
# Case 3: If the module is already in the ``memory_tracking`` dictionary and we are in forward, this means
# this module is called for the second time. If it is a root module, that means we are in the next
# iteration and we error out. If it is not a root module, that means it's a submodule that is being
# used multiple times in the same iteration, which we allow and track.
# For Case 1 and 3, we also initialize the ``local_peak`` and ``PEAK_FW`` snapshot for the module.
# For Case 2 we only capture 1 snapshot after ``FSDPState._pre_forward`` runs because it is a no-op.
@wraps(orig_fsdp_state_pre_fw)
def inner(
*args: _P.args, **kwargs: _P.kwargs
) -> tuple[tuple[Unpack[_Ts]], dict[str, Any]]:
self._fsdp_state = _FSDPState.PRE_FW
mod_fqn = self._mod_tracker.get_known_fqn(fsdp_mod)
assert mod_fqn is not None
if fsdp_mod not in self.memory_tracking:
mod_stat = _FSDPModMemStats(mod_fqn)
self.memory_tracking[fsdp_mod] = mod_stat
snapshot = self.get_tracker_snapshot()
mod_stat.local_peak = {
dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in snapshot.items()
}
mod_stat.snapshots.setdefault(_FSDPModState.PEAK_FW, []).append(
snapshot
)
mod_stat.snapshots.setdefault(_FSDPModState.BEF_PRE_FW, []).append(
deepcopy(snapshot)
)
elif not self._mod_tracker.is_bw:
parents = self._mod_tracker.parents - {mod_fqn}
if len(parents) == 1 and "Global" in parents:
raise NotImplementedError(
"FSDPMemTracker does not support memory tracking for multiple iterative calls."
" Either use ``reset_mod_stats`` to clear module memory stats for the previous iteration"
" or file a github issue if you need this feature."
)
# pyrefly: ignore [bad-assignment]
args, kwargs = orig_fsdp_state_pre_fw(*args, **kwargs)
fsdp_state = fsdp_mod._get_fsdp_state()
if fsdp_param_group := fsdp_state._fsdp_param_group:
for fsdp_param in fsdp_param_group.fsdp_params:
self._update_and_maybe_create_winfos(
fsdp_param.unsharded_param,
_FSDPRefType.UNSHARDED_PARAM,
)
mod_stat = self.memory_tracking[fsdp_mod]
if self._mod_tracker.is_bw:
state = _FSDPModState.PRE_FW_AC
if self._ac_mod is None:
self._ac_mod = weakref.ref(fsdp_mod)
self._in_ac = True
else:
state = _FSDPModState.AFT_PRE_FW
mod_stat.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
self._fsdp_state = _FSDPState.FW
return args, kwargs
return inner
def _fsdp_state_post_forward(
self,
fsdp_mod: FSDPModule,
orig_fsdp_state_post_fw: Callable[_P, _R],
) -> Callable[_P, _R]:
# We capture memory snapshots before and after ``FSDPState._post_forward`` to capture the resharded state
# if ``reshard_after_forward`` is not ``False``. There are two cases:
# Case 1: This is called in backward, which means we are in the AC region. If this is the top most module
# in the AC region, we set the flag ``_in_ac`` to False.
# Case 2: This is called in forward.
@wraps(orig_fsdp_state_post_fw)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
mod_stat = self.memory_tracking[fsdp_mod]
if self._mod_tracker.is_bw:
state = _FSDPModState.POST_FW_AC
if self._ac_mod is not None and self._ac_mod() is fsdp_mod:
self._ac_mod = None
self._in_ac = False
else:
state = _FSDPModState.BEF_POST_FW
mod_stat.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
self._fsdp_state = _FSDPState.POST_FW
output = orig_fsdp_state_post_fw(*args, **kwargs)
if not self._mod_tracker.is_bw:
mod_stat.snapshots.setdefault(_FSDPModState.AFT_POST_FW, []).append(
self.get_tracker_snapshot()
)
return output
return inner
def _fsdp_param_group_pre_backward(
self,
fsdp_mod: FSDPModule,
orig_fsdp_param_group_pre_backward: Callable[_P, Any],
) -> Callable[_P, None]:
# We capture memory snapshots before and after ``FSDPParamGroup.pre_backward`` to capture the pre-fetching
# and unsharding of params. We also initialize ``local_peak`` and ``PEAK_BW`` snapshot for the module.
@wraps(orig_fsdp_param_group_pre_backward)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> None:
self._fsdp_state = _FSDPState.PRE_BW
mod_stat = self.memory_tracking[fsdp_mod]
snapshot = self.get_tracker_snapshot()
mod_stat.local_peak = {
dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in snapshot.items()
}
mod_stat.snapshots.setdefault(_FSDPModState.PEAK_BW, []).append(snapshot)
mod_stat.snapshots.setdefault(_FSDPModState.BEF_PRE_BW, []).append(
deepcopy(snapshot)
)
orig_fsdp_param_group_pre_backward(*args, **kwargs)
mod_stat.snapshots.setdefault(_FSDPModState.AFT_PRE_BW, []).append(
self.get_tracker_snapshot()
)
self._fsdp_state = _FSDPState.BW
return inner
def _fsdp_param_group_post_backward(
self,
fsdp_mod: FSDPModule,
orig_fsdp_param_group_post_backward: Callable[_P, Any],
) -> Callable[_P, None]:
# We capture the memory snapshots before and after ``FSDPParamGroup.post_backward`` to track and attribute
# the `unsharded` grads before the post backward and then `sharded` grads and `reduce_scatter` buffers
# after the post backward.
@wraps(orig_fsdp_param_group_post_backward)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> None:
fsdp_state = fsdp_mod._get_fsdp_state()
if fsdp_param_group := fsdp_state._fsdp_param_group:
for fsdp_param in fsdp_param_group.fsdp_params:
unsharded_grad = fsdp_param._unsharded_param.grad
if unsharded_grad is not None:
self._update_and_maybe_create_winfos(
unsharded_grad,
_FSDPRefType.UNSHARDED_GRAD,
update_existing=True,
)
mod_stat = self.memory_tracking[fsdp_mod]
mod_stat.snapshots.setdefault(_FSDPModState.BEF_POST_BW, []).append(
self.get_tracker_snapshot()
)
self._fsdp_state = _FSDPState.POST_BW
orig_fsdp_param_group_post_backward(*args, **kwargs)
if fsdp_param_group := fsdp_state._fsdp_param_group:
for fsdp_param in fsdp_param_group.fsdp_params:
sharded_grad = fsdp_param.sharded_param.grad
if sharded_grad is not None:
self._update_and_maybe_create_winfos(
sharded_grad,
_FSDPRefType.SHARDED_GRAD,
)
mod_stat.snapshots.setdefault(_FSDPModState.AFT_POST_BW, []).append(
self.get_tracker_snapshot()
)
return inner
def _instrument_fsdp_module(self) -> None:
# We uninstall the existing `FSDPState._pre_forward` and `FSDPState._post_forward` hooks and install
# our own hooks that wrap them. We choose this over monkey-patching `FSDPParamGroup.pre_forward` and
# `FSDPParamGroup.post_forward` because during AC these won't be called.
# TODO(@sanketpurandare): This will need to be modified after this PR (https://github.com/pytorch/pytorch/pull/127786)
# lands. For backward we monkey-patch the `FSDPParamGroup.pre_backward` and `FSDPParamGroup.post_backward`.
# get the unique _MultiHandlers/RemoveHandlers and store in dictionary
# the _MultiHandlers object will only need to be grabbed once.
unique_handlers: dict[RemovableHandle, bool] = {}
# pyrefly: ignore # missing-attribute
for module in self._root_mod.modules():
if isinstance(module, FSDPModule):
fsdp_state = module._get_fsdp_state()
if fsdp_param_group := fsdp_state._fsdp_param_group:
if not unique_handlers.get(fsdp_state._pre_forward_hook_handle):
unique_handlers[fsdp_state._pre_forward_hook_handle] = True
if not unique_handlers.get(fsdp_state._post_forward_hook_handle):
unique_handlers[fsdp_state._post_forward_hook_handle] = True
# call remove on the handles once
for f_hook_handle in unique_handlers:
f_hook_handle.remove()
# pyrefly: ignore # missing-attribute
for module in self._root_mod.modules():
if isinstance(module, FSDPModule):
fsdp_state = module._get_fsdp_state()
if fsdp_param_group := fsdp_state._fsdp_param_group:
self._instrument_fsdp_sharded_params_grads(fsdp_param_group)
fsdp_state._pre_forward_hook_handle = (
# pyrefly: ignore [missing-attribute]
module.register_forward_pre_hook(
self._fsdp_state_pre_forward(
module, fsdp_state._pre_forward
),
prepend=True,
with_kwargs=True,
)
)
# pyrefly: ignore [missing-attribute]
fsdp_state._post_forward_hook_handle = module.register_forward_hook(
self._fsdp_state_post_forward(module, fsdp_state._post_forward),
prepend=False,
always_call=True,
)
self._fsdp_mod_to_saved_methods[module] = _SavedFSDPMethods(
fsdp_param_group.pre_backward,
fsdp_param_group.post_backward,
)
fsdp_param_group.pre_backward = self._fsdp_param_group_pre_backward( # type: ignore[assignment]
module, fsdp_param_group.pre_backward
)
fsdp_param_group.post_backward = ( # type: ignore[assignment]
self._fsdp_param_group_post_backward(
module, fsdp_param_group.post_backward
)
)
# pyrefly: ignore [missing-attribute]
for buffer in self._root_mod.buffers():
self._update_and_maybe_create_winfos(
buffer,
_FSDPRefType.BUFFER,
)
def _instrument_optimizer(self) -> None:
# Register a hook on the optimizer step to track the optimizer states.
# The pre-hook is to set the flag ``_in_opt`` to True. The post-hook unsets the flag,
# and also tracks any optimizer states that are created during the optimizer step.
if self._optm is not None:
self._track_optimizer_states(_FSDPRefType.OPT, self._optm)
def _opt_step_pre_hook(
optimizer: optim.Optimizer, args: Any, kwargs: Any
) -> None:
self._in_opt = True
def _opt_step_post_hook(
optimizer: optim.Optimizer, args: Any, kwargs: Any
) -> None:
self._track_optimizer_states(_FSDPRefType.OPT, optimizer)
self._in_opt = False
self._optimizer_hook_handles = (
self._optm.register_step_pre_hook(_opt_step_pre_hook),
self._optm.register_step_post_hook(_opt_step_post_hook),
)
def _register_module_and_optimizer_hooks(self) -> None:
self._instrument_fsdp_module()
self._instrument_optimizer()
def _deregister_module_and_optimizer_hooks(self) -> None:
for (
fsdp_mod,
saved_methods,
) in self._fsdp_mod_to_saved_methods.items():
fsdp_state = fsdp_mod._get_fsdp_state()
fsdp_state._pre_forward_hook_handle.remove()
fsdp_state._post_forward_hook_handle.remove()
fsdp_state._pre_forward_hook_handle = fsdp_mod.register_forward_pre_hook(
fsdp_state._pre_forward, prepend=True, with_kwargs=True
)
fsdp_state._post_forward_hook_handle = fsdp_mod.register_forward_hook(
fsdp_state._post_forward, prepend=False
)
if fsdp_param_group := fsdp_state._fsdp_param_group:
fsdp_param_group.pre_backward = saved_methods.pre_backward
fsdp_param_group.post_backward = saved_methods.post_backward
self._fsdp_mod_to_saved_methods.clear()
if self._optimizer_hook_handles is not None:
for handle in self._optimizer_hook_handles:
handle.remove()
self._optimizer_hook_handles = None
def track_inputs(self, inputs: tuple[Any, ...]) -> None:
"""
This is used to track the input tensors to the model and annotate them as ``Inputs``.
Args:
inputs (Tuple[Any]): A tuple containing the input data. This can include tensors
as well as other data types. Only tensors will be tracked.
"""
def _track_inputs(t: torch.Tensor) -> None:
self._update_and_maybe_create_winfos(
t,
_FSDPRefType.INP,
)
tree_map_only(torch.Tensor, _track_inputs, inputs)
def track_external(
self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]
) -> None:
"""This is no-op for ``FSDPMemTracker``"""
def __enter__(self) -> "FSDPMemTracker":
if self._depth == 0:
self._register_module_and_optimizer_hooks()
self._track_resize()
self._peak_mem_snap = self.get_tracker_snapshot()
self._peak_mem = {
dev: dev_snap[_TOTAL_KEY]
for dev, dev_snap in self._peak_mem_snap.items()
}
self._mod_tracker.__enter__()
TorchDispatchMode.__enter__(self)
self._depth += 1
return self
def __exit__(self, *args: Any) -> None:
self._depth -= 1
if self._depth == 0:
self._deregister_module_and_optimizer_hooks()
self._restore_resize()
self._mod_tracker.__exit__(*args)
TorchDispatchMode.__exit__(self, *args)
def __torch_dispatch__(self, func, types, args=..., kwargs=None): # type: ignore[no-untyped-def]
# When running this mode with DTensor, ordinarily all modes will
# run **before** subclasses get a chance to run.
# Returning NotImplemented here gives us a chance to let DTensor
# run and desugar into local tensor ops, before `MemTracker` sees them.
if any(t == DTensor for t in types):
return NotImplemented
if (
func is torch.ops._c10d_functional.wait_tensor.default
and active_fake_mode()
):
# N.B: This is a hacky way to override the Meta IMPL of wait_tensor. The original impl returns
# a new tensor which does not happen in eager mode, when a wait_tensor is called.
# pyrefly: ignore [unsupported-operation]
res = args[0]
else:
res = func(*args, **kwargs or {})
# If we are tracking an optimizer state, we use the optimizer reference type.
# If we are in backward region and not in AC region, we use the backward reference type.
# Else we use the forward reference type.
if self._in_opt:
reftype = _FSDPRefType.OPT
elif self._mod_tracker.is_bw and not self._in_ac:
reftype = _FSDPRefType.TEMP
else:
reftype = _FSDPRefType.ACT
if func is c10d._allgather_base_.default and self._fsdp_state in [
_FSDPState.PRE_FW,
_FSDPState.PRE_BW,
]:
# pyrefly: ignore [unsupported-operation]
output_tensor = args[0]
self._update_and_maybe_create_winfos(
output_tensor,
_FSDPRefType.ALL_GATHER,
update_existing=True,
)
if (
func is c10d._reduce_scatter_base_.default
and self._fsdp_state == _FSDPState.POST_BW
):
# pyrefly: ignore [unsupported-operation]
input_tensor = args[1]
self._update_and_maybe_create_winfos(
input_tensor,
_FSDPRefType.REDUCE_SCATTER,
update_existing=True,
)
tree_map_only(torch.Tensor, partial(self._track, reftype), res)
peak_state = (
_FSDPModState.PEAK_BW if self._mod_tracker.is_bw else _FSDPModState.PEAK_FW
)
self._update_peak_stats(peak_state)
return res
|
FSDPMemTracker
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/tool_bash_20250124_param.py
|
{
"start": 320,
"end": 692
}
|
class ____(TypedDict, total=False):
name: Required[Literal["bash"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["bash_20250124"]]
cache_control: Optional[CacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
|
ToolBash20250124Param
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/mysql/test_compiler.py
|
{
"start": 53815,
"end": 56375
}
|
class ____(testing.AssertsCompiledSQL):
    """Shared SQL-compilation tests for MySQL regular-expression operators.

    Verifies that ``regexp_match`` compiles to ``REGEXP`` / ``NOT REGEXP``
    and ``regexp_replace`` compiles to ``REGEXP_REPLACE``, for literal,
    column, and string-literal operands.
    """
    def setup_test(self):
        # Two string columns: one used as the match target, the other as a
        # column-valued pattern/replacement in the *_column variants.
        self.table = table(
            "mytable", column("myid", String), column("name", String)
        )
    def test_regexp_match(self):
        # A literal pattern binds as a positional parameter.
        self.assert_compile(
            self.table.c.myid.regexp_match("pattern"),
            "mytable.myid REGEXP %s",
            checkpositional=("pattern",),
        )
    def test_regexp_match_column(self):
        # A column-valued pattern is rendered inline; no bind params.
        self.assert_compile(
            self.table.c.myid.regexp_match(self.table.c.name),
            "mytable.myid REGEXP mytable.name",
            checkpositional=(),
        )
    def test_regexp_match_str(self):
        # The match target itself may be a bound literal.
        self.assert_compile(
            literal("string").regexp_match(self.table.c.name),
            "%s REGEXP mytable.name",
            checkpositional=("string",),
        )
    def test_not_regexp_match(self):
        # Negation via ~ renders the NOT REGEXP operator.
        self.assert_compile(
            ~self.table.c.myid.regexp_match("pattern"),
            "mytable.myid NOT REGEXP %s",
            checkpositional=("pattern",),
        )
    def test_not_regexp_match_column(self):
        self.assert_compile(
            ~self.table.c.myid.regexp_match(self.table.c.name),
            "mytable.myid NOT REGEXP mytable.name",
            checkpositional=(),
        )
    def test_not_regexp_match_str(self):
        self.assert_compile(
            ~literal("string").regexp_match(self.table.c.name),
            "%s NOT REGEXP mytable.name",
            checkpositional=("string",),
        )
    def test_regexp_replace(self):
        # Both pattern and replacement may be bound literals...
        self.assert_compile(
            self.table.c.myid.regexp_replace("pattern", "replacement"),
            "REGEXP_REPLACE(mytable.myid, %s, %s)",
            checkpositional=("pattern", "replacement"),
        )
    def test_regexp_replace_column(self):
        # ...or either argument may be a column, rendered inline.
        self.assert_compile(
            self.table.c.myid.regexp_replace("pattern", self.table.c.name),
            "REGEXP_REPLACE(mytable.myid, %s, mytable.name)",
            checkpositional=("pattern",),
        )
    def test_regexp_replace_column2(self):
        self.assert_compile(
            self.table.c.myid.regexp_replace(self.table.c.name, "replacement"),
            "REGEXP_REPLACE(mytable.myid, mytable.name, %s)",
            checkpositional=("replacement",),
        )
    def test_regexp_replace_string(self):
        self.assert_compile(
            literal("string").regexp_replace("pattern", self.table.c.name),
            "REGEXP_REPLACE(%s, %s, mytable.name)",
            checkpositional=("string", "pattern"),
        )
|
RegexpCommon
|
python
|
vyperlang__vyper
|
vyper/venom/passes/memmerging.py
|
{
"start": 685,
"end": 2912
}
|
class ____:
    # abstract "copy" operation which contains a list of copy instructions
    # and can fuse them into a single copy operation.
    # NOTE(review): fields are declared dataclass-style; the decorator is
    # outside this view -- presumably @dataclass, confirm at the call site.
    dst: int  # destination memory offset
    src: int  # source memory offset
    length: int  # number of bytes copied
    insts: list[IRInstruction]  # the original instructions this copy fuses
    @classmethod
    def memzero(cls, dst, length, insts):
        # factory method to simplify creation of memory zeroing operations
        # (which are similar to Copy operations but src is always
        # `calldatasize`). choose src=dst, so that can_merge returns True
        # for overlapping memzeros.
        return cls(dst, dst, length, insts)
    @property
    def src_end(self) -> int:
        # exclusive end offset of the source interval
        return self.src + self.length
    @property
    def dst_end(self) -> int:
        # exclusive end offset of the destination interval
        return self.dst + self.length
    def src_interval(self) -> _Interval:
        return _Interval(self.src, self.length)
    def dst_interval(self) -> _Interval:
        return _Interval(self.dst, self.length)
    def overwrites_self_src(self) -> bool:
        # return true if dst overlaps src. this is important for blocking
        # mcopy batching in certain cases.
        return self.overwrites(self.src_interval())
    def overwrites(self, interval: _Interval) -> bool:
        # return true if dst of self overwrites the interval
        return _Interval(self.dst, self.length).overlaps(interval)
    def can_merge(self, other: "_Copy"):
        # both source and destination have to be offset by same amount,
        # otherwise they do not represent the same copy. e.g.
        #   Copy(0, 64, 16)
        #   Copy(11, 74, 16)
        if self.src - other.src != self.dst - other.dst:
            return False
        # the copies must at least touch each other
        # (callers keep copies sorted by dst, so only the forward gap from
        # self.dst_end to other.dst needs checking -- see merge's assert)
        if other.dst > self.dst_end:
            return False
        return True
    def merge(self, other: "_Copy"):
        # merge other into self. e.g.
        #   Copy(0, 64, 16); Copy(16, 80, 8) => Copy(0, 64, 24)
        assert self.dst <= other.dst, "bad bisect_left"
        assert self.can_merge(other)
        # new extent spans from self.dst to whichever copy ends later
        new_length = max(self.dst_end, other.dst_end) - self.dst
        self.length = new_length
        self.insts.extend(other.insts)
    def __repr__(self) -> str:
        return f"_Copy({self.dst}, {self.src}, {self.length})"
|
_Copy
|
python
|
pyca__cryptography
|
src/cryptography/x509/extensions.py
|
{
"start": 33873,
"end": 34776
}
|
class ____(ExtensionType):
    """X.509 InhibitAnyPolicy extension (RFC 5280 4.2.1.14): holds how many
    further certificates may still use the special anyPolicy OID."""

    oid = ExtensionOID.INHIBIT_ANY_POLICY

    def __init__(self, skip_certs: int) -> None:
        # Validate type before value: comparing a non-int could itself fail.
        if not isinstance(skip_certs, int):
            raise TypeError("skip_certs must be an integer")
        if skip_certs < 0:
            raise ValueError("skip_certs must be a non-negative integer")
        self._skip_certs = skip_certs

    @property
    def skip_certs(self) -> int:
        # Read-only view of the validated count.
        return self._skip_certs

    def __repr__(self) -> str:
        return f"<InhibitAnyPolicy(skip_certs={self.skip_certs})>"

    def __eq__(self, other: object) -> bool:
        if isinstance(other, InhibitAnyPolicy):
            return self.skip_certs == other.skip_certs
        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.skip_certs)

    def public_bytes(self) -> bytes:
        # DER encoding is delegated to the Rust backend.
        return rust_x509.encode_extension_value(self)
|
InhibitAnyPolicy
|
python
|
openai__openai-python
|
tests/api_resources/beta/test_assistants.py
|
{
"start": 469,
"end": 9435
}
|
class ____:
    """Generated smoke tests for the sync ``beta.assistants`` resource.

    Each CRUD+list operation is exercised four ways: plain call, call with
    every optional parameter, ``with_raw_response`` (parsed after checking
    headers), and ``with_streaming_response`` (context-managed); path-param
    variants also assert that an empty ID raises ``ValueError``.
    """
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.create(
            model="gpt-4o",
        )
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.create(
            model="gpt-4o",
            description="description",
            instructions="instructions",
            metadata={"foo": "string"},
            name="name",
            reasoning_effort="none",
            response_format="auto",
            temperature=1,
            tool_resources={
                "code_interpreter": {"file_ids": ["string"]},
                "file_search": {
                    "vector_store_ids": ["string"],
                    "vector_stores": [
                        {
                            "chunking_strategy": {"type": "auto"},
                            "file_ids": ["string"],
                            "metadata": {"foo": "string"},
                        }
                    ],
                },
            },
            tools=[{"type": "code_interpreter"}],
            top_p=1,
        )
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        response = client.beta.assistants.with_raw_response.create(
            model="gpt-4o",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        assistant = response.parse()
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        with client.beta.assistants.with_streaming_response.create(
            model="gpt-4o",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            assistant = response.parse()
            assert_matches_type(Assistant, assistant, path=["response"])
        assert cast(Any, response.is_closed) is True
    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.retrieve(
            "assistant_id",
        )
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.beta.assistants.with_raw_response.retrieve(
            "assistant_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        assistant = response.parse()
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.beta.assistants.with_streaming_response.retrieve(
            "assistant_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            assistant = response.parse()
            assert_matches_type(Assistant, assistant, path=["response"])
        assert cast(Any, response.is_closed) is True
    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # Empty path parameters must be rejected client-side.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
            client.beta.assistants.with_raw_response.retrieve(
                "",
            )
    @parametrize
    def test_method_update(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.update(
            assistant_id="assistant_id",
        )
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_method_update_with_all_params(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.update(
            assistant_id="assistant_id",
            description="description",
            instructions="instructions",
            metadata={"foo": "string"},
            model="string",
            name="name",
            reasoning_effort="none",
            response_format="auto",
            temperature=1,
            tool_resources={
                "code_interpreter": {"file_ids": ["string"]},
                "file_search": {"vector_store_ids": ["string"]},
            },
            tools=[{"type": "code_interpreter"}],
            top_p=1,
        )
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_raw_response_update(self, client: OpenAI) -> None:
        response = client.beta.assistants.with_raw_response.update(
            assistant_id="assistant_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        assistant = response.parse()
        assert_matches_type(Assistant, assistant, path=["response"])
    @parametrize
    def test_streaming_response_update(self, client: OpenAI) -> None:
        with client.beta.assistants.with_streaming_response.update(
            assistant_id="assistant_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            assistant = response.parse()
            assert_matches_type(Assistant, assistant, path=["response"])
        assert cast(Any, response.is_closed) is True
    @parametrize
    def test_path_params_update(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
            client.beta.assistants.with_raw_response.update(
                assistant_id="",
            )
    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.list()
        assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"])
    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.list(
            after="after",
            before="before",
            limit=0,
            order="asc",
        )
        assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"])
    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.beta.assistants.with_raw_response.list()
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        assistant = response.parse()
        assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"])
    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.beta.assistants.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            assistant = response.parse()
            assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"])
        assert cast(Any, response.is_closed) is True
    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        assistant = client.beta.assistants.delete(
            "assistant_id",
        )
        assert_matches_type(AssistantDeleted, assistant, path=["response"])
    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.beta.assistants.with_raw_response.delete(
            "assistant_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        assistant = response.parse()
        assert_matches_type(AssistantDeleted, assistant, path=["response"])
    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.beta.assistants.with_streaming_response.delete(
            "assistant_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            assistant = response.parse()
            assert_matches_type(AssistantDeleted, assistant, path=["response"])
        assert cast(Any, response.is_closed) is True
    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
            client.beta.assistants.with_raw_response.delete(
                "",
            )
|
TestAssistants
|
python
|
Pylons__pyramid
|
tests/test_config/test_assets.py
|
{
"start": 30536,
"end": 33292
}
|
class ____:
    """Integration tests shared by asset-source implementations.

    ``self._makeOne(prefix)`` is supplied by the subclass/mixin and
    presumably constructs the asset source under test rooted at this test
    module's directory (``here``) -- TODO confirm against the subclasses.
    ``_assertBody`` compares stream/string content to an on-disk file.
    """
    def test_get_filename(self):
        source = self._makeOne('')
        self.assertEqual(
            source.get_filename('test_assets.py'),
            os.path.join(here, 'test_assets.py'),
        )
    def test_get_filename_with_prefix(self):
        # With a prefix, an empty relative name resolves to the prefix itself.
        source = self._makeOne('test_assets.py')
        self.assertEqual(
            source.get_filename(''), os.path.join(here, 'test_assets.py')
        )
    def test_get_filename_file_doesnt_exist(self):
        # Missing files yield None rather than raising.
        source = self._makeOne('')
        self.assertEqual(source.get_filename('wont_exist'), None)
    def test_get_stream(self):
        source = self._makeOne('')
        with source.get_stream('test_assets.py') as stream:
            _assertBody(stream.read(), os.path.join(here, 'test_assets.py'))
    def test_get_stream_with_prefix(self):
        source = self._makeOne('test_assets.py')
        with source.get_stream('') as stream:
            _assertBody(stream.read(), os.path.join(here, 'test_assets.py'))
    def test_get_stream_file_doesnt_exist(self):
        source = self._makeOne('')
        self.assertEqual(source.get_stream('wont_exist'), None)
    def test_get_string(self):
        source = self._makeOne('')
        _assertBody(
            source.get_string('test_assets.py'),
            os.path.join(here, 'test_assets.py'),
        )
    def test_get_string_with_prefix(self):
        source = self._makeOne('test_assets.py')
        _assertBody(
            source.get_string(''), os.path.join(here, 'test_assets.py')
        )
    def test_get_string_file_doesnt_exist(self):
        source = self._makeOne('')
        self.assertEqual(source.get_string('wont_exist'), None)
    def test_exists(self):
        source = self._makeOne('')
        self.assertEqual(source.exists('test_assets.py'), True)
    def test_exists_with_prefix(self):
        source = self._makeOne('test_assets.py')
        self.assertEqual(source.exists(''), True)
    def test_exists_file_doesnt_exist(self):
        # NOTE: missing paths report None (not False) for exists/isdir/listdir.
        source = self._makeOne('')
        self.assertEqual(source.exists('wont_exist'), None)
    def test_isdir_false(self):
        source = self._makeOne('')
        self.assertEqual(source.isdir('test_assets.py'), False)
    def test_isdir_true(self):
        source = self._makeOne('')
        self.assertEqual(source.isdir('files'), True)
    def test_isdir_doesnt_exist(self):
        source = self._makeOne('')
        self.assertEqual(source.isdir('wont_exist'), None)
    def test_listdir(self):
        source = self._makeOne('')
        self.assertTrue(source.listdir('files'))
    def test_listdir_doesnt_exist(self):
        source = self._makeOne('')
        self.assertEqual(source.listdir('wont_exist'), None)
|
AssetSourceIntegrationTests
|
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/io/test_row.py
|
{
"start": 391,
"end": 4976
}
|
class ____(ToFromTestMixinBase):
    """
    Tests for a Cosmology[To/From]Format with ``format="astropy.row"``.
    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
    """
    @pytest.mark.parametrize("in_meta", [True, False])
    def test_to_row_in_meta(self, cosmo_cls, cosmo, in_meta):
        """Test where the cosmology class is placed."""
        row = cosmo.to_format("astropy.row", cosmology_in_meta=in_meta)
        # if it's in metadata, it's not a column. And vice versa.
        if in_meta:
            assert row.meta["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in row.colnames  # not also a column
        else:
            assert row["cosmology"] == cosmo_cls.__qualname__
            assert "cosmology" not in row.meta
    # -----------------------
    def test_from_not_row(self, cosmo, from_format):
        """Test not passing a Row to the Row parser."""
        with pytest.raises(AttributeError):
            from_format("NOT A ROW", format="astropy.row")
    def test_tofrom_row_instance(self, cosmo, to_format, from_format):
        """Test cosmology -> astropy.row -> cosmology."""
        # ------------
        # To Row
        row = to_format("astropy.row")
        assert isinstance(row, Row)
        assert row["cosmology"] == cosmo.__class__.__qualname__
        assert row["name"] == cosmo.name
        # ------------
        # From Row
        # Inject an extra column to exercise mismatched-parameter handling.
        row.table["mismatching"] = "will error"
        # tests are different if the last argument is a **kwarg
        if cosmo._init_has_kwargs:
            got = from_format(row, format="astropy.row")
            assert got.__class__ is cosmo.__class__
            assert got.name == cosmo.name
            assert "mismatching" not in got.meta
            return  # don't continue testing
        # read with mismatching parameters errors
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(row, format="astropy.row")
        # unless mismatched are moved to meta
        got = from_format(row, format="astropy.row", move_to_meta=True)
        assert got == cosmo
        assert got.meta["mismatching"] == "will error"
        # it won't error if everything matches up
        row.table.remove_column("mismatching")
        got = from_format(row, format="astropy.row")
        assert got == cosmo
        # and it will also work if the cosmology is a class
        # Note this is not the default output of ``to_format``.
        cosmology = _COSMOLOGY_CLASSES[row["cosmology"]]
        row.table.remove_column("cosmology")
        row.table["cosmology"] = cosmology
        got = from_format(row, format="astropy.row")
        assert got == cosmo
        # also it auto-identifies 'format'
        got = from_format(row)
        assert got == cosmo
    def test_tofrom_row_rename(self, cosmo, to_format, from_format):
        """Test renaming columns in row."""
        rename = {"name": "cosmo_name"}
        row = to_format("astropy.row", rename=rename)
        assert "name" not in row.colnames
        assert "cosmo_name" in row.colnames
        # Error if just reading
        with pytest.raises(TypeError, match="there are unused parameters"):
            from_format(row)
        # Roundtrip
        # Inverting the mapping restores the original column names.
        inv_rename = {v: k for k, v in rename.items()}
        got = from_format(row, rename=inv_rename)
        assert got == cosmo
    def test_fromformat_row_subclass_partial_info(self, cosmo: Cosmology) -> None:
        """
        Test writing from an instance and reading from that class.
        This works with missing information.
        There are no partial info options
        """
    @pytest.mark.parametrize("format", [True, False, None, "astropy.row"])
    def test_is_equivalent_to_row(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a Row.
        """
        obj = to_format("astropy.row")
        assert not isinstance(obj, Cosmology)
        # Equivalent for any truthy/None format, inequivalent only for False.
        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (format is not False)
|
ToFromRowTestMixin
|
python
|
pypa__setuptools
|
setuptools/_vendor/typeguard/_importhook.py
|
{
"start": 1459,
"end": 3061
}
|
class ____(SourceFileLoader):
    """Import loader that instruments modules with typeguard runtime checks.

    Overrides ``source_to_code`` to run the AST through
    ``TypeguardTransformer`` before compiling, and ``exec_module`` to make
    the bytecode cache path distinct from uninstrumented modules.
    """
    @staticmethod
    def source_to_code(
        data: Buffer | str | ast.Module | ast.Expression | ast.Interactive,
        path: Buffer | str | PathLike[str] = "<string>",
    ) -> CodeType:
        # Accept an already-parsed AST directly; otherwise decode and parse.
        if isinstance(data, (ast.Module, ast.Expression, ast.Interactive)):
            tree = data
        else:
            if isinstance(data, str):
                source = data
            else:
                source = decode_source(data)
            tree = _call_with_frames_removed(
                ast.parse,
                source,
                path,
                "exec",
            )
        # Inject typeguard's runtime checks into the AST.
        tree = TypeguardTransformer().visit(tree)
        ast.fix_missing_locations(tree)
        # ast.unparse requires Python 3.9+, hence the version gate.
        if global_config.debug_instrumentation and sys.version_info >= (3, 9):
            print(
                f"Source code of {path!r} after instrumentation:\n"
                "----------------------------------------------",
                file=sys.stderr,
            )
            print(ast.unparse(tree), file=sys.stderr)
            print("----------------------------------------------", file=sys.stderr)
        return _call_with_frames_removed(
            compile, tree, path, "exec", 0, dont_inherit=True
        )
    def exec_module(self, module: ModuleType) -> None:
        # Use a custom optimization marker – the import lock should make this monkey
        # patch safe
        # (optimized_cache_from_source is defined elsewhere in this module;
        # presumably it tags the .pyc path so instrumented and plain bytecode
        # don't collide -- confirm in the surrounding file.)
        with patch(
            "importlib._bootstrap_external.cache_from_source",
            optimized_cache_from_source,
        ):
            super().exec_module(module)
|
TypeguardLoader
|
python
|
cython__cython
|
Cython/Build/Cache.py
|
{
"start": 1945,
"end": 6855
}
|
class ____:
    """Filesystem cache for Cython compilation artifacts, keyed by fingerprint.

    Single-file results are stored gzip-compressed; multi-file results are
    stored as zip archives. ``cleanup_cache`` evicts entries when the cache
    grows past ``cache_size`` bytes.
    """
    def __init__(self, path, cache_size=None):
        # Default to the shared Cython cache dir when no path is given.
        if path is None:
            self.path = join_path(get_cython_cache_dir(), "compiler")
        else:
            self.path = path
        self.cache_size = cache_size if cache_size is not None else MAX_CACHE_SIZE
        if not os.path.exists(self.path):
            os.makedirs(self.path)
    def transitive_fingerprint(
        self, filename, dependencies, compilation_options, flags=FingerprintFlags()
    ):
        r"""
        Return a fingerprint of a cython file that is about to be cythonized.
        Fingerprints are looked up in future compilations. If the fingerprint
        is found, the cythonization can be skipped. The fingerprint must
        incorporate everything that has an influence on the generated code.
        """
        try:
            # Seed with the Cython version so caches don't survive upgrades.
            m = hashlib.sha256(__version__.encode("UTF-8"))
            m.update(file_hash(filename).encode("UTF-8"))
            # Generated C/C++ sources and headers are excluded; only true
            # inputs (.pyx/.pxd/...) affect the fingerprint.
            for x in sorted(dependencies):
                if os.path.splitext(x)[1] not in (".c", ".cpp", ".h"):
                    m.update(file_hash(x).encode("UTF-8"))
            # Include the module attributes that change the compilation result
            # in the fingerprint. We do not iterate over module.__dict__ and
            # include almost everything here as users might extend Extension
            # with arbitrary (random) attributes that would lead to cache
            # misses.
            m.update(flags.get_fingerprint().encode("UTF-8"))
            m.update(compilation_options.get_fingerprint().encode("UTF-8"))
            return m.hexdigest()
        except OSError:
            # Unreadable inputs: signal "not cacheable" rather than raising.
            return None
    def fingerprint_file(self, cfile, fingerprint, ext):
        # Cache entry path: <cache>/<basename>-<fingerprint><ext>
        return (
            join_path(self.path, "%s-%s" % (os.path.basename(cfile), fingerprint)) + ext
        )
    def lookup_cache(self, c_file, fingerprint):
        # Cython-generated c files are highly compressible.
        # (E.g. a compression ratio of about 10 for Sage).
        if not os.path.exists(self.path):
            safe_makedirs(self.path)
        # Check the gzip (single-file) entry first, then the zip entry.
        gz_fingerprint_file = self.fingerprint_file(c_file, fingerprint, gzip_ext)
        if os.path.exists(gz_fingerprint_file):
            return gz_fingerprint_file
        zip_fingerprint_file = self.fingerprint_file(c_file, fingerprint, zip_ext)
        if os.path.exists(zip_fingerprint_file):
            return zip_fingerprint_file
        return None
    def load_from_cache(self, c_file, cached):
        # Dispatch on the cached entry's extension; os.utime refreshes the
        # access time so eviction can order entries by recency.
        ext = os.path.splitext(cached)[1]
        if ext == gzip_ext:
            os.utime(cached, None)
            with gzip_open(cached, "rb") as g:
                with open(c_file, "wb") as f:
                    shutil.copyfileobj(g, f)
        elif ext == zip_ext:
            os.utime(cached, None)
            dirname = os.path.dirname(c_file)
            with zipfile.ZipFile(cached) as z:
                for artifact in z.namelist():
                    z.extract(artifact, join_path(dirname, artifact))
        else:
            raise ValueError(f"Unsupported cache file extension: {ext}")
    def store_to_cache(self, c_file, fingerprint, compilation_result):
        # Write to a ".tmp" file first, then rename, so concurrent readers
        # never observe a partially written cache entry.
        artifacts = compilation_result.get_generated_source_files()
        if len(artifacts) == 1:
            fingerprint_file = self.fingerprint_file(c_file, fingerprint, gzip_ext)
            with open(c_file, "rb") as f:
                with gzip_open(fingerprint_file + ".tmp", "wb") as g:
                    shutil.copyfileobj(f, g)
        else:
            fingerprint_file = self.fingerprint_file(c_file, fingerprint, zip_ext)
            with zipfile.ZipFile(
                fingerprint_file + ".tmp", "w", zipfile_compression_mode
            ) as zip:
                for artifact in artifacts:
                    zip.write(artifact, os.path.basename(artifact))
        os.rename(fingerprint_file + ".tmp", fingerprint_file)
    def cleanup_cache(self, ratio=0.85):
        # Fast path: ask `du` for the total size; fall back to os.stat below
        # if `du` is unavailable or fails.
        try:
            completed_process = subprocess.run(
                ["du", "-s", "-k", os.path.abspath(self.path)], stdout=subprocess.PIPE
            )
            stdout = completed_process.stdout
            if completed_process.returncode == 0:
                total_size = 1024 * int(stdout.strip().split()[0])
                if total_size < self.cache_size:
                    return
        except (OSError, ValueError):
            pass
        total_size = 0
        all = []  # NOTE(review): shadows the builtin `all`
        for file in os.listdir(self.path):
            path = join_path(self.path, file)
            s = os.stat(path)
            total_size += s.st_size
            all.append((s.st_atime, s.st_size, path))
        if total_size > self.cache_size:
            # Delete until the cache is below ratio * cache_size.
            # NOTE(review): reversed(sorted(...)) visits the *most recently
            # accessed* entries first -- looks inverted for an LRU policy;
            # confirm intent before changing.
            for time, size, file in reversed(sorted(all)):
                os.unlink(file)
                total_size -= size
                if total_size < self.cache_size * ratio:
                    break
|
Cache
|
python
|
scrapy__scrapy
|
scrapy/core/downloader/webclient.py
|
{
"start": 664,
"end": 2982
}
|
class ____(HTTPClient):
    """Deprecated Twisted HTTP protocol used by Scrapy's legacy downloader.

    Sends the request described by ``self.factory`` on connection, collects
    response headers/status into the factory, and reports the body (or an
    error) back through ``factory.page`` / ``factory.noPage``.
    """
    delimiter = b"\n"
    def __init__(self):
        warnings.warn(
            "ScrapyHTTPPageGetter is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        super().__init__()
    def connectionMade(self):
        self.headers = Headers()  # bucket for response headers
        # Method command
        self.sendCommand(self.factory.method, self.factory.path)
        # Headers
        for key, values in self.factory.headers.items():
            for value in values:
                self.sendHeader(key, value)
        self.endHeaders()
        # Body
        if self.factory.body is not None:
            self.transport.write(self.factory.body)
    def lineReceived(self, line):
        # Strip trailing whitespace/CR before normal HTTPClient parsing.
        return HTTPClient.lineReceived(self, line.rstrip())
    def handleHeader(self, key, value):
        self.headers.appendlist(key, value)
    def handleStatus(self, version, status, message):
        self.factory.gotStatus(version, status, message)
    def handleEndHeaders(self):
        self.factory.gotHeaders(self.headers)
    def connectionLost(self, reason):
        # Remember why the connection closed; handleResponse may need it to
        # report a truncated body.
        self._connection_lost_reason = reason
        HTTPClient.connectionLost(self, reason)
        self.factory.noPage(reason)
    def handleResponse(self, response):
        if self.factory.method.upper() == b"HEAD":
            # HEAD responses carry no body by definition.
            self.factory.page(b"")
        elif self.length is not None and self.length > 0:
            # Fewer bytes arrived than Content-Length promised: treat as error.
            self.factory.noPage(self._connection_lost_reason)
        else:
            self.factory.page(response)
        self.transport.loseConnection()
    def timeout(self):
        self.transport.loseConnection()
        # transport cleanup needed for HTTPS connections
        if self.factory.url.startswith(b"https"):
            self.transport.stopProducing()
        self.factory.noPage(
            defer.TimeoutError(
                f"Getting {self.factory.url} took longer "
                f"than {self.factory.timeout} seconds."
            )
        )
    # This class used to inherit from Twisted’s
    # twisted.web.client.HTTPClientFactory. When that class was deprecated in
    # Twisted (https://github.com/twisted/twisted/pull/643), we merged its
    # non-overridden code into this class.
|
ScrapyHTTPPageGetter
|
python
|
mkdocstrings__mkdocstrings
|
src/mkdocstrings/_internal/inventory.py
|
{
"start": 354,
"end": 2856
}
|
class ____:
    """A single entry of a Sphinx-style object inventory."""

    def __init__(
        self,
        name: str,
        domain: str,
        role: str,
        uri: str,
        priority: int = 1,
        dispname: str | None = None,
    ):
        """Initialize the object.

        Arguments:
            name: The item name.
            domain: The item domain, like 'python' or 'crystal'.
            role: The item role, like 'class' or 'method'.
            uri: The item URI.
            priority: The item priority. Only used internally by mkdocstrings and Sphinx.
            dispname: The item display name.
        """
        # The display name falls back to the plain name when not provided.
        self.name: str = name
        self.domain: str = domain
        self.role: str = role
        self.uri: str = uri
        self.priority: int = priority
        self.dispname: str = dispname or name

    def format_sphinx(self) -> str:
        """Format this item as a Sphinx inventory line.

        Returns:
            A line formatted for an `objects.inv` file.
        """
        # Sphinx compresses redundant fields: "-" stands for a display name
        # equal to the item name, and a trailing "$" for a URI ending in it.
        shown = "-" if self.dispname == self.name else self.dispname
        location = self.uri
        if location.endswith(self.name):
            location = location[: -len(self.name)] + "$"
        return f"{self.name} {self.domain}:{self.role} {self.priority} {location} {shown}"

    # Regex to parse a Sphinx v2 inventory line.
    sphinx_item_regex = re.compile(r"^(.+?)\s+(\S+):(\S+)\s+(-?\d+)\s+(\S+)\s*(.*)$")

    @overload
    @classmethod
    def parse_sphinx(cls, line: str, *, return_none: Literal[False]) -> InventoryItem: ...

    @overload
    @classmethod
    def parse_sphinx(cls, line: str, *, return_none: Literal[True]) -> InventoryItem | None: ...

    @classmethod
    def parse_sphinx(cls, line: str, *, return_none: bool = False) -> InventoryItem | None:
        """Parse a line from a Sphinx v2 inventory file and return an `InventoryItem` from it."""
        match = cls.sphinx_item_regex.search(line)
        if match is None:
            # The caller chooses between a soft failure and a hard error.
            if return_none:
                return None
            raise ValueError(line)
        name, domain, role, priority, uri, dispname = match.groups()
        # Undo the "$" and "-" compressions applied by `format_sphinx`.
        if uri.endswith("$"):
            uri = uri[:-1] + name
        if dispname == "-":
            dispname = name
        return cls(name, domain, role, uri, int(priority), dispname)
|
InventoryItem
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_utils.py
|
{
"start": 30735,
"end": 33226
}
|
class ____(LocalDTensorTestBase):
    """Round-trip tests between shard orders and (Strided)Shard placements
    on a simulated 32-rank mesh (LocalTensorMode, no real processes)."""
    @property
    def world_size(self) -> int:
        # 2**5 ranks so the mesh can be viewed as a 5-D (2,2,2,2,2) grid.
        return 32
    @with_comms
    def test_StridedShard_to_shard_order(self):
        with LocalTensorMode(ranks=self.world_size):
            mesh = DeviceMesh("cpu", torch.arange(self.world_size).view(2, 2, 2, 2, 2))
            shard_iter = generate_shard_orders(mesh, 3)
            # It takes ~4.8h to complete total 2520 shard order combinations here
            # using LocalTensor. So we only randomly pick 25 shard orders to test.
            all_shard_order = list(shard_iter)
            import random
            # Fixed seed keeps the sampled subset deterministic across runs.
            random.seed(42)
            shard_order_choices = random.sample(
                all_shard_order, min(25, len(all_shard_order))
            )
            x = torch.randn(32, 32, 32)
            for shard_order in shard_order_choices:
                # Distribute the same tensor two ways: directly from the
                # shard order, and via the StridedShard placement conversion.
                a = _distribute_tensor(x, mesh, None, shard_order)
                placement_without_stridedshard = shard_order_to_placement(
                    shard_order, mesh
                )
                placements_with_stridedshard = (
                    DTensorSpec._convert_shard_order_to_StridedShard(
                        shard_order, placement_without_stridedshard, mesh
                    )
                )
                b = distribute_tensor(x, mesh, placements_with_stridedshard)
                shard_order_from_stridedshard = (
                    DTensorSpec._maybe_convert_StridedShard_to_shard_order(
                        placements_with_stridedshard, mesh
                    )
                )
                # Conversion must round-trip, and both paths must produce
                # the same local shards.
                self.assertEqual(shard_order, shard_order_from_stridedshard)
                self.assertEqual(a.to_local(), b.to_local())
    @with_comms
    def test_StridedShard_not_convertible_to_shard_order(self):
        with LocalTensorMode(ranks=self.world_size):
            mesh = DeviceMesh("cpu", torch.arange(self.world_size).view(4, 8))
            # Placements that have no equivalent shard order must map to None.
            unconvertible_placements_list = [
                [_StridedShard(0, split_factor=2), _StridedShard(1, split_factor=2)],
                [_StridedShard(0, split_factor=2), Shard(1)],
                [_StridedShard(1, split_factor=16), Shard(1)],
            ]
            for placements in unconvertible_placements_list:
                shard_order = DTensorSpec._maybe_convert_StridedShard_to_shard_order(
                    tuple(placements), mesh
                )
                self.assertIsNone(shard_order)
|
Test_StridedShard_with_shard_order
|
python
|
huggingface__transformers
|
src/transformers/models/janus/configuration_janus.py
|
{
"start": 9752,
"end": 14620
}
|
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`JanusModel`]. It is used to instantiate an
    Janus model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Janus-1B or Janus-7B models.
    e.g. [deepseek-community/Janus-Pro-1B](https://huggingface.co/deepseek-community/Janus-Pro-1B) or
    [deepseek-community/Janus-Pro-7B](https://huggingface.co/deepseek-community/Janus-Pro-7B)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
            The config object or dictionary of the text backbone.
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `JanusVisionConfig`):
            The config object or dictionary of the vision backbone.
        vq_config (`Union[AutoConfig, dict]`, *optional*, defaults to `JanusVQVAEConfig`):
            The config object or dictionary of the VQVAE backbone.
        image_token_id (`int`, *optional*, defaults to 100581):
            Token index of a placeholder image token.
    Example:
    ```python
    >>> from transformers import JanusForConditionalGeneration, JanusConfig, JanusVisionConfig, JanusVQVAEConfig, LlamaConfig
    >>> # Initializing a Janus vision config
    >>> vision_config = JanusVisionConfig()
    >>> # Initializing a Llama config
    >>> text_config = LlamaConfig()
    >>> # Initializing a VQ config
    >>> vq_config = JanusVQVAEConfig()
    >>> # Initializing a Janus Pro 1B style configuration
    >>> configuration = JanusConfig(vision_config=vision_config, text_config=text_config, vq_config=vq_config)
    >>> # Initializing a model from the Janus Pro 1B style configuration
    >>> model = JanusForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "janus"
    sub_configs = {
        "text_config": AutoConfig,
        "vision_config": JanusVisionConfig,
        "vq_config": JanusVQVAEConfig,
    }
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        vq_config=None,
        image_token_id=100581,
        **kwargs,
    ):
        # Each sub-config accepts: a dict (instantiated via the matching
        # config class), None (defaults), or an already-built config object.
        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "llama")
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
        elif text_config is None:
            logger.info("`text_config` is None. Initializing with default values")
            self.text_config = CONFIG_MAPPING["llama"]()
        elif isinstance(text_config, PreTrainedConfig):
            self.text_config = text_config
        else:
            raise ValueError(
                f"Invalid type for `text_config`. Must be either `dict` or `LlamaConfig`."
                f" Type found: {type(text_config)}"
            )
        if vision_config is None:
            logger.info("`vision_config` is None. Initializing with default JanusVisionConfig values")
            self.vision_config = JanusVisionConfig()
        elif isinstance(vision_config, dict):
            self.vision_config = JanusVisionConfig(**vision_config)
        elif isinstance(vision_config, JanusVisionConfig):
            self.vision_config = vision_config
        else:
            raise ValueError(
                f"Invalid type for `vision_config`. Must be either `dict` or `JanusVisionConfig`."
                f" Type found: {type(vision_config)}"
            )
        if vq_config is None:
            logger.info("`vq_config` is None. Initializing with default JanusVQVAEConfig values")
            self.vq_config = JanusVQVAEConfig()
        elif isinstance(vq_config, dict):
            self.vq_config = JanusVQVAEConfig(**vq_config)
        elif isinstance(vq_config, JanusVQVAEConfig):
            self.vq_config = vq_config
        else:
            raise ValueError(
                f"Invalid type for `vq_config`. Must be either `dict` or `JanusVQVAEConfig`."
                f" Type found: {type(vq_config)}"
            )
        # Mirror the vision tower's initializer range at the top level.
        self.initializer_range = self.vision_config.initializer_range
        # This dimension is required when decoding discrete image tokens to continuous input.
        self.vq_config.num_patches = self.vision_config.image_size // self.vision_config.patch_size
        # The default is only the index for the 1B model, 7B uses a different one
        self.image_token_id = image_token_id
        super().__init__(**kwargs)
__all__ = ["JanusVQVAEConfig", "JanusVisionConfig", "JanusConfig"]
|
JanusConfig
|
python
|
pikepdf__pikepdf
|
src/pikepdf/jbig2.py
|
{
"start": 1355,
"end": 4213
}
|
class ____(JBIG2DecoderInterface):
"""JBIG2 decoder implementation."""
def __init__(self, *, subprocess_run=run, creationflags=CREATION_FLAGS):
"""Initialize the decoder."""
self._run = subprocess_run
self._creationflags = creationflags
def check_available(self) -> None:
"""Check if jbig2dec is installed and usable."""
version = self._version()
if version is not None and version < Version('0.15'):
raise DependencyError("jbig2dec is too old (older than version 0.15)")
def decode_jbig2(self, jbig2: bytes, jbig2_globals: bytes) -> bytes:
"""Decode JBIG2 from binary data, returning decode bytes."""
with TemporaryDirectory(prefix='pikepdf-', suffix='.jbig2') as tmpdir:
image_path = Path(tmpdir) / "image"
global_path = Path(tmpdir) / "global"
output_path = Path(tmpdir) / "outfile"
args = [
"jbig2dec",
"--embedded",
"--format",
"png",
"--output",
os.fspath(output_path),
]
# Get the raw stream, because we can't decode im_obj
# (that is why we're here).
# (Strictly speaking we should remove any non-JBIG2 filters if double
# encoded).
image_path.write_bytes(jbig2)
if len(jbig2_globals) > 0:
global_path.write_bytes(jbig2_globals)
args.append(os.fspath(global_path))
args.append(os.fspath(image_path))
self._run(
args, stdout=DEVNULL, check=True, creationflags=self._creationflags
)
with Image.open(output_path) as im:
return im.tobytes()
def _version(self) -> Version | None:
try:
proc = self._run(
['jbig2dec', '--version'],
stdout=PIPE,
check=True,
encoding='ascii',
creationflags=self._creationflags,
)
except (CalledProcessError, FileNotFoundError) as e:
raise DependencyError("jbig2dec - not installed or not found") from e
else:
result = proc.stdout
version_str = result.replace(
'jbig2dec', ''
).strip() # returns "jbig2dec 0.xx"
try:
return Version(version_str)
except InvalidVersion:
return None
_jbig2_decoder: JBIG2DecoderInterface = JBIG2Decoder()
def get_decoder() -> JBIG2DecoderInterface:
"""Return an instance of a JBIG2 decoder."""
return _jbig2_decoder
def set_decoder(jbig2_decoder: JBIG2DecoderInterface) -> None:
"""Set the JBIG2 decoder to use."""
global _jbig2_decoder
_jbig2_decoder = jbig2_decoder
|
JBIG2Decoder
|
python
|
openai__openai-python
|
src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py
|
{
"start": 794,
"end": 1070
}
|
class ____(TypedDict, total=False):
enabled: bool
"""Enables chat users to access previous ChatKit threads. Defaults to true."""
recent_threads: int
"""Number of recent ChatKit threads users have access to.
Defaults to unlimited when unset.
"""
|
History
|
python
|
tornadoweb__tornado
|
tornado/test/iostream_test.py
|
{
"start": 1395,
"end": 7278
}
|
class ____(AsyncTestCase):
# We want to run these tests with both AsyncHTTPTestCase and AsyncHTTPSTestCase,
# but this leads to some tricky inheritance situations. We want this class's
# get_app, but the test classes's get_http_port and fetch. There's no way to make
# the method resolution order to do what we want in all cases, so the current
# state is that that AsyncHTTP(S)TestCase must be the first base class of the
# final class, and that class must define a get_app method that calls mixin_get_app.
#
# Alternatives include defining this class in a factory that can change the base class
# or refactoring to use composition instead of inheritance for the http components.
def _make_client_iostream(self):
raise NotImplementedError()
def mixin_get_app(self):
return Application([("/", HelloHandler)])
def get_http_port(self) -> int:
raise NotImplementedError()
def fetch(
self, path: str, raise_error: bool = False, **kwargs: typing.Any
) -> HTTPResponse:
# To be filled in by mixing in AsyncHTTPTestCase or AsyncHTTPSTestCase
raise NotImplementedError()
def test_connection_closed(self):
# When a server sends a response and then closes the connection,
# the client must be allowed to read the data before the IOStream
# closes itself. Epoll reports closed connections with a separate
# EPOLLRDHUP event delivered at the same time as the read event,
# while kqueue reports them as a second read/write event with an EOF
# flag.
if (
AsyncHTTPClient.configured_class().__name__.endswith("CurlAsyncHTTPClient")
and platform.system() == "Darwin"
):
# It's possible that this is Tornado's fault, either in AsyncIOLoop or in
# CurlAsyncHTTPClient, but we've also seen this kind of issue in libcurl itself
# (especially a long time ago). The error is tied to the use of Apple's
# SecureTransport instead of OpenSSL.
self.skipTest("libcurl doesn't handle closed connections cleanly on macOS")
response = self.fetch("/", headers={"Connection": "close"})
response.rethrow()
@gen_test
def test_read_until_close(self):
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
stream.write(b"GET / HTTP/1.0\r\n\r\n")
data = yield stream.read_until_close()
self.assertTrue(data.startswith(b"HTTP/1.1 200"))
self.assertTrue(data.endswith(b"Hello"))
@gen_test
def test_read_zero_bytes(self):
self.stream = self._make_client_iostream()
yield self.stream.connect(("127.0.0.1", self.get_http_port()))
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
# normal read
data = yield self.stream.read_bytes(9)
self.assertEqual(data, b"HTTP/1.1 ")
# zero bytes
data = yield self.stream.read_bytes(0)
self.assertEqual(data, b"")
# another normal read
data = yield self.stream.read_bytes(3)
self.assertEqual(data, b"200")
self.stream.close()
@gen_test
def test_write_while_connecting(self):
stream = self._make_client_iostream()
connect_fut = stream.connect(("127.0.0.1", self.get_http_port()))
# unlike the previous tests, try to write before the connection
# is complete.
write_fut = stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
self.assertFalse(connect_fut.done())
# connect will always complete before write.
it = gen.WaitIterator(connect_fut, write_fut)
resolved_order = []
while not it.done():
yield it.next()
resolved_order.append(it.current_future)
self.assertEqual(resolved_order, [connect_fut, write_fut])
data = yield stream.read_until_close()
self.assertTrue(data.endswith(b"Hello"))
stream.close()
@gen_test
def test_future_interface(self):
"""Basic test of IOStream's ability to return Futures."""
stream = self._make_client_iostream()
connect_result = yield stream.connect(("127.0.0.1", self.get_http_port()))
self.assertIs(connect_result, stream)
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
first_line = yield stream.read_until(b"\r\n")
self.assertEqual(first_line, b"HTTP/1.1 200 OK\r\n")
# callback=None is equivalent to no callback.
header_data = yield stream.read_until(b"\r\n\r\n")
headers = HTTPHeaders.parse(header_data.decode("latin1"))
content_length = int(headers["Content-Length"])
body = yield stream.read_bytes(content_length)
self.assertEqual(body, b"Hello")
stream.close()
@gen_test
def test_future_close_while_reading(self):
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
with self.assertRaises(StreamClosedError):
yield stream.read_bytes(1024 * 1024)
stream.close()
@gen_test
def test_future_read_until_close(self):
# Ensure that the data comes through before the StreamClosedError.
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
yield stream.read_until(b"\r\n\r\n")
body = yield stream.read_until_close()
self.assertEqual(body, b"Hello")
# Nothing else to read; the error comes immediately without waiting
# for yield.
with self.assertRaises(StreamClosedError):
stream.read_bytes(1)
@abstract_base_test
|
TestIOStreamWebMixin
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/metadata.py
|
{
"start": 832,
"end": 3271
}
|
class ____:
"""Properties used to create :class:`Tensor`"""
# Regular tensor fields
dtype: torch.dtype = field(default_factory=torch.get_default_dtype)
# This field is deprecated.
layout: torch.layout = field(default=torch.strided)
# This field is deprecated.
requires_grad: bool = False
# This field is deprecated.
memory_format: torch.memory_format = field(default=torch.contiguous_format)
# This field is deprecated.
pin_memory: bool = False
def __getstate__(self):
# Since torch.memory_format cannot be pickled!
memory_format = self.memory_format
if memory_format == torch.contiguous_format:
mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT
elif memory_format == torch.channels_last:
mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST
elif memory_format == torch.preserve_format:
mem_format_encoding = _MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT
else:
raise RuntimeError(f"Invalid torch.memory_format: {memory_format}")
return (
self.dtype,
self.layout,
self.requires_grad,
mem_format_encoding,
self.pin_memory,
)
def __setstate__(
self,
state,
):
(
self.dtype,
self.layout,
self.requires_grad,
mem_format_encoding,
self.pin_memory,
) = state
if mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT:
memory_format = torch.contiguous_format
elif mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST:
memory_format = torch.channels_last
elif mem_format_encoding == _MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT:
memory_format = torch.preserve_format
else:
raise RuntimeError(
f"Invalid torch.memory_format encoding: {mem_format_encoding}"
)
self.memory_format = memory_format
@staticmethod
def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties":
return TensorProperties(
dtype=tensor.dtype,
layout=tensor.layout,
requires_grad=tensor.requires_grad,
memory_format=torch.contiguous_format,
pin_memory=tensor.is_pinned(),
)
@dataclass
|
TensorProperties
|
python
|
mamba-org__mamba
|
docs/source/tools/mermaid.py
|
{
"start": 1054,
"end": 1738
}
|
class ____(nodes.General, nodes.Inline, nodes.Element):
pass
def figure_wrapper(directive, node, caption):
figure_node = nodes.figure("", node)
if "align" in node:
figure_node["align"] = node.attributes.pop("align")
parsed = nodes.Element()
directive.state.nested_parse(ViewList([caption], source=""), directive.content_offset, parsed)
caption_node = nodes.caption(parsed[0].rawsource, "", *parsed[0].children)
caption_node.source = parsed[0].source
caption_node.line = parsed[0].line
figure_node += caption_node
return figure_node
def align_spec(argument):
return directives.choice(argument, ("left", "center", "right"))
|
mermaid
|
python
|
pytorch__pytorch
|
torch/_export/serde/schema.py
|
{
"start": 14668,
"end": 14816
}
|
class ____:
# name is not the unique identifier of the node
name: Annotated[str, 10]
node: Annotated[Node, 20]
@dataclass
|
ExternKernelNode
|
python
|
facebookresearch__faiss
|
tests/test_rabitq.py
|
{
"start": 61126,
"end": 65910
}
|
class ____(unittest.TestCase):
"""Test IndexIVFRaBitQ with multi-bit support."""
def do_test_ivf_basic_operations(self, metric, nb_bits, qb):
"""Test IVF train/add/search pipeline."""
ds = create_test_dataset(d=128, nb=500, nq=20, nt=300)
k = 10
nlist = 16
# Create IVF index with rotation
index = create_index_ivf_rabitq_with_rotation(
ds.d, metric, nb_bits, nlist=nlist, qb=qb, nprobe=4
)
# Train
index.train(ds.get_train())
self.assertTrue(index.is_trained)
# Add
index.add(ds.get_database())
self.assertEqual(index.ntotal, ds.nb)
# Search
D, I = index.search(ds.get_queries(), k)
# Assert: Result shapes are correct
self.assertEqual(D.shape, (ds.nq, k))
self.assertEqual(I.shape, (ds.nq, k))
# Assert: Indices are valid
self.assertTrue(np.all(I >= 0))
self.assertTrue(np.all(I < ds.nb))
# Assert: Distances are finite
self.assertTrue(np.all(np.isfinite(D)))
def test_ivf_all_combinations(self):
"""Test IVF for subset of combinations."""
for metric in [faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT]:
for nb_bits in [1, 2, 4, 8]:
for qb in [0, 4, 8]:
with self.subTest(metric=metric, nb_bits=nb_bits, qb=qb):
self.do_test_ivf_basic_operations(metric, nb_bits, qb)
def do_test_ivf_nprobe_improves_recall(self, metric, nb_bits):
"""Test that higher nprobe improves recall."""
metric_str = 'L2' if metric == faiss.METRIC_L2 else 'IP'
ds = create_test_dataset(
d=128, nb=1000, nq=50, nt=500, metric=metric_str
)
k = 10
nlist = 32
# Ground truth
I_gt = ds.get_groundtruth(10)
# Create IVF index
quantizer = faiss.IndexFlat(ds.d, metric)
index_rbq = faiss.IndexIVFRaBitQ(
quantizer, ds.d, nlist, metric, True, nb_bits
)
rrot = faiss.RandomRotationMatrix(ds.d, ds.d)
rrot.init(123)
index = faiss.IndexPreTransform(rrot, index_rbq)
index.train(ds.get_train())
index.add(ds.get_database())
# Test different nprobe values
recalls = {}
for nprobe in [1, 2, 4, 8]:
index_rbq.nprobe = nprobe
_, I = index.search(ds.get_queries(), k)
recalls[nprobe] = compute_recall_at_k(I_gt, I)
# Assert: Monotonic improvement with nprobe
self.assertGreaterEqual(recalls[2], recalls[1])
self.assertGreaterEqual(recalls[4], recalls[2])
self.assertGreaterEqual(recalls[8], recalls[4])
def test_nprobe_effect(self):
"""Test nprobe effect for both metrics and selected nb_bits."""
for metric in [faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT]:
for nb_bits in [1, 4, 8]:
with self.subTest(metric=metric, nb_bits=nb_bits):
self.do_test_ivf_nprobe_improves_recall(metric, nb_bits)
def do_test_ivf_serialization(self, metric, nb_bits, qb):
"""Test IVF serialization preserves results."""
ds = create_test_dataset(d=64, nb=200, nq=10, nt=150)
k = 5
nlist = 16
# Create and populate IVF index
index1 = create_index_ivf_rabitq_with_rotation(
ds.d, metric, nb_bits, nlist=nlist, qb=qb, nprobe=4
)
index1.train(ds.get_train())
index1.add(ds.get_database())
# Search before serialization
D1, I1 = index1.search(ds.get_queries(), k)
# Serialize and deserialize
index_bytes = faiss.serialize_index(index1)
index2 = faiss.deserialize_index(index_bytes)
# Assert: Parameters preserved
self.assertEqual(index2.d, ds.d)
self.assertEqual(index2.ntotal, ds.nb)
self.assertTrue(index2.is_trained)
# Search after deserialization using search parameters
params = faiss.IVFRaBitQSearchParameters()
params.qb = qb
params.centered = False
params.nprobe = 4
D2, I2 = index2.search(ds.get_queries(), k, params=params)
# Assert: Results are identical
np.testing.assert_array_equal(I1, I2)
np.testing.assert_allclose(D1, D2, rtol=1e-5)
def test_ivf_serialization(self):
"""Test IVF serialization for multiple configurations."""
for metric in [faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT]:
for nb_bits in [1, 2, 4, 8, 9]:
for qb in [0, 4, 8]:
with self.subTest(metric=metric, nb_bits=nb_bits, qb=qb):
self.do_test_ivf_serialization(metric, nb_bits, qb)
|
TestMultiBitIndexIVFRaBitQ
|
python
|
pypa__pip
|
src/pip/_internal/distributions/wheel.py
|
{
"start": 392,
"end": 1364
}
|
class ____(AbstractDistribution):
"""Represents a wheel distribution.
This does not need any preparation as wheels can be directly unpacked.
"""
@property
def build_tracker_id(self) -> str | None:
return None
def get_metadata_distribution(self) -> BaseDistribution:
"""Loads the metadata from the wheel file into memory and returns a
Distribution that uses it, not relying on the wheel file or
requirement.
"""
assert self.req.local_file_path, "Set as part of preparation during download"
assert self.req.name, "Wheels are never unnamed"
wheel = FilesystemWheel(self.req.local_file_path)
return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
def prepare_distribution_metadata(
self,
build_env_installer: BuildEnvironmentInstaller,
build_isolation: bool,
check_build_deps: bool,
) -> None:
pass
|
WheelDistribution
|
python
|
ray-project__ray
|
rllib/examples/_old_api_stack/policy/random_policy.py
|
{
"start": 402,
"end": 3214
}
|
class ____(Policy):
"""Hand-coded policy that returns random actions."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Whether for compute_actions, the bounds given in action_space
# should be ignored (default: False). This is to test action-clipping
# and any Env's reaction to bounds breaches.
if self.config.get("ignore_action_bounds", False) and isinstance(
self.action_space, Box
):
self.action_space_for_sampling = Box(
-float("inf"),
float("inf"),
shape=self.action_space.shape,
dtype=self.action_space.dtype,
)
else:
self.action_space_for_sampling = self.action_space
@override(Policy)
def init_view_requirements(self):
super().init_view_requirements()
# Disable for_training and action attributes for SampleBatch.INFOS column
# since it can not be properly batched.
vr = self.view_requirements[SampleBatch.INFOS]
vr.used_for_training = False
vr.used_for_compute_actions = False
@override(Policy)
def compute_actions(
self,
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
**kwargs,
):
# Alternatively, a numpy array would work here as well.
# e.g.: np.array([random.choice([0, 1])] * len(obs_batch))
obs_batch_size = len(tree.flatten(obs_batch)[0])
return (
[self.action_space_for_sampling.sample() for _ in range(obs_batch_size)],
[],
{},
)
@override(Policy)
def learn_on_batch(self, samples):
"""No learning."""
return {}
@override(Policy)
def compute_log_likelihoods(
self,
actions,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
**kwargs,
):
return np.array([random.random()] * len(obs_batch))
@override(Policy)
def get_weights(self) -> ModelWeights:
"""No weights to save."""
return {}
@override(Policy)
def set_weights(self, weights: ModelWeights) -> None:
"""No weights to set."""
pass
@override(Policy)
def _get_dummy_batch_from_view_requirements(self, batch_size: int = 1):
return SampleBatch(
{
SampleBatch.OBS: tree.map_structure(
lambda s: s[None], self.observation_space.sample()
),
}
)
|
RandomPolicy
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 220319,
"end": 223216
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3]", L_v_: "f32[3, 3]"):
l_x_ = L_x_
l_v_ = L_v_
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled = None
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
_make_dual: "f32[3, 3]" = torch._make_dual(l_x_, l_v_, level = 0); l_x_ = l_v_ = None
sin: "f32[3, 3]" = _make_dual.sin(); _make_dual = None
result_duals: "f32[]" = sin.sum(); sin = None
_unpack_dual = torch._unpack_dual(result_duals, level = 0); result_duals = None
primal: "f32[]" = _unpack_dual[0]
dual: "f32[]" = _unpack_dual[1]; _unpack_dual = None
primals_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(primal, 1); primal = None
tangents_out_unflatten: "f32[]" = torch._C._functorch._unwrap_for_grad(dual, 1); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_2 = torch._C._set_fwd_grad_enabled(False); _set_fwd_grad_enabled_2 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
_set_fwd_grad_enabled_3 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_3 = None
return (primals_out_unflatten, tangents_out_unflatten)
""",
)
def test_jvp_two_tensors_disable_enable_disable_grad(self):
counters.clear()
def fn(x):
return x.sin().sum()
def wrapper_fn(x, v):
with torch.autograd.forward_ad._set_fwd_grad_enabled(False): # (1)
with torch.autograd.forward_ad._set_fwd_grad_enabled(True): # (2)
with torch.autograd.forward_ad._set_fwd_grad_enabled(False): # (3)
return torch.func.jvp(fn, (x,), (v,)) # (4)
# Start True
# False (1)
# True (2)
# False (3)
# True (4)
# True (undo 3)
# False (undo 2)
# True (undo 1)
x = torch.randn(3, 3)
v = torch.randn(3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-score-from-grid-operations.py
|
{
"start": 1499,
"end": 2849
}
|
class ____(object):
def maximumScore(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
prefix = [0]*(len(grid)+1)
for i in xrange(len(grid)):
prefix[i+1] = prefix[i]+grid[i][0]
# dp[0][i]: the maximum score from 0 to the current column, and the current column has i black cells, without scoring the white cells of the current column
# dp[1][i]: the maximum score from 0 to the current column, and the current column has i black cells, with scoring the white cells of the current column
dp = [[0]*(len(grid)+1) for _ in xrange(2)]
for j in xrange(1, len(grid[0])):
new_prefix = [0]*(len(grid)+1)
for i in xrange(len(grid)):
new_prefix[i+1] = new_prefix[i]+grid[i][j]
new_dp = [[0]*(len(grid)+1) for _ in xrange(2)]
for i in xrange(len(grid)+1):
for k in xrange(len(grid)+1):
new_dp[0][i] = max(new_dp[0][i], max(prefix[i]-prefix[k], 0)+dp[0][k])
new_dp[1][i] = max(new_dp[1][i], dp[1][k]+max(new_prefix[k]-new_prefix[i], 0))
new_dp[0][i] = max(new_dp[0][i], max(dp[1]))
new_dp[1][i] = max(new_dp[1][i], new_dp[0][i])
dp, prefix = new_dp, new_prefix
return max(dp[1])
|
Solution2
|
python
|
joke2k__faker
|
faker/providers/ssn/no_NO/__init__.py
|
{
"start": 775,
"end": 3297
}
|
class ____(SsnProvider):
scale1 = (3, 7, 6, 1, 8, 9, 4, 5, 2)
scale2 = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
def ssn(self, dob: Optional[str] = None, gender: Optional[SexLiteral] = None) -> str:
"""
Returns 11 character Norwegian personal identity code (Fødselsnummer).
A Norwegian personal identity code consists of 11 digits, without any
whitespace or other delimiters. The form is DDMMYYIIICC, where III is
a serial number separating persons born oh the same date with different
intervals depending on the year they are born. CC is two checksums.
https://en.wikipedia.org/wiki/National_identification_number#Norway
:param dob: date of birth as a "YYYYMMDD" string
:type dob: str
:param gender: gender of the person - "F" for female, M for male.
:type gender: str
:return: Fødselsnummer in str format (11 digs)
:rtype: str
"""
if dob:
birthday = datetime.datetime.strptime(dob, "%Y%m%d")
else:
age = datetime.timedelta(days=self.generator.random.randrange(18 * 365, 90 * 365))
birthday = datetime.datetime.now() - age
if not gender:
gender = self.generator.random.choice(("F", "M"))
elif gender not in ("F", "M"):
raise ValueError("Gender must be one of F or M.")
while True:
if 1900 <= birthday.year <= 1999:
suffix = self.generator.random.randrange(0, 49)
elif 1854 <= birthday.year <= 1899:
suffix = self.generator.random.randrange(50, 74)
elif 2000 <= birthday.year <= 2039:
suffix = self.generator.random.randrange(50, 99)
elif 1940 <= birthday.year <= 1999:
suffix = self.generator.random.randrange(90, 99)
if gender == "F":
gender_num = self.generator.random.choice((0, 2, 4, 6, 8))
elif gender == "M":
gender_num = self.generator.random.choice((1, 3, 5, 7, 9))
pnr = f"{birthday:%d%m%y}{suffix:02}{gender_num}"
pnr_nums = [int(ch) for ch in pnr]
k1 = checksum(Provider.scale1, pnr_nums)
k2 = checksum(Provider.scale2, pnr_nums + [k1])
# Checksums with a value of 10 is rejected.
# https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
if k1 == 10 or k2 == 10:
continue
pnr += f"{k1}{k2}"
return pnr
|
Provider
|
python
|
spyder-ide__spyder
|
spyder/plugins/externalterminal/widgets/run_conf.py
|
{
"start": 4035,
"end": 8324
}
|
class ____(RunExecutorConfigurationGroup):
"""External terminal shell run configuration options."""
def __init__(
self,
parent,
context: Context, input_extension: str,
input_metadata: RunConfigurationMetadata
):
super().__init__(parent, context, input_extension, input_metadata)
# --- Interpreter ---
interpreter_group = QGroupBox(_("Interpreter"))
interpreter_layout = QVBoxLayout(interpreter_group)
interpreter_label = QLabel(_("Shell interpreter:"))
self.interpreter_edit = QLineEdit(self)
browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
browse_btn.setToolTip(_("Select interpreter"))
browse_btn.clicked.connect(self.select_interpreter)
browse_btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
shell_layout = QHBoxLayout()
shell_layout.addWidget(interpreter_label)
shell_layout.addWidget(self.interpreter_edit)
shell_layout.addWidget(browse_btn)
interpreter_layout.addLayout(shell_layout)
self.interpreter_opts_cb = QCheckBox(_("Interpreter arguments:"))
self.interpreter_opts_edit = QLineEdit(self)
self.interpreter_opts_edit.setMinimumWidth(250)
self.interpreter_opts_cb.toggled.connect(
self.interpreter_opts_edit.setEnabled
)
self.interpreter_opts_edit.setEnabled(False)
interpreter_opts_layout = QHBoxLayout()
interpreter_opts_layout.addWidget(self.interpreter_opts_cb)
interpreter_opts_layout.addWidget(self.interpreter_opts_edit)
interpreter_layout.addLayout(interpreter_opts_layout)
# --- Script ---
script_group = QGroupBox(_('Script'))
script_layout = QVBoxLayout(script_group)
self.script_opts_cb = QCheckBox(_("Script arguments:"))
self.script_opts_edit = QLineEdit(self)
self.script_opts_cb.toggled.connect(
self.script_opts_edit.setEnabled
)
self.script_opts_edit.setEnabled(False)
script_args_layout = QHBoxLayout()
script_args_layout.addWidget(self.script_opts_cb)
script_args_layout.addWidget(self.script_opts_edit)
script_layout.addLayout(script_args_layout)
self.close_after_exec_cb = QCheckBox(
_("Close terminal after execution")
)
script_layout.addWidget(self.close_after_exec_cb)
layout = QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(interpreter_group)
layout.addWidget(script_group)
layout.addStretch(100)
def select_interpreter(self):
"""Select an interpreter."""
basedir = str(self.interpreter_edit.text())
if not osp.isdir(basedir):
basedir = getcwd_or_home()
file, __ = getopenfilename(self, _("Select executable"), basedir)
if file:
self.interpreter_edit.setText(file)
def set_configuration(self, config: ExtTerminalShConfiguration):
interpreter = config['interpreter']
interpreter_opts_enabled = config['interpreter_opts_enabled']
interpreter_opts = config['interpreter_opts']
script_opts_enabled = config['script_opts_enabled']
script_opts = config['script_opts']
close_after_exec = config['close_after_exec']
self.interpreter_edit.setText(interpreter)
self.interpreter_opts_cb.setChecked(interpreter_opts_enabled)
self.interpreter_opts_edit.setText(interpreter_opts)
self.script_opts_cb.setChecked(script_opts_enabled)
self.script_opts_edit.setText(script_opts)
self.close_after_exec_cb.setChecked(close_after_exec)
def get_configuration(self) -> ExtTerminalShConfiguration:
return {
'interpreter': self.interpreter_edit.text(),
'interpreter_opts_enabled': self.interpreter_opts_cb.isChecked(),
'interpreter_opts': self.interpreter_opts_edit.text(),
'script_opts_enabled': self.script_opts_cb.isChecked(),
'script_opts': self.script_opts_edit.text(),
'close_after_exec': self.close_after_exec_cb.isChecked()
}
|
GenericExternalTerminalShConfiguration
|
python
|
neetcode-gh__leetcode
|
python/0167-two-sum-ii-input-array-is-sorted.py
|
{
"start": 0,
"end": 355
}
|
class ____:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
l, r = 0, len(numbers) - 1
while l < r:
curSum = numbers[l] + numbers[r]
if curSum > target:
r -= 1
elif curSum < target:
l += 1
else:
return [l + 1, r + 1]
|
Solution
|
python
|
ray-project__ray
|
release/ray_release/tests/test_cluster_manager.py
|
{
"start": 1325,
"end": 1845
}
|
class ____:
def __init__(
self,
callback: Callable[[], None],
finish_after: float,
before: APIDict,
after: APIDict,
):
self.callback = callback
self.finish_after = time.monotonic() + finish_after
self.before = before
self.after = after
def __call__(self, *args, **kwargs):
self.callback()
if time.monotonic() > self.finish_after:
return self.after
else:
return self.before
|
_DelayedResponse
|
python
|
django__django
|
tests/model_formsets/models.py
|
{
"start": 5939,
"end": 6161
}
|
class ____(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent, models.CASCADE)
|
UUIDPKChild
|
python
|
ray-project__ray
|
doc/source/templates/03_serving_stable_diffusion/app.py
|
{
"start": 1159,
"end": 1894
}
|
class ____:
def __init__(self):
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(
model_id, subfolder="scheduler"
)
self.pipe = StableDiffusionPipeline.from_pretrained(
model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16
)
self.pipe = self.pipe.to("cuda")
def generate(self, prompt: str, img_size: int = 512):
assert len(prompt), "prompt parameter cannot be empty"
logger.info("Prompt: [%s]", prompt)
image = self.pipe(prompt, height=img_size, width=img_size).images[0]
return image
entrypoint = APIIngress.bind(StableDiffusionV2.bind())
|
StableDiffusionV2
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_lookup.py
|
{
"start": 38387,
"end": 38806
}
|
class ____(int):
def __init__(self, value: int, /) -> None:
if not isinstance(value, int):
raise TypeError
@given(...)
def test_from_type_resolves_required_posonly_args(n: CustomInteger):
# st.builds() does not infer for positional arguments, but st.from_type()
# does. See e.g. https://stackoverflow.com/q/79199376/ for motivation.
assert isinstance(n, CustomInteger)
|
CustomInteger
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/ticker.py
|
{
"start": 11464,
"end": 12158
}
|
class ____(Formatter):
"""
Use an old-style ('%' operator) format string to format the tick.
The format string should have a single variable format (%) in it.
It will be applied to the value (not the position) of the tick.
Negative numeric values (e.g., -1) will use a dash, not a Unicode minus;
use mathtext to get a Unicode minus by wrapping the format specifier with $
(e.g. "$%g$").
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
"""
Return the formatted label string.
Only the value *x* is formatted. The position is ignored.
"""
return self.fmt % x
|
FormatStrFormatter
|
python
|
pytorch__pytorch
|
torch/fx/experimental/migrate_gradual_types/constraint.py
|
{
"start": 11986,
"end": 13407
}
|
class ____(Constraint):
def __init__(
self,
maxpool_result,
input_var,
kernel,
padding,
stride,
dilation,
matching_constraint_vars,
):
"""
:param maxpool_result: the result of maxpool
:param input_var: input to convolution
:param kernel: kernel tuple
"""
self.maxpool_result = maxpool_result
self.input_var = input_var
self.kernel = kernel
self.padding = padding
self.stride = stride
self.dilation = dilation
self.matching_constraint = matching_constraint_vars
def __repr__(self):
return (
f"{self.maxpool_result} ="
f" calc-maxpool({self.input_var},"
f" {self.kernel}, "
f"{self.padding}, {self.stride},"
f" {self.dilation})"
)
def __eq__(self, other):
if isinstance(other, CalcMaxPool):
return (
self.maxpool_result == other.maxpool_result
and self.input_var == other.input_var
and self.kernel == other.kernel
and self.padding == other.padding
and self.stride == other.stride
and self.dilation == other.dilation
and self.matching_constraint == other.matching_constraint
)
else:
return False
|
CalcMaxPool
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_sm.py
|
{
"start": 426,
"end": 2876
}
|
class ____(CUDATestCase):
def test_issue_953_sm_linkage_conflict(self):
@cuda.jit(device=True)
def inner():
inner_arr = cuda.shared.array(1, dtype=int32) # noqa: F841
@cuda.jit
def outer():
outer_arr = cuda.shared.array(1, dtype=int32) # noqa: F841
inner()
outer[1, 1]()
def _check_shared_array_size(self, shape, expected):
@cuda.jit
def s(a):
arr = cuda.shared.array(shape, dtype=int32)
a[0] = arr.size
result = np.zeros(1, dtype=np.int32)
s[1, 1](result)
self.assertEqual(result[0], expected)
def test_issue_1051_shared_size_broken_1d(self):
self._check_shared_array_size(2, 2)
def test_issue_1051_shared_size_broken_2d(self):
self._check_shared_array_size((2, 3), 6)
def test_issue_1051_shared_size_broken_3d(self):
self._check_shared_array_size((2, 3, 4), 24)
def _check_shared_array_size_fp16(self, shape, expected, ty):
@cuda.jit
def s(a):
arr = cuda.shared.array(shape, dtype=ty)
a[0] = arr.size
result = np.zeros(1, dtype=np.float16)
s[1, 1](result)
self.assertEqual(result[0], expected)
def test_issue_fp16_support(self):
self._check_shared_array_size_fp16(2, 2, types.float16)
self._check_shared_array_size_fp16(2, 2, np.float16)
def test_issue_2393(self):
"""
Test issue of warp misalign address due to nvvm not knowing the
alignment(? but it should have taken the natural alignment of the type)
"""
num_weights = 2
num_blocks = 48
examples_per_block = 4
threads_per_block = 1
@cuda.jit
def costs_func(d_block_costs):
s_features = cuda.shared.array((examples_per_block, num_weights),
float64)
s_initialcost = cuda.shared.array(7, float64) # Bug
threadIdx = cuda.threadIdx.x
prediction = 0
for j in range(num_weights):
prediction += s_features[threadIdx, j]
d_block_costs[0] = s_initialcost[0] + prediction
block_costs = np.zeros(num_blocks, dtype=np.float64)
d_block_costs = cuda.to_device(block_costs)
costs_func[num_blocks, threads_per_block](d_block_costs)
cuda.synchronize()
|
TestSharedMemoryIssue
|
python
|
apache__airflow
|
airflow-core/src/airflow/timetables/simple.py
|
{
"start": 3668,
"end": 5203
}
|
class ____(_TrivialTimetable):
"""
Timetable that schedules continually, while still respecting start_date and end_date.
This corresponds to ``schedule="@continuous"``.
"""
description: str = "As frequently as possible, but only one run at a time."
active_runs_limit = 1 # Continuous DAGRuns should be constrained to one run at a time
@property
def summary(self) -> str:
return "@continuous"
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
if restriction.earliest is None: # No start date, won't run.
return None
current_time = timezone.coerce_datetime(timezone.utcnow())
if last_automated_data_interval is not None: # has already run once
if last_automated_data_interval.end > current_time: # start date is future
start = restriction.earliest
elapsed = last_automated_data_interval.end - last_automated_data_interval.start
end = start + elapsed.as_timedelta()
else:
start = last_automated_data_interval.end
end = current_time
else: # first run
start = restriction.earliest
end = max(restriction.earliest, current_time)
if restriction.latest is not None and end > restriction.latest:
return None
return DagRunInfo.interval(start, end)
|
ContinuousTimetable
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/venv/backends.py
|
{
"start": 415,
"end": 469
}
|
class ____(ProjectError):
pass
|
VirtualenvCreateError
|
python
|
pydata__xarray
|
xarray/tests/test_sparse.py
|
{
"start": 17913,
"end": 28020
}
|
class ____:
@pytest.fixture(autouse=True)
def setUp(self):
self.sp_ar = sparse.random((4, 6), random_state=0, density=0.5)
self.sp_xr = xr.DataArray(
self.sp_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
self.ds_ar = self.sp_ar.todense()
self.ds_xr = xr.DataArray(
self.ds_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
def test_to_dataset_roundtrip(self):
x = self.sp_xr
assert_equal(x, x.to_dataset("x").to_dataarray("x"))
def test_align(self):
a1 = xr.DataArray(
sparse.COO.from_numpy(np.arange(4)),
dims=["x"],
coords={"x": ["a", "b", "c", "d"]},
)
b1 = xr.DataArray(
sparse.COO.from_numpy(np.arange(4)),
dims=["x"],
coords={"x": ["a", "b", "d", "e"]},
)
a2, b2 = xr.align(a1, b1, join="inner")
assert isinstance(a2.data, sparse.SparseArray)
assert isinstance(b2.data, sparse.SparseArray)
assert np.all(a2.coords["x"].data == ["a", "b", "d"])
assert np.all(b2.coords["x"].data == ["a", "b", "d"])
@pytest.mark.xfail(
reason="COO objects currently do not accept more than one "
"iterable index at a time"
)
def test_align_2d(self):
A1 = xr.DataArray(
self.sp_ar,
dims=["x", "y"],
coords={
"x": np.arange(self.sp_ar.shape[0]),
"y": np.arange(self.sp_ar.shape[1]),
},
)
A2 = xr.DataArray(
self.sp_ar,
dims=["x", "y"],
coords={
"x": np.arange(1, self.sp_ar.shape[0] + 1),
"y": np.arange(1, self.sp_ar.shape[1] + 1),
},
)
B1, B2 = xr.align(A1, A2, join="inner")
assert np.all(B1.coords["x"] == np.arange(1, self.sp_ar.shape[0]))
assert np.all(B1.coords["y"] == np.arange(1, self.sp_ar.shape[0]))
assert np.all(B1.coords["x"] == B2.coords["x"])
assert np.all(B1.coords["y"] == B2.coords["y"])
def test_align_outer(self):
a1 = xr.DataArray(
sparse.COO.from_numpy(np.arange(4)),
dims=["x"],
coords={"x": ["a", "b", "c", "d"]},
)
b1 = xr.DataArray(
sparse.COO.from_numpy(np.arange(4)),
dims=["x"],
coords={"x": ["a", "b", "d", "e"]},
)
a2, b2 = xr.align(a1, b1, join="outer")
assert isinstance(a2.data, sparse.SparseArray)
assert isinstance(b2.data, sparse.SparseArray)
assert np.all(a2.coords["x"].data == ["a", "b", "c", "d", "e"])
assert np.all(b2.coords["x"].data == ["a", "b", "c", "d", "e"])
def test_concat(self):
ds1 = xr.Dataset(data_vars={"d": self.sp_xr})
ds2 = xr.Dataset(data_vars={"d": self.sp_xr})
ds3 = xr.Dataset(data_vars={"d": self.sp_xr})
out = xr.concat([ds1, ds2, ds3], dim="x")
assert_sparse_equal(
out["d"].data,
sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=0),
)
out_concat = xr.concat([self.sp_xr, self.sp_xr, self.sp_xr], dim="y")
assert_sparse_equal(
out_concat.data,
sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=1),
)
def test_stack(self):
arr = make_xrarray({"w": 2, "x": 3, "y": 4})
stacked = arr.stack(z=("x", "y"))
z = pd.MultiIndex.from_product(
[list(range(3)), list(range(4))], names=["x", "y"]
)
expected = xr.DataArray(
arr.data.reshape((2, -1)), {"w": [0, 1], "z": z}, dims=["w", "z"]
)
assert_equal(expected, stacked)
roundtripped = stacked.unstack()
assert_identical(arr, roundtripped)
def test_dataarray_repr(self):
a = xr.DataArray(
sparse.COO.from_numpy(np.ones(4)),
dims=["x"],
coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))},
)
expected = dedent(
"""\
<xarray.DataArray (x: 4)> Size: 64B
<COO: shape=(4,), dtype=float64, nnz=4, fill_value=0.0>
Coordinates:
y (x) int64 48B <COO: nnz=3, fill_value=0>
Dimensions without coordinates: x"""
)
assert expected == repr(a)
def test_dataset_repr(self):
ds = xr.Dataset(
data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))},
coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))},
)
expected = dedent(
"""\
<xarray.Dataset> Size: 112B
Dimensions: (x: 4)
Coordinates:
y (x) int64 48B <COO: nnz=3, fill_value=0>
Dimensions without coordinates: x
Data variables:
a (x) float64 64B <COO: nnz=4, fill_value=0.0>"""
)
assert expected == repr(ds)
@requires_dask
def test_sparse_dask_dataset_repr(self):
ds = xr.Dataset(
data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))}
).chunk()
if Version(sparse.__version__) >= Version("0.16.0"):
meta = "sparse.numba_backend._coo.core.COO"
else:
meta = "sparse.COO"
expected = dedent(
f"""\
<xarray.Dataset> Size: 32B
Dimensions: (x: 4)
Dimensions without coordinates: x
Data variables:
a (x) float64 32B dask.array<chunksize=(4,), meta={meta}>"""
)
assert expected == repr(ds)
def test_dataarray_pickle(self):
a1 = xr.DataArray(
sparse.COO.from_numpy(np.ones(4)),
dims=["x"],
coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))},
)
a2 = pickle.loads(pickle.dumps(a1))
assert_identical(a1, a2)
def test_dataset_pickle(self):
ds1 = xr.Dataset(
data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))},
coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))},
)
ds2 = pickle.loads(pickle.dumps(ds1))
assert_identical(ds1, ds2)
def test_coarsen(self):
a1 = self.ds_xr
a2 = self.sp_xr
m1 = a1.coarsen(x=2, boundary="trim").mean() # type: ignore[attr-defined]
m2 = a2.coarsen(x=2, boundary="trim").mean() # type: ignore[attr-defined]
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail(reason="No implementation of np.pad")
def test_rolling(self):
a1 = self.ds_xr
a2 = self.sp_xr
m1 = a1.rolling(x=2, center=True).mean()
m2 = a2.rolling(x=2, center=True).mean()
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail(reason="Coercion to dense")
def test_rolling_exp(self):
a1 = self.ds_xr
a2 = self.sp_xr
m1 = a1.rolling_exp(x=2, center=True).mean()
m2 = a2.rolling_exp(x=2, center=True).mean()
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail(reason="No implementation of np.einsum")
def test_dot(self):
a1 = self.sp_xr.dot(self.sp_xr[0])
a2 = self.sp_ar.dot(self.sp_ar[0])
assert_equal(a1, a2)
@pytest.mark.xfail(reason="Groupby reductions produce dense output")
def test_groupby(self):
x1 = self.ds_xr
x2 = self.sp_xr
m1 = x1.groupby("x").mean(...)
m2 = x2.groupby("x").mean(...)
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail(reason="Groupby reductions produce dense output")
def test_groupby_first(self):
x = self.sp_xr.copy()
x.coords["ab"] = ("x", ["a", "a", "b", "b"])
x.groupby("ab").first()
x.groupby("ab").first(skipna=False)
@pytest.mark.xfail(reason="Groupby reductions produce dense output")
def test_groupby_bins(self):
x1 = self.ds_xr
x2 = self.sp_xr
m1 = x1.groupby_bins("x", bins=[0, 3, 7, 10]).sum(...)
m2 = x2.groupby_bins("x", bins=[0, 3, 7, 10]).sum(...)
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail(reason="Resample produces dense output")
def test_resample(self):
t1 = xr.DataArray(
np.linspace(0, 11, num=12),
coords=[
pd.date_range("1999-12-15", periods=12, freq=pd.DateOffset(months=1))
],
dims="time",
)
t2 = t1.copy()
t2.data = sparse.COO(t2.data)
m1 = t1.resample(time="QS-DEC").mean()
m2 = t2.resample(time="QS-DEC").mean()
assert isinstance(m2.data, sparse.SparseArray)
assert np.allclose(m1.data, m2.data.todense())
@pytest.mark.xfail
def test_reindex(self):
x1 = self.ds_xr
x2 = self.sp_xr
for kwargs in [
{"x": [2, 3, 4]},
{"x": [1, 100, 2, 101, 3]},
{"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
]:
m1 = x1.reindex(**kwargs) # type: ignore[arg-type]
m2 = x2.reindex(**kwargs) # type: ignore[arg-type]
assert np.allclose(m1, m2, equal_nan=True)
@pytest.mark.xfail
def test_merge(self):
x = self.sp_xr
y = xr.merge([x, x.rename("bar")]).to_dataarray()
assert isinstance(y, sparse.SparseArray)
@pytest.mark.xfail
def test_where(self):
a = np.arange(10)
cond = a > 3
xr.DataArray(a).where(cond)
s = sparse.COO.from_numpy(a)
cond2 = s > 3
xr.DataArray(s).where(cond2)
x = xr.DataArray(s)
cond3: DataArray = x > 3
x.where(cond3)
|
TestSparseDataArrayAndDataset
|
python
|
sympy__sympy
|
sympy/solvers/diophantine/diophantine.py
|
{
"start": 36243,
"end": 122444
}
|
class ____(DiophantineEquationType):
"""
Representation of the diophantine equation
`x_{1}^e + x_{2}^e + . . . + x_{n}^e - k = 0`
where `e` is an even, integer power.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import GeneralSumOfEvenPowers
>>> from sympy.abc import a, b
>>> GeneralSumOfEvenPowers(a**4 + b**4 - (2**4 + 3**4)).solve()
{(2, 3)}
"""
name = 'general_sum_of_even_powers'
def matches(self):
if not self.total_degree > 3:
return False
if self.total_degree % 2 != 0:
return False
if not all(k.is_Pow and k.exp == self.total_degree for k in self.coeff if k != 1):
return False
return all(self.coeff[k] == 1 for k in self.coeff if k != 1)
def solve(self, parameters=None, limit=1):
self.pre_solve(parameters)
var = self.free_symbols
coeff = self.coeff
p = None
for q in coeff.keys():
if q.is_Pow and coeff[q]:
p = q.exp
k = len(var)
n = -coeff[1]
result = DiophantineSolutionSet(var, parameters=self.parameters)
if n < 0 or limit < 1:
return result
sign = [-1 if x.is_nonpositive else 1 for x in var]
negs = sign.count(-1) != 0
for took, t in enumerate(power_representation(n, p, k), 1):
if negs:
result.add([sign[i]*j for i, j in enumerate(t)])
else:
result.add(t)
if took == limit:
break
return result
# these types are known (but not necessarily handled)
# note that order is important here (in the current solver state)
all_diop_classes = [
Linear,
Univariate,
BinaryQuadratic,
InhomogeneousTernaryQuadratic,
HomogeneousTernaryQuadraticNormal,
HomogeneousTernaryQuadratic,
InhomogeneousGeneralQuadratic,
HomogeneousGeneralQuadratic,
GeneralSumOfSquares,
GeneralPythagorean,
CubicThue,
GeneralSumOfEvenPowers,
]
diop_known = {diop_class.name for diop_class in all_diop_classes}
def _remove_gcd(*x):
try:
g = igcd(*x)
except ValueError:
fx = list(filter(None, x))
if len(fx) < 2:
return x
g = igcd(*[i.as_content_primitive()[0] for i in fx])
except TypeError:
raise TypeError('_remove_gcd(a,b,c) or _remove_gcd(*container)')
if g == 1:
return x
return tuple([i//g for i in x])
def _rational_pq(a, b):
# return `(numer, denom)` for a/b; sign in numer and gcd removed
return _remove_gcd(sign(b)*a, abs(b))
def _nint_or_floor(p, q):
# return nearest int to p/q; in case of tie return floor(p/q)
w, r = divmod(p, q)
if abs(r) <= abs(q)//2:
return w
return w + 1
def _odd(i):
return i % 2 != 0
def _even(i):
return i % 2 == 0
def diophantine(eq, param=symbols("t", integer=True), syms=None,
permute=False):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
Explanation
===========
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
independently and combined. Each term is solved by calling
``diop_solve()``. (Although it is possible to call ``diop_solve()``
directly, one must be careful to pass an equation in the correct
form and to interpret the output correctly; ``diophantine()`` is
the public-facing function to use in general.)
Output of ``diophantine()`` is a set of tuples. The elements of the
tuple are the solutions for each variable in the equation and
are arranged according to the alphabetic ordering of the variables.
e.g. For an equation with two variables, `a` and `b`, the first
element of the tuple is the solution for `a` and the second for `b`.
Usage
=====
``diophantine(eq, t, syms)``: Solve the diophantine
equation ``eq``.
``t`` is the optional parameter to be used by ``diop_solve()``.
``syms`` is an optional list of symbols which determines the
order of the elements in the returned tuple.
By default, only the base solution is returned. If ``permute`` is set to
True then permutations of the base solution and/or permutations of the
signs of the values will be returned when applicable.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy import diophantine
>>> from sympy.abc import a, b
>>> eq = a**4 + b**4 - (2**4 + 3**4)
>>> diophantine(eq)
{(2, 3)}
>>> diophantine(eq, permute=True)
{(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
{(t_0, -t_0), (t_0, t_0)}
>>> diophantine(x*(2*x + 3*y - z))
{(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
>>> diophantine(x**2 + 3*x*y + 4*x)
{(0, n1), (-3*t_0 - 4, t_0)}
See Also
========
diop_solve
sympy.utilities.iterables.permute_signs
sympy.utilities.iterables.signed_permutations
"""
eq = _sympify(eq)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
try:
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
if syms:
if not is_sequence(syms):
raise TypeError(
'syms should be given as a sequence, e.g. a list')
syms = [i for i in syms if i in var]
if syms != var:
dict_sym_index = dict(zip(syms, range(len(syms))))
return {tuple([t[dict_sym_index[i]] for i in var])
for t in diophantine(eq, param, permute=permute)}
n, d = eq.as_numer_denom()
if n.is_number:
return set()
if not d.is_number:
dsol = diophantine(d)
good = diophantine(n) - dsol
return {s for s in good if _mexpand(d.subs(zip(var, s)))}
eq = factor_terms(n)
assert not eq.is_number
eq = eq.as_independent(*var, as_Add=False)[1]
p = Poly(eq)
assert not any(g.is_number for g in p.gens)
eq = p.as_expr()
assert eq.is_polynomial()
except (GeneratorsNeeded, AssertionError):
raise TypeError(filldedent('''
Equation should be a polynomial with Rational coefficients.'''))
# permute only sign
do_permute_signs = False
# permute sign and values
do_permute_signs_var = False
# permute few signs
permute_few_signs = False
try:
# if we know that factoring should not be attempted, skip
# the factoring step
v, c, t = classify_diop(eq)
# check for permute sign
if permute:
len_var = len(v)
permute_signs_for = [
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name]
permute_signs_check = [
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
BinaryQuadratic.name]
if t in permute_signs_for:
do_permute_signs_var = True
elif t in permute_signs_check:
# if all the variables in eq have even powers
# then do_permute_sign = True
if len_var == 3:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y), (x, z), (y, z)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = (a[0]*a[1] for a in var_mul)
# if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
# `xy_coeff` => True and do_permute_sign => False.
# Means no permuted solution.
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any((xy_coeff, x_coeff)):
# means only x**2, y**2, z**2, const is present
do_permute_signs = True
elif not x_coeff:
permute_few_signs = True
elif len_var == 2:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = (x[0]*x[1] for x in var_mul)
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any((xy_coeff, x_coeff)):
# means only x**2, y**2 and const is present
# so we can get more soln by permuting this soln.
do_permute_signs = True
elif not x_coeff:
# when coeff(x), coeff(y) is not present then signs of
# x, y can be permuted such that their sign are same
# as sign of x*y.
# e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
# 2. (-x_vall, y_val)=> (-x_val,y_val), (x_val,-y_val)
permute_few_signs = True
if t == 'general_sum_of_squares':
# trying to factor such expressions will sometimes hang
terms = [(eq, 1)]
else:
raise TypeError
except (TypeError, NotImplementedError):
fl = factor_list(eq)
if fl[0].is_Rational and fl[0] != 1:
return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
terms = fl[1]
sols = set()
for term in terms:
base, _ = term
var_t, _, eq_type = classify_diop(base, _dict=False)
_, base = signsimp(base, evaluate=False).as_coeff_Mul()
solution = diop_solve(base, param)
if eq_type in [
Linear.name,
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
GeneralPythagorean.name]:
sols.add(merge_solution(var, var_t, solution))
elif eq_type in [
BinaryQuadratic.name,
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name,
Univariate.name]:
sols.update(merge_solution(var, var_t, sol) for sol in solution)
else:
raise NotImplementedError('unhandled type: %s' % eq_type)
sols.discard(())
null = tuple([0]*len(var))
# if there is no solution, return trivial solution
if not sols and eq.subs(zip(var, null)).is_zero:
if all(check_assumptions(val, **s.assumptions0) is not False for val, s in zip(null, var)):
sols.add(null)
final_soln = set()
for sol in sols:
if all(int_valued(s) for s in sol):
if do_permute_signs:
permuted_sign = set(permute_signs(sol))
final_soln.update(permuted_sign)
elif permute_few_signs:
lst = list(permute_signs(sol))
lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
permuted_sign = set(lst)
final_soln.update(permuted_sign)
elif do_permute_signs_var:
permuted_sign_var = set(signed_permutations(sol))
final_soln.update(permuted_sign_var)
else:
final_soln.add(sol)
else:
final_soln.add(sol)
return final_soln
def merge_solution(var, var_t, solution):
"""
This is used to construct the full solution from the solutions of sub
equations.
Explanation
===========
For example when solving the equation `(x - y)(x^2 + y^2 - z^2) = 0`,
solutions for each of the equations `x - y = 0` and `x^2 + y^2 - z^2` are
found independently. Solutions for `x - y = 0` are `(x, y) = (t, t)`. But
we should introduce a value for z when we output the solution for the
original equation. This function converts `(t, t)` into `(t, t, n_{1})`
where `n_{1}` is an integer parameter.
"""
sol = []
if None in solution:
return ()
solution = iter(solution)
params = numbered_symbols("n", integer=True, start=1)
for v in var:
if v in var_t:
sol.append(next(solution))
else:
sol.append(next(params))
for val, symb in zip(sol, var):
if check_assumptions(val, **symb.assumptions0) is False:
return ()
return tuple(sol)
def _diop_solve(eq, params=None):
for diop_type in all_diop_classes:
if diop_type(eq).matches():
return diop_type(eq).solve(parameters=params)
def diop_solve(eq, param=symbols("t", integer=True)):
"""
Solves the diophantine equation ``eq``.
Explanation
===========
Unlike ``diophantine()``, factoring of ``eq`` is not attempted. Uses
``classify_diop()`` to determine the type of the equation and calls
the appropriate solver function.
Use of ``diophantine()`` is recommended over other helper functions.
``diop_solve()`` can return either a set or a tuple depending on the
nature of the equation. All non-trivial solutions are returned: assumptions
on symbols are ignored.
Usage
=====
``diop_solve(eq, t)``: Solve diophantine equation, ``eq`` using ``t``
as a parameter if needed.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diop_solve
>>> from sympy.abc import x, y, z, w
>>> diop_solve(2*x + 3*y - 5)
(3*t_0 - 5, 5 - 2*t_0)
>>> diop_solve(4*x + 3*y - 4*z + 5)
(t_0, 8*t_0 + 4*t_1 + 5, 7*t_0 + 3*t_1 + 5)
>>> diop_solve(x + 3*y - 4*z + w - 6)
(t_0, t_0 + t_1, 6*t_0 + 5*t_1 + 4*t_2 - 6, 5*t_0 + 4*t_1 + 3*t_2 - 6)
>>> diop_solve(x**2 + y**2 - 5)
{(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)}
See Also
========
diophantine()
"""
var, coeff, eq_type = classify_diop(eq, _dict=False)
if eq_type == Linear.name:
return diop_linear(eq, param)
elif eq_type == BinaryQuadratic.name:
return diop_quadratic(eq, param)
elif eq_type == HomogeneousTernaryQuadratic.name:
return diop_ternary_quadratic(eq, parameterize=True)
elif eq_type == HomogeneousTernaryQuadraticNormal.name:
return diop_ternary_quadratic_normal(eq, parameterize=True)
elif eq_type == GeneralPythagorean.name:
return diop_general_pythagorean(eq, param)
elif eq_type == Univariate.name:
return diop_univariate(eq)
elif eq_type == GeneralSumOfSquares.name:
return diop_general_sum_of_squares(eq, limit=S.Infinity)
elif eq_type == GeneralSumOfEvenPowers.name:
return diop_general_sum_of_even_powers(eq, limit=S.Infinity)
if eq_type is not None and eq_type not in diop_known:
raise ValueError(filldedent('''
Although this type of equation was identified, it is not yet
handled. It should, however, be listed in `diop_known` at the
top of this file. Developers should see comments at the end of
`classify_diop`.
''')) # pragma: no cover
else:
raise NotImplementedError(
'No solver has been written for %s.' % eq_type)
def classify_diop(eq, _dict=True):
# docstring supplied externally
matched = False
diop_type = None
for diop_class in all_diop_classes:
diop_type = diop_class(eq)
if diop_type.matches():
matched = True
break
if matched:
return diop_type.free_symbols, dict(diop_type.coeff) if _dict else diop_type.coeff, diop_type.name
# new diop type instructions
# --------------------------
# if this error raises and the equation *can* be classified,
# * it should be identified in the if-block above
# * the type should be added to the diop_known
# if a solver can be written for it,
# * a dedicated handler should be written (e.g. diop_linear)
# * it should be passed to that handler in diop_solve
raise NotImplementedError(filldedent('''
This equation is not yet recognized or else has not been
simplified sufficiently to put it in a form recognized by
diop_classify().'''))
classify_diop.func_doc = ( # type: ignore
'''
Helper routine used by diop_solve() to find information about ``eq``.
Explanation
===========
Returns a tuple containing the type of the diophantine equation
along with the variables (free symbols) and their coefficients.
Variables are returned as a list and coefficients are returned
as a dict with the key being the respective term and the constant
term is keyed to 1. The type is one of the following:
* %s
Usage
=====
``classify_diop(eq)``: Return variables, coefficients and type of the
``eq``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``_dict`` is for internal use: when True (default) a dict is returned,
otherwise a defaultdict which supplies 0 for missing keys is returned.
Examples
========
>>> from sympy.solvers.diophantine import classify_diop
>>> from sympy.abc import x, y, z, w, t
>>> classify_diop(4*x + 6*y - 4)
([x, y], {1: -4, x: 4, y: 6}, 'linear')
>>> classify_diop(x + 3*y -4*z + 5)
([x, y, z], {1: 5, x: 1, y: 3, z: -4}, 'linear')
>>> classify_diop(x**2 + y**2 - x*y + x + 5)
([x, y], {1: 5, x: 1, x**2: 1, y**2: 1, x*y: -1}, 'binary_quadratic')
''' % ('\n * '.join(sorted(diop_known))))
def diop_linear(eq, param=symbols("t", integer=True)):
"""
Solves linear diophantine equations.
A linear diophantine equation is an equation of the form `a_{1}x_{1} +
a_{2}x_{2} + .. + a_{n}x_{n} = 0` where `a_{1}, a_{2}, ..a_{n}` are
integer constants and `x_{1}, x_{2}, ..x_{n}` are integer variables.
Usage
=====
``diop_linear(eq)``: Returns a tuple containing solutions to the
diophantine equation ``eq``. Values in the tuple is arranged in the same
order as the sorted variables.
Details
=======
``eq`` is a linear diophantine equation which is assumed to be zero.
``param`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_linear
>>> from sympy.abc import x, y, z
>>> diop_linear(2*x - 3*y - 5) # solves equation 2*x - 3*y - 5 == 0
(3*t_0 - 5, 2*t_0 - 5)
Here x = -3*t_0 - 5 and y = -2*t_0 - 5
>>> diop_linear(2*x - 3*y - 4*z -3)
(t_0, 2*t_0 + 4*t_1 + 3, -t_0 - 3*t_1 - 3)
See Also
========
diop_quadratic(), diop_ternary_quadratic(), diop_general_pythagorean(),
diop_general_sum_of_squares()
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == Linear.name:
parameters = None
if param is not None:
parameters = symbols('%s_0:%i' % (param, len(var)), integer=True)
result = Linear(eq).solve(parameters=parameters)
if param is None:
result = result(*[0]*len(result.parameters))
if len(result) > 0:
return list(result)[0]
else:
return tuple([None]*len(result.parameters))
def base_solution_linear(c, a, b, t=None):
"""
Return the base solution for the linear equation, `ax + by = c`.
Explanation
===========
Used by ``diop_linear()`` to find the base solution of a linear
Diophantine equation. If ``t`` is given then the parametrized solution is
returned.
Usage
=====
``base_solution_linear(c, a, b, t)``: ``a``, ``b``, ``c`` are coefficients
in `ax + by = c` and ``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import base_solution_linear
>>> from sympy.abc import t
>>> base_solution_linear(5, 2, 3) # equation 2*x + 3*y = 5
(-5, 5)
>>> base_solution_linear(0, 5, 7) # equation 5*x + 7*y = 0
(0, 0)
>>> base_solution_linear(5, 2, 3, t) # equation 2*x + 3*y = 5
(3*t - 5, 5 - 2*t)
>>> base_solution_linear(0, 5, 7, t) # equation 5*x + 7*y = 0
(7*t, -5*t)
"""
a, b, c = _remove_gcd(a, b, c)
if c == 0:
if t is None:
return (0, 0)
if b < 0:
t = -t
return (b*t, -a*t)
x0, y0, d = igcdex(abs(a), abs(b))
x0 *= sign(a)
y0 *= sign(b)
if c % d:
return (None, None)
if t is None:
return (c*x0, c*y0)
if b < 0:
t = -t
return (c*x0 + b*t, c*y0 - a*t)
def diop_univariate(eq):
"""
Solves a univariate diophantine equations.
Explanation
===========
A univariate diophantine equation is an equation of the form
`a_{0} + a_{1}x + a_{2}x^2 + .. + a_{n}x^n = 0` where `a_{1}, a_{2}, ..a_{n}` are
integer constants and `x` is an integer variable.
Usage
=====
``diop_univariate(eq)``: Returns a set containing solutions to the
diophantine equation ``eq``.
Details
=======
``eq`` is a univariate diophantine equation which is assumed to be zero.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_univariate
>>> from sympy.abc import x
>>> diop_univariate((x - 2)*(x - 3)**2) # solves equation (x - 2)*(x - 3)**2 == 0
{(2,), (3,)}
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == Univariate.name:
return {(int(i),) for i in solveset_real(
eq, var[0]).intersect(S.Integers)}
def divisible(a, b):
"""
Returns `True` if ``a`` is divisible by ``b`` and `False` otherwise.
"""
return not a % b
def diop_quadratic(eq, param=symbols("t", integer=True)):
"""
Solves quadratic diophantine equations.
i.e. equations of the form `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`. Returns a
set containing the tuples `(x, y)` which contains the solutions. If there
are no solutions then `(None, None)` is returned.
Usage
=====
``diop_quadratic(eq, param)``: ``eq`` is a quadratic binary diophantine
equation. ``param`` is used to indicate the parameter to be used in the
solution.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``param`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, t
>>> from sympy.solvers.diophantine.diophantine import diop_quadratic
>>> diop_quadratic(x**2 + y**2 + 2*x + 2*y + 2, t)
{(-1, -1)}
References
==========
.. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, [online],
Available: https://www.alpertron.com.ar/METHODS.HTM
.. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
Available: https://web.archive.org/web/20160323033111/http://www.jpr2718.org/ax2p.pdf
See Also
========
diop_linear(), diop_ternary_quadratic(), diop_general_sum_of_squares(),
diop_general_pythagorean()
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == BinaryQuadratic.name:
if param is not None:
parameters = [param, Symbol("u", integer=True)]
else:
parameters = None
return set(BinaryQuadratic(eq).solve(parameters=parameters))
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
reps = dict(zip(var, (u, v)))
eq = Add(*[j*i.xreplace(reps) for i, j in coeff.items()])
return _mexpand(eq) == 0
def diop_DN(D, N, t=symbols("t", integer=True)):
    """
    Solves the equation `x^2 - Dy^2 = N`.
    Explanation
    ===========
    Mainly concerned with the case `D > 0, D` is not a perfect square,
    which is the same as the generalized Pell equation. The LMM
    algorithm [1]_ is used to solve this equation.
    Returns one solution tuple, (`x, y)` for each class of the solutions.
    Other solutions of the class can be constructed according to the
    values of ``D`` and ``N``.
    Usage
    =====
    ``diop_DN(D, N, t)``: D and N are integers as in `x^2 - Dy^2 = N` and
    ``t`` is the parameter to be used in the solutions.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import diop_DN
    >>> diop_DN(13, -4) # Solves equation x**2 - 13*y**2 = -4
    [(3, 1), (393, 109), (36, 10)]
    The output can be interpreted as follows: There are three fundamental
    solutions to the equation `x^2 - 13y^2 = -4` given by (3, 1), (393, 109)
    and (36, 10). Each tuple is in the form (x, y), i.e. solution (3, 1) means
    that `x = 3` and `y = 1`.
    >>> diop_DN(986, 1) # Solves equation x**2 - 986*y**2 = 1
    [(49299, 1570)]
    See Also
    ========
    find_DN(), diop_bf_DN()
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Pages 16 - 17. [online], Available:
        https://web.archive.org/web/20160323033128/http://www.jpr2718.org/pell.pdf
    """
    if D < 0:
        # x**2 + |D|*y**2 = N: an ellipse, finitely many solutions.
        if N == 0:
            return [(0, 0)]
        if N < 0:
            return []
        # N > 0:
        # Reduce to primitive solutions via Cornacchia: for each d with
        # d**2 | N, primitive solutions of x**2 + |D|*y**2 = N/d**2 scale
        # back by d.
        sol = []
        for d in divisors(square_factor(N), generator=True):
            for x, y in cornacchia(1, int(-D), int(N // d**2)):
                sol.append((d*x, d*y))
                if D == -1:
                    # x**2 + y**2 = N is symmetric in x and y.
                    sol.append((d*y, d*x))
        return sol
    if D == 0:
        # x**2 = N: y is a free parameter t when N is a perfect square.
        if N < 0:
            return []
        if N == 0:
            return [(0, t)]
        sN, _exact = integer_nthroot(N, 2)
        if _exact:
            return [(sN, t)]
        return []
    # D > 0
    sD, _exact = integer_nthroot(D, 2)
    if _exact:
        # D is a perfect square: (x - sD*y)*(x + sD*y) = N, so only a
        # bounded range of y values needs to be searched.
        if N == 0:
            return [(sD*t, t)]
        sol = []
        for y in range(floor(sign(N)*(N - 1)/(2*sD)) + 1):
            try:
                sq, _exact = integer_nthroot(D*y**2 + N, 2)
            except ValueError:
                # D*y**2 + N < 0: no real square root for this y.
                _exact = False
            if _exact:
                sol.append((sq, y))
        return sol
    if 1 < N**2 < D:
        # It is much faster to call `_special_diop_DN`.
        return _special_diop_DN(D, N)
    if N == 0:
        # sqrt(D) is irrational here, so only the trivial solution exists.
        return [(0, 0)]
    sol = []
    if abs(N) == 1:
        # Classical Pell equation: walk the continued fraction expansion of
        # sqrt(D) until the period is detected (a == 2*sD marks its end).
        pqa = PQa(0, 1, D)
        *_, prev_B, prev_G = next(pqa)
        for j, (*_, a, _, _B, _G) in enumerate(pqa):
            if a == 2*sD:
                break
            prev_B, prev_G = _B, _G
        if j % 2:
            # Odd period length: x**2 - D*y**2 = -1 is unsolvable; the
            # convergent before the period end solves N == 1.
            if N == 1:
                sol.append((prev_G, prev_B))
            return sol
        if N == -1:
            return [(prev_G, prev_B)]
        # Even period and N == 1: continue one more period to reach the
        # fundamental +1 solution.
        for _ in range(j):
            *_, _B, _G = next(pqa)
        return [(_G, _B)]
    # General N (LMM): for each f with f**2 | N solve the primitive problem
    # for m = N/f**2 using each square root z of D modulo |m|.
    for f in divisors(square_factor(N), generator=True):
        m = N // f**2
        am = abs(m)
        for sqm in sqrt_mod(D, am, all_roots=True):
            z = symmetric_residue(sqm, am)
            pqa = PQa(z, am, D)
            *_, prev_B, prev_G = next(pqa)
            for _ in range(length(z, am, D) - 1):
                _, q, *_, _B, _G = next(pqa)
                if abs(q) == 1:
                    if prev_G**2 - D*prev_B**2 == m:
                        sol.append((f*prev_G, f*prev_B))
                    elif a := diop_DN(D, -1):
                        # Compose with the fundamental solution of
                        # x**2 - D*y**2 = -1 to flip the sign of the norm.
                        sol.append((f*(prev_G*a[0][0] + prev_B*D*a[0][1]),
                                    f*(prev_G*a[0][1] + prev_B*a[0][0])))
                    break
                prev_B, prev_G = _B, _G
    return sol
def _special_diop_DN(D, N):
    """
    Solves the equation `x^2 - Dy^2 = N` for the special case where
    `1 < N**2 < D` and `D` is not a perfect square.
    It is better to call `diop_DN` rather than this function, as
    the former checks the condition `1 < N**2 < D`, and calls the latter only
    if appropriate.
    Usage
    =====
    WARNING: Internal method. Do not call directly!
    ``_special_diop_DN(D, N)``: D and N are integers as in `x^2 - Dy^2 = N`.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import _special_diop_DN
    >>> _special_diop_DN(13, -3) # Solves equation x**2 - 13*y**2 = -3
    [(7, 2), (137, 38)]
    The output can be interpreted as follows: There are two fundamental
    solutions to the equation `x^2 - 13y^2 = -3` given by (7, 2) and
    (137, 38). Each tuple is in the form (x, y), i.e. solution (7, 2) means
    that `x = 7` and `y = 2`.
    >>> _special_diop_DN(2445, -20) # Solves equation x**2 - 2445*y**2 = -20
    [(445, 9), (17625560, 356454), (698095554475, 14118073569)]
    See Also
    ========
    diop_DN()
    References
    ==========
    .. [1] Section 4.4.4 of the following book:
        Quadratic Diophantine Equations, T. Andreescu and D. Andrica,
        Springer, 2015.
    """
    # The following assertion was removed for efficiency, with the understanding
    # that this method is not called directly. The parent method, `diop_DN`
    # is responsible for performing the appropriate checks.
    #
    # assert (1 < N**2 < D) and (not integer_nthroot(D, 2)[1])
    sqrt_D = isqrt(D)
    # F maps candidate norms N/f**2 (for every f with f**2 | N) back to f,
    # so each convergent's norm can be tested with one dict lookup.
    F = {N // f**2: f for f in divisors(square_factor(abs(N)), generator=True)}
    # State of the continued fraction expansion of sqrt(D):
    # (P + sqrt(D))/Q, with (G0, G1) and (B0, B1) the last two numerator
    # and denominator convergents.
    P = 0
    Q = 1
    G0, G1 = 0, 1
    B0, B1 = 1, 0
    solutions = []
    while True:
        for _ in range(2):
            # Advance the continued fraction one step and update the
            # convergent recurrences.
            a = (P + sqrt_D) // Q
            P = a*Q - P
            Q = (D - P**2) // Q
            G0, G1 = G1, a*G1 + G0
            B0, B1 = B1, a*B1 + B0
            # G1/B1 is a convergent of sqrt(D); its norm G1**2 - D*B1**2
            # hits every representable N/f**2 when 1 < N**2 < D.
            if (s := G1**2 - D*B1**2) in F:
                f = F[s]
                solutions.append((f*G1, f*B1))
        if Q == 1:
            # Q == 1 marks the end of the (even-aligned) period.
            break
    return solutions
def cornacchia(a:int, b:int, m:int) -> set[tuple[int, int]]:
    r"""
    Solves `ax^2 + by^2 = m` where `\gcd(a, b) = 1 = gcd(a, m)` and `a, b > 0`.
    Explanation
    ===========
    Uses the algorithm due to Cornacchia. The method only finds primitive
    solutions, i.e. ones with `\gcd(x, y) = 1`. So this method cannot be used to
    find the solutions of `x^2 + y^2 = 20` since the only solution to former is
    `(x, y) = (4, 2)` and it is not primitive. When `a = b`, only the
    solutions with `x \leq y` are found. For more details, see the References.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import cornacchia
    >>> cornacchia(2, 3, 35) # equation 2x**2 + 3y**2 = 35
    {(2, 3), (4, 1)}
    >>> cornacchia(1, 1, 25) # equation x**2 + y**2 = 25
    {(4, 3)}
    References
    ===========
    .. [1] A. Nitaj, "L'algorithme de Cornacchia"
    .. [2] Solving the diophantine equation ax**2 + by**2 = m by Cornacchia's
        method, [online], Available:
        http://www.numbertheory.org/php/cornacchia.html
    See Also
    ========
    sympy.utilities.iterables.signed_permutations
    """
    # Assume gcd(a, b) = gcd(a, m) = 1 and a, b > 0 but no error checking
    sols = set()
    if a + b > m:
        # xy = 0 must hold if there exists a solution
        if a == 1:
            # y = 0
            s, _exact = iroot(m // a, 2)
            if _exact:
                sols.add((int(s), 0))
            if a == b:
                # only keep one solution
                return sols
        if m % b == 0:
            # x = 0
            s, _exact = iroot(m // b, 2)
            if _exact:
                sols.add((0, int(s)))
        return sols
    # the original cornacchia
    # Each solution corresponds to a square root t of -b/a modulo m; run a
    # truncated Euclidean descent on (m, t) until the remainder is small
    # enough to read off x, then solve for y.
    for t in sqrt_mod_iter(-b*invert(a, m), m):
        if t < m // 2:
            # Roots come in pairs t and m - t; keep the larger representative.
            continue
        u, r = m, t
        # Descend until a*r**2 < m; the walrus keeps m1 = m - a*r**2 current.
        while (m1 := m - a*r**2) <= 0:
            u, r = r, u % r
        m1, _r = divmod(m1, b)
        if _r:
            # b must divide m - a*r**2 for y to be an integer.
            continue
        s, _exact = iroot(m1, 2)
        if _exact:
            if a == b and r < s:
                # Normalize to x >= y when the form is symmetric.
                r, s = s, r
            sols.add((int(r), int(s)))
    return sols
def PQa(P_0, Q_0, D):
    r"""
    Returns useful information needed to solve the Pell equation.
    Explanation
    ===========
    There are six sequences of integers defined related to the continued
    fraction representation of `\\frac{P + \sqrt{D}}{Q}`, namely {`P_{i}`},
    {`Q_{i}`}, {`a_{i}`},{`A_{i}`}, {`B_{i}`}, {`G_{i}`}. ``PQa()`` Returns
    these values as a 6-tuple in the same order as mentioned above. Refer [1]_
    for more detailed information.
    Usage
    =====
    ``PQa(P_0, Q_0, D)``: ``P_0``, ``Q_0`` and ``D`` are integers corresponding
    to `P_{0}`, `Q_{0}` and `D` in the continued fraction
    `\\frac{P_{0} + \sqrt{D}}{Q_{0}}`.
    Also it's assumed that `P_{0}^2 == D mod(|Q_{0}|)` and `D` is square free.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import PQa
    >>> pqa = PQa(13, 4, 5) # (13 + sqrt(5))/4
    >>> next(pqa) # (P_0, Q_0, a_0, A_0, B_0, G_0)
    (13, 4, 3, 3, 1, -1)
    >>> next(pqa) # (P_1, Q_1, a_1, A_1, B_1, G_1)
    (-1, 1, 1, 4, 1, 3)
    References
    ==========
    .. [1] Solving the generalized Pell equation x^2 - Dy^2 = N, John P.
        Robertson, July 31, 2004, Pages 4 - 8. https://web.archive.org/web/20160323033128/http://www.jpr2718.org/pell.pdf
    """
    sqD = isqrt(D)
    # Seed values for the three convergent recurrences (indices -1 and -2):
    # A: numerators, B: denominators, G: the G-sequence G_i = Q_0*A_i - P_0*B_i.
    A2 = B1 = 0
    A1 = B2 = 1
    G1 = Q_0
    G2 = -P_0
    P_i = P_0
    Q_i = Q_0
    while True:
        # Partial quotient of the continued fraction of (P_i + sqrt(D))/Q_i.
        a_i = (P_i + sqD) // Q_i
        # Standard two-term recurrences X_i = a_i*X_{i-1} + X_{i-2}.
        A1, A2 = a_i*A1 + A2, A1
        B1, B2 = a_i*B1 + B2, B1
        G1, G2 = a_i*G1 + G2, G1
        yield P_i, Q_i, a_i, A1, B1, G1
        # Advance P and Q for the next term of the expansion.
        P_i = a_i*Q_i - P_i
        Q_i = (D - P_i**2) // Q_i
def diop_bf_DN(D, N, t=symbols("t", integer=True)):
    r"""
    Uses brute force to solve the equation, `x^2 - Dy^2 = N`.
    Explanation
    ===========
    Mainly concerned with the generalized Pell equation which is the case when
    `D > 0, D` is not a perfect square. For more information on the case refer
    [1]_. Let `(t, u)` be the minimal positive solution of the equation
    `x^2 - Dy^2 = 1`. Then this method requires
    `\sqrt{\\frac{\mid N \mid (t \pm 1)}{2D}}` to be small.
    Usage
    =====
    ``diop_bf_DN(D, N, t)``: ``D`` and ``N`` are coefficients in
    `x^2 - Dy^2 = N` and ``t`` is the parameter to be used in the solutions.
    Details
    =======
    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import diop_bf_DN
    >>> diop_bf_DN(13, -4)
    [(3, 1), (-3, 1), (36, 10)]
    >>> diop_bf_DN(986, 1)
    [(49299, 1570)]
    See Also
    ========
    diop_DN()
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 15. https://web.archive.org/web/20160323033128/http://www.jpr2718.org/pell.pdf
    """
    D = as_int(D)
    N = as_int(N)
    sol = []
    # u is the x-component of the fundamental solution of x**2 - D*y**2 = 1;
    # it bounds the search range for y below.
    a = diop_DN(D, 1)
    u = a[0][0]
    if N == 0:
        if D < 0:
            return [(0, 0)]
        if D == 0:
            return [(0, t)]
        sD, _exact = integer_nthroot(D, 2)
        if _exact:
            # D a perfect square: x = +-sD*y, y free.
            return [(sD*t, t), (-sD*t, t)]
        return [(0, 0)]
    if abs(N) == 1:
        return diop_DN(D, N)
    # Search bounds [L1, L2) for y, one class representative per y; see [1].
    if N > 1:
        L1 = 0
        L2 = integer_nthroot(int(N*(u - 1)/(2*D)), 2)[0] + 1
    else: # N < -1
        L1, _exact = integer_nthroot(-int(N/D), 2)
        if not _exact:
            L1 += 1
        L2 = integer_nthroot(-int(N*(u + 1)/(2*D)), 2)[0] + 1
    for y in range(L1, L2):
        try:
            x, _exact = integer_nthroot(N + D*y**2, 2)
        except ValueError:
            # N + D*y**2 < 0: no real root for this y.
            _exact = False
        if _exact:
            sol.append((x, y))
            # (x, y) and (-x, y) may represent distinct solution classes.
            if not equivalent(x, y, -x, y, D, N):
                sol.append((-x, y))
    return sol
def equivalent(u, v, r, s, D, N):
    """
    Returns True if two solutions `(u, v)` and `(r, s)` of `x^2 - Dy^2 = N`
    belongs to the same equivalence class and False otherwise.
    Explanation
    ===========
    Two solutions `(u, v)` and `(r, s)` to the above equation fall to the same
    equivalence class iff both `(ur - Dvs)` and `(us - vr)` are divisible by
    `N`. See reference [1]_. No test is performed to test whether `(u, v)` and
    `(r, s)` are actually solutions to the equation. User should take care of
    this.
    Usage
    =====
    ``equivalent(u, v, r, s, D, N)``: `(u, v)` and `(r, s)` are two solutions
    of the equation `x^2 - Dy^2 = N` and all parameters involved are integers.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import equivalent
    >>> equivalent(18, 5, -18, -5, 13, -1)
    True
    >>> equivalent(3, 1, -18, 393, 109, -4)
    False
    References
    ==========
    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 12. https://web.archive.org/web/20160323033128/http://www.jpr2718.org/pell.pdf
    """
    # Same class iff N divides both u*r - D*v*s and u*s - v*r.
    cross_norm = u*r - D*v*s
    cross_det = u*s - v*r
    return not cross_norm % N and not cross_det % N
def length(P, Q, D):
    r"""
    Returns the (length of aperiodic part + length of periodic part) of
    continued fraction representation of `\\frac{P + \sqrt{D}}{Q}`.
    It is important to remember that this does NOT return the length of the
    periodic part but the sum of the lengths of the two parts as mentioned
    above.
    Usage
    =====
    ``length(P, Q, D)``: ``P``, ``Q`` and ``D`` are integers corresponding to
    the continued fraction `\\frac{P + \sqrt{D}}{Q}`.
    Details
    =======
    ``P``, ``D`` and ``Q`` corresponds to P, D and Q in the continued fraction,
    `\\frac{P + \sqrt{D}}{Q}`.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import length
    >>> length(-2, 4, 5) # (-2 + sqrt(5))/4
    3
    >>> length(-5, 4, 17) # (-5 + sqrt(17))/4
    4
    See Also
    ========
    sympy.ntheory.continued_fraction.continued_fraction_periodic
    """
    from sympy.ntheory.continued_fraction import continued_fraction_periodic
    cf = continued_fraction_periodic(P, Q, D)
    # A periodic expansion has its repeating part as a trailing list element;
    # count its terms together with the aperiodic prefix.
    if isinstance(cf[-1], list):
        return (len(cf) - 1) + len(cf[-1])
    return len(cf)
def transformation_to_DN(eq):
    """
    This function transforms general quadratic,
    `ax^2 + bxy + cy^2 + dx + ey + f = 0`
    to more easy to deal with `X^2 - DY^2 = N` form.
    Explanation
    ===========
    This is used to solve the general quadratic equation by transforming it to
    the latter form. Refer to [1]_ for more detailed information on the
    transformation. This function returns a tuple (A, B) where A is a 2 X 2
    matrix and B is a 2 X 1 matrix such that,
    Transpose([x y]) = A * Transpose([X Y]) + B
    Usage
    =====
    ``transformation_to_DN(eq)``: where ``eq`` is the quadratic to be
    transformed.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine.diophantine import transformation_to_DN
    >>> A, B = transformation_to_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    >>> A
    Matrix([
    [1/26, 3/26],
    [   0, 1/13]])
    >>> B
    Matrix([
    [-6/13],
    [-4/13]])
    A, B  returned are such that Transpose((x y)) =  A * Transpose((X Y)) + B.
    Substituting these values for `x` and `y` and a bit of simplifying work
    will give an equation of the form `x^2 - Dy^2 = N`.
    >>> from sympy.abc import X, Y
    >>> from sympy import Matrix, simplify
    >>> u = (A*Matrix([X, Y]) + B)[0] # Transformation for x
    >>> u
    X/26 + 3*Y/26 - 6/13
    >>> v = (A*Matrix([X, Y]) + B)[1] # Transformation for y
    >>> v
    Y/13 - 4/13
    Next we will substitute these formulas for `x` and `y` and do
    ``simplify()``.
    >>> eq = simplify((x**2 - 3*x*y - y**2 - 2*y + 1).subs(zip((x, y), (u, v))))
    >>> eq
    X**2/676 - Y**2/52 + 17/13
    By multiplying the denominator appropriately, we can get a Pell equation
    in the standard form.
    >>> eq * 676
    X**2 - 13*Y**2 + 884
    If only the final equation is needed, ``find_DN()`` can be used.
    See Also
    ========
    find_DN()
    References
    ==========
    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
        John P.Robertson, May 8, 2003, Page 7 - 11.
        https://web.archive.org/web/20160323033111/http://www.jpr2718.org/ax2p.pdf
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Only binary quadratics admit this transformation; otherwise return None.
    if diop_type == BinaryQuadratic.name:
        return _transformation_to_DN(var, coeff)
def _transformation_to_DN(var, coeff):
    """
    Recursive worker for ``transformation_to_DN``.
    Eliminates, in turn, the cross term `bxy`, the linear term `dx` and the
    linear term `ey` by rational changes of variables, composing the 2x2
    matrix ``A`` and 2x1 offset ``B`` such that
    Transpose([x y]) = A*Transpose([X Y]) + B.
    """
    x, y = var
    a = coeff[x**2]
    b = coeff[x*y]
    c = coeff[y**2]
    d = coeff[x]
    e = coeff[y]
    f = coeff[1]
    # Work with coprime integer coefficients throughout.
    a, b, c, d, e, f = [as_int(i) for i in _remove_gcd(a, b, c, d, e, f)]
    X, Y = symbols("X, Y", integer=True)
    if b:
        # Remove the xy cross term via x -> (X - C*Y)/B, then recurse on the
        # resulting cross-term-free quadratic and compose the transformations.
        B, C = _rational_pq(2*a, b)
        A, T = _rational_pq(a, B**2)
        # eq_1 = A*B*X**2 + B*(c*T - A*C**2)*Y**2 + d*T*X + (B*e*T - d*T*C)*Y + f*T*B
        coeff = {X**2: A*B, X*Y: 0, Y**2: B*(c*T - A*C**2), X: d*T, Y: B*e*T - d*T*C, 1: f*T*B}
        A_0, B_0 = _transformation_to_DN([X, Y], coeff)
        return Matrix(2, 2, [S.One/B, -S(C)/B, 0, 1])*A_0, Matrix(2, 2, [S.One/B, -S(C)/B, 0, 1])*B_0
    if d:
        # Remove the linear x term by completing the square in x.
        B, C = _rational_pq(2*a, d)
        A, T = _rational_pq(a, B**2)
        # eq_2 = A*X**2 + c*T*Y**2 + e*T*Y + f*T - A*C**2
        coeff = {X**2: A, X*Y: 0, Y**2: c*T, X: 0, Y: e*T, 1: f*T - A*C**2}
        A_0, B_0 = _transformation_to_DN([X, Y], coeff)
        return Matrix(2, 2, [S.One/B, 0, 0, 1])*A_0, Matrix(2, 2, [S.One/B, 0, 0, 1])*B_0 + Matrix([-S(C)/B, 0])
    if e:
        # Remove the linear y term by completing the square in y.
        B, C = _rational_pq(2*c, e)
        A, T = _rational_pq(c, B**2)
        # eq_3 = a*T*X**2 + A*Y**2 + f*T - A*C**2
        coeff = {X**2: a*T, X*Y: 0, Y**2: A, X: 0, Y: 0, 1: f*T - A*C**2}
        A_0, B_0 = _transformation_to_DN([X, Y], coeff)
        return Matrix(2, 2, [1, 0, 0, S.One/B])*A_0, Matrix(2, 2, [1, 0, 0, S.One/B])*B_0 + Matrix([0, -S(C)/B])
    # TODO: pre-simplification: Not necessary but may simplify
    # the equation.
    # Base case: a*x**2 + c*y**2 + f = 0; scale x to normalize the leading
    # coefficient to 1.
    return Matrix(2, 2, [S.One/a, 0, 0, 1]), Matrix([0, 0])
def find_DN(eq):
    """
    This function returns a tuple, `(D, N)` of the simplified form,
    `x^2 - Dy^2 = N`, corresponding to the general quadratic,
    `ax^2 + bxy + cy^2 + dx + ey + f = 0`.
    Solving the general quadratic is then equivalent to solving the equation
    `X^2 - DY^2 = N` and transforming the solutions by using the transformation
    matrices returned by ``transformation_to_DN()``.
    Usage
    =====
    ``find_DN(eq)``: where ``eq`` is the quadratic to be transformed.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine.diophantine import find_DN
    >>> find_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    (13, -884)
    Interpretation of the output is that we get `X^2 -13Y^2 = -884` after
    transforming `x^2 - 3xy - y^2 - 2y + 1` using the transformation returned
    by ``transformation_to_DN()``.
    See Also
    ========
    transformation_to_DN()
    References
    ==========
    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
        John P.Robertson, May 8, 2003, Page 7 - 11.
        https://web.archive.org/web/20160323033111/http://www.jpr2718.org/ax2p.pdf
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Anything other than a binary quadratic yields None.
    if diop_type != BinaryQuadratic.name:
        return
    return _find_DN(var, coeff)
def _find_DN(var, coeff):
    # Apply the affine change of variables produced by _transformation_to_DN
    # and read D and N off the transformed equation, which is a rational
    # multiple of X**2 - D*Y**2 - N.
    x, y = var
    X, Y = symbols("X, Y", integer=True)
    A, B = _transformation_to_DN(var, coeff)
    transformed_vars = A*Matrix([X, Y]) + B
    u = transformed_vars[0]
    v = transformed_vars[1]
    eq = sum(coeff[term]*term for term in (x**2, x*y, y**2, x, y, 1))
    simplified = _mexpand(eq.subs(zip((x, y), (u, v))))
    new_coeff = simplified.as_coefficients_dict()
    # Normalize by the X**2 coefficient so the equation reads X**2 - D*Y**2 = N.
    lead = new_coeff[X**2]
    return -new_coeff[Y**2]/lead, -new_coeff[1]/lead
def check_param(x, y, a, params):
    """
    If there is a number modulo ``a`` such that ``x`` and ``y`` are both
    integers, then return a parametric representation for ``x`` and ``y``
    else return (None, None).
    Here ``x`` and ``y`` are functions of ``t``.
    """
    from sympy.simplify.simplify import clear_coefficients
    # A non-integer numeric value can never be made integral by choosing t:
    # return an empty solution set.
    if x.is_number and not x.is_Integer:
        return DiophantineSolutionSet([x, y], parameters=params)
    if y.is_number and not y.is_Integer:
        return DiophantineSolutionSet([x, y], parameters=params)
    m, n = symbols("m, n", integer=True)
    # c is the rational content of m*x + n*y; if its denominator does not
    # divide a, no residue of t modulo a can clear the denominators.
    c, p = (m*x + n*y).as_content_primitive()
    if a % c.q:
        return DiophantineSolutionSet([x, y], parameters=params)
    # clear_coefficients(mx + b, R)[1] -> (R - b)/m
    # Invert x(t) and y(t) for t and equate, producing a linear diophantine
    # condition in m and n which is solved recursively.
    eq = clear_coefficients(x, m)[1] - clear_coefficients(y, n)[1]
    junk, eq = eq.as_content_primitive()
    return _diop_solve(eq, params=params)
def diop_ternary_quadratic(eq, parameterize=False):
    """
    Solves the general quadratic ternary form,
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
    Returns a tuple `(x, y, z)` which is a base solution for the above
    equation. If there are no solutions, `(None, None, None)` is returned.
    Usage
    =====
    ``diop_ternary_quadratic(eq)``: Return a tuple containing a basic solution
    to ``eq``.
    Details
    =======
    ``eq`` should be an homogeneous expression of degree two in three variables
    and it is assumed to be zero.
    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine.diophantine import diop_ternary_quadratic
    >>> diop_ternary_quadratic(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic(45*x**2 - 7*y**2 - 8*x*y - z**2)
    (28, 45, 105)
    >>> diop_ternary_quadratic(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
    (9, 1, 5)
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Only homogeneous ternary quadratics (general or normal form) are
    # handled; anything else yields None.
    if diop_type not in (
            HomogeneousTernaryQuadratic.name,
            HomogeneousTernaryQuadraticNormal.name):
        return
    sol = _diop_ternary_quadratic(var, coeff)
    x_0 = y_0 = z_0 = None
    if len(sol) > 0:
        x_0, y_0, z_0 = list(sol)[0]
    if parameterize:
        return _parametrize_ternary_quadratic(
            (x_0, y_0, z_0), var, coeff)
    return x_0, y_0, z_0
def _diop_ternary_quadratic(_var, coeff):
    # Rebuild the expression from its coefficient dictionary and dispatch to
    # whichever ternary-quadratic handler recognizes its shape.
    eq = sum(term*coeff[term] for term in coeff)
    for handler in (HomogeneousTernaryQuadratic,
                    HomogeneousTernaryQuadraticNormal):
        if handler(eq).matches():
            return handler(eq, free_symbols=_var).solve()
def transformation_to_normal(eq):
    """
    Returns the transformation Matrix that converts a general ternary
    quadratic equation ``eq`` (`ax^2 + by^2 + cz^2 + dxy + eyz + fxz`)
    to a form without cross terms: `ax^2 + by^2 + cz^2 = 0`. This is
    not used in solving ternary quadratics; it is only implemented for
    the sake of completeness.
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Only homogeneous ternary quadratics are supported; otherwise None.
    if diop_type not in (
            "homogeneous_ternary_quadratic",
            "homogeneous_ternary_quadratic_normal"):
        return
    return _transformation_to_normal(var, coeff)
def _transformation_to_normal(var, coeff):
    """
    Recursive worker for ``transformation_to_normal``.
    Returns a 3x3 Matrix T such that substituting
    Transpose([x y z]) = T*Transpose([X Y Z]) removes all cross terms.
    """
    _var = list(var) # copy
    x, y, z = var
    if not any(coeff[i**2] for i in var):
        # No square terms at all: the form is purely cross terms.
        # https://math.stackexchange.com/questions/448051/transform-quadratic-ternary-form-to-normal-form/448065#448065
        a = coeff[x*y]
        b = coeff[y*z]
        c = coeff[x*z]
        swap = False
        if not a: # b can't be 0 or else there aren't 3 vars
            swap = True
            a, b = b, a
        T = Matrix(((1, 1, -b/a), (1, -1, -c/a), (0, 0, 1)))
        if swap:
            T.row_swap(0, 1)
            T.col_swap(0, 1)
        return T
    if coeff[x**2] == 0:
        # If the coefficient of x is zero change the variables
        # (recurse with a permuted variable order, then undo the swap on T).
        if coeff[y**2] == 0:
            _var[0], _var[2] = var[2], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 2)
            T.col_swap(0, 2)
            return T
        _var[0], _var[1] = var[1], var[0]
        T = _transformation_to_normal(_var, coeff)
        T.row_swap(0, 1)
        T.col_swap(0, 1)
        return T
    # Apply the transformation x --> X - (B*Y + C*Z)/(2*A)
    if coeff[x*y] != 0 or coeff[x*z] != 0:
        # Complete the square in x; the recursion handles the remaining
        # y/z cross term of the reduced form.
        A = coeff[x**2]
        B = coeff[x*y]
        C = coeff[x*z]
        D = coeff[y**2]
        E = coeff[y*z]
        F = coeff[z**2]
        _coeff = {}
        _coeff[x**2] = 4*A**2
        _coeff[y**2] = 4*A*D - B**2
        _coeff[z**2] = 4*A*F - C**2
        _coeff[y*z] = 4*A*E - 2*B*C
        _coeff[x*y] = 0
        _coeff[x*z] = 0
        T_0 = _transformation_to_normal(_var, _coeff)
        return Matrix(3, 3, [1, S(-B)/(2*A), S(-C)/(2*A), 0, 1, 0, 0, 0, 1])*T_0
    elif coeff[y*z] != 0:
        if coeff[y**2] == 0:
            if coeff[z**2] == 0:
                # Equations of the form A*x**2 + E*yz = 0.
                # Apply transformation y -> Y + Z and z -> Y - Z
                return Matrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, -1])
            # Ax**2 + E*y*z + F*z**2 = 0
            _var[0], _var[2] = var[2], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 2)
            T.col_swap(0, 2)
            return T
        # A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, F may be zero
        _var[0], _var[1] = var[1], var[0]
        T = _transformation_to_normal(_var, coeff)
        T.row_swap(0, 1)
        T.col_swap(0, 1)
        return T
    # Already diagonal: no transformation needed.
    return Matrix.eye(3)
def parametrize_ternary_quadratic(eq):
    """
    Returns the parametrized general solution for the ternary quadratic
    equation ``eq`` which has the form
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
    Examples
    ========
    >>> from sympy import Tuple, ordered
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine.diophantine import parametrize_ternary_quadratic
    The parametrized solution may be returned with three parameters:
    >>> parametrize_ternary_quadratic(2*x**2 + y**2 - 2*z**2)
    (p**2 - 2*q**2, -2*p**2 + 4*p*q - 4*p*r - 4*q**2, p**2 - 4*p*q + 2*q**2 - 4*q*r)
    There might also be only two parameters:
    >>> parametrize_ternary_quadratic(4*x**2 + 2*y**2 - 3*z**2)
    (2*p**2 - 3*q**2, -4*p**2 + 12*p*q - 6*q**2, 4*p**2 - 8*p*q + 6*q**2)
    Notes
    =====
    Consider ``p`` and ``q`` in the previous 2-parameter
    solution and observe that more than one solution can be represented
    by a given pair of parameters. If `p` and ``q`` are not coprime, this is
    trivially true since the common factor will also be a common factor of the
    solution values. But it may also be true even when ``p`` and
    ``q`` are coprime:
    >>> sol = Tuple(*_)
    >>> p, q = ordered(sol.free_symbols)
    >>> sol.subs([(p, 3), (q, 2)])
    (6, 12, 12)
    >>> sol.subs([(q, 1), (p, 1)])
    (-1, 2, 2)
    >>> sol.subs([(q, 0), (p, 1)])
    (2, -4, 4)
    >>> sol.subs([(q, 1), (p, 0)])
    (-3, -6, 6)
    Except for sign and a common factor, these are equivalent to
    the solution of (1, 2, 2).
    References
    ==========
    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
        London Mathematical Society Student Texts 41, Cambridge University
        Press, Cambridge, 1998.
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Parametrize around a base solution found by _diop_ternary_quadratic;
    # non-ternary-quadratic input yields None.
    if diop_type in (
            "homogeneous_ternary_quadratic",
            "homogeneous_ternary_quadratic_normal"):
        x_0, y_0, z_0 = list(_diop_ternary_quadratic(var, coeff))[0]
        return _parametrize_ternary_quadratic(
            (x_0, y_0, z_0), var, coeff)
def _parametrize_ternary_quadratic(solution, _var, coeff):
    # called for a*x**2 + b*y**2 + c*z**2 + d*x*y + e*y*z + f*x*z = 0
    # ``solution`` is a known base solution (x_0, y_0, z_0); the line through
    # it in direction (0, p, q), scaled by r, sweeps out all solutions.
    assert 1 not in coeff
    x_0, y_0, z_0 = solution
    v = list(_var) # copy
    if x_0 is None:
        return (None, None, None)
    if solution.count(0) >= 2:
        # if there are 2 zeros the equation reduces
        # to k*X**2 == 0 where X is x, y, or z so X must
        # be zero, too. So there is only the trivial
        # solution.
        return (None, None, None)
    if x_0 == 0:
        # The construction below needs x_0 != 0; swap the first two
        # variables, recurse, and swap the components back.
        v[0], v[1] = v[1], v[0]
        y_p, x_p, z_p = _parametrize_ternary_quadratic(
            (y_0, x_0, z_0), v, coeff)
        return x_p, y_p, z_p
    x, y, z = v
    r, p, q = symbols("r, p, q", integer=True)
    eq = sum(k*v for k, v in coeff.items())
    # Substitute the parametrized line through the base solution; since the
    # form is homogeneous of degree 2, the result is linear in r after
    # separating the r-free part B.
    eq_1 = _mexpand(eq.subs(zip(
        (x, y, z), (r*x_0, r*y_0 + p, r*z_0 + q))))
    A, B = eq_1.as_independent(r, as_Add=True)
    x = A*x_0
    y = (A*y_0 - _mexpand(B/r*p))
    z = (A*z_0 - _mexpand(B/r*q))
    return _remove_gcd(x, y, z)
def diop_ternary_quadratic_normal(eq, parameterize=False):
    """
    Solves the quadratic ternary diophantine equation,
    `ax^2 + by^2 + cz^2 = 0`.
    Explanation
    ===========
    Here the coefficients `a`, `b`, and `c` should be non zero. Otherwise the
    equation will be a quadratic binary or univariate equation. If solvable,
    returns a tuple `(x, y, z)` that satisfies the given equation. If the
    equation does not have integer solutions, `(None, None, None)` is returned.
    Usage
    =====
    ``diop_ternary_quadratic_normal(eq)``: where ``eq`` is an equation of the form
    `ax^2 + by^2 + cz^2 = 0`.
    Examples
    ========
    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine.diophantine import diop_ternary_quadratic_normal
    >>> diop_ternary_quadratic_normal(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic_normal(34*x**2 - 3*y**2 - 301*z**2)
    (4, 9, 1)
    """
    var, coeff, diop_type = classify_diop(eq, _dict=False)
    # Only the diagonal (normal) ternary form is handled; otherwise None.
    if diop_type == HomogeneousTernaryQuadraticNormal.name:
        sol = _diop_ternary_quadratic_normal(var, coeff)
        # Take the first base solution if any were found.
        if len(sol) > 0:
            x_0, y_0, z_0 = list(sol)[0]
        else:
            x_0, y_0, z_0 = None, None, None
        if parameterize:
            return _parametrize_ternary_quadratic(
                (x_0, y_0, z_0), var, coeff)
        return x_0, y_0, z_0
def _diop_ternary_quadratic_normal(var, coeff):
    # Rebuild the diagonal form from its coefficient dictionary and delegate.
    equation = sum(term*coefficient for term, coefficient in coeff.items())
    return HomogeneousTernaryQuadraticNormal(equation, free_symbols=var).solve()
def sqf_normal(a, b, c, steps=False):
    """
    Return `a', b', c'`, the coefficients of the square-free normal
    form of `ax^2 + by^2 + cz^2 = 0`, where `a', b', c'` are pairwise
    prime. If `steps` is True then also return three tuples:
    `sq`, `sqf`, and `(a', b', c')` where `sq` contains the square
    factors of `a`, `b` and `c` after removing the `gcd(a, b, c)`;
    `sqf` contains the values of `a`, `b` and `c` after removing
    both the `gcd(a, b, c)` and the square factors.
    The solutions for `ax^2 + by^2 + cz^2 = 0` can be
    recovered from the solutions of `a'x^2 + b'y^2 + c'z^2 = 0`.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import sqf_normal
    >>> sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11)
    (11, 1, 5)
    >>> sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11, True)
    ((3, 1, 7), (5, 55, 11), (11, 1, 5))
    References
    ==========
    .. [1] Legendre's Theorem, Legrange's Descent,
        https://public.csusm.edu/aitken_html/notes/legendre.pdf
    See Also
    ========
    reconstruct()
    """
    # Strip the overall gcd, then split each coefficient into its square
    # part (sq) and square-free part (sqf).
    ABC = _remove_gcd(a, b, c)
    sq = tuple(square_factor(i) for i in ABC)
    sqf = A, B, C = tuple([i//j**2 for i,j in zip(ABC, sq)])
    # Make the triple pairwise prime: for each pair sharing a common factor
    # p, divide both members of the pair by p and multiply the *third*
    # coefficient by p (this corresponds to rescaling the third variable,
    # so solutions remain convertible between the two forms).
    pc = igcd(A, B)
    A /= pc
    B /= pc
    pa = igcd(B, C)
    B /= pa
    C /= pa
    pb = igcd(A, C)
    A /= pb
    # BUG FIX: pb divides A and C (never B, since gcd(A, B) == 1 at this
    # point), so C -- not B -- must be divided here; B absorbs pb below.
    C /= pb
    A *= pa
    B *= pb
    C *= pc
    if steps:
        return (sq, sqf, (A, B, C))
    else:
        return A, B, C
def square_factor(a):
    r"""
    Returns an integer `c` s.t. `a = c^2k, \ c,k \in Z`. Here `k` is square
    free. `a` can be given as an integer or a dictionary of factors.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import square_factor
    >>> square_factor(24)
    2
    >>> square_factor(-36*3)
    6
    >>> square_factor(1)
    1
    >>> square_factor({3: 2, 2: 1, -1: 1}) # -18
    3
    See Also
    ========
    sympy.ntheory.factor_.core
    """
    # Accept a prime factorization directly, or compute one.
    if isinstance(a, dict):
        factorization = a
    else:
        factorization = factorint(a)
    # c is the product of p**(e//2) over all prime factors p**e.
    return Mul(*(p**(e//2) for p, e in factorization.items()))
def reconstruct(A, B, z):
    """
    Reconstruct the `z` value of an equivalent solution of `ax^2 + by^2 + cz^2`
    from the `z` value of a solution of the square-free normal form of the
    equation, `a'*x^2 + b'*y^2 + c'*z^2`, where `a'`, `b'` and `c'` are square
    free and `gcd(a', b', c') == 1`.
    """
    # Every prime shared by A and B must appear to the first power only;
    # each such prime scales z once.
    for prime, multiplicity in factorint(igcd(A, B)).items():
        if multiplicity != 1:
            raise ValueError('a and b should be square-free')
        z *= prime
    return z
def ldescent(A, B):
    """
    Return a non-trivial solution to `w^2 = Ax^2 + By^2` using
    Lagrange's method; return None if there is no such solution.
    Parameters
    ==========
    A : Integer
    B : Integer
        non-zero integer
    Returns
    =======
    (int, int, int) | None : a tuple `(w_0, x_0, y_0)` which is a solution to the above equation.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import ldescent
    >>> ldescent(1, 1) # w^2 = x^2 + y^2
    (1, 1, 0)
    >>> ldescent(4, -7) # w^2 = 4x^2 - 7y^2
    (2, -1, 0)
    This means that `x = -1, y = 0` and `w = 2` is a solution to the equation
    `w^2 = 4x^2 - 7y^2`
    >>> ldescent(5, -1) # w^2 = 5x^2 - y^2
    (2, 1, -1)
    References
    ==========
    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
        London Mathematical Society Student Texts 41, Cambridge University
        Press, Cambridge, 1998.
    .. [2] Cremona, J. E., Rusin, D. (2003). Efficient Solution of Rational Conics.
        Mathematics of Computation, 72(243), 1417-1441.
        https://doi.org/10.1090/S0025-5718-02-01480-1
    """
    if A == 0 or B == 0:
        raise ValueError("A and B must be non-zero integers")
    # Normalize so |A| <= |B|; descent reduces B while keeping A fixed.
    if abs(A) > abs(B):
        w, y, x = ldescent(B, A)
        return w, x, y
    # Base cases of the descent.
    if A == 1:
        return (1, 1, 0)
    if B == 1:
        return (1, 0, 1)
    if B == -1: # and A == -1
        # w^2 = -x^2 - y^2 has no non-trivial solution.
        return
    # A must be a quadratic residue mod B for a solution to exist.
    r = sqrt_mod(A, B)
    if r is None:
        return
    Q = (r**2 - A) // B
    if Q == 0:
        # r**2 == A exactly: (r, -1, 0) solves w^2 = A*x^2 + B*y^2.
        return r, -1, 0
    # Find a divisor i of Q with square cofactor d**2, descend on the
    # smaller problem w^2 = A*x^2 + B_0*y^2, and lift the solution back.
    for i in divisors(Q):
        d, _exact = integer_nthroot(abs(Q) // i, 2)
        if _exact:
            B_0 = sign(Q)*i
            W, X, Y = ldescent(A, B_0)
            return _remove_gcd(-A*X + r*W, r*X - W, Y*B_0*d)
def descent(A, B):
    """
    Returns a non-trivial solution, (x, y, z), to `x^2 = Ay^2 + Bz^2`
    using Lagrange's descent method with lattice-reduction. `A` and `B`
    are assumed to be valid for such a solution to exist.
    This is faster than the normal Lagrange's descent algorithm because
    the Gaussian reduction is used.
    Examples
    ========
    >>> from sympy.solvers.diophantine.diophantine import descent
    >>> descent(3, 1) # x**2 = 3*y**2 + z**2
    (1, 0, 1)
    `(x, y, z) = (1, 0, 1)` is a solution to the above equation.
    >>> descent(41, -113)
    (-16, -3, 1)
    References
    ==========
    .. [1] Cremona, J. E., Rusin, D. (2003). Efficient Solution of Rational Conics.
        Mathematics of Computation, 72(243), 1417-1441.
        https://doi.org/10.1090/S0025-5718-02-01480-1
    """
    # Normalize so |A| <= |B|; descent shrinks B at each recursion.
    if abs(A) > abs(B):
        x, y, z = descent(B, A)
        return x, z, y
    # Base cases.
    if B == 1:
        return (1, 0, 1)
    if A == 1:
        return (1, 1, 0)
    if B == -A:
        return (0, 1, 1)
    if B == A:
        x, z, y = descent(-1, A)
        return (A*y, z, x)
    # Find a small solution (x_0, z_0) of x**2 - A*z**2 == 0 (mod B) via
    # lattice reduction, so the descent target t = (x_0**2 - A*z_0**2)/B
    # is small; strip its square part and recurse on the square-free t_1.
    w = sqrt_mod(A, B)
    x_0, z_0 = gaussian_reduce(w, A, B)
    t = (x_0**2 - A*z_0**2) // B
    t_2 = square_factor(t)
    t_1 = t // t_2**2
    x_1, z_1, y_1 = descent(A, t_1)
    # Compose the two solutions (Brahmagupta-style identity) and lift back.
    return _remove_gcd(x_0*x_1 + A*z_0*z_1, z_0*x_1 + x_0*z_1, t_1*t_2*y_1)
def gaussian_reduce(w:int, a:int, b:int) -> tuple[int, int]:
r"""
Returns a reduced solution `(x, z)` to the congruence
`X^2 - aZ^2 \equiv 0 \pmod{b}` so that `x^2 + |a|z^2` is as small as possible.
Here ``w`` is a solution of the congruence `x^2 \equiv a \pmod{b}`.
This function is intended to be used only for ``descent()``.
Explanation
===========
The Gaussian reduction can find the shortest vector for any norm.
So we define the special norm for the vectors `u = (u_1, u_2)` and `v = (v_1, v_2)` as follows.
.. math ::
u \cdot v := (wu_1 + bu_2)(wv_1 + bv_2) + |a|u_1v_1
Note that, given the mapping `f: (u_1, u_2) \to (wu_1 + bu_2, u_1)`,
`f((u_1,u_2))` is the solution to `X^2 - aZ^2 \equiv 0 \pmod{b}`.
In other words, finding the shortest vector in this norm will yield a solution with smaller `X^2 + |a|Z^2`.
The algorithm starts from basis vectors `(0, 1)` and `(1, 0)`
(corresponding to solutions `(b, 0)` and `(w, 1)`, respectively) and finds the shortest vector.
The shortest vector does not necessarily correspond to the smallest solution,
but since ``descent()`` only wants the smallest possible solution, it is sufficient.
Parameters
==========
w : int
``w`` s.t. `w^2 \equiv a \pmod{b}`
a : int
square-free nonzero integer
b : int
square-free nonzero integer
Examples
========
>>> from sympy.solvers.diophantine.diophantine import gaussian_reduce
>>> from sympy.ntheory.residue_ntheory import sqrt_mod
>>> a, b = 19, 101
>>> gaussian_reduce(sqrt_mod(a, b), a, b) # 1**2 - 19*(-4)**2 = -303
(1, -4)
>>> a, b = 11, 14
>>> x, z = gaussian_reduce(sqrt_mod(a, b), a, b)
>>> (x**2 - a*z**2) % b == 0
True
It does not always return the smallest solution.
>>> a, b = 6, 95
>>> min_x, min_z = 1, 4
>>> x, z = gaussian_reduce(sqrt_mod(a, b), a, b)
>>> (x**2 - a*z**2) % b == 0 and (min_x**2 - a*min_z**2) % b == 0
True
>>> min_x**2 + abs(a)*min_z**2 < x**2 + abs(a)*z**2
True
References
==========
.. [1] Gaussian lattice Reduction [online]. Available:
https://web.archive.org/web/20201021115213/http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=404
.. [2] Cremona, J. E., Rusin, D. (2003). Efficient Solution of Rational Conics.
Mathematics of Computation, 72(243), 1417-1441.
https://doi.org/10.1090/S0025-5718-02-01480-1
"""
a = abs(a)
def _dot(u, v):
return u[0]*v[0] + a*u[1]*v[1]
u = (b, 0)
v = (w, 1) if b*w >= 0 else (-w, -1)
# i.e., _dot(u, v) >= 0
if b**2 < w**2 + a:
u, v = v, u
# i.e., norm(u) >= norm(v), where norm(u) := sqrt(_dot(u, u))
while _dot(u, u) > (dv := _dot(v, v)):
k = _dot(u, v) // dv
u, v = v, (u[0] - k*v[0], u[1] - k*v[1])
c = (v[0] - u[0], v[1] - u[1])
if _dot(c, c) <= _dot(u, u) <= 2*_dot(u, v):
return c
return u
def holzer(x, y, z, a, b, c):
r"""
Simplify the solution `(x, y, z)` of the equation
`ax^2 + by^2 = cz^2` with `a, b, c > 0` and `z^2 \geq \mid ab \mid` to
a new reduced solution `(x', y', z')` such that `z'^2 \leq \mid ab \mid`.
The algorithm is an interpretation of Mordell's reduction as described
on page 8 of Cremona and Rusin's paper [1]_ and the work of Mordell in
reference [2]_.
References
==========
.. [1] Cremona, J. E., Rusin, D. (2003). Efficient Solution of Rational Conics.
Mathematics of Computation, 72(243), 1417-1441.
https://doi.org/10.1090/S0025-5718-02-01480-1
.. [2] Diophantine Equations, L. J. Mordell, page 48.
"""
if _odd(c):
k = 2*c
else:
k = c//2
small = a*b*c
step = 0
while True:
t1, t2, t3 = a*x**2, b*y**2, c*z**2
# check that it's a solution
if t1 + t2 != t3:
if step == 0:
raise ValueError('bad starting solution')
break
x_0, y_0, z_0 = x, y, z
if max(t1, t2, t3) <= small:
# Holzer condition
break
uv = u, v = base_solution_linear(k, y_0, -x_0)
if None in uv:
break
p, q = -(a*u*x_0 + b*v*y_0), c*z_0
r = Rational(p, q)
if _even(c):
w = _nint_or_floor(p, q)
assert abs(w - r) <= S.Half
else:
w = p//q # floor
if _odd(a*u + b*v + c*w):
w += 1
assert abs(w - r) <= S.One
A = (a*u**2 + b*v**2 + c*w**2)
B = (a*u*x_0 + b*v*y_0 + c*w*z_0)
x = Rational(x_0*A - 2*u*B, k)
y = Rational(y_0*A - 2*v*B, k)
z = Rational(z_0*A - 2*w*B, k)
assert all(i.is_Integer for i in (x, y, z))
step += 1
return tuple([int(i) for i in (x_0, y_0, z_0)])
def diop_general_pythagorean(eq, param=symbols("m", integer=True)):
"""
Solves the general pythagorean equation,
`a_{1}^2x_{1}^2 + a_{2}^2x_{2}^2 + . . . + a_{n}^2x_{n}^2 - a_{n + 1}^2x_{n + 1}^2 = 0`.
Returns a tuple which contains a parametrized solution to the equation,
sorted in the same order as the input variables.
Usage
=====
``diop_general_pythagorean(eq, param)``: where ``eq`` is a general
pythagorean equation which is assumed to be zero and ``param`` is the base
parameter used to construct other parameters by subscripting.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_general_pythagorean
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_pythagorean(a**2 + b**2 + c**2 - d**2)
(m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)
>>> diop_general_pythagorean(9*a**2 - 4*b**2 + 16*c**2 + 25*d**2 + e**2)
(10*m1**2 + 10*m2**2 + 10*m3**2 - 10*m4**2, 15*m1**2 + 15*m2**2 + 15*m3**2 + 15*m4**2, 15*m1*m4, 12*m2*m4, 60*m3*m4)
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralPythagorean.name:
if param is None:
params = None
else:
params = symbols('%s1:%i' % (param, len(var)), integer=True)
return list(GeneralPythagorean(eq).solve(parameters=params))[0]
def diop_general_sum_of_squares(eq, limit=1):
r"""
Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Returns at most ``limit`` number of solutions.
Usage
=====
``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Details
=======
When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
no solutions. Refer to [1]_ for more details.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
{(15, 22, 22, 24, 24)}
Reference
=========
.. [1] Representing an integer as a sum of three squares, [online],
Available:
https://proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfSquares.name:
return set(GeneralSumOfSquares(eq).solve(limit=limit))
def diop_general_sum_of_even_powers(eq, limit=1):
"""
Solves the equation `x_{1}^e + x_{2}^e + . . . + x_{n}^e - k = 0`
where `e` is an even, integer power.
Returns at most ``limit`` number of solutions.
Usage
=====
``general_sum_of_even_powers(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^e + x_{2}^e + . . . + x_{n}^e - k = 0`.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_even_powers
>>> from sympy.abc import a, b
>>> diop_general_sum_of_even_powers(a**4 + b**4 - (2**4 + 3**4))
{(2, 3)}
See Also
========
power_representation
"""
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfEvenPowers.name:
return set(GeneralSumOfEvenPowers(eq).solve(limit=limit))
## Functions below this comment can be more suitably grouped under
## an Additive number theory module rather than the Diophantine
## equation module.
def partition(n, k=None, zeros=False):
"""
Returns a generator that can be used to generate partitions of an integer
`n`.
Explanation
===========
A partition of `n` is a set of positive integers which add up to `n`. For
example, partitions of 3 are 3, 1 + 2, 1 + 1 + 1. A partition is returned
as a tuple. If ``k`` equals None, then all possible partitions are returned
irrespective of their size, otherwise only the partitions of size ``k`` are
returned. If the ``zero`` parameter is set to True then a suitable
number of zeros are added at the end of every partition of size less than
``k``.
``zero`` parameter is considered only if ``k`` is not None. When the
partitions are over, the last `next()` call throws the ``StopIteration``
exception, so this function should always be used inside a try - except
block.
Details
=======
``partition(n, k)``: Here ``n`` is a positive integer and ``k`` is the size
of the partition which is also positive integer.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import partition
>>> f = partition(5)
>>> next(f)
(1, 1, 1, 1, 1)
>>> next(f)
(1, 1, 1, 2)
>>> g = partition(5, 3)
>>> next(g)
(1, 1, 3)
>>> next(g)
(1, 2, 2)
>>> g = partition(5, 3, zeros=True)
>>> next(g)
(0, 0, 5)
"""
if not zeros or k is None:
for i in ordered_partitions(n, k):
yield tuple(i)
else:
for m in range(1, k + 1):
for i in ordered_partitions(n, m):
i = tuple(i)
yield (0,)*(k - len(i)) + i
def prime_as_sum_of_two_squares(p):
"""
Represent a prime `p` as a unique sum of two squares; this can
only be done if the prime is congruent to 1 mod 4.
Parameters
==========
p : Integer
A prime that is congruent to 1 mod 4
Returns
=======
(int, int) | None : Pair of positive integers ``(x, y)`` satisfying ``x**2 + y**2 = p``.
None if ``p`` is not congruent to 1 mod 4.
Raises
======
ValueError
If ``p`` is not prime number
Examples
========
>>> from sympy.solvers.diophantine.diophantine import prime_as_sum_of_two_squares
>>> prime_as_sum_of_two_squares(7) # can't be done
>>> prime_as_sum_of_two_squares(5)
(1, 2)
Reference
=========
.. [1] Representing a number as a sum of four squares, [online],
Available: https://schorn.ch/lagrange.html
See Also
========
sum_of_squares
"""
p = as_int(p)
if p % 4 != 1:
return
if not isprime(p):
raise ValueError("p should be a prime number")
if p % 8 == 5:
# Legendre symbol (2/p) == -1 if p % 8 in [3, 5]
b = 2
elif p % 12 == 5:
# Legendre symbol (3/p) == -1 if p % 12 in [5, 7]
b = 3
elif p % 5 in [2, 3]:
# Legendre symbol (5/p) == -1 if p % 5 in [2, 3]
b = 5
else:
b = 7
while jacobi(b, p) == 1:
b = nextprime(b)
b = pow(b, p >> 2, p)
a = p
while b**2 > p:
a, b = b, a % b
return (int(a % b), int(b)) # convert from long
def sum_of_three_squares(n):
r"""
Returns a 3-tuple $(a, b, c)$ such that $a^2 + b^2 + c^2 = n$ and
$a, b, c \geq 0$.
Returns None if $n = 4^a(8m + 7)$ for some `a, m \in \mathbb{Z}`. See
[1]_ for more details.
Parameters
==========
n : Integer
non-negative integer
Returns
=======
(int, int, int) | None : 3-tuple non-negative integers ``(a, b, c)`` satisfying ``a**2 + b**2 + c**2 = n``.
a,b,c are sorted in ascending order. ``None`` if no such ``(a,b,c)``.
Raises
======
ValueError
If ``n`` is a negative integer
Examples
========
>>> from sympy.solvers.diophantine.diophantine import sum_of_three_squares
>>> sum_of_three_squares(44542)
(18, 37, 207)
References
==========
.. [1] Representing a number as a sum of three squares, [online],
Available: https://schorn.ch/lagrange.html
See Also
========
power_representation :
``sum_of_three_squares(n)`` is one of the solutions output by ``power_representation(n, 2, 3, zeros=True)``
"""
# https://math.stackexchange.com/questions/483101/rabin-and-shallit-algorithm/651425#651425
# discusses these numbers (except for 1, 2, 3) as the exceptions of H&L's conjecture that
# Every sufficiently large number n is either a square or the sum of a prime and a square.
special = {1: (0, 0, 1), 2: (0, 1, 1), 3: (1, 1, 1), 10: (0, 1, 3), 34: (3, 3, 4),
58: (0, 3, 7), 85: (0, 6, 7), 130: (0, 3, 11), 214: (3, 6, 13), 226: (8, 9, 9),
370: (8, 9, 15), 526: (6, 7, 21), 706: (15, 15, 16), 730: (0, 1, 27),
1414: (6, 17, 33), 1906: (13, 21, 36), 2986: (21, 32, 39), 9634: (56, 57, 57)}
n = as_int(n)
if n < 0:
raise ValueError("n should be a non-negative integer")
if n == 0:
return (0, 0, 0)
n, v = remove(n, 4)
v = 1 << v
if n % 8 == 7:
return
if n in special:
return tuple([v*i for i in special[n]])
s, _exact = integer_nthroot(n, 2)
if _exact:
return (0, 0, v*s)
if n % 8 == 3:
if not s % 2:
s -= 1
for x in range(s, -1, -2):
N = (n - x**2) // 2
if isprime(N):
# n % 8 == 3 and x % 2 == 1 => N % 4 == 1
y, z = prime_as_sum_of_two_squares(N)
return tuple(sorted([v*x, v*(y + z), v*abs(y - z)]))
# We will never reach this point because there must be a solution.
assert False
# assert n % 4 in [1, 2]
if not((n % 2) ^ (s % 2)):
s -= 1
for x in range(s, -1, -2):
N = n - x**2
if isprime(N):
# assert N % 4 == 1
y, z = prime_as_sum_of_two_squares(N)
return tuple(sorted([v*x, v*y, v*z]))
# We will never reach this point because there must be a solution.
assert False
def sum_of_four_squares(n):
r"""
Returns a 4-tuple `(a, b, c, d)` such that `a^2 + b^2 + c^2 + d^2 = n`.
Here `a, b, c, d \geq 0`.
Parameters
==========
n : Integer
non-negative integer
Returns
=======
(int, int, int, int) : 4-tuple non-negative integers ``(a, b, c, d)`` satisfying ``a**2 + b**2 + c**2 + d**2 = n``.
a,b,c,d are sorted in ascending order.
Raises
======
ValueError
If ``n`` is a negative integer
Examples
========
>>> from sympy.solvers.diophantine.diophantine import sum_of_four_squares
>>> sum_of_four_squares(3456)
(8, 8, 32, 48)
>>> sum_of_four_squares(1294585930293)
(0, 1234, 2161, 1137796)
References
==========
.. [1] Representing a number as a sum of four squares, [online],
Available: https://schorn.ch/lagrange.html
See Also
========
power_representation :
``sum_of_four_squares(n)`` is one of the solutions output by ``power_representation(n, 2, 4, zeros=True)``
"""
n = as_int(n)
if n < 0:
raise ValueError("n should be a non-negative integer")
if n == 0:
return (0, 0, 0, 0)
# remove factors of 4 since a solution in terms of 3 squares is
# going to be returned; this is also done in sum_of_three_squares,
# but it needs to be done here to select d
n, v = remove(n, 4)
v = 1 << v
if n % 8 == 7:
d = 2
n = n - 4
elif n % 8 in (2, 6):
d = 1
n = n - 1
else:
d = 0
x, y, z = sum_of_three_squares(n) # sorted
return tuple(sorted([v*d, v*x, v*y, v*z]))
def power_representation(n, p, k, zeros=False):
r"""
Returns a generator for finding k-tuples of integers,
`(n_{1}, n_{2}, . . . n_{k})`, such that
`n = n_{1}^p + n_{2}^p + . . . n_{k}^p`.
Usage
=====
``power_representation(n, p, k, zeros)``: Represent non-negative number
``n`` as a sum of ``k`` ``p``\ th powers. If ``zeros`` is true, then the
solutions is allowed to contain zeros.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import power_representation
Represent 1729 as a sum of two cubes:
>>> f = power_representation(1729, 3, 2)
>>> next(f)
(9, 10)
>>> next(f)
(1, 12)
If the flag `zeros` is True, the solution may contain tuples with
zeros; any such solutions will be generated after the solutions
without zeros:
>>> list(power_representation(125, 2, 3, zeros=True))
[(5, 6, 8), (3, 4, 10), (0, 5, 10), (0, 2, 11)]
For even `p` the `permute_sign` function can be used to get all
signed values:
>>> from sympy.utilities.iterables import permute_signs
>>> list(permute_signs((1, 12)))
[(1, 12), (-1, 12), (1, -12), (-1, -12)]
All possible signed permutations can also be obtained:
>>> from sympy.utilities.iterables import signed_permutations
>>> list(signed_permutations((1, 12)))
[(1, 12), (-1, 12), (1, -12), (-1, -12), (12, 1), (-12, 1), (12, -1), (-12, -1)]
"""
n, p, k = [as_int(i) for i in (n, p, k)]
if n < 0:
if p % 2:
for t in power_representation(-n, p, k, zeros):
yield tuple(-i for i in t)
return
if p < 1 or k < 1:
raise ValueError(filldedent('''
Expecting positive integers for `(p, k)`, but got `(%s, %s)`'''
% (p, k)))
if n == 0:
if zeros:
yield (0,)*k
return
if k == 1:
if p == 1:
yield (n,)
elif n == 1:
yield (1,)
else:
be = perfect_power(n)
if be:
b, e = be
d, r = divmod(e, p)
if not r:
yield (b**d,)
return
if p == 1:
yield from partition(n, k, zeros=zeros)
return
if p == 2:
if k == 3:
n, v = remove(n, 4)
if v:
v = 1 << v
for t in power_representation(n, p, k, zeros):
yield tuple(i*v for i in t)
return
feasible = _can_do_sum_of_squares(n, k)
if not feasible:
return
if not zeros:
if n > 33 and k >= 5 and k <= n and n - k in (
13, 10, 7, 5, 4, 2, 1):
'''Todd G. Will, "When Is n^2 a Sum of k Squares?", [online].
Available: https://www.maa.org/sites/default/files/Will-MMz-201037918.pdf'''
return
# quick tests since feasibility includes the possibility of 0
if k == 4 and (n in (1, 3, 5, 9, 11, 17, 29, 41) or remove(n, 4)[0] in (2, 6, 14)):
# A000534
return
if k == 3 and n in (1, 2, 5, 10, 13, 25, 37, 58, 85, 130): # or n = some number >= 5*10**10
# A051952
return
if feasible is not True: # it's prime and k == 2
yield prime_as_sum_of_two_squares(n)
return
if k == 2 and p > 2:
be = perfect_power(n)
if be and be[1] % p == 0:
return # Fermat: a**n + b**n = c**n has no solution for n > 2
if n >= k:
a = integer_nthroot(n - (k - 1), p)[0]
for t in pow_rep_recursive(a, k, n, [], p):
yield tuple(reversed(t))
if zeros:
a = integer_nthroot(n, p)[0]
for i in range(1, k):
for t in pow_rep_recursive(a, i, n, [], p):
yield tuple(reversed(t + (0,)*(k - i)))
sum_of_powers = power_representation
def pow_rep_recursive(n_i, k, n_remaining, terms, p):
# Invalid arguments
if n_i <= 0 or k <= 0:
return
# No solutions may exist
if n_remaining < k:
return
if k * pow(n_i, p) < n_remaining:
return
if k == 0 and n_remaining == 0:
yield tuple(terms)
elif k == 1:
# next_term^p must equal to n_remaining
next_term, exact = integer_nthroot(n_remaining, p)
if exact and next_term <= n_i:
yield tuple(terms + [next_term])
return
else:
# TODO: Fall back to diop_DN when k = 2
if n_i >= 1 and k > 0:
for next_term in range(1, n_i + 1):
residual = n_remaining - pow(next_term, p)
if residual < 0:
break
yield from pow_rep_recursive(next_term, k - 1, residual, terms + [next_term], p)
def sum_of_squares(n, k, zeros=False):
"""Return a generator that yields the k-tuples of nonnegative
values, the squares of which sum to n. If zeros is False (default)
then the solution will not contain zeros. The nonnegative
elements of a tuple are sorted.
* If k == 1 and n is square, (n,) is returned.
* If k == 2 then n can only be written as a sum of squares if
every prime in the factorization of n that has the form
4*k + 3 has an even multiplicity. If n is prime then
it can only be written as a sum of two squares if it is
in the form 4*k + 1.
* if k == 3 then n can be written as a sum of squares if it does
not have the form 4**m*(8*k + 7).
* all integers can be written as the sum of 4 squares.
* if k > 4 then n can be partitioned and each partition can
be written as a sum of 4 squares; if n is not evenly divisible
by 4 then n can be written as a sum of squares only if the
an additional partition can be written as sum of squares.
For example, if k = 6 then n is partitioned into two parts,
the first being written as a sum of 4 squares and the second
being written as a sum of 2 squares -- which can only be
done if the condition above for k = 2 can be met, so this will
automatically reject certain partitions of n.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import sum_of_squares
>>> list(sum_of_squares(25, 2))
[(3, 4)]
>>> list(sum_of_squares(25, 2, True))
[(3, 4), (0, 5)]
>>> list(sum_of_squares(25, 4))
[(1, 2, 2, 4)]
See Also
========
sympy.utilities.iterables.signed_permutations
"""
yield from power_representation(n, 2, k, zeros)
def _can_do_sum_of_squares(n, k):
"""Return True if n can be written as the sum of k squares,
False if it cannot, or 1 if ``k == 2`` and ``n`` is prime (in which
case it *can* be written as a sum of two squares). A False
is returned only if it cannot be written as ``k``-squares, even
if 0s are allowed.
"""
if k < 1:
return False
if n < 0:
return False
if n == 0:
return True
if k == 1:
return is_square(n)
if k == 2:
if n in (1, 2):
return True
if isprime(n):
if n % 4 == 1:
return 1 # signal that it was prime
return False
# n is a composite number
# we can proceed iff no prime factor in the form 4*k + 3
# has an odd multiplicity
return all(p % 4 !=3 or m % 2 == 0 for p, m in factorint(n).items())
if k == 3:
return remove(n, 4)[0] % 8 != 7
# every number can be written as a sum of 4 squares; for k > 4 partitions
# can be 0
return True
|
GeneralSumOfEvenPowers
|
python
|
Pylons__pyramid
|
src/pyramid/authorization.py
|
{
"start": 958,
"end": 1016
}
|
class ____(_AllPermissionsList):
pass
|
AllPermissionsList
|
python
|
ApeWorX__ape
|
src/ape/cli/choices.py
|
{
"start": 1522,
"end": 2390
}
|
class ____(click.Choice):
"""
A ``click.Choice`` for loading account aliases for the active project at runtime.
Provide an ``account_type`` to limit the type of account to choose from.
Defaults to all account types in ``choices()``.
"""
name = "alias"
def __init__(self, key: _ACCOUNT_TYPE_FILTER = None):
# NOTE: we purposely skip the constructor of `Choice`
self.case_sensitive = False
self._key_filter = key
@cached_property
def choices(self) -> Sequence: # type: ignore[override]
from ape.types.basic import _LazySequence
return _LazySequence(self._choices_iterator)
@property
def _choices_iterator(self) -> Iterator[str]:
for acct in _get_accounts(key=self._key_filter):
if acct.alias is None:
continue
yield acct.alias
|
Alias
|
python
|
mkdocs__mkdocs
|
mkdocs/tests/utils/utils_tests.py
|
{
"start": 19142,
"end": 21630
}
|
class ____(unittest.TestCase):
def setUp(self):
self.log = logging.getLogger('dummy')
self.log.propagate = False
self.log.setLevel(1)
self.counter = utils.CountHandler()
self.log.addHandler(self.counter)
def tearDown(self):
self.log.removeHandler(self.counter)
def test_default_values(self):
self.assertEqual(self.counter.get_counts(), [])
def test_count_critical(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.critical('msg')
self.assertEqual(self.counter.get_counts(), [('CRITICAL', 1)])
def test_count_error(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.error('msg')
self.assertEqual(self.counter.get_counts(), [('ERROR', 1)])
def test_count_warning(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.warning('msg')
self.assertEqual(self.counter.get_counts(), [('WARNING', 1)])
def test_count_info(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.info('msg')
self.assertEqual(self.counter.get_counts(), [('INFO', 1)])
def test_count_debug(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.debug('msg')
self.assertEqual(self.counter.get_counts(), [('DEBUG', 1)])
def test_count_multiple(self):
self.assertEqual(self.counter.get_counts(), [])
self.log.warning('msg 1')
self.assertEqual(self.counter.get_counts(), [('WARNING', 1)])
self.log.warning('msg 2')
self.assertEqual(self.counter.get_counts(), [('WARNING', 2)])
self.log.debug('msg 3')
self.assertEqual(self.counter.get_counts(), [('WARNING', 2), ('DEBUG', 1)])
self.log.error('mdg 4')
self.assertEqual(self.counter.get_counts(), [('ERROR', 1), ('WARNING', 2), ('DEBUG', 1)])
def test_log_level(self):
self.assertEqual(self.counter.get_counts(), [])
self.counter.setLevel(logging.ERROR)
self.log.error('counted')
self.log.warning('not counted')
self.log.info('not counted')
self.assertEqual(self.counter.get_counts(), [('ERROR', 1)])
self.counter.setLevel(logging.WARNING)
self.log.error('counted')
self.log.warning('counted')
self.log.info('not counted')
self.assertEqual(self.counter.get_counts(), [('ERROR', 2), ('WARNING', 1)])
@dataclasses.dataclass
|
LogCounterTests
|
python
|
django__django
|
tests/transactions/tests.py
|
{
"start": 19135,
"end": 22452
}
|
class ____(TransactionTestCase):
available_apps = ["transactions"]
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable:
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature("can_release_savepoints")
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer
# exists.
connection.savepoint_rollback(sid)
def test_mark_for_rollback_on_error_in_transaction(self):
with transaction.atomic(savepoint=False):
# Swallow the intentional error raised.
with self.assertRaisesMessage(Exception, "Oops"):
# Wrap in `mark_for_rollback_on_error` to check if the
# transaction is marked broken.
with transaction.mark_for_rollback_on_error():
# Ensure that we are still in a good state.
self.assertFalse(transaction.get_rollback())
raise Exception("Oops")
# mark_for_rollback_on_error marked the transaction as broken …
self.assertTrue(transaction.get_rollback())
# … and further queries fail.
msg = "You can't execute queries until the end of the 'atomic' block."
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
Reporter.objects.create()
# Transaction errors are reset at the end of an transaction, so this
# should just work.
Reporter.objects.create()
def test_mark_for_rollback_on_error_in_autocommit(self):
self.assertTrue(transaction.get_autocommit())
# Swallow the intentional error raised.
with self.assertRaisesMessage(Exception, "Oops"):
# Wrap in `mark_for_rollback_on_error` to check if the transaction
# is marked broken.
with transaction.mark_for_rollback_on_error():
# Ensure that we are still in a good state.
self.assertFalse(transaction.get_connection().needs_rollback)
raise Exception("Oops")
# Ensure that `mark_for_rollback_on_error` did not mark the
# transaction as broken, since we are in autocommit mode …
self.assertFalse(transaction.get_connection().needs_rollback)
# … and further queries work nicely.
Reporter.objects.create()
|
AtomicMiscTests
|
python
|
python-attrs__attrs
|
src/attr/validators.py
|
{
"start": 20247,
"end": 21458
}
|
class ____:
validators = attrib()
def __call__(self, inst, attr, value):
for v in self.validators:
try:
v(inst, attr, value)
except Exception: # noqa: BLE001, PERF203, S112
continue
else:
return
msg = f"None of {self.validators!r} satisfied for value {value!r}"
raise ValueError(msg)
def __repr__(self):
return f"<or validator wrapping {self.validators!r}>"
def or_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators until one of them is
satisfied.
Args:
validators (~collections.abc.Iterable[typing.Callable]):
Arbitrary number of validators.
Raises:
ValueError:
If no validator is satisfied. Raised with a human-readable error
message listing all the wrapped validators and the value that
failed all of them.
.. versionadded:: 24.1.0
"""
vals = []
for v in validators:
vals.extend(v.validators if isinstance(v, _OrValidator) else [v])
return _OrValidator(tuple(vals))
|
_OrValidator
|
python
|
huggingface__transformers
|
tests/models/groupvit/test_modeling_groupvit.py
|
{
"start": 19440,
"end": 22356
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (GroupViTModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {}
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = GroupViTModelTester(self)
common_properties = ["projection_dim", "projection_intermediate_dim", "logit_scale_init_value"]
self.config_tester = ConfigTester(
self, config_class=GroupViTConfig, has_text_modality=False, common_properties=common_properties
)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_config(self):
self.config_tester.run_common_tests()
@is_flaky(description="The `index` computed with `max()` in `hard_softmax` is not stable.")
def test_batching_equivalence(self):
super().test_batching_equivalence()
@unittest.skip(reason="hidden_states are tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="input_embeds are tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="GroupViTModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save GroupViTConfig and check if we can load GroupViTVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = GroupViTVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save GroupViTConfig and check if we can load GroupViTTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = GroupViTTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "nvidia/groupvit-gcc-yfcc"
model = GroupViTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
|
GroupViTModelTest
|
python
|
django__django
|
tests/migrations/test_migrations_squashed_complex_multi_apps/app2/2_auto.py
|
{
"start": 35,
"end": 182
}
|
class ____(migrations.Migration):
dependencies = [("app2", "1_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
|
Migration
|
python
|
PyCQA__pylint
|
tests/functional/o/overridden_final_method_py38.py
|
{
"start": 592,
"end": 693
}
|
class ____(BaseConditional):
def my_method(self): # [overridden-final-method]
pass
|
Subclass2
|
python
|
catalyst-team__catalyst
|
examples/detection/callbacks.py
|
{
"start": 9354,
"end": 12849
}
|
class ____(Callback):
"""Compute mAP for Object Detection task."""
def __init__(
self,
num_classes=1,
metric_key="mAP",
output_type="ssd",
iou_threshold=0.5,
confidence_threshold=0.5,
):
"""
Args:
num_classes (int): Number of classes.
Default is ``1``.
metric_key (str): name of a metric.
Default is ``"mAP"``.
output_type (str): model output type. Valid values are ``"ssd"`` or
``"centernet"`` or ``"yolo-x"``.
Default is ``"ssd"``.
iou_threshold (float): IoU threshold to use in NMS.
Default is ``0.5``.
confidence_threshold (float): confidence threshold,
proposals with lover values than threshold will be ignored.
Default is ``0.5``.
"""
super().__init__(order=CallbackOrder.Metric)
assert output_type in ("ssd", "centernet", "yolo-x")
self.num_classes = num_classes
self.metric_key = metric_key
self.output_type = output_type
self.iou_threshold = iou_threshold
self.confidence_threshold = confidence_threshold
self.metric_fn = MetricBuilder.build_evaluation_metric(
"map_2d", async_mode=False, num_classes=num_classes
)
def on_loader_start(self, runner: "IRunner"): # noqa: D102, F821
if not runner.is_valid_loader:
return
self.metric_fn.reset()
def on_batch_end(self, runner: "IRunner"): # noqa: D102, F821
if not runner.is_valid_loader:
return
if self.output_type == "ssd":
p_box = runner.batch["predicted_bboxes"]
gt_box = runner.batch["bboxes"]
p_scores = runner.batch["predicted_scores"]
gt_labels = runner.batch["labels"]
for predicted_sample, ground_truth_sample in process_ssd_output(
p_box, p_scores, gt_box, gt_labels, iou_threshold=self.iou_threshold
):
self.metric_fn.add(predicted_sample, ground_truth_sample)
elif self.output_type == "centernet":
p_heatmap = runner.batch["predicted_heatmap"]
gt_box = runner.batch["bboxes"]
p_regression = runner.batch["predicted_regression"]
gt_labels = runner.batch["labels"]
for predicted_sample, ground_truth_sample in process_centernet_output(
p_heatmap,
p_regression,
gt_box,
gt_labels,
iou_threshold=self.iou_threshold,
confidence_threshold=self.confidence_threshold,
):
self.metric_fn.add(predicted_sample, ground_truth_sample)
elif self.output_type == "yolo-x":
p_tensor = runner.batch["predicted_tensor"]
gt_box = runner.batch["bboxes"]
gt_labels = runner.batch["labels"]
for predicted_sample, ground_truth_sample in process_yolo_x_output(
p_tensor, gt_box, gt_labels, iou_threshold=self.iou_threshold
):
self.metric_fn.add(predicted_sample, ground_truth_sample)
def on_loader_end(self, runner: "IRunner"): # noqa: D102, F821
if not runner.is_valid_loader:
return
map_value = self.metric_fn.value()["mAP"]
runner.loader_metrics[self.metric_key] = map_value
|
DetectionMeanAveragePrecision
|
python
|
doocs__leetcode
|
solution/3000-3099/3042.Count Prefix and Suffix Pairs I/Solution2.py
|
{
"start": 0,
"end": 123
}
|
class ____:
__slots__ = ["children", "cnt"]
def __init__(self):
self.children = {}
self.cnt = 0
|
Node
|
python
|
fluentpython__example-code-2e
|
21-async/mojifinder/bottle.py
|
{
"start": 135868,
"end": 150565
}
|
class ____(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '([urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Open and close grouping tokens
_re_tok += '|([\\[\\{\\(])'
_re_tok += '|([\\]\\}\\)])'
# 5,6: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 7: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 8: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=\\r?$))'
# 9: And finally, a single newline. The 10th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)'
# Match inline statements (may contain python strings)
_re_inl = '(?m)%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
_re_tok = '(?m)' + _re_tok
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # New escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+m.group(5)+line+sep)
self.offset += len(line+sep)+1
continue
elif m.group(5): # Old escape syntax
depr('Escape code lines with a backslash.') #0.12
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.') #0.12
if len(parts) == 1: return "_printlist([base])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.') #0.12
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
StplParser
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 914993,
"end": 915561
}
|
class ____(sgqlc.types.Type):
"""Represents an author of a reaction."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "reacted_at")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(sgqlc.types.non_null("Reactor"), graphql_name="node")
"""The author of the reaction."""
reacted_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="reactedAt")
"""The moment when the user made the reaction."""
|
ReactorEdge
|
python
|
tornadoweb__tornado
|
tornado/locale.py
|
{
"start": 17661,
"end": 18832
}
|
class ____(Locale):
"""Locale implementation using tornado's CSV translation format."""
def __init__(self, code: str, translations: Dict[str, Dict[str, str]]) -> None:
self.translations = translations
super().__init__(code)
def translate(
self,
message: str,
plural_message: Optional[str] = None,
count: Optional[int] = None,
) -> str:
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
def pgettext(
self,
context: str,
message: str,
plural_message: Optional[str] = None,
count: Optional[int] = None,
) -> str:
if self.translations:
gen_log.warning("pgettext is not supported by CSVLocale")
return self.translate(message, plural_message, count)
|
CSVLocale
|
python
|
Pylons__pyramid
|
tests/test_util.py
|
{
"start": 36485,
"end": 37071
}
|
class ____(unittest.TestCase):
class Dummy:
def run(self): # pragma: no cover
return 'OK'
def _callFUT(self, val):
from pyramid.util import is_unbound_method
return is_unbound_method(val)
def test_bound_method(self):
self.assertFalse(self._callFUT(self.Dummy().run))
def test_unbound_method(self):
self.assertTrue(self._callFUT(self.Dummy.run))
def test_normal_func_unbound(self):
def func(): # pragma: no cover
return 'OK'
self.assertFalse(self._callFUT(func))
|
TestUnboundMethods
|
python
|
kamyu104__LeetCode-Solutions
|
Python/build-binary-expression-tree-from-infix-expression.py
|
{
"start": 66,
"end": 219
}
|
class ____(object):
def __init__(self, val=" ", left=None, right=None):
self.val = val
self.left = left
self.right = right
|
Node
|
python
|
PrefectHQ__prefect
|
tests/utilities/schema_tools/test_validation.py
|
{
"start": 23642,
"end": 24844
}
|
class ____:
def test_prioritize_placeholder_errors(self):
errors = [
# error we want to throw away
MockValidationError(
message="InvalidJSON() is not of type 'string",
relative_path=["x"],
instance=InvalidJSON(),
validator="type",
),
# error we want to keep
MockValidationError(
message="Invalid JSON: Unterminated string starting at: line 1 column 1 (char 0)",
relative_path=["x"],
instance=InvalidJSON(),
validator="_placeholders",
),
# unrelated error
MockValidationError(
message="1 is not of type 'string",
relative_path=["y"],
instance=1,
validator="type",
),
]
prioritized_errors = prioritize_placeholder_errors(errors)
assert len(prioritized_errors) == 2
assert prioritized_errors[0].validator == "_placeholders"
assert prioritized_errors[1].validator == "type"
assert prioritized_errors[1].instance == 1
|
TestPrioritizePlaceholderErrors
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/contenttypes.py
|
{
"start": 951,
"end": 8980
}
|
class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX ContentTypes file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
# Copy the defaults in case we need to change them.
self.defaults = copy.deepcopy(defaults)
self.overrides = copy.deepcopy(overrides)
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_types()
self._write_defaults()
self._write_overrides()
self._xml_end_tag("Types")
# Close the file.
self._xml_close()
def _add_default(self, default: Tuple[str, str]) -> None:
# Add elements to the ContentTypes defaults.
self.defaults.append(default)
def _add_override(self, override: Tuple[str, str]) -> None:
# Add elements to the ContentTypes overrides.
self.overrides.append(override)
def _add_worksheet_name(self, worksheet_name: str) -> None:
# Add the name of a worksheet to the ContentTypes overrides.
worksheet_name = "/xl/worksheets/" + worksheet_name + ".xml"
self._add_override(
(worksheet_name, APP_DOCUMENT + "spreadsheetml.worksheet+xml")
)
def _add_chartsheet_name(self, chartsheet_name: str) -> None:
# Add the name of a chartsheet to the ContentTypes overrides.
chartsheet_name = "/xl/chartsheets/" + chartsheet_name + ".xml"
self._add_override(
(chartsheet_name, APP_DOCUMENT + "spreadsheetml.chartsheet+xml")
)
def _add_chart_name(self, chart_name: str) -> None:
# Add the name of a chart to the ContentTypes overrides.
chart_name = "/xl/charts/" + chart_name + ".xml"
self._add_override((chart_name, APP_DOCUMENT + "drawingml.chart+xml"))
def _add_drawing_name(self, drawing_name: str) -> None:
# Add the name of a drawing to the ContentTypes overrides.
drawing_name = "/xl/drawings/" + drawing_name + ".xml"
self._add_override((drawing_name, APP_DOCUMENT + "drawing+xml"))
def _add_vml_name(self) -> None:
# Add the name of a VML drawing to the ContentTypes defaults.
self._add_default(("vml", APP_DOCUMENT + "vmlDrawing"))
def _add_comment_name(self, comment_name: str) -> None:
# Add the name of a comment to the ContentTypes overrides.
comment_name = "/xl/" + comment_name + ".xml"
self._add_override((comment_name, APP_DOCUMENT + "spreadsheetml.comments+xml"))
def _add_shared_strings(self) -> None:
# Add the sharedStrings link to the ContentTypes overrides.
self._add_override(
("/xl/sharedStrings.xml", APP_DOCUMENT + "spreadsheetml.sharedStrings+xml")
)
def _add_calc_chain(self) -> None:
# Add the calcChain link to the ContentTypes overrides.
self._add_override(
("/xl/calcChain.xml", APP_DOCUMENT + "spreadsheetml.calcChain+xml")
)
def _add_image_types(self, image_types: Dict[str, bool]) -> None:
# Add the image default types.
for image_type in image_types:
extension = image_type
if image_type in ("wmf", "emf"):
image_type = "x-" + image_type
self._add_default((extension, "image/" + image_type))
def _add_table_name(self, table_name: str) -> None:
# Add the name of a table to the ContentTypes overrides.
table_name = "/xl/tables/" + table_name + ".xml"
self._add_override((table_name, APP_DOCUMENT + "spreadsheetml.table+xml"))
def _add_vba_project(self) -> None:
# Add a vbaProject to the ContentTypes defaults.
# Change the workbook.xml content-type from xlsx to xlsm.
for i, override in enumerate(self.overrides):
if override[0] == "/xl/workbook.xml":
xlsm = "application/vnd.ms-excel.sheet.macroEnabled.main+xml"
self.overrides[i] = ("/xl/workbook.xml", xlsm)
self._add_default(("bin", "application/vnd.ms-office.vbaProject"))
def _add_vba_project_signature(self) -> None:
# Add a vbaProjectSignature to the ContentTypes overrides.
self._add_override(
(
"/xl/vbaProjectSignature.bin",
"application/vnd.ms-office.vbaProjectSignature",
)
)
def _add_custom_properties(self) -> None:
# Add the custom properties to the ContentTypes overrides.
self._add_override(
("/docProps/custom.xml", APP_DOCUMENT + "custom-properties+xml")
)
def _add_metadata(self) -> None:
# Add the metadata file to the ContentTypes overrides.
self._add_override(
("/xl/metadata.xml", APP_DOCUMENT + "spreadsheetml.sheetMetadata+xml")
)
def _add_feature_bag_property(self) -> None:
# Add the featurePropertyBag file to the ContentTypes overrides.
self._add_override(
(
"/xl/featurePropertyBag/featurePropertyBag.xml",
"application/vnd.ms-excel.featurepropertybag+xml",
)
)
def _add_rich_value(self) -> None:
# Add the richValue files to the ContentTypes overrides.
self._add_override(
(
"/xl/richData/rdRichValueTypes.xml",
"application/vnd.ms-excel.rdrichvaluetypes+xml",
)
)
self._add_override(
("/xl/richData/rdrichvalue.xml", "application/vnd.ms-excel.rdrichvalue+xml")
)
self._add_override(
(
"/xl/richData/rdrichvaluestructure.xml",
"application/vnd.ms-excel.rdrichvaluestructure+xml",
)
)
self._add_override(
(
"/xl/richData/richValueRel.xml",
"application/vnd.ms-excel.richvaluerel+xml",
)
)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_defaults(self) -> None:
# Write out all of the <Default> types.
for extension, content_type in self.defaults:
self._xml_empty_tag(
"Default", [("Extension", extension), ("ContentType", content_type)]
)
def _write_overrides(self) -> None:
# Write out all of the <Override> types.
for part_name, content_type in self.overrides:
self._xml_empty_tag(
"Override", [("PartName", part_name), ("ContentType", content_type)]
)
def _write_types(self) -> None:
# Write the <Types> element.
xmlns = "http://schemas.openxmlformats.org/package/2006/content-types"
attributes = [
(
"xmlns",
xmlns,
)
]
self._xml_start_tag("Types", attributes)
def _write_default(self, extension, content_type) -> None:
# Write the <Default> element.
attributes = [
("Extension", extension),
("ContentType", content_type),
]
self._xml_empty_tag("Default", attributes)
def _write_override(self, part_name, content_type) -> None:
# Write the <Override> element.
attributes = [
("PartName", part_name),
("ContentType", content_type),
]
self._xml_empty_tag("Override", attributes)
|
ContentTypes
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_ed25519.py
|
{
"start": 2096,
"end": 13101
}
|
class ____:
def test_sign_verify_input(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "sign.input"),
load_ed25519_vectors,
)
for vector in vectors:
with subtests.test():
sk = binascii.unhexlify(vector["secret_key"])
pk = binascii.unhexlify(vector["public_key"])
message = binascii.unhexlify(vector["message"])
signature = binascii.unhexlify(vector["signature"])
private_key = Ed25519PrivateKey.from_private_bytes(sk)
computed_sig = private_key.sign(message)
assert computed_sig == signature
public_key = private_key.public_key()
assert (
public_key.public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.Raw,
)
== pk
)
public_key.verify(signature, message)
def test_pub_priv_bytes_raw(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "sign.input"),
load_ed25519_vectors,
)
for vector in vectors:
with subtests.test():
sk = binascii.unhexlify(vector["secret_key"])
pk = binascii.unhexlify(vector["public_key"])
private_key = Ed25519PrivateKey.from_private_bytes(sk)
assert private_key.private_bytes_raw() == sk
public_key = Ed25519PublicKey.from_public_bytes(pk)
assert public_key.public_bytes_raw() == pk
def test_invalid_signature(self, backend):
key = Ed25519PrivateKey.generate()
signature = key.sign(b"test data")
with pytest.raises(InvalidSignature):
key.public_key().verify(signature, b"wrong data")
with pytest.raises(InvalidSignature):
key.public_key().verify(b"0" * 64, b"test data")
def test_sign_verify_buffer(self, backend):
key = Ed25519PrivateKey.generate()
data = bytearray(b"test data")
signature = key.sign(data)
key.public_key().verify(bytearray(signature), data)
def test_generate(self, backend):
key = Ed25519PrivateKey.generate()
assert key
assert key.public_key()
def test_load_public_bytes(self, backend):
public_key = Ed25519PrivateKey.generate().public_key()
public_bytes = public_key.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
public_key2 = Ed25519PublicKey.from_public_bytes(public_bytes)
assert public_bytes == public_key2.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
def test_invalid_type_public_bytes(self, backend):
with pytest.raises(TypeError):
Ed25519PublicKey.from_public_bytes(
object() # type: ignore[arg-type]
)
def test_invalid_type_private_bytes(self, backend):
with pytest.raises(TypeError):
Ed25519PrivateKey.from_private_bytes(
object() # type: ignore[arg-type]
)
def test_invalid_length_from_public_bytes(self, backend):
with pytest.raises(ValueError):
Ed25519PublicKey.from_public_bytes(b"a" * 31)
with pytest.raises(ValueError):
Ed25519PublicKey.from_public_bytes(b"a" * 33)
def test_invalid_length_from_private_bytes(self, backend):
with pytest.raises(ValueError):
Ed25519PrivateKey.from_private_bytes(b"a" * 31)
with pytest.raises(ValueError):
Ed25519PrivateKey.from_private_bytes(b"a" * 33)
def test_invalid_private_bytes(self, backend):
key = Ed25519PrivateKey.generate()
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
None, # type: ignore[arg-type]
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
DummyKeySerializationEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.PKCS8,
DummyKeySerializationEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.OpenSSH,
serialization.NoEncryption(),
)
def test_invalid_public_bytes(self, backend):
key = Ed25519PrivateKey.generate().public_key()
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.PKCS1
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.Raw
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.OpenSSH
)
@pytest.mark.parametrize(
("encoding", "fmt", "encryption", "passwd", "load_func"),
[
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_der_private_key,
),
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_der_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"\x00"),
b"\x00",
serialization.load_der_private_key,
),
],
)
def test_round_trip_private_serialization(
self, encoding, fmt, encryption, passwd, load_func, backend
):
key = Ed25519PrivateKey.generate()
serialized = key.private_bytes(encoding, fmt, encryption)
loaded_key = load_func(serialized, passwd, backend)
assert isinstance(loaded_key, Ed25519PrivateKey)
def test_invalid_public_key_pem(self):
with pytest.raises(ValueError):
serialization.load_pem_public_key(
textwrap.dedent("""
-----BEGIN PUBLIC KEY-----
MCswBQYDK2VwAyIA////////////////////////////////////////////
-----END PUBLIC KEY-----""").encode()
)
def test_buffer_protocol(self, backend):
private_bytes = os.urandom(32)
key = Ed25519PrivateKey.from_private_bytes(bytearray(private_bytes))
assert (
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
== private_bytes
)
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
def test_public_key_equality(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None).public_key()
key2 = serialization.load_der_private_key(key_bytes, None).public_key()
key3 = Ed25519PrivateKey.generate().public_key()
assert key1 == key2
assert key1 != key3
assert key1 != object()
with pytest.raises(TypeError):
key1 < key2 # type: ignore[operator]
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
def test_public_key_copy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None).public_key()
key2 = copy.copy(key1)
assert key1 == key2
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
def test_public_key_deepcopy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None).public_key()
key2 = copy.deepcopy(key1)
assert key1 == key2
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
def test_private_key_copy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None)
key2 = copy.copy(key1)
assert key1 == key2
@pytest.mark.supported(
    only_if=lambda backend: backend.ed25519_supported(),
    skip_message="Requires OpenSSL with Ed25519 support",
)
def test_private_key_deepcopy(backend):
    """A deep copy of an Ed25519 private key compares equal to the source."""
    key_bytes = load_vectors_from_file(
        os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
        lambda derfile: derfile.read(),
        mode="rb",
    )
    original = serialization.load_der_private_key(key_bytes, None)
    duplicate = copy.deepcopy(original)
    assert original == duplicate
|
TestEd25519Signing
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_gtk3.py
|
{
"start": 18903,
"end": 21811
}
|
class ____(backend_tools.ToolHelpBase):
    """GTK3 "help" tool: display the registered keyboard shortcuts.

    Uses a native ``Gtk.ShortcutsWindow`` when the runtime GTK is at least
    3.20, and falls back to a plain message dialog otherwise.
    """

    def _normalize_shortcut(self, key):
        """
        Convert Matplotlib key presses to GTK+ accelerator identifiers.

        Related to `FigureCanvasGTK3._get_key`.
        """
        # Matplotlib key names that differ from the GTK accelerator names.
        special = {
            'backspace': 'BackSpace',
            'pagedown': 'Page_Down',
            'pageup': 'Page_Up',
            'scroll_lock': 'Scroll_Lock',
        }
        # "ctrl+shift+x" -> mods ["<ctrl>", "<shift>"], key "x".
        parts = key.split('+')
        mods = ['<' + mod + '>' for mod in parts[:-1]]
        key = parts[-1]
        if key in special:
            key = special[key]
        elif len(key) > 1:
            # Multi-character names (e.g. "escape") are capitalized for GTK.
            key = key.capitalize()
        elif key.isupper():
            # A bare uppercase letter implies the shift modifier.
            mods += ['<shift>']
        return ''.join(mods) + key

    def _is_valid_shortcut(self, key):
        """
        Check for a valid shortcut to be displayed.

        - GTK will never send 'cmd+' (see `FigureCanvasGTK3._get_key`).
        - The shortcut window only shows keyboard shortcuts, not mouse buttons.
        """
        return 'cmd+' not in key and not key.startswith('MouseButton.')

    def _show_shortcuts_window(self):
        """Build and show a Gtk.ShortcutsWindow listing each tool's keymap."""
        section = Gtk.ShortcutsSection()
        for name, tool in sorted(self.toolmanager.tools.items()):
            if not tool.description:
                continue
            # Putting everything in a separate group allows GTK to
            # automatically split them into separate columns/pages, which is
            # useful because we have lots of shortcuts, some with many keys
            # that are very wide.
            group = Gtk.ShortcutsGroup()
            section.add(group)
            # A hack to remove the title since we have no group naming.
            group.forall(lambda widget, data: widget.set_visible(False), None)
            shortcut = Gtk.ShortcutsShortcut(
                accelerator=' '.join(
                    self._normalize_shortcut(key)
                    for key in self.toolmanager.get_tool_keymap(name)
                    if self._is_valid_shortcut(key)),
                title=tool.name,
                subtitle=tool.description)
            group.add(shortcut)
        window = Gtk.ShortcutsWindow(
            title='Help',
            modal=True,
            transient_for=self._figure.canvas.get_toplevel())
        section.show()  # Must be done explicitly before add!
        window.add(section)
        window.show_all()

    def _show_shortcuts_dialog(self):
        """Fallback for GTK < 3.20: show the help text in a message dialog."""
        dialog = Gtk.MessageDialog(
            self._figure.canvas.get_toplevel(),
            0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, self._get_help_text(),
            title="Help")
        dialog.run()
        dialog.destroy()

    def trigger(self, *args):
        # Gtk.check_version returns None when the runtime satisfies the
        # requested (3, 20, 0) minimum, i.e. ShortcutsWindow is available.
        if Gtk.check_version(3, 20, 0) is None:
            self._show_shortcuts_window()
        else:
            self._show_shortcuts_dialog()
@backend_tools._register_tool_class(FigureCanvasGTK3)
|
HelpGTK3
|
python
|
imageio__imageio
|
imageio/plugins/_tifffile.py
|
{
"start": 115380,
"end": 123046
}
|
class ____(object):
    """Sequence of TIFF image file directories.

    Pages are read lazily: ``self.pages`` holds a mix of materialized
    TiffPage/TiffFrame objects and bare integer file offsets for pages not
    yet loaded.  ``self.complete`` becomes True once the offsets of all
    pages have been discovered by walking the IFD chain.
    """

    def __init__(self, parent):
        """Initialize instance from file. Read first TiffPage from file.

        The file position must be at an offset to an offset to a TiffPage.
        """
        self.parent = parent
        self.pages = []  # cache of TiffPages, TiffFrames, or their offsets
        self.complete = False  # True if offsets to all pages were read
        self._tiffpage = TiffPage  # class for reading tiff pages
        self._keyframe = None
        self._cache = True
        # read offset to first page
        fh = parent.filehandle
        self._nextpageoffset = fh.tell()
        offset = struct.unpack(parent.offsetformat, fh.read(parent.offsetsize))[0]
        if offset == 0:
            # warnings.warn('file contains no pages')
            self.complete = True
            return
        if offset >= fh.size:
            # Offset points past EOF; treat the file as having no pages.
            warnings.warn("invalid page offset (%i)" % offset)
            self.complete = True
            return
        # always read and cache first page
        fh.seek(offset)
        page = TiffPage(parent, index=0)
        self.pages.append(page)
        self._keyframe = page

    @property
    def cache(self):
        """Return if pages/frames are currently being cached."""
        return self._cache

    @cache.setter
    def cache(self, value):
        """Enable or disable caching of pages/frames. Clear cache if False."""
        value = bool(value)
        if self._cache and not value:
            self.clear()
        self._cache = value

    @property
    def useframes(self):
        """Return if currently using TiffFrame (True) or TiffPage (False)."""
        return self._tiffpage == TiffFrame and TiffFrame is not TiffPage

    @useframes.setter
    def useframes(self, value):
        """Set to use TiffFrame (True) or TiffPage (False)."""
        self._tiffpage = TiffFrame if value else TiffPage

    @property
    def keyframe(self):
        """Return index of current keyframe."""
        return self._keyframe.index

    @keyframe.setter
    def keyframe(self, index):
        """Set current keyframe. Load TiffPage from file if necessary."""
        if self._keyframe.index == index:
            return
        if self.complete or 0 <= index < len(self.pages):
            page = self.pages[index]
            if isinstance(page, TiffPage):
                self._keyframe = page
                return
            elif isinstance(page, TiffFrame):
                # remove existing frame
                self.pages[index] = page.offset
        # load TiffPage from file
        # Temporarily force full TiffPage parsing regardless of the
        # useframes setting, then restore it.
        useframes = self.useframes
        self._tiffpage = TiffPage
        self._keyframe = self[index]
        self.useframes = useframes

    @property
    def next_page_offset(self):
        """Return offset where offset to a new page can be stored."""
        if not self.complete:
            # Walk to the end of the IFD chain to find the final offset slot.
            self._seek(-1)
        return self._nextpageoffset

    def load(self):
        """Read all remaining pages from file."""
        fh = self.parent.filehandle
        keyframe = self._keyframe
        pages = self.pages
        if not self.complete:
            self._seek(-1)
        for i, page in enumerate(pages):
            # Integer entries are unread offsets; materialize them now.
            if isinstance(page, inttypes):
                fh.seek(page)
                page = self._tiffpage(self.parent, index=i, keyframe=keyframe)
                pages[i] = page

    def clear(self, fully=True):
        """Delete all but first page from cache. Set keyframe to first page."""
        pages = self.pages
        if not self._cache or len(pages) < 1:
            return
        self._keyframe = pages[0]
        if fully:
            # delete all but first TiffPage/TiffFrame
            for i, page in enumerate(pages[1:]):
                if not isinstance(page, inttypes):
                    pages[i + 1] = page.offset
        elif TiffFrame is not TiffPage:
            # delete only TiffFrames
            for i, page in enumerate(pages):
                if isinstance(page, TiffFrame):
                    pages[i] = page.offset

    def _seek(self, index, maxpages=2**22):
        """Seek file to offset of specified page.

        Extends ``self.pages`` with raw offsets as the IFD chain is walked;
        index -1 walks to the end of the chain.  ``maxpages`` bounds the walk
        as a safeguard against corrupt/cyclic files.
        """
        pages = self.pages
        if not pages:
            return
        fh = self.parent.filehandle
        if fh.closed:
            raise RuntimeError("FileHandle is closed")
        if self.complete or 0 <= index < len(pages):
            # Offset already known; no chain walking needed.
            page = pages[index]
            offset = page if isinstance(page, inttypes) else page.offset
            fh.seek(offset)
            return
        offsetformat = self.parent.offsetformat
        offsetsize = self.parent.offsetsize
        tagnoformat = self.parent.tagnoformat
        tagnosize = self.parent.tagnosize
        tagsize = self.parent.tagsize
        unpack = struct.unpack
        page = pages[-1]
        offset = page if isinstance(page, inttypes) else page.offset
        while len(pages) < maxpages:
            # read offsets to pages from file until index is reached
            fh.seek(offset)
            # skip tags
            try:
                tagno = unpack(tagnoformat, fh.read(tagnosize))[0]
                if tagno > 4096:
                    raise ValueError("suspicious number of tags")
            except Exception:
                # Unreadable IFD: drop the bad trailing entry and stop.
                warnings.warn("corrupted tag list at offset %i" % offset)
                del pages[-1]
                self.complete = True
                break
            self._nextpageoffset = offset + tagnosize + tagno * tagsize
            fh.seek(self._nextpageoffset)
            # read offset to next page
            offset = unpack(offsetformat, fh.read(offsetsize))[0]
            if offset == 0:
                self.complete = True
                break
            if offset >= fh.size:
                warnings.warn("invalid page offset (%i)" % offset)
                self.complete = True
                break
            pages.append(offset)
            if 0 <= index < len(pages):
                break
        if index >= len(pages):
            raise IndexError("list index out of range")
        page = pages[index]
        fh.seek(page if isinstance(page, inttypes) else page.offset)

    def __bool__(self):
        """Return True if file contains any pages."""
        return len(self.pages) > 0

    def __len__(self):
        """Return number of pages in file."""
        if not self.complete:
            self._seek(-1)
        return len(self.pages)

    def __getitem__(self, key):
        """Return specified page(s) from cache or file."""
        pages = self.pages
        if not pages:
            raise IndexError("list index out of range")
        if key == 0:
            # First page is always materialized in __init__.
            return pages[key]
        if isinstance(key, slice):
            start, stop, _ = key.indices(2**31 - 1)
            if not self.complete and max(stop, start) > len(pages):
                self._seek(-1)
            return [self[i] for i in range(*key.indices(len(pages)))]
        if self.complete and key >= len(pages):
            raise IndexError("list index out of range")
        try:
            page = pages[key]
        except IndexError:
            page = 0
        if not isinstance(page, inttypes):
            # Already materialized (TiffPage or TiffFrame).
            return page
        self._seek(key)
        page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe)
        if self._cache:
            pages[key] = page
        return page

    def __iter__(self):
        """Return iterator over all pages."""
        i = 0
        while True:
            try:
                yield self[i]
                i += 1
            except IndexError:
                break
|
TiffPages
|
python
|
ZoranPandovski__al-go-rithms
|
puzzles/CollectRubicCube/Python/solve_rubic_cube.py
|
{
"start": 10629,
"end": 10916
}
|
class ____:
def __init__(self, vertexId):
self.Id = vertexId
self.Neighbors = []
def getId(self):
return self.Id
def addNeighbor(self, Vertex):
self.Neighbors.append(Vertex)
def getNeighbors(self):
return self.Neighbors
|
GraphVertex
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/split.py
|
{
"start": 342,
"end": 1718
}
|
class ____(Activation):
    """Parallel split gateway activation."""

    def __init__(self, *args, **kwargs):  # noqa D102
        # Populated by activate(); list of (node, data) pairs to create next.
        self.next_tasks = []
        super().__init__(*args, **kwargs)

    @Activation.status.super()
    def activate(self):
        """Calculate nodes list to activate."""
        # A branch is taken when it has no condition, or its condition holds.
        # NOTE(review): `data_source` is unpacked from the branch tuple but
        # never used — every entry is stored as (node, None); confirm intended.
        for node, cond, data_source in self.flow_task._branches:
            if cond and not cond(self):
                continue
            else:
                self.next_tasks.append((node, None))
        if not self.next_tasks:
            raise FlowRuntimeError(
                "No next task available for {}".format(self.flow_task.name)
            )

    @Activation.status.super()
    def create_next(self):
        """Activate next tasks for parallel execution.

        Each task would have a new execution token attached,
        the Split task token as a common prefix.
        """
        token_source = Token.split_token_source(self.task.token, self.task.pk)
        # Reorder so Join nodes come last — presumably so sibling branch
        # tasks are created before the join is activated; confirm.
        next_tasks = [
            (task, data) for task, data in self.next_tasks if not isinstance(task, Join)
        ] + [(task, data) for task, data in self.next_tasks if isinstance(task, Join)]
        # `n` (1-based position) is currently unused beyond the enumeration.
        for n, (next_task, data) in enumerate(next_tasks, 1):
            yield next_task._create(
                prev_activation=self, token=next(token_source), data=data
            )
|
SplitActivation
|
python
|
MongoEngine__mongoengine
|
mongoengine/queryset/visitor.py
|
{
"start": 321,
"end": 633
}
|
class ____:
"""Base visitor class for visiting Q-object nodes in a query tree."""
def visit_combination(self, combination):
"""Called by QCombination objects."""
return combination
def visit_query(self, query):
"""Called by (New)Q objects."""
return query
|
QNodeVisitor
|
python
|
pypa__hatch
|
backend/src/hatchling/version/source/plugin/interface.py
|
{
"start": 74,
"end": 1852
}
|
class ____(ABC): # no cov
"""
Example usage:
```python tab="plugin.py"
from hatchling.version.source.plugin.interface import VersionSourceInterface
class SpecialVersionSource(VersionSourceInterface):
PLUGIN_NAME = "special"
...
```
```python tab="hooks.py"
from hatchling.plugin import hookimpl
from .plugin import SpecialVersionSource
@hookimpl
def hatch_register_version_source():
return SpecialVersionSource
```
"""
PLUGIN_NAME = ""
"""The name used for selection."""
def __init__(self, root: str, config: dict) -> None:
self.__root = root
self.__config = config
@property
def root(self) -> str:
"""
The root of the project tree as a string.
"""
return self.__root
@property
def config(self) -> dict:
"""
```toml config-example
[tool.hatch.version]
```
"""
return self.__config
@abstractmethod
def get_version_data(self) -> dict:
"""
This should return a mapping with a `version` key representing the current version of the project and will be
displayed when invoking the [`version`](../../cli/reference.md#hatch-version) command without any arguments.
The mapping can contain anything else and will be passed to
[set_version](reference.md#hatchling.version.source.plugin.interface.VersionSourceInterface.set_version)
when updating the version.
"""
def set_version(self, version: str, version_data: dict) -> None:
"""
This should update the version to the first argument with the data provided during retrieval.
"""
raise NotImplementedError
|
VersionSourceInterface
|
python
|
yaml__pyyaml
|
lib/yaml/tokens.py
|
{
"start": 1509,
"end": 1557
}
|
class ____(Token):
    """Token marking the end of a flow mapping, i.e. the '}' delimiter."""
    id = '}'
|
FlowMappingEndToken
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tpu.py
|
{
"start": 65585,
"end": 72309
}
|
class ____(control_flow_ops.XLAControlFlowContext):
  """A `ControlFlowContext` for nodes inside a TPU inference computation.

  The primary role of `_TPUInferenceContext` is to indicate the mode of
  operation and possibly sanity check operators inside a
  tpu.rewrite_for_inference() computation.
  """

  def __init__(self, name: Text, check_ops: bool = True):
    super().__init__()
    self._name = name
    self._check_ops = check_ops

  def AddOp(self, op):
    self._AddOpInternal(op)

  def _AddOpInternal(self, op):
    # pylint: disable=protected-access
    # Reject ops known to be unsupported for TPU inference up front.
    if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:
      raise NotImplementedError(
          f"Operation of type {op.type} ({op.name}) is not supported on the "
          "TPU for inference. Execution will fail if this op is used in the "
          "graph. Make sure your variables are using variable_scope.")
    if self._outer_context:
      self._outer_context.AddInnerOp(op)

  def AddValue(self, val):
    # Delegate to the outer context when one exists; otherwise pass through.
    if self._outer_context:
      return self._outer_context.AddValue(val)
    return val

  def AddInnerOp(self, op):
    self._AddOpInternal(op)

  @property
  def grad_state(self):
    # Inference contexts never carry gradient-loop state.
    return None
def validate_inference_rewrite_for_variables(graph: ops.Graph):
  """Validates whether rewrite_for_inference() 'worked' for variables.

  rewrite_for_inference() is expected to append GuaranteeConstOps after
  ReadVariableOps, which only happens when variables are created and
  accessed via tf.compat.v1.get_variable() inside the tpu computation.
  Call this immediately after tpu.rewrite_for_inference() to verify that
  GuaranteeConst ops were actually added to the graph.

  Typical usages:
    tpu.validate_inference_rewrite_for_variables(
        tf.compat.v1.get_default_graph())

    tpu.validate_inference_rewrite_for_variables(sess.graph)

  Args:
    graph: The graph which needs to be validated.
  Raises:
    RuntimeError: if validation failed.
  """
  found_guarantee_const = any(
      op.type == "GuaranteeConst" for op in graph.get_operations())
  if not found_guarantee_const:
    raise RuntimeError(
        "No GuaranteeConst ops found in the graph after running "
        "tpu.rewrite_for_inference(...). Please check that you are using "
        "tf.get_variable() to create and access variables in your tpu "
        "computation.")
def rewrite_for_inference(
    computation: Callable[..., Any],
    inputs: Optional[List[core_types.Tensor]] = None,
    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
    name: Optional[Text] = None) -> List[core_types.Tensor]:
  """Rewrites `computation` for inference on a TPU system.

  Other than 'rewriting' the computation to run on a TPU, if using variables
  in your computation, it moves the ReadVariableOps outside the TPU
  computation, and adds GuaranteeConst ops just after the ReadVariableOps.
  This mechanism works only if you are using tf.compat.v1.get_variable() to
  create and access variables in your tpu computation. You can validate
  whether this worked, by calling validate_inference_rewrite_for_variables()
  method immediately after this method to check whether GuaranteeConstOps
  where added to the graph.

  Args:
    computation: A Python function that builds a computation to apply to the
      input. If the function takes n inputs, 'inputs' should be a list of n
      tensors. If the function returns m outputs, rewrite will return a list of
      m tensors.
    inputs: A list of input tensors or `None` (equivalent to an empty list).
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
      of arguments as inputs to `computation`.
    device_assignment: if not `None`, a `DeviceAssignment` describing the
      mapping between logical cores in the computation with physical cores in
      the TPU topology. May be omitted for a single-core computation, in which
      case the core attached to task 0, TPU device 0 is used.
    name: The name of the operator.

  Returns:
    A list of output tensors.
  """

  def guarantee_const_getter(getter, name, *args, **kwargs):
    # Wrap every variable read in a GuaranteeConst so XLA may treat the
    # value as constant for the duration of the computation.
    with ops.control_dependencies(None):
      return array_ops.guarantee_const(
          getter(name, *args, **kwargs), name=name + "/GuaranteeConst")

  def wrapped_computation(*args, **kwargs):
    """Execute computation under `_TPUInferenceContext`."""
    context = _TPUInferenceContext(
        name=ops.get_default_graph().unique_name("rewrite_for_inference"))
    try:
      context.Enter()

      vscope = variable_scope.get_variable_scope()
      # Swap in the GuaranteeConst getter and per-op caching for the
      # duration of the computation, then restore the previous settings.
      prev_custom_getter = vscope.custom_getter
      prev_caching_device = vscope.caching_device
      vscope.set_custom_getter(guarantee_const_getter)
      vscope.set_caching_device(lambda op: op.device)

      result = computation(*args, **kwargs)

      # NOTE(review): restoration happens outside a finally block — an
      # exception in computation() leaves the custom getter installed;
      # confirm this is acceptable.
      vscope.set_custom_getter(prev_custom_getter)
      vscope.set_caching_device(prev_caching_device)
    finally:
      context.Exit()
    return result

  # pylint: disable=undefined-variable
  return rewrite(
      wrapped_computation,
      inputs=inputs,
      infeed_queue=infeed_queue,
      device_assignment=device_assignment,
      name=name)
  # pylint: enable=undefined-variable
# pylint: enable=undefined-variable
def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):
  """Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.

  Args:
    prune_graph: A tensorflow graph from which we wish to prune unconnected ops
      as listed in _UNCONNECTED_OPS_TO_PRUNE.  In general, these ops should have
      no inputs and no consumers. These can often be left behind due to graph
      construction rewiring (for instance TF-Hub). While they never execute,
      they will cause XLA compile to fail so we strip them from XLA compile by
      removing the tpu_replicate attribute.
  """
  # Scan over the top level graph and all function graphs.
  candidate_graphs = [prune_graph]
  candidate_graphs.extend(prune_graph._functions.values())  # pylint: disable=protected-access
  for graph in candidate_graphs:
    if not isinstance(graph, ops.Graph):
      continue
    for op in graph.get_operations():
      if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
        continue
      # Keep the op in XLA compile if any of its outputs is consumed.
      if any(output.consumers() for output in op.outputs):
        continue
      logging.info(
          "Pruning OP %s of type %s from XLA Compile due to "
          "it being disconnected.", op.name, op.type)
      op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)  # pylint: disable=protected-access
|
_TPUInferenceContext
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/client_tests/test_shutdown_repository_location.py
|
{
"start": 669,
"end": 1236
}
|
class ____(ReadonlyGraphQLContextTestMatrix):
    """Read-only contexts must reject the shutdown-repository-location mutation."""

    def test_shutdown_repository_location_permission_failure(self, graphql_context):
        response = execute_dagster_graphql(
            graphql_context,
            SHUTDOWN_REPOSITORY_LOCATION_MUTATION,
            {"repositoryLocationName": main_repo_location_name()},
        )

        assert response
        assert response.data
        payload = response.data["shutdownRepositoryLocation"]
        assert payload
        assert payload["__typename"] == "UnauthorizedError"
|
TestShutdownRepositoryLocationReadOnly
|
python
|
numba__numba
|
numba/core/typing/npdatetime.py
|
{
"start": 5156,
"end": 5251
}
|
class ____(TimedeltaOrderedCmpOp):
    """Typing for `operator.lt` applied to timedelta values."""
    key = operator.lt
@infer_global(operator.le)
|
TimedeltaCmpLt
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/workbook/test_get_worksheet_by_name.py
|
{
"start": 299,
"end": 2155
}
|
class ____(unittest.TestCase):
    """
    Tests for Workbook.get_worksheet_by_name() lookups.
    """

    def test_get_worksheet_by_name01(self):
        """Test get_worksheet_by_name()"""
        fh = StringIO()
        workbook = Workbook()
        workbook._set_filehandle(fh)

        expected = workbook.add_worksheet()
        found = workbook.get_worksheet_by_name("Sheet1")

        workbook.fileclosed = 1
        self.assertEqual(expected, found)

    def test_get_worksheet_by_name02(self):
        """Test get_worksheet_by_name()"""
        fh = StringIO()
        workbook = Workbook()
        workbook._set_filehandle(fh)

        workbook.add_worksheet()
        expected = workbook.add_worksheet()
        found = workbook.get_worksheet_by_name("Sheet2")

        workbook.fileclosed = 1
        self.assertEqual(expected, found)

    def test_get_worksheet_by_name03(self):
        """Test get_worksheet_by_name()"""
        fh = StringIO()
        workbook = Workbook()
        workbook._set_filehandle(fh)

        expected = workbook.add_worksheet("Sheet 3")
        found = workbook.get_worksheet_by_name("Sheet 3")

        workbook.fileclosed = 1
        self.assertEqual(expected, found)

    def test_get_worksheet_by_name04(self):
        """Test get_worksheet_by_name()"""
        fh = StringIO()
        workbook = Workbook()
        workbook._set_filehandle(fh)

        expected = workbook.add_worksheet("Sheet '4")
        found = workbook.get_worksheet_by_name("Sheet '4")

        workbook.fileclosed = 1
        self.assertEqual(expected, found)

    def test_get_worksheet_by_name05(self):
        """Test get_worksheet_by_name()"""
        fh = StringIO()
        workbook = Workbook()
        workbook._set_filehandle(fh)

        # Looking up a name that was never added should return None.
        expected = None
        found = workbook.get_worksheet_by_name("Sheet 5")

        workbook.fileclosed = 1
        self.assertEqual(expected, found)
|
TestAssembleWorkbook
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.