language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | weaviate__weaviate-python-client | weaviate/collections/queries/bm25/query/executor.py | {
"start": 866,
"end": 14682
} | class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[Properties, References]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[QueryReturn[Properties, CrossReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[Properties, TReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[TProperties, References]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[QueryReturn[TProperties, CrossReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[TProperties, TReferences]]: ...
###### GROUP BY ######
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GroupByReturn[Properties, References]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GroupByReturn[Properties, CrossReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GroupByReturn[Properties, TReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GroupByReturn[TProperties, References]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GroupByReturn[TProperties, CrossReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GroupByReturn[TProperties, TReferences]]: ...
@overload
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
QuerySearchReturnType[Properties, References, TProperties, TReferences]
]: ...
def bm25(
self,
query: Optional[str],
*,
query_properties: Optional[List[str]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
operator: Optional[BM25OperatorOptions] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[QuerySearchReturnType[Properties, References, TProperties, TReferences]]:
"""Search for objects in this collection using the keyword-based BM25 algorithm.
See the [docs](https://weaviate.io/developers/weaviate/search/bm25) for a more detailed explanation.
Args:
query: The keyword-based query to search for, REQUIRED. If None, a normal search will be performed.
query_properties: The properties to search in. If not specified, all properties are searched.
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied.
filters: The filters to apply to the search.
group_by: How the results should be grouped by a specific property.
rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
NOTE:
If `return_properties` is not provided then all non-reference properties are returned including nested properties.
If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
If `return_references` is not provided then no references are provided.
Returns:
A `QueryReturn` or `GroupByReturn` object that includes the searched objects.
If `group_by` is provided then a `GroupByReturn` object is returned, otherwise a `QueryReturn` object is returned.
Raises:
weaviate.exceptions.WeaviateQueryError: If the network connection to Weaviate fails.
weaviate.exceptions.WeaviateNotImplementedError: If a group by is provided and the Weaviate server version is lower than 1.25.0.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> QuerySearchReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_query_or_groupby_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
rerank,
group_by,
),
),
)
request = self._query.bm25(
query=query,
properties=query_properties,
limit=limit,
offset=offset,
operator=operator,
autocut=auto_limit,
filters=filters,
group_by=_GroupBy.from_input(group_by),
rerank=rerank,
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(cast(Any, return_references)),
)
return executor.execute(
response_callback=resp,
method=self._connection.grpc_search,
request=request,
)
| _BM25QueryExecutor |
python | numpy__numpy | numpy/_core/tests/test_arrayprint.py | {
"start": 7830,
"end": 23591
} | class ____:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_unexpected_kwarg(self):
# ensure than an appropriate TypeError
# is raised when array2string receives
# an unexpected kwarg
with assert_raises_regex(TypeError, 'nonsense'):
np.array2string(np.array([1, 2, 3]),
nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all': _format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind': _format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all': lambda x: f"{x:.4f}"}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int': hex}),
x_hex)
assert_equal(np.array2string(x, formatter={'int': oct}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind': lambda x: f"{x:.2f}"}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float': lambda x: f"{x:.2f}"}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr': lambda s: s * 2}) ==
'[abcabc defdef]')
def test_structure_format_mixed(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
def test_structure_format_int(self):
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)],
dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
def test_structure_format_float(self):
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100],
dtype='u1').view('V8')
assert_equal(repr(a[0]),
r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08',"
"\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), {'np': np}), a[0])
def test_edgeitems_kwarg(self):
# previously the global print options would be taken over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
try:
np.set_printoptions(legacy='2.1')
assert_equal(repr(A), reprA)
finally:
np.set_printoptions(legacy=False)
assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))'))
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
try:
np.set_printoptions(legacy='2.1')
assert_equal(repr(A), reprA)
finally:
np.set_printoptions(legacy=False)
assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))'))
def test_summarize_2d_dtype(self):
A = np.arange(1002, dtype='i2').reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n'
' [ 501, 502, 503, ..., 999, 1000, 1001]],\n'
' shape=(2, 501), dtype=int16)')
assert_equal(repr(A), reprA)
def test_summarize_structure(self):
A = (np.arange(2002, dtype="<i8").reshape(2, 1001)
.view([('i', "<i8", (1001,))]))
strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n"
" [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]")
assert_equal(str(A), strA)
reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n"
" [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n"
" dtype=[('i', '<i8', (1001,))])")
assert_equal(repr(A), reprA)
B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))])
strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
assert_equal(str(B), strB)
reprB = (
"array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
" dtype=[('i', '>i8', (2, 1001))])"
)
assert_equal(repr(B), reprB)
C = (np.arange(22, dtype="<i8").reshape(2, 11)
.view([('i1', "<i8"), ('i10', "<i8", (10,))]))
strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]"
assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None, None, :]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
def test_multiline_repr(self):
class MultiLine:
def __repr__(self):
return "Line 1\nLine 2"
a = np.array([[None, MultiLine()], [MultiLine(), None]])
assert_equal(
np.array2string(a),
'[[None Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2 None]]'
)
assert_equal(
np.array2string(a, max_line_width=5),
'[[None\n'
' Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2\n'
' None]]'
)
assert_equal(
repr(a),
'array([[None, Line 1\n'
' Line 2],\n'
' [Line 1\n'
' Line 2, None]], dtype=object)'
)
class MultiLineLong:
def __repr__(self):
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
assert_equal(
repr(a),
'array([[None, Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 , None]], dtype=object)'
)
assert_equal(
np.array_repr(a, 20),
'array([[None,\n'
' Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ,\n'
' None]],\n'
' dtype=object)'
)
def test_nested_array_repr(self):
a = np.empty((2, 2), dtype=object)
a[0, 0] = np.eye(2)
a[0, 1] = np.eye(3)
a[1, 0] = None
a[1, 1] = np.ones((3, 1))
assert_equal(
repr(a),
'array([[array([[1., 0.],\n'
' [0., 1.]]), array([[1., 0., 0.],\n'
' [0., 1., 0.],\n'
' [0., 0., 1.]])],\n'
' [None, array([[1.],\n'
' [1.],\n'
' [1.]])]], dtype=object)'
)
@given(hynp.from_dtype(np.dtype("U")))
def test_any_text(self, text):
# This test checks that, given any value that can be represented in an
# array of dtype("U") (i.e. unicode string), ...
a = np.array([text, text, text])
# casting a list of them to an array does not e.g. truncate the value
assert_equal(a[0], text)
text = text.item() # use raw python strings for repr below
# and that np.array2string puts a newline in the expected location
expected_repr = f"[{text!r} {text!r}\n {text!r}]"
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
assert_equal(result, expected_repr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.thread_unsafe(reason="garbage collector is global state")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
gc.disable()
a = np.arange(2)
r1 = sys.getrefcount(a)
np.array2string(a)
np.array2string(a)
r2 = sys.getrefcount(a)
gc.collect()
gc.enable()
assert_(r1 == r2)
def test_with_sign(self):
# mixed negative and positive value array
a = np.array([-2, 0, 3])
assert_equal(
np.array2string(a, sign='+'),
'[-2 +0 +3]'
)
assert_equal(
np.array2string(a, sign='-'),
'[-2 0 3]'
)
assert_equal(
np.array2string(a, sign=' '),
'[-2 0 3]'
)
# all non-negative array
a = np.array([2, 0, 3])
assert_equal(
np.array2string(a, sign='+'),
'[+2 +0 +3]'
)
assert_equal(
np.array2string(a, sign='-'),
'[2 0 3]'
)
assert_equal(
np.array2string(a, sign=' '),
'[ 2 0 3]'
)
# all negative array
a = np.array([-2, -1, -3])
assert_equal(
np.array2string(a, sign='+'),
'[-2 -1 -3]'
)
assert_equal(
np.array2string(a, sign='-'),
'[-2 -1 -3]'
)
assert_equal(
np.array2string(a, sign=' '),
'[-2 -1 -3]'
)
# 2d array mixed negative and positive
a = np.array([[10, -1, 1, 1], [10, 10, 10, 10]])
assert_equal(
np.array2string(a, sign='+'),
'[[+10 -1 +1 +1]\n [+10 +10 +10 +10]]'
)
assert_equal(
np.array2string(a, sign='-'),
'[[10 -1 1 1]\n [10 10 10 10]]'
)
assert_equal(
np.array2string(a, sign=' '),
'[[10 -1 1 1]\n [10 10 10 10]]'
)
# 2d array all positive
a = np.array([[10, 0, 1, 1], [10, 10, 10, 10]])
assert_equal(
np.array2string(a, sign='+'),
'[[+10 +0 +1 +1]\n [+10 +10 +10 +10]]'
)
assert_equal(
np.array2string(a, sign='-'),
'[[10 0 1 1]\n [10 10 10 10]]'
)
assert_equal(
np.array2string(a, sign=' '),
'[[ 10 0 1 1]\n [ 10 10 10 10]]'
)
# 2d array all negative
a = np.array([[-10, -1, -1, -1], [-10, -10, -10, -10]])
assert_equal(
np.array2string(a, sign='+'),
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
)
assert_equal(
np.array2string(a, sign='-'),
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
)
assert_equal(
np.array2string(a, sign=' '),
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
)
| TestArray2String |
python | skorch-dev__skorch | skorch/tests/test_helper.py | {
"start": 18409,
"end": 26594
} | class ____:
@pytest.fixture
def transformer_cls(self):
from skorch.helper import DataFrameTransformer
return DataFrameTransformer
@pytest.fixture
def df(self):
"""DataFrame containing float, int, category types"""
import pandas as pd
df = pd.DataFrame({
'col_floats': [0.1, 0.2, 0.3],
'col_ints': [11, 11, 10],
'col_cats': ['a', 'b', 'a'],
})
df['col_cats'] = df['col_cats'].astype('category')
return df
def test_fit_transform_defaults(self, transformer_cls, df):
expected = {
'X': np.asarray([
[0.1, 11.0],
[0.2, 11.0],
[0.3, 10.0],
]).astype(np.float32),
'col_cats': np.asarray([0, 1, 0]),
}
Xt = transformer_cls().fit_transform(df)
assert_dicts_equal(Xt, expected)
def test_fit_and_transform_defaults(self, transformer_cls, df):
expected = {
'X': np.asarray([
[0.1, 11.0],
[0.2, 11.0],
[0.3, 10.0],
]).astype(np.float32),
'col_cats': np.asarray([0, 1, 0]),
}
Xt = transformer_cls().fit(df).transform(df)
assert_dicts_equal(Xt, expected)
def test_fit_transform_defaults_two_categoricals(
self, transformer_cls, df):
expected = {
'X': np.asarray([
[0.1, 11.0],
[0.2, 11.0],
[0.3, 10.0],
]).astype(np.float32),
'col_cats': np.asarray([0, 1, 0]),
'col_foo': np.asarray([1, 1, 0]),
}
df = df.assign(col_foo=df['col_ints'].astype('category'))
Xt = transformer_cls().fit_transform(df)
assert_dicts_equal(Xt, expected)
def test_fit_transform_int_as_categorical(self, transformer_cls, df):
expected = {
'X': np.asarray([0.1, 0.2, 0.3]).astype(np.float32).reshape(-1, 1),
'col_ints': np.asarray([1, 1, 0]),
'col_cats': np.asarray([0, 1, 0]),
}
Xt = transformer_cls(treat_int_as_categorical=True).fit_transform(df)
assert_dicts_equal(Xt, expected)
def test_fit_transform_no_X(self, transformer_cls, df):
df = df[['col_ints', 'col_cats']] # no float type present
expected = {
'col_ints': np.asarray([1, 1, 0]),
'col_cats': np.asarray([0, 1, 0]),
}
Xt = transformer_cls(treat_int_as_categorical=True).fit_transform(df)
assert_dicts_equal(Xt, expected)
@pytest.mark.parametrize('data', [
np.array([object, object, object]),
np.array(['foo', 'bar', 'baz']),
])
def test_invalid_dtype_raises(self, transformer_cls, df, data):
df = df.assign(invalid=data)
with pytest.raises(TypeError) as exc:
transformer_cls().fit_transform(df)
msg = exc.value.args[0]
expected = ("The following columns have dtypes that cannot be "
"interpreted as numerical dtypes: invalid (object)")
assert msg == expected
def test_two_invalid_dtypes_raises(self, transformer_cls, df):
df = df.assign(
invalid0=np.array([object, object, object]),
invalid1=np.array(['foo', 'bar', 'baz']),
)
with pytest.raises(TypeError) as exc:
transformer_cls().fit_transform(df)
msg = exc.value.args[0]
expected = ("The following columns have dtypes that cannot be "
"interpreted as numerical dtypes: invalid0 (object), "
"invalid1 (object)")
assert msg == expected
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_set_float_dtype(self, transformer_cls, df, dtype):
Xt = transformer_cls(float_dtype=dtype).fit_transform(df)
assert Xt['X'].dtype == dtype
@pytest.mark.parametrize('dtype', [np.int16, np.int32, np.int64])
def test_set_int_dtype(self, transformer_cls, df, dtype):
Xt = transformer_cls(
treat_int_as_categorical=True, int_dtype=dtype).fit_transform(df)
assert Xt['col_cats'].dtype == dtype
assert Xt['col_ints'].dtype == dtype
def test_leave_float_dtype_as_in_df(self, transformer_cls, df):
# None -> don't cast
Xt = transformer_cls(float_dtype=None).fit_transform(df)
assert Xt['X'].dtype == np.float64
def test_leave_int_dtype_as_in_df(self, transformer_cls, df):
# None -> don't cast
# pandas will use the lowest precision int that is capable to
# encode the categories; since we only have 2 values, that is
# int8 here
Xt = transformer_cls(int_dtype=None).fit_transform(df)
assert Xt['col_cats'].dtype == np.int8
def test_column_named_X_present(self, transformer_cls, df):
df = df.assign(X=df['col_cats'])
with pytest.raises(ValueError) as exc:
transformer_cls().fit(df)
msg = exc.value.args[0]
expected = ("DataFrame contains a column named 'X', which clashes "
"with the name chosen for cardinal features; consider "
"renaming that column.")
assert msg == expected
@pytest.fixture
def module_cls(self):
"""Simple module with embedding and linear layers"""
# pylint: disable=missing-docstring
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.reset_params()
def reset_params(self):
self.embedding = nn.Embedding(2, 10)
self.linear = nn.Linear(2, 10)
self.out = nn.Linear(20, 2)
self.nonlin = nn.Softmax(dim=-1)
# pylint: disable=arguments-differ
def forward(self, X, col_cats):
X_lin = self.linear(X)
X_cat = self.embedding(col_cats)
X_concat = torch.cat((X_lin, X_cat), dim=1)
return self.nonlin(self.out(X_concat))
return MyModule
@pytest.fixture
def net(self, module_cls):
from skorch import NeuralNetClassifier
net = NeuralNetClassifier(
module_cls,
train_split=None,
max_epochs=3,
)
return net
@pytest.fixture
def pipe(self, transformer_cls, net):
pipe = Pipeline([
('transform', transformer_cls()),
('net', net),
])
return pipe
def test_fit_and_predict_with_pipeline(self, pipe, df):
y = np.asarray([0, 0, 1])
pipe.fit(df, y)
y_proba = pipe.predict_proba(df)
assert y_proba.shape == (len(df), 2)
y_pred = pipe.predict(df)
assert y_pred.shape == (len(df),)
def test_describe_signature_default_df(self, transformer_cls, df):
result = transformer_cls().describe_signature(df)
expected = {
'X': {"dtype": torch.float32, "input_units": 2},
'col_cats': {"dtype": torch.int64, "input_units": 2},
}
assert result == expected
def test_describe_signature_non_default_df(self, transformer_cls, df):
# replace float column with integer having 3 unique units
df = df.assign(col_floats=[1, 2, 0])
result = transformer_cls(
treat_int_as_categorical=True).describe_signature(df)
expected = {
'col_floats': {"dtype": torch.int64, "input_units": 3},
'col_ints': {"dtype": torch.int64, "input_units": 2},
'col_cats': {"dtype": torch.int64, "input_units": 2},
}
assert result == expected
def test_describe_signature_other_dtypes(self, transformer_cls, df):
transformer = transformer_cls(
float_dtype=np.float16,
int_dtype=np.int32,
)
result = transformer.describe_signature(df)
expected = {
'X': {"dtype": torch.float16, "input_units": 2},
'col_cats': {"dtype": torch.int32, "input_units": 2},
}
assert result == expected
| TestDataFrameTransformer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/slice_generators.py | {
"start": 810,
"end": 2454
} | class ____(SliceGenerator):
"""
Split slices into event ranges of 90 days (or less for final slice) from
start_date up to current date.
"""
RANGE_LENGTH_DAYS: int = 90
_slices: List[StreamSlice] = []
def __init__(self, start_date: DateTime, end_date: Optional[DateTime] = None):
super().__init__(start_date, end_date)
self._slices = [
StreamSlice(start_date=start, end_date=end)
for start, end in self.make_datetime_ranges(self._start_date, self._end_date, self.RANGE_LENGTH_DAYS)
]
def __next__(self) -> StreamSlice:
if not self._slices:
raise StopIteration()
return self._slices.pop(0)
@staticmethod
def make_datetime_ranges(start: DateTime, end: DateTime, range_days: int) -> Iterable[Tuple[DateTime, DateTime]]:
"""
Generates list of ranges starting from start up to end date with duration of ranges_days.
Args:
start (DateTime): start of the range
end (DateTime): end of the range
range_days (int): Number in days to split subranges into.
Returns:
List[Tuple[DateTime, DateTime]]: list of tuples with ranges.
Each tuple contains two daytime variables: first is period start
and second is period end.
"""
if start > end:
return []
next_start = start
period = pendulum.Duration(days=range_days)
while next_start < end:
next_end = min(next_start + period, end)
yield next_start, next_end
next_start = next_end
| RangeSliceGenerator |
python | rapidsai__cudf | python/cudf/cudf/pandas/fast_slow_proxy.py | {
"start": 20181,
"end": 23285
} | class ____(_FastSlowProxy):
"""
Proxy type for a pair of fast and slow "final" types for which
there is a known conversion from fast to slow, and vice-versa.
The conversion between fast and slow types is done using
user-provided conversion functions.
Do not attempt to use this class directly. Instead, use
`make_final_proxy_type` to create subtypes.
"""
@classmethod
def _fsproxy_wrap(cls, value, func):
"""Default mechanism to wrap a value in a proxy type
Parameters
----------
cls
The proxy type
value
The value to wrap up
func
The function called that constructed value
Returns
-------
A new proxied object
Notes
-----
_FinalProxy subclasses can override this classmethod if they
need particular behaviour when wrapped up.
"""
# TODO: Replace the if-elif-else using singledispatch helper function
base_class = _get_proxy_base_class(cls)
if base_class is object:
proxy = base_class.__new__(cls)
elif base_class is ProxyNDarrayBase:
proxy = base_class.__new__(cls, value)
elif base_class is datetime.datetime:
proxy = base_class.__new__(
cls,
year=value.year,
month=value.month,
day=value.day,
hour=value.hour,
minute=value.minute,
second=value.second,
microsecond=value.microsecond,
tzinfo=value.tzinfo,
fold=value.fold,
)
elif base_class is datetime.timedelta:
proxy = base_class.__new__(
cls,
days=value.days,
seconds=value.seconds,
microseconds=value.microseconds,
)
else:
raise TypeError(
f"Cannot create an proxy instance of {cls.__name__} using base class {base_class.__name__}. "
f"Expected either 'object' or another type in 'PROXY_BASE_CLASSES'"
)
proxy._fsproxy_wrapped = value
return proxy
def __reduce__(self):
"""
In conjunction with `__proxy_setstate__`, this effectively enables
proxy types to be pickled and unpickled by pickling and unpickling
the underlying wrapped types.
"""
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
pickled_wrapped_obj = pickle.dumps(self._fsproxy_wrapped)
return (_PickleConstructor(type(self)), (), pickled_wrapped_obj)
def __setstate__(self, state):
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
unpickled_wrapped_obj = pickle.loads(state)
self._fsproxy_wrapped = unpickled_wrapped_obj
| _FinalProxy |
python | doocs__leetcode | solution/3700-3799/3723.Maximize Sum of Squares of Digits/Solution.py | {
"start": 0,
"end": 274
} | class ____:
def maxSumOfSquares(self, num: int, sum: int) -> str:
if num * 9 < sum:
return ""
k, s = divmod(sum, 9)
ans = "9" * k
if s:
ans += digits[s]
ans += "0" * (num - len(ans))
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 10109,
"end": 12485
} | class ____(PrivateProjectMixin, TestCase):
response_data = {
# Places where we 302 on success, and 301 for old pages -- These delete pages should probably be 405'ing
"/dashboard/import/manual/demo/": {"status_code": 302},
"/dashboard/pip/": {"status_code": 301},
"/dashboard/pip/subprojects/delete/sub/": {"status_code": 302},
"/dashboard/pip/advanced/": {"status_code": 301},
# 405's where we should be POST'ing
"/dashboard/pip/users/delete/": {"status_code": 405},
"/dashboard/pip/notifications/delete/": {"status_code": 405},
"/dashboard/pip/redirects/{redirect_pk}/delete/": {"status_code": 405},
"/dashboard/pip/redirects/{redirect_pk}/insert/{position}/": {
"status_code": 405
},
"/dashboard/pip/subprojects/sub/delete/": {"status_code": 405},
"/dashboard/pip/integrations/sync/": {"status_code": 405},
"/dashboard/pip/integrations/{integration_id}/sync/": {"status_code": 405},
"/dashboard/pip/integrations/{integration_id}/delete/": {"status_code": 405},
"/dashboard/pip/environmentvariables/{environmentvariable_id}/delete/": {
"status_code": 405
},
"/dashboard/pip/translations/delete/sub/": {"status_code": 405},
"/dashboard/pip/version/latest/delete_html/": {"status_code": 405},
"/dashboard/pip/rules/{automation_rule_id}/delete/": {"status_code": 405},
"/dashboard/pip/rules/{automation_rule_id}/move/{steps}/": {"status_code": 405},
"/dashboard/pip/webhooks/{webhook_id}/delete/": {"status_code": 405},
# Placeholder URLs.
"/dashboard/pip/sharing/": {"status_code": 404},
"/dashboard/pip/keys/": {"status_code": 404},
}
def get_url_path_ctx(self):
return {
"integration_id": self.integration.id,
"environmentvariable_id": self.environment_variable.id,
"automation_rule_id": self.automation_rule.id,
"webhook_id": self.webhook.id,
"redirect_pk": self.redirect.pk,
"steps": 1,
"position": 0,
}
def login(self):
return self.client.login(username="owner", password="test")
def is_admin(self):
return True
@mock.patch("readthedocs.core.utils.trigger_build", mock.MagicMock())
| PrivateProjectAdminAccessTest |
python | jazzband__django-oauth-toolkit | oauth2_provider/migrations/0013_alter_application_authorization_grant_type_device.py | {
"start": 158,
"end": 2210
} | class ____(migrations.Migration):
dependencies = [
('oauth2_provider', '0012_add_token_checksum'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('urn:ietf:params:oauth:grant-type:device_code', 'Device Code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=44),
),
migrations.CreateModel(
name='DeviceGrant',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('device_code', models.CharField(max_length=100, unique=True)),
('user_code', models.CharField(max_length=100)),
('scope', models.CharField(max_length=64, null=True)),
('interval', models.IntegerField(default=5)),
('expires', models.DateTimeField()),
('status', models.CharField(blank=True, choices=[('authorized', 'Authorized'), ('authorization-pending', 'Authorization pending'), ('expired', 'Expired'), ('denied', 'Denied')], default='authorization-pending', max_length=64)),
('client_id', models.CharField(db_index=True, max_length=100)),
('last_checked', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='%(app_label)s_%(class)s', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
'swappable': 'OAUTH2_PROVIDER_DEVICE_GRANT_MODEL',
'constraints': [models.UniqueConstraint(fields=('device_code',), name='oauth2_provider_devicegrant_unique_device_code')],
},
),
]
| Migration |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqlconnector.py | {
"start": 3247,
"end": 3548
} | class ____(MySQLCompiler):
def visit_mod_binary(
self, binary: BinaryExpression[Any], operator: Any, **kw: Any
) -> str:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
| MySQLCompiler_mysqlconnector |
python | spack__spack | lib/spack/spack/test/conftest.py | {
"start": 23117,
"end": 41051
} | class ____:
"""Build a mock repository in a directory"""
_counter = 0
def __init__(self, root_directory: str) -> None:
RepoBuilder._counter += 1
namespace = f"test_namespace_{RepoBuilder._counter}"
repo_root = os.path.join(root_directory, namespace)
os.makedirs(repo_root, exist_ok=True)
self.root, self.namespace = spack.repo.create_repo(repo_root, namespace)
self.build_system_name = f"test_build_system_{self.namespace}"
self._add_build_system()
def add_package(
self,
name: str,
dependencies: Optional[List[Tuple[str, Optional[str], Optional[str]]]] = None,
) -> None:
"""Create a mock package in the repository, using a Jinja2 template.
Args:
name: name of the new package
dependencies: list of ("dep_spec", "dep_type", "condition") tuples.
Both "dep_type" and "condition" can default to ``None`` in which case
``spack.dependency.default_deptype`` and ``spack.spec.Spec()`` are used.
"""
dependencies = dependencies or []
context = {
"cls_name": spack.util.naming.pkg_name_to_class_name(name),
"dependencies": dependencies,
}
template = spack.tengine.make_environment().get_template("mock-repository/package.pyt")
package_py = self._recipe_filename(name)
os.makedirs(os.path.dirname(package_py), exist_ok=True)
with open(package_py, "w", encoding="utf-8") as f:
f.write(template.render(context))
def remove(self, name: str) -> None:
package_py = self._recipe_filename(name)
shutil.rmtree(os.path.dirname(package_py))
def _add_build_system(self) -> None:
"""Add spack_repo.<namespace>.build_systems.test_build_system with
build_system=test_build_system_<namespace>."""
template = spack.tengine.make_environment().get_template(
"mock-repository/build_system.pyt"
)
text = template.render({"build_system_name": self.build_system_name})
build_system_py = os.path.join(self.root, "build_systems", "test_build_system.py")
os.makedirs(os.path.dirname(build_system_py), exist_ok=True)
with open(build_system_py, "w", encoding="utf-8") as f:
f.write(text)
def _recipe_filename(self, name: str) -> str:
return os.path.join(
self.root,
"packages",
spack.util.naming.pkg_name_to_pkg_dir(name, package_api=(2, 0)),
"package.py",
)
@pytest.fixture
def repo_builder(tmp_path: Path):
return RepoBuilder(str(tmp_path))
@pytest.fixture()
def mock_custom_repository(tmp_path: Path, mutable_mock_repo):
"""Create a custom repository with a single package "c" and return its path."""
builder = RepoBuilder(str(tmp_path))
builder.add_package("pkg-c")
return builder.root
@pytest.fixture(scope="session")
def linux_os():
"""Returns a named tuple with attributes 'name' and 'version'
representing the OS.
"""
platform = spack.platforms.host()
name, version = "debian", "6"
if platform.name == "linux":
current_os = platform.default_operating_system()
name, version = current_os.name, current_os.version
LinuxOS = collections.namedtuple("LinuxOS", ["name", "version"])
return LinuxOS(name=name, version=version)
@pytest.fixture
def ensure_debug(monkeypatch):
current_debug_level = tty.debug_level()
tty.set_debug(1)
yield
tty.set_debug(current_debug_level)
@pytest.fixture
def default_config():
"""Isolates the default configuration from the user configs.
This ensures we can test the real default configuration without having
tests fail when the user overrides the defaults that we test against."""
defaults_path = os.path.join(spack.paths.etc_path, "defaults")
if sys.platform == "win32":
defaults_path = os.path.join(defaults_path, "windows")
with spack.config.use_configuration(defaults_path) as defaults_config:
yield defaults_config
@pytest.fixture(scope="session")
def mock_uarch_json(tmp_path_factory: pytest.TempPathFactory):
"""Mock microarchitectures.json with test architecture descriptions."""
tmpdir = tmp_path_factory.mktemp("microarchitectures")
uarch_json_source = (
Path(spack.paths.test_path) / "data" / "microarchitectures" / "microarchitectures.json"
)
uarch_json_dest = tmpdir / "microarchitectures.json"
shutil.copy2(uarch_json_source, uarch_json_dest)
yield str(uarch_json_dest)
@pytest.fixture(scope="session")
def mock_uarch_configuration(mock_uarch_json):
"""Create mock dictionaries for the spack.vendor.archspec.cpu."""
def load_json():
with open(mock_uarch_json, encoding="utf-8") as f:
return json.load(f)
targets_json = load_json()
targets = spack.vendor.archspec.cpu.microarchitecture._known_microarchitectures()
yield targets_json, targets
@pytest.fixture(scope="function")
def mock_targets(mock_uarch_configuration, monkeypatch):
"""Use this fixture to enable mock uarch targets for testing."""
targets_json, targets = mock_uarch_configuration
monkeypatch.setattr(spack.vendor.archspec.cpu.schema, "TARGETS_JSON", targets_json)
monkeypatch.setattr(spack.vendor.archspec.cpu.microarchitecture, "TARGETS", targets)
@pytest.fixture(scope="session")
def configuration_dir(tmp_path_factory: pytest.TempPathFactory, linux_os):
"""Copies mock configuration files in a temporary directory. Returns the
directory path.
"""
tmp_path = tmp_path_factory.mktemp("configurations")
install_tree_root = tmp_path_factory.mktemp("opt")
modules_root = tmp_path_factory.mktemp("share")
tcl_root = modules_root / "modules"
tcl_root.mkdir()
lmod_root = modules_root / "lmod"
lmod_root.mkdir()
# <test_path>/data/config has mock config yaml files in it
# copy these to the site config.
test_config = Path(spack.paths.test_path) / "data" / "config"
shutil.copytree(test_config, tmp_path / "site")
# Create temporary 'defaults', 'site' and 'user' folders
(tmp_path / "user").mkdir()
# Fill out config.yaml, compilers.yaml and modules.yaml templates.
locks = sys.platform != "win32"
config = tmp_path / "site" / "config.yaml"
config_template = test_config / "config.yaml"
config.write_text(config_template.read_text().format(install_tree_root, locks))
target = str(spack.vendor.archspec.cpu.host().family)
compilers = tmp_path / "site" / "packages.yaml"
compilers_template = test_config / "packages.yaml"
compilers.write_text(compilers_template.read_text().format(linux_os=linux_os, target=target))
modules = tmp_path / "site" / "modules.yaml"
modules_template = test_config / "modules.yaml"
modules.write_text(modules_template.read_text().format(tcl_root, lmod_root))
yield tmp_path
def _create_mock_configuration_scopes(configuration_dir):
"""Create the configuration scopes used in `config` and `mutable_config`."""
return [
(
ConfigScopePriority.DEFAULTS,
spack.config.InternalConfigScope("_builtin", spack.config.CONFIG_DEFAULTS),
),
(
ConfigScopePriority.CONFIG_FILES,
spack.config.DirectoryConfigScope("site", str(configuration_dir / "site")),
),
(
ConfigScopePriority.CONFIG_FILES,
spack.config.DirectoryConfigScope("system", str(configuration_dir / "system")),
),
(
ConfigScopePriority.CONFIG_FILES,
spack.config.DirectoryConfigScope("user", str(configuration_dir / "user")),
),
(ConfigScopePriority.COMMAND_LINE, spack.config.InternalConfigScope("command_line")),
]
@pytest.fixture(scope="session")
def mock_configuration_scopes(configuration_dir):
"""Create a persistent Configuration object from the configuration_dir."""
yield _create_mock_configuration_scopes(configuration_dir)
@pytest.fixture(scope="function")
def config(mock_configuration_scopes):
"""This fixture activates/deactivates the mock configuration."""
with spack.config.use_configuration(*mock_configuration_scopes) as config:
yield config
@pytest.fixture(scope="function")
def mutable_config(tmp_path_factory: pytest.TempPathFactory, configuration_dir):
"""Like config, but tests can modify the configuration."""
mutable_dir = tmp_path_factory.mktemp("mutable_config") / "tmp"
shutil.copytree(configuration_dir, mutable_dir)
scopes = _create_mock_configuration_scopes(mutable_dir)
with spack.config.use_configuration(*scopes) as cfg:
yield cfg
@pytest.fixture(scope="function")
def mutable_empty_config(tmp_path_factory: pytest.TempPathFactory, configuration_dir):
"""Empty configuration that can be modified by the tests."""
mutable_dir = tmp_path_factory.mktemp("mutable_config") / "tmp"
scopes = [
spack.config.DirectoryConfigScope(name, str(mutable_dir / name))
for name in ["site", "system", "user"]
]
with spack.config.use_configuration(*scopes) as cfg:
yield cfg
# From https://github.com/pytest-dev/pytest/issues/363#issuecomment-1335631998
# Current suggested implementation from issue compatible with pytest >= 6.2
# this may be subject to change as new versions of Pytest are released
# and update the suggested solution
@pytest.fixture(scope="session")
def monkeypatch_session():
with pytest.MonkeyPatch.context() as monkeypatch:
yield monkeypatch
@pytest.fixture(scope="session", autouse=True)
def mock_wsdk_externals(monkeypatch_session):
"""Skip check for required external packages on Windows during testing
Note: In general this should cover this behavior for all tests,
however any session scoped fixture involving concretization should
include this fixture
"""
monkeypatch_session.setattr(
spack.bootstrap.core, "ensure_winsdk_external_or_raise", _return_none
)
@pytest.fixture(scope="function")
def concretize_scope(mutable_config, tmp_path: Path):
"""Adds a scope for concretization preferences"""
concretize_dir = tmp_path / "concretize"
concretize_dir.mkdir()
with spack.config.override(
spack.config.DirectoryConfigScope("concretize", str(concretize_dir))
):
yield str(concretize_dir)
spack.repo.PATH._provider_index = None
@pytest.fixture
def no_packages_yaml(mutable_config):
"""Creates a temporary configuration without compilers.yaml"""
for local_config in mutable_config.scopes.values():
if not isinstance(local_config, spack.config.DirectoryConfigScope):
continue
compilers_yaml = local_config.get_section_filename("packages")
if os.path.exists(compilers_yaml):
os.remove(compilers_yaml)
mutable_config.clear_caches()
return mutable_config
@pytest.fixture()
def mock_low_high_config(tmp_path: Path):
"""Mocks two configuration scopes: 'low' and 'high'."""
scopes = [
spack.config.DirectoryConfigScope(name, str(tmp_path / name)) for name in ["low", "high"]
]
with spack.config.use_configuration(*scopes) as config:
yield config
def _populate(mock_db):
r"""Populate a mock database with packages.
Here is what the mock DB looks like (explicit roots at top):
o mpileaks o mpileaks' o mpileaks'' o externaltest o trivial-smoke-test
|\ |\ |\ |
| o callpath | o callpath' | o callpath'' o externaltool
|/| |/| |/| |
o | mpich o | mpich2 o | zmpi o externalvirtual
| | o | fake
| | |
| |______________/
| .____________/
|/
o dyninst
|\
| o libdwarf
|/
o libelf
"""
def _install(spec):
s = spack.concretize.concretize_one(spec)
PackageInstaller([s.package], fake=True, explicit=True).install()
_install("mpileaks ^mpich")
_install("mpileaks ^mpich2")
_install("mpileaks ^zmpi")
_install("externaltest ^externalvirtual")
_install("trivial-smoke-test")
@pytest.fixture(scope="session")
def _store_dir_and_cache(tmp_path_factory: pytest.TempPathFactory):
"""Returns the directory where to build the mock database and
where to cache it.
"""
store = tmp_path_factory.mktemp("mock_store")
cache = tmp_path_factory.mktemp("mock_store_cache")
return store, cache
@pytest.fixture(scope="session")
def mock_store(
tmp_path_factory: pytest.TempPathFactory,
mock_wsdk_externals,
mock_packages_repo,
mock_configuration_scopes,
_store_dir_and_cache: Tuple[Path, Path],
mock_stage_for_database,
):
"""Creates a read-only mock database with some packages installed note
that the ref count for dyninst here will be 3, as it's recycled
across each install.
This does not actually activate the store for use by Spack -- see the
``database`` fixture for that.
"""
store_path, store_cache = _store_dir_and_cache
# Make the DB filesystem read-only to ensure constructors don't modify anything in it.
# We want Spack to be able to point to a DB on a read-only filesystem easily.
_recursive_chmod(store_path, 0o555)
# If the cache does not exist populate the store and create it
if not os.path.exists(str(store_cache / ".spack-db")):
with spack.config.use_configuration(*mock_configuration_scopes):
with spack.store.use_store(str(store_path)) as store:
with spack.repo.use_repositories(mock_packages_repo):
# make the DB filesystem writable only while we populate it
_recursive_chmod(store_path, 0o755)
_populate(store.db)
_recursive_chmod(store_path, 0o555)
_recursive_chmod(store_cache, 0o755)
copy_tree(str(store_path), str(store_cache))
_recursive_chmod(store_cache, 0o555)
yield store_path
@pytest.fixture(scope="function")
def database(mock_store, mock_packages, config):
"""This activates the mock store, packages, AND config."""
with spack.store.use_store(str(mock_store)) as store:
yield store.db
# Force reading the database again between tests
store.db.last_seen_verifier = ""
@pytest.fixture(scope="function")
def database_mutable_config(mock_store, mock_packages, mutable_config, monkeypatch):
"""This activates the mock store, packages, AND config."""
with spack.store.use_store(str(mock_store)) as store:
yield store.db
store.db.last_seen_verifier = ""
@pytest.fixture(scope="function")
def mutable_database(database_mutable_config, _store_dir_and_cache: Tuple[Path, Path]):
"""Writeable version of the fixture, restored to its initial state
after each test.
"""
# Make the database writeable, as we are going to modify it
store_path, store_cache = _store_dir_and_cache
_recursive_chmod(store_path, 0o755)
yield database_mutable_config
# Restore the initial state by copying the content of the cache back into
# the store and making the database read-only
shutil.rmtree(store_path)
copy_tree(str(store_cache), str(store_path))
_recursive_chmod(store_path, 0o555)
@pytest.fixture()
def dirs_with_libfiles(tmp_path_factory: pytest.TempPathFactory):
lib_to_libfiles = {
"libstdc++": ["libstdc++.so", "libstdc++.tbd"],
"libgfortran": ["libgfortran.a", "libgfortran.dylib"],
"libirc": ["libirc.a", "libirc.so"],
}
root = tmp_path_factory.mktemp("root")
lib_to_dirs = {}
i = 0
for lib, libfiles in lib_to_libfiles.items():
dirs = []
for libfile in libfiles:
lib_dir = root / str(i)
lib_dir.mkdir()
(lib_dir / libfile).touch()
dirs.append(str(lib_dir))
i += 1
lib_to_dirs[lib] = dirs
all_dirs = list(itertools.chain.from_iterable(lib_to_dirs.values()))
yield lib_to_dirs, all_dirs
def _return_none(*args):
return None
@pytest.fixture(autouse=True)
def disable_compiler_output_cache(monkeypatch):
monkeypatch.setattr(
spack.compilers.libraries, "COMPILER_CACHE", spack.compilers.libraries.CompilerCache()
)
@pytest.fixture(scope="function")
def install_mockery(temporary_store: spack.store.Store, mutable_config, mock_packages):
"""Hooks a fake install directory, DB, and stage directory into Spack."""
# We use a fake package, so temporarily disable checksumming
with spack.config.override("config:checksum", False):
yield
# Wipe out any cached prefix failure locks (associated with the session-scoped mock archive)
temporary_store.failure_tracker.clear_all()
@pytest.fixture(scope="function")
def temporary_mirror(mutable_config, tmp_path_factory):
mirror_dir = tmp_path_factory.mktemp("mirror")
mirror_cmd("add", "test-mirror-func", mirror_dir.as_uri())
yield str(mirror_dir)
@pytest.fixture(scope="function")
def temporary_store(tmp_path: Path, request):
"""Hooks a temporary empty store for the test function."""
ensure_configuration_fixture_run_before(request)
temporary_store_path = tmp_path / "opt"
with spack.store.use_store(str(temporary_store_path)) as s:
yield s
if temporary_store_path.exists():
shutil.rmtree(temporary_store_path)
@pytest.fixture()
def mock_fetch(mock_archive, monkeypatch):
"""Fake the URL for a package so it downloads from a file."""
monkeypatch.setattr(
spack.package_base.PackageBase, "fetcher", URLFetchStrategy(url=mock_archive.url)
)
| RepoBuilder |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/util.py | {
"start": 5279,
"end": 5710
} | class ____(Protocol):
def __call__(
self,
cls: Type[Any],
annotation: _AnnotationScanType,
originating_module: str,
*,
str_cleanup_fn: Optional[Callable[[str, str], str]] = None,
include_generic: bool = False,
) -> _MatchedOnType: ...
de_stringify_annotation = cast(
_DeStringifyAnnotation, _de_stringify_partial(_de_stringify_annotation)
)
| _DeStringifyAnnotation |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/arrayeditor.py | {
"start": 15739,
"end": 22289
} | class ____(QTableView, SpyderWidgetMixin):
"""Array view class"""
CONF_SECTION = 'variable_explorer'
def __init__(self, parent, model, dtype, shape):
QTableView.__init__(self, parent)
self.setModel(model)
self.setItemDelegate(ArrayDelegate(dtype, self))
total_width = 0
for k in range(shape[1]):
total_width += self.columnWidth(k)
self.viewport().resize(min(total_width, 1024), self.height())
self.shape = shape
self.menu = self.setup_menu()
self.register_shortcut_for_widget(name='copy', triggered=self.copy)
self.horizontalScrollBar().valueChanged.connect(
self._load_more_columns
)
self.verticalScrollBar().valueChanged.connect(self._load_more_rows)
def _load_more_columns(self, value):
"""Load more columns to display."""
# Needed to avoid a NameError while fetching data when closing
# See spyder-ide/spyder#12034.
try:
self.load_more_data(value, columns=True)
except NameError:
pass
def _load_more_rows(self, value):
"""Load more rows to display."""
# Needed to avoid a NameError while fetching data when closing
# See spyder-ide/spyder#12034.
try:
self.load_more_data(value, rows=True)
except NameError:
pass
def load_more_data(self, value, rows=False, columns=False):
try:
old_selection = self.selectionModel().selection()
old_rows_loaded = old_cols_loaded = None
if rows and value == self.verticalScrollBar().maximum():
old_rows_loaded = self.model().rows_loaded
self.model().fetch_more(rows=rows)
if columns and value == self.horizontalScrollBar().maximum():
old_cols_loaded = self.model().cols_loaded
self.model().fetch_more(columns=columns)
if old_rows_loaded is not None or old_cols_loaded is not None:
# if we've changed anything, update selection
new_selection = QItemSelection()
for part in old_selection:
top = part.top()
bottom = part.bottom()
if (old_rows_loaded is not None and
top == 0 and bottom == (old_rows_loaded-1)):
# complete column selected (so expand it to match
# updated range)
bottom = self.model().rows_loaded-1
left = part.left()
right = part.right()
if (old_cols_loaded is not None
and left == 0 and right == (old_cols_loaded-1)):
# compete row selected (so expand it to match updated
# range)
right = self.model().cols_loaded-1
top_left = self.model().index(top, left)
bottom_right = self.model().index(bottom, right)
part = QItemSelectionRange(top_left, bottom_right)
new_selection.append(part)
self.selectionModel().select(
new_selection, self.selectionModel().ClearAndSelect)
except NameError:
# Needed to handle a NameError while fetching data when closing
# See isue 7880
pass
@Slot()
def resize_to_contents(self):
"""Resize cells to contents"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.resizeColumnsToContents()
self.model().fetch_more(columns=True)
self.resizeColumnsToContents()
QApplication.restoreOverrideCursor()
def setup_menu(self):
"""Setup context menu"""
self.copy_action = self.create_action(
name=ArrayEditorActions.Copy,
text=_('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
register_action=False
)
self.copy_action.setShortcut(keybinding('Copy'))
self.copy_action.setShortcutContext(Qt.WidgetShortcut)
edit_action = self.create_action(
name=ArrayEditorActions.Edit,
text=_('Edit'),
icon=ima.icon('edit'),
triggered=self.edit_item,
register_action=False
)
menu = self.create_menu('Editor menu', register=False)
for action in [self.copy_action, edit_action]:
self.add_item_to_menu(action, menu)
return menu
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
self.menu.popup(event.globalPos())
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method"""
if event == QKeySequence.Copy:
self.copy()
else:
QTableView.keyPressEvent(self, event)
def _sel_to_text(self, cell_range):
"""Copy an array portion to a unicode string"""
if not cell_range:
return
row_min, row_max, col_min, col_max = get_idx_rect(cell_range)
if col_min == 0 and col_max == (self.model().cols_loaded-1):
# we've selected a whole column. It isn't possible to
# select only the first part of a column without loading more,
# so we can treat it as intentional and copy the whole thing
col_max = self.model().total_cols-1
if row_min == 0 and row_max == (self.model().rows_loaded-1):
row_max = self.model().total_rows-1
_data = self.model().get_data()
output = io.BytesIO()
try:
fmt = '%' + self.model().get_format_spec()
np.savetxt(output, _data[row_min:row_max+1, col_min:col_max+1],
delimiter='\t', fmt=fmt)
except:
QMessageBox.warning(self, _("Warning"),
_("It was not possible to copy values for "
"this array"))
return
contents = output.getvalue().decode('utf-8')
output.close()
return contents
@Slot()
def copy(self):
"""Copy text to clipboard"""
cliptxt = self._sel_to_text( self.selectedIndexes() )
clipboard = QApplication.clipboard()
clipboard.setText(cliptxt)
def edit_item(self):
"""Edit item"""
index = self.currentIndex()
if index.isValid():
self.edit(index)
| ArrayView |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 25243,
"end": 73777
} | class ____(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
    """Numeric IPv4 literals must resolve to themselves."""
    for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
        self.assertEqual(socket.gethostbyname(addr), addr)

    # we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
    # a matching name entry (e.g. 'ip6-localhost')
    for host in [socket_helper.HOSTv4]:
        self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
    """Malformed IP literals should fail to resolve with OSError."""
    # These are all malformed IP addresses and expected not to resolve to
    # any result.  But some ISPs, e.g. AWS, may successfully resolve these
    # IPs.
    explanation = (
        "resolving an invalid IP address did not raise OSError; "
        "can be caused by a broken DNS server"
    )
    for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                 '1:1:1:1:1:1:1:1:1']:
        with self.assertRaises(OSError, msg=addr):
            socket.gethostbyname(addr)
        with self.assertRaises(OSError, msg=explanation):
            socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
    """sethostname() accepts both str and bytes; requires root
    privileges (EPERM means skip), and always restores the old name.
    """
    oldhn = socket.gethostname()
    try:
        socket.sethostname('new')
    except OSError as e:
        if e.errno == errno.EPERM:
            self.skipTest("test should be run as root")
        else:
            raise
    try:
        # running test as root!
        self.assertEqual(socket.gethostname(), 'new')
        # Should work with bytes objects too
        socket.sethostname(b'bar')
        self.assertEqual(socket.gethostname(), 'bar')
    finally:
        socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    """if_nameindex(), if_nametoindex() and if_indextoname() must agree
    on every interface reported by the system.
    """
    interfaces = socket.if_nameindex()
    for index, name in interfaces:
        self.assertIsInstance(index, int)
        self.assertIsInstance(name, str)
        # interface indices are non-zero integers
        self.assertGreater(index, 0)
        _index = socket.if_nametoindex(name)
        self.assertIsInstance(_index, int)
        self.assertEqual(index, _index)
        _name = socket.if_indextoname(index)
        self.assertIsInstance(_name, str)
        self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                     'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
    """if_indextoname() must reject zero, negative, huge and non-int
    indices, including values that would wrap a C unsigned int.
    """
    self.assertRaises(OSError, socket.if_indextoname, 0)
    self.assertRaises(OverflowError, socket.if_indextoname, -1)
    self.assertRaises(OverflowError, socket.if_indextoname, 2**1000)
    self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
    if hasattr(socket, 'if_nameindex'):
        indices = dict(socket.if_nameindex())
        for index in indices:
            # index + 2**32 wraps to a valid index in 32-bit C code;
            # the binding must not silently truncate it.
            index2 = index + 2**32
            if index2 not in indices:
                with self.assertRaises((OverflowError, OSError)):
                    socket.if_indextoname(index2)
        for index in 2**32-1, 2**64-1:
            if index not in indices:
                with self.assertRaises((OverflowError, OSError)):
                    socket.if_indextoname(index)
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    """Regression check: getnameinfo() must not leak a reference to its
    argument when it raises TypeError.

    NOTE(review): the refcount is only compared when TypeError is raised;
    if the call succeeded (it shouldn't — __name__ is a str, not a
    sockaddr tuple), nothing would be checked.
    """
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        socket.getnameinfo(__name__,0)
    except TypeError:
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
    """htons/ntohs (16-bit) and htonl/ntohl (32-bit) must accept
    in-range values and raise OverflowError for everything else.
    """
    import _testcapi
    s_good_values = [0, 1, 2, 0xffff]
    l_good_values = s_good_values + [0xffffffff]
    l_bad_values = [-1, -2, 1<<32, 1<<1000]
    # Values beyond 16 bits (and around the C int limits) must be
    # rejected by the 16-bit converters.
    s_bad_values = (
        l_bad_values +
        [_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
        [1 << 16, _testcapi.INT_MAX]
    )
    for k in s_good_values:
        socket.ntohs(k)
        socket.htons(k)
    for k in l_good_values:
        socket.ntohl(k)
        socket.htonl(k)
    for k in s_bad_values:
        self.assertRaises(OverflowError, socket.ntohs, k)
        self.assertRaises(OverflowError, socket.htons, k)
    for k in l_bad_values:
        self.assertRaises(OverflowError, socket.ntohl, k)
        self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
    """getservbyname()/getservbyport() must round-trip a well-known
    service and reject out-of-range port numbers.
    """
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # No candidate service resolved at all.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
    """setdefaulttimeout() must propagate to newly created sockets and
    validate its argument.
    """
    # Testing default timeout
    # The default timeout should initially be None
    self.assertEqual(socket.getdefaulttimeout(), None)
    with socket.socket() as s:
        self.assertEqual(s.gettimeout(), None)

    # Set the default timeout to 10, and see if it propagates
    with socket_setdefaulttimeout(10):
        self.assertEqual(socket.getdefaulttimeout(), 10)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), 10)

        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)

    # Check that setting it to an invalid value raises ValueError
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)

    # Check that setting it to an invalid type raises TypeError
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv4toString(self):
    """inet_aton() and inet_pton(AF_INET) must parse dotted-quad text
    and reject malformed input (inet_aton is lax on AIX, bpo-29972).
    """
    from socket import inet_aton as f, inet_pton, AF_INET
    g = lambda a: inet_pton(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )

    self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
    self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
    self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')

    self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
    self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
    assertInvalid(g, '0.0.0.')
    assertInvalid(g, '300.0.0.0')
    assertInvalid(g, 'a.0.0.0')
    assertInvalid(g, '1.2.3.4.5')
    assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6) must parse valid IPv6 text (including
    embedded IPv4 suffixes) and reject malformed addresses.
    """
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed on this Windows
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('1:2:3:4:5:6:7:8:')

    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6) must render packed 16-byte addresses in
    canonical compressed form and reject other buffer lengths.
    """
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed on this Windows
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...

def testSockName(self):
    """getsockname() must reflect the bound wildcard address and port."""
    # Testing getsockname()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)

    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = socket_helper.find_unused_port()
        try:
            sock.bind(("0.0.0.0", port))
        except OSError as e:
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break

    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos.  This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on a closed socket (with a timeout set) must raise OSError."""
    # testing send() after close() with timeout
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
    # The with-block closed the socket; sending must now fail.
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """close() on a socket whose fd was already closed elsewhere must
    raise OSError with EBADF (POSIX) or ENOTSOCK (Winsock).
    """
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrapping the fd in a second socket object and closing it closes
    # the underlying descriptor out from under `sock`.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """family/type/proto attributes must reflect constructor arguments."""
    # testing .family, .type and .protocol
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # The kernel may fold SOCK_CLOEXEC into the reported type.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() must reject port numbers outside the 16-bit range, while
    still accepting a valid free port.
    """
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = socket_helper.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = socket_helper.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows: socket.ioctl() and the SIO_*/RCVALL_* constants must
    exist, reject bad control codes, and accept SIO_KEEPALIVE_VALS.
    """
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """Windows: SIO_LOOPBACK_FAST_PATH must be settable (or report
    WSAEOPNOTSUPP on Windows versions lacking the feature) and must
    reject a non-boolean-convertible argument.
    """
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Message grammar fixed ("doesn't implemented" -> "isn't
            # implemented"); same skip semantics.
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "isn't implemented in this Windows version")
        raise
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo(): 5-tuple result shape, host/port argument
    forms, family/socktype/proto/flags filters, keyword arguments, and
    two historical crash regressions.
    """
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if socket_helper.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail.  All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Non-ASCII (IDNA) hostnames must resolve through the usual
    resolution entry points; requires real network access.
    """
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with socket_helper.transient_internet('python.org'):
        socket.gethostbyname('python.org')

    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Helper: a signal handler raising during a blocking sendall() must
    abort the call (and a handler that returns must let a timeout fire).
    """
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            # sendall() of a huge buffer blocks until the alarm fires.
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(TimeoutError, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()
def test_sendall_interrupted(self):
    """sendall() without a timeout aborts when a signal handler raises."""
    self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
    """sendall() with a timeout behaves sanely under signal interruption."""
    self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
    """Dropping the last reference to an open socket must emit a
    ResourceWarning mentioning the socket's repr.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    # The makefile() wrapper keeps the fd alive; dropping it warns too.
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
    """socket.close(fd) must close the descriptor (later operations on
    the object fail with EBADF) and validate its argument.
    """
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        socket.close(sock.fileno())
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() must accept zero/negative backlogs and no argument at all."""
    for backlog in 0, -1:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    """listen() must reject a backlog that overflows a C int."""
    # Issue 15989
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """Out-of-range IPv6 flowinfo values must raise OverflowError in
    both getnameinfo() and bind().
    """
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (socket_helper.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() must lower-case hex digits in numeric IPv6 input."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
def test_getfqdn_filter_localhost(self):
    """getfqdn() of the wildcard addresses must equal the local FQDN."""
    self.assertEqual(socket.getfqdn(), socket.getfqdn("0.0.0.0"))
    self.assertEqual(socket.getfqdn(), socket.getfqdn("::"))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A symbolic %interface scope suffix must be translated into the
    numeric scope id in the returned sockaddr.
    """
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric %N scope suffix must be carried into the sockaddr."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() must render a numeric scope id back as %interface."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
                     'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() must render an unnamed scope id as %N on Windows."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """socket.family/.type must stringify like AF_*/SOCK_* enum members."""
    # Make sure that the AF_* and SOCK_* constants have enum-like string
    # reprs.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
        self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    """family/type values with no matching enum member must be reported
    numerically rather than raising.
    """
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1

    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1

    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """The private _sendfile_use_sendfile() helper must give up (or
    raise) on stale, oversized, or non-integer file descriptors.
    """
    class File:
        # Minimal stand-in exposing only fileno().
        def __init__(self, fd):
            self.fd = fd

        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: rewrap s's fd via socket(fileno=...) and verify family,
    type and fd carry over; detaches s so the fd is not double-closed.
    """
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)

    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket(fileno=...) must rewrap TCP, UDP, IPv6 and (where
    available) AF_UNIX sockets with the correct family and type.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((socket_helper.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((socket_helper.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if socket_helper.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((socket_helper.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        try:
            s.bind(os.path.join(tmpdir, 'socket'))
        except PermissionError:
            pass
        else:
            self._test_socket_fileno(s, socket.AF_UNIX,
                                     socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
| GeneralModuleTests |
python | pytorch__pytorch | torch/nn/parallel/distributed.py | {
"start": 7647,
"end": 7997
} | class ____:
buffer_comm_hook: Callable
buffer_comm_hook_state: Any
buffer_comm_hook_location: _BufferCommHookLocation
# Add a DDPSink to run various functions when backwards starts, such as
# queueing call back of out-most backward/graph task,
# this helps call back is fired after all gradients' calculation
# is completed.
| _BufferCommHook |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solverLiteral1.py | {
"start": 404,
"end": 868
} | class ____(Generic[_T]):
pass
TA1 = Callable[[ClassA[_T]], None]
def func1(value: _T) -> TA1[_T]:
def ret(ctx: ClassA[_T]) -> None:
pass
return ret
def func2() -> TA1[bool]:
return func1(True)
def func3(value: _T) -> Callable[[_T], None]: ...
x: Callable[[tuple[bool]], None] = func3((True,))
def func4(v: _T, f: Callable[[_T], None]): ...
def func5(v: Literal[1, 2], f: Callable[[Literal[1, 2]], None]):
func4(v, f)
| ClassA |
python | openai__openai-python | src/openai/resources/beta/threads/threads.py | {
"start": 93285,
"end": 94781
} | class ____:
def __init__(self, threads: AsyncThreads) -> None:
self._threads = threads
self.create = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
threads.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
threads.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
threads.update, # pyright: ignore[reportDeprecated],
)
)
self.delete = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
threads.delete, # pyright: ignore[reportDeprecated],
)
)
self.create_and_run = ( # pyright: ignore[reportDeprecated]
_legacy_response.async_to_raw_response_wrapper(
threads.create_and_run, # pyright: ignore[reportDeprecated],
)
)
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
return AsyncRunsWithRawResponse(self._threads.runs)
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
return AsyncMessagesWithRawResponse(self._threads.messages)
| AsyncThreadsWithRawResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple23.py | {
"start": 1473,
"end": 1701
} | class ____(Generic[*Shape, DType]): ...
def insert(values: NDArray[*Shape, DType]) -> NDArray[int, *Shape, DType]: ...
def prepend(values: NDArray[*Shape, DType]) -> NDArray[int, *Shape, DType]:
return insert(values)
| NDArray |
python | getsentry__sentry | src/sentry/api/endpoints/organization_traces.py | {
"start": 6638,
"end": 36537
} | class ____:
def __init__(
self,
*,
dataset: Dataset,
snuba_params: SnubaParams,
user_queries: list[str],
sort: str | None,
limit: int,
breakdown_slices: int,
get_all_projects: Callable[[], list[Project]],
):
self.dataset = dataset
self.snuba_params = snuba_params
self.raw_user_queries = user_queries
self.rpc_user_queries = process_rpc_user_queries(snuba_params, user_queries)
self.sort = sort
self.offset = 0
self.limit = limit
self.breakdown_slices = breakdown_slices
self.get_all_projects = get_all_projects
def params_with_all_projects(self) -> SnubaParams:
all_projects_snuba_params = dataclasses.replace(
self.snuba_params, projects=self.get_all_projects()
)
return all_projects_snuba_params
def execute(self, offset: int, limit: int):
# To support pagination on only EAP, we use the offset/limit
# values from the paginator here.
self.offset = offset
self.limit = limit
return {"data": self._execute_rpc()}
def _execute_rpc(self):
if self.snuba_params.organization_id is None:
raise Exception("An organization is required to resolve queries")
all_projects = self.get_all_projects()
rpc_request = self.get_traces_rpc(all_projects)
rpc_response = get_traces_rpc(rpc_request)
if not rpc_response.traces:
return []
projects_map: dict[int, str] = {project.id: project.slug for project in all_projects}
traces = [format_trace_result(trace, projects_map) for trace in rpc_response.traces]
with handle_span_query_errors():
snuba_params = self.params_with_all_projects()
self.enrich_eap_traces_with_extra_data(traces, snuba_params)
return traces
def enrich_eap_traces_with_extra_data(
self,
traces: list[TraceResult],
snuba_params: SnubaParams,
):
trace_ids = [trace["trace"] for trace in traces]
breakdown_raw_results = Spans.run_table_query(
params=snuba_params,
query_string=f"is_transaction:1 trace:[{','.join(trace_ids)}]",
selected_columns=[
"trace",
"project",
"sdk.name",
"span.op",
"parent_span",
"transaction",
"precise.start_ts",
"precise.finish_ts",
"span.duration",
],
orderby=["precise.start_ts", "-precise.finish_ts"],
offset=0,
limit=MAX_SNUBA_RESULTS,
referrer=Referrer.API_TRACE_EXPLORER_TRACES_BREAKDOWNS.value,
config=SearchResolverConfig(auto_fields=True),
sampling_mode=None,
)
spans = breakdown_raw_results["data"]
extra_queries = [
self.get_traces_errors_query(snuba_params, trace_ids),
self.get_traces_occurrences_query(snuba_params, trace_ids),
]
extra_raw_results = bulk_snuba_queries_with_referrers(
[(query.get_snql_query(), referrer.value) for query, referrer in extra_queries]
)
extra_results = [
query.process_results(result)
for (query, _), result in zip(extra_queries, extra_raw_results)
]
traces_errors: dict[str, int] = {
row["trace"]: row["count()"] for row in extra_results[0]["data"]
}
traces_occurrences: dict[str, int] = {
row["trace"]: row["count()"] for row in extra_results[1]["data"]
}
self.enrich_traces_with_extra_data(
traces,
spans,
traces_errors,
traces_occurrences,
)
def enrich_traces_with_extra_data(
self,
traces: list[TraceResult],
spans: list[dict[str, Any]],
traces_errors: dict[str, int],
traces_occurrences: dict[str, int],
):
traces_range = {
trace["trace"]: {
"start": trace["start"],
"end": trace["end"],
"slices": self.breakdown_slices,
}
for trace in traces
}
spans.sort(key=lambda span: (span["precise.start_ts"], span["precise.finish_ts"]))
try:
traces_breakdowns = process_breakdowns(spans, traces_range)
except Exception as e:
traces_breakdowns = defaultdict(list)
context = {"traces": list(sorted(traces_range.keys()))}
sentry_sdk.capture_exception(e, contexts={"bad_traces": context})
# This is the name of the trace's root span without a parent span
traces_primary_info: MutableMapping[str, tuple[str, str, float]] = {}
# This is the name of a span that can take the place of the trace's root
# based on some heuristics for that type of trace
traces_fallback_info: MutableMapping[str, tuple[str, str, float]] = {}
# This is the name of the first span in the trace that will be used if
# no other candidates names are found
traces_default_info: MutableMapping[str, tuple[str, str, float]] = {}
# Normally, the name given to a trace is the name of the first root transaction
# found within the trace.
#
# But there are some cases where traces do not have any root transactions. For
# these traces, we try to pick out a name from the first span that is a good
# candidate for the trace name.
for span in spans:
if span["trace"] in traces_primary_info:
continue
name: tuple[str, str, float] = (
span["project"],
span["transaction"],
# to minmimize the impact of floating point errors,
# multiply by 1e3 first then do the subtraction
# once we move to eap_items, this can be just `span["span.duration"]`
span["precise.finish_ts"] * 1e3 - span["precise.start_ts"] * 1e3,
)
# The underlying column is a Nullable(UInt64) but we write a default of 0 to it.
# So make sure to handle both in case something changes.
if not span["parent_span"] or int(span["parent_span"], 16) == 0:
traces_primary_info[span["trace"]] = name
if span["trace"] in traces_fallback_info:
continue
# This span is a good candidate for the trace name so use it.
if span["trace"] not in traces_fallback_info and is_trace_name_candidate(span):
traces_fallback_info[span["trace"]] = name
if span["trace"] in traces_default_info:
continue
# This is the first span in this trace.
traces_default_info[span["trace"]] = name
def get_trace_info(
trace: str,
) -> tuple[str, str, float] | tuple[None, None, None]:
if trace in traces_primary_info:
return traces_primary_info[trace]
if trace in traces_fallback_info:
return traces_fallback_info[trace]
if trace in traces_default_info:
return traces_default_info[trace]
return (None, None, None)
for trace in traces:
info = get_trace_info(trace["trace"])
if info[0] is not None and info[1] is not None:
trace["project"] = info[0]
trace["name"] = info[1]
trace["rootDuration"] = info[2]
trace["numErrors"] = traces_errors.get(trace["trace"], 0)
trace["numOccurrences"] = traces_occurrences.get(trace["trace"], 0)
trace["breakdowns"] = traces_breakdowns[trace["trace"]]
def get_traces_rpc(self, projects: list[Project]):
assert self.snuba_params.organization_id is not None
meta = RequestMeta(
organization_id=self.snuba_params.organization_id,
referrer=Referrer.API_TRACE_EXPLORER_SPANS_LIST.value,
project_ids=[project.id for project in projects],
start_timestamp=self.snuba_params.rpc_start_date,
end_timestamp=self.snuba_params.rpc_end_date,
trace_item_type=TraceItemType.TRACE_ITEM_TYPE_UNSPECIFIED,
)
base_filter = TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
op=ComparisonFilter.OP_IN,
value=AttributeValue(val_int_array=IntArray(values=self.snuba_params.project_ids)),
)
)
if self.rpc_user_queries:
filters = [
GetTracesRequest.TraceFilter(
item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
filter=TraceItemFilter(
and_filter=AndFilter(
filters=[
base_filter,
user_query,
],
)
),
)
for user_query in self.rpc_user_queries.values()
]
else:
filters = [
GetTracesRequest.TraceFilter(
item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
filter=base_filter,
)
]
if self.sort == "-timestamp":
orderby = [
GetTracesRequest.OrderBy(
key=TraceAttribute.Key.KEY_START_TIMESTAMP, descending=True
),
]
elif self.sort == "timestamp":
orderby = [
GetTracesRequest.OrderBy(
key=TraceAttribute.Key.KEY_START_TIMESTAMP, descending=False
),
]
else:
# The orderby is intentionally empty here as this query is much faster
# if we let Clickhouse decide which order to return the results in.
# This also means we cannot order by any columns or paginate.
orderby = []
return GetTracesRequest(
meta=meta,
page_token=PageToken(offset=self.offset),
limit=self.limit,
filters=filters,
order_by=orderby,
attributes=[
TraceAttribute(key=TraceAttribute.Key.KEY_TRACE_ID),
TraceAttribute(key=TraceAttribute.Key.KEY_START_TIMESTAMP),
TraceAttribute(key=TraceAttribute.Key.KEY_END_TIMESTAMP),
TraceAttribute(key=TraceAttribute.Key.KEY_TOTAL_ITEM_COUNT),
TraceAttribute(key=TraceAttribute.Key.KEY_FILTERED_ITEM_COUNT),
# earliest span
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_SPAN_PROJECT_ID),
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_SPAN_NAME),
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_SPAN_DURATION_MS),
# frontend span
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_FRONTEND_SPAN_PROJECT_ID),
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_FRONTEND_SPAN),
TraceAttribute(key=TraceAttribute.Key.KEY_EARLIEST_FRONTEND_SPAN_DURATION_MS),
# root span
TraceAttribute(key=TraceAttribute.Key.KEY_ROOT_SPAN_PROJECT_ID),
TraceAttribute(key=TraceAttribute.Key.KEY_ROOT_SPAN_NAME),
TraceAttribute(key=TraceAttribute.Key.KEY_ROOT_SPAN_DURATION_MS),
],
)
def refine_params(self, min_timestamp: datetime, max_timestamp: datetime):
"""
Once we have a min/max timestamp for all the traces in the query,
refine the params so that it selects a time range that is as small as possible.
"""
# TODO: move to use `update_snuba_params_with_timestamp`
time_buffer = options.get("performance.traces.trace-explorer-buffer-hours")
buffer = timedelta(hours=time_buffer)
self.snuba_params.start = min_timestamp - buffer
self.snuba_params.end = max_timestamp + buffer
def process_final_results(
self,
*,
traces_metas_results,
traces_errors_results,
traces_occurrences_results,
traces_breakdown_projects_results,
) -> list[TraceResult]:
results: list[TraceResult] = []
for row in traces_metas_results["data"]:
result: TraceResult = {
"trace": row["trace"],
"numErrors": 0,
"numOccurrences": 0,
"matchingSpans": row[MATCHING_COUNT_ALIAS],
# In EAP mode, we have to use `count_sample()` to avoid extrapolation
"numSpans": row.get("count()") or row.get("count_sample()") or 0,
"project": None,
"name": None,
"rootDuration": None,
"duration": row["last_seen()"] - row["first_seen()"],
"start": row["first_seen()"],
"end": row["last_seen()"],
"breakdowns": [],
}
results.append(result)
traces_errors: dict[str, int] = {
row["trace"]: row["count()"] for row in traces_errors_results["data"]
}
traces_occurrences: dict[str, int] = {
row["trace"]: row["count()"] for row in traces_occurrences_results["data"]
}
self.enrich_traces_with_extra_data(
results,
traces_breakdown_projects_results["data"],
traces_errors,
traces_occurrences,
)
return results
def process_meta_results(self, results):
return results["meta"]
def get_traces_errors_query(
self,
snuba_params: SnubaParams,
trace_ids: list[str],
) -> tuple[BaseQueryBuilder, Referrer]:
query = DiscoverQueryBuilder(
Dataset.Events,
params={},
snuba_params=snuba_params,
query=None,
selected_columns=["trace", "count()"],
limit=len(trace_ids),
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
# restrict the query to just this subset of trace ids
query.add_conditions([Condition(Column("trace_id"), Op.IN, trace_ids)])
return query, Referrer.API_TRACE_EXPLORER_TRACES_ERRORS
def get_traces_occurrences_query(
self,
snuba_params: SnubaParams,
trace_ids: list[str],
) -> tuple[BaseQueryBuilder, Referrer]:
query = DiscoverQueryBuilder(
Dataset.IssuePlatform,
params={},
snuba_params=snuba_params,
query=None,
selected_columns=["trace", "count()"],
limit=len(trace_ids),
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
# restrict the query to just this subset of trace ids
query.add_conditions([Condition(Column("trace_id"), Op.IN, trace_ids)])
return query, Referrer.API_TRACE_EXPLORER_TRACES_OCCURRENCES
def convert_to_slice(timestamp, trace_range, left_bound=None) -> int:
slices = trace_range["slices"]
trace_start = trace_range["start"]
trace_end = trace_range["end"]
trace_duration = trace_end - trace_start
idx = round((timestamp - trace_start) * slices / trace_duration)
if idx < slices and left_bound is not None and left_bound >= idx:
idx = left_bound + 1
return idx
def quantize_range(span_start, span_end, trace_range):
slices = trace_range["slices"]
trace_start = trace_range["start"]
trace_end = trace_range["end"]
trace_duration = trace_end - trace_start
if trace_duration == 0:
start_index = 0
end_index = slices
else:
raw_start_index = convert_to_slice(span_start, trace_range)
start_index = clip(raw_start_index, 0, slices)
raw_end_index = convert_to_slice(span_end, trace_range, start_index)
end_index = clip(raw_end_index, 0, slices)
if raw_start_index != start_index:
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("slice start", {"raw": raw_start_index, "clipped": start_index})
sentry_sdk.capture_message("Slice start was adjusted", level="warning")
if raw_end_index != end_index:
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("slice end", {"raw": raw_end_index, "clipped": end_index})
sentry_sdk.capture_message("Slice end was adjusted", level="warning")
rounded_start = span_start
rounded_end = span_end
if slices > 0:
bin_size = int((trace_end - trace_start) / slices)
if bin_size > 0:
rounded_start = round((span_start - trace_start) / bin_size) * bin_size + trace_start
rounded_end = round((span_end - trace_start) / bin_size) * bin_size + trace_start
# ensure minimum of 1 width
if rounded_start == rounded_end:
rounded_end += bin_size
if span_start <= trace_start:
rounded_start = trace_start
# To avoid creating gaps at the end of the trace,
# do not adjust the end if it's at the trace end.
if span_end >= trace_end:
rounded_end = trace_end
return (int(rounded_start), int(rounded_end)), (start_index, end_index)
def new_trace_interval(row) -> TraceInterval:
parent_span = row.get("parent_span", "")
return {
"kind": "project",
"project": row["project"],
"sdkName": row["sdk.name"],
"start": row["quantized.start_ts"],
"end": row["quantized.finish_ts"],
"sliceStart": row["start_index"],
"sliceEnd": row["end_index"],
"sliceWidth": row["end_index"] - row["start_index"],
"duration": 0,
"components": [(row["precise.start_ts"], row["precise.finish_ts"])],
"isRoot": not parent_span or set(parent_span) == {"0"},
}
def process_breakdowns(data, traces_range):
breakdowns: Mapping[str, list[TraceInterval]] = {trace: [] for trace in traces_range}
stacks: Mapping[str, list[TraceInterval]] = {trace: [] for trace in traces_range}
def should_merge(interval_a, interval_b):
return (
# only merge intervals that have parent spans, i.e. those that aren't the trace root
not interval_a["isRoot"]
and not interval_b["isRoot"]
# only merge intervals that overlap
and interval_a["end"] >= interval_b["start"]
# only merge intervals that are part of the same service
and interval_a["project"] == interval_b["project"]
and interval_a["sdkName"] == interval_b["sdkName"]
)
def breakdown_push(trace, interval):
breakdown = breakdowns[trace]
""" TODO: Add this back
# Find the last interval. If there is an interval on the stack, it
# should take priority over intervals in the breakdown because intervals
# on the stack are always active, where intervals on the breakdown are
# the most recently started, and it's possible older intervals end after
# the newer intervals
last_interval = stack_peek(trace)
if last_interval is None and breakdown:
last_interval = breakdown[-1]
if last_interval and last_interval["end"] < interval["start"]:
# A gap in the breakdown was found, fill it with a missing interval
breakdown.append(
{
"kind": "missing",
"project": None,
"sdkName": None,
"start": last_interval["end"],
"end": interval["start"],
"duration": 0,
"components": [
(last_interval["components"][-1][1], interval["components"][0][0]),
],
"isRoot": False,
}
)
"""
breakdown.append(interval)
def stack_push(trace, interval):
for last_interval in reversed(stacks[trace]):
if not should_merge(last_interval, interval):
continue
# update the end of this interval and it will
# be updated in the breakdown as well
last_interval["end"] = max(interval["end"], last_interval["end"])
last_interval["sliceEnd"] = max(interval["sliceEnd"], last_interval["sliceEnd"])
# need to update the components of the last interval by merging
# current interval into it
last_component = last_interval["components"][-1]
# there should always be 1 component in the current interval
assert len(interval["components"]) == 1
cur_component = interval["components"][0]
if last_component[1] >= cur_component[0]:
last_interval["components"][-1] = (
last_component[0],
max(last_component[1], cur_component[1]),
)
else:
last_interval["components"].extend(interval["components"])
return
# Make sure to push the breakdown before the stack. This is because
# pushing the breakdown can fill in missing intervals but that needs
# to know what the current state of the stack is. If we push the
# interval onto the stack first, it would not generate the missing
# intervals correctly.
breakdown_push(trace, interval)
stack = stacks[trace]
stack.append(interval)
def stack_peek(trace):
if not stacks[trace]:
return None
return stacks[trace][-1]
def stack_pop(trace):
return stacks[trace].pop()
def stack_clear(trace, until=None):
while stacks[trace]:
if until is not None and stack_peek(trace)["end"] >= until:
break
stack_pop(trace)
quantized_data = []
for row in data:
try:
trace = row["trace"]
precise_start = int(row["precise.start_ts"] * 1000)
precise_end = int(row["precise.finish_ts"] * 1000)
trace_range = traces_range[trace]
trace_start = trace_range["start"]
trace_end = trace_range["end"]
# clip the intervals os that it is within range of the trace
precise_start = clip(precise_start, trace_start, trace_end)
precise_end = clip(precise_end, trace_start, trace_end)
(quantized_start, quantized_end), (start_index, end_index) = quantize_range(
precise_start,
precise_end,
traces_range[trace],
)
quantized_data.append(
{
**row,
"precise.start_ts": precise_start,
"precise.finish_ts": precise_end,
"quantized.start_ts": quantized_start,
"quantized.finish_ts": quantized_end,
"start_index": start_index,
"end_index": end_index,
}
)
except Exception as e:
context = {"trace": row["trace"]}
sentry_sdk.capture_exception(e, contexts={"bad_trace": context})
quantized_data.sort(
key=lambda row: (
row["start_index"],
row["precise.start_ts"],
-row["end_index"],
-row["precise.finish_ts"],
)
)
last_timestamp_per_trace: dict[str, int] = defaultdict(int)
for row in quantized_data:
try:
trace = row["trace"]
last_timestamp_per_trace["trace"] = max(
row["precise.finish_ts"], last_timestamp_per_trace["trace"]
)
if row["start_index"] == row["end_index"]:
# after quantizing, this span is far too small to render, so remove it
continue
cur = new_trace_interval(row)
# Clear the stack of any intervals that end before the current interval
# starts while pushing them to the breakdowns.
stack_clear(trace, until=cur["start"])
stack_push(trace, cur)
except Exception as e:
context = {"trace": row["trace"]}
sentry_sdk.capture_exception(e, contexts={"bad_trace": context})
""" TODO: Add this back
for trace, trace_range in traces_range.items():
# Check to see if there is still a gap before the trace ends and fill it
# with an other interval.
other_start = trace_range["start"]
other_end = trace_range["end"]
other: TraceInterval = {
"kind": "other",
"project": None,
"sdkName": None,
"start": other_start,
"end": other_end,
"duration": 0,
"isRoot": False,
}
# Clear the remaining intervals on the stack to find the latest end time
# of the intervals. This will be used to decide if there are any portion
# of the trace that was not covered by one of the intervals.
while stacks[trace]:
interval = stack_pop(trace)
other["start"] = max(other["start"], interval["end"])
# other["start"] = max(other["start"], interval["components"][-1][1])
last_component = interval["components"][-1]
other_start = max(other_start, last_component[1])
other["components"] = [(other_start, other_end)]
if other["start"] < other["end"]:
breakdown_push(trace, other)
"""
for breakdown in breakdowns.values():
for interval in breakdown:
components = interval.pop("components", [])
component_duration = sum(component[1] - component[0] for component in components)
interval_duration = interval["end"] - interval["start"]
# in the event we don't have a duration from the components, we fall back to the interval
interval["duration"] = (
component_duration if component_duration > 0 else interval_duration
)
interval["sliceWidth"] = interval["sliceEnd"] - interval["sliceStart"]
return breakdowns
def process_rpc_user_queries(
snuba_params: SnubaParams,
user_queries: list[str],
dataset: Dataset = Dataset.SpansIndexed,
) -> dict[str, TraceItemFilter]:
if len(user_queries) > 1:
raise ValueError("Only 1 user query supported")
queries: dict[str, TraceItemFilter] = {}
config = SearchResolverConfig(auto_fields=True)
resolver = Spans.get_resolver(snuba_params, config)
# Filter out empty queries as they do not do anything to change the results.
user_queries = [user_query for user_query in user_queries if len(user_query) > 0]
# ensure at least 1 user query exists as the environment filter is AND'ed to it
if not user_queries:
user_queries.append("")
for user_query in user_queries:
user_query = user_query.strip()
# We want to ignore all the aggregate conditions here because we're strictly
# searching on span attributes, not aggregates
where, _, _ = resolver.resolve_query(user_query)
if where is not None:
queries[user_query] = where
set_span_attribute("user_queries_count", len(queries))
sentry_sdk.set_context("user_queries", {"raw_queries": user_queries})
return queries
OP_TO_FUNC = {
Op.GT: "greater",
Op.LT: "less",
Op.GTE: "greaterOrEquals",
Op.LTE: "lessOrEquals",
Op.EQ: "equals",
Op.NEQ: "notEquals",
Op.IN: "in",
Op.NOT_IN: "notIn",
Op.LIKE: "like",
Op.NOT_LIKE: "notLike",
}
def generate_trace_condition(span_conditions: list[WhereType]) -> WhereType | None:
trace_conditions: list[Function] = format_as_trace_conditions(span_conditions)
if not trace_conditions:
return None
elif len(trace_conditions) == 1:
return Condition(Function("countIf", trace_conditions), Op.GT, 0)
else:
return Condition(Function("countIf", [Function("and", trace_conditions)]), Op.GT, 0)
def format_as_trace_conditions(span_conditions: list[WhereType]) -> list[Function]:
return [format_as_trace_condition(span_condition) for span_condition in span_conditions]
def format_as_trace_condition(span_condition: WhereType) -> Function:
if isinstance(span_condition, Condition):
if span_condition.op == Op.IS_NULL:
return Function("isNull", span_condition.lhs)
elif span_condition.op == Op.IS_NOT_NULL:
return Function("isNotNull", span_condition.lhs)
else:
return Function(
OP_TO_FUNC[span_condition.op],
[span_condition.lhs, span_condition.rhs],
)
elif isinstance(span_condition, BooleanCondition):
if span_condition.op == BooleanOp.AND:
return Function(
"and",
format_as_trace_conditions(span_condition.conditions),
)
elif span_condition.op == BooleanOp.OR:
return Function(
"or",
format_as_trace_conditions(span_condition.conditions),
)
else:
raise ValueError(f"{span_condition.op} is not a BooleanOp")
else:
raise ValueError(f"{span_condition} is not a Condition or BooleanCondition")
@dataclasses.dataclass
| TracesExecutor |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/VTickGroup.py | {
"start": 300,
"end": 3489
} | class ____(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Draws a set of tick marks which always occupy the same vertical range of the view,
but have x coordinates relative to the data within the view.
"""
def __init__(self, xvals=None, yrange=None, pen=None):
"""
============== ===================================================================
**Arguments:**
xvals A list of x values (in data coordinates) at which to draw ticks.
yrange A list of [low, high] limits for the tick. 0 is the bottom of
the view, 1 is the top. [0.8, 1] would draw ticks in the top
fifth of the view.
pen The pen to use for drawing ticks. Default is grey. Can be specified
as any argument valid for :func:`mkPen<pyqtgraph.mkPen>`
============== ===================================================================
"""
if yrange is None:
yrange = [0, 1]
if xvals is None:
xvals = []
UIGraphicsItem.__init__(self)
if pen is None:
pen = (200, 200, 200)
self.path = QtWidgets.QGraphicsPathItem()
self.ticks = []
self.xvals = []
self.yrange = [0,1]
self.setPen(pen)
self.setYRange(yrange)
self.setXVals(xvals)
def setPen(self, *args, **kwargs):
"""Set the pen to use for drawing ticks. Can be specified as any arguments valid
for :func:`mkPen<pyqtgraph.mkPen>`"""
self.pen = fn.mkPen(*args, **kwargs)
def setXVals(self, vals):
"""Set the x values for the ticks.
============== =====================================================================
**Arguments:**
vals A list of x values (in data/plot coordinates) at which to draw ticks.
============== =====================================================================
"""
self.xvals = vals
self.rebuildTicks()
#self.valid = False
def setYRange(self, vals):
"""Set the y range [low, high] that the ticks are drawn on. 0 is the bottom of
the view, 1 is the top."""
self.yrange = vals
self.rebuildTicks()
def dataBounds(self, *args, **kargs):
return None ## item should never affect view autoscaling
def yRange(self):
return self.yrange
def rebuildTicks(self):
self.path = QtGui.QPainterPath()
for x in self.xvals:
self.path.moveTo(x, 0.)
self.path.lineTo(x, 1.)
def paint(self, p, *args):
UIGraphicsItem.paint(self, p, *args)
br = self.boundingRect()
h = br.height()
br.setY(br.y() + self.yrange[0] * h)
br.setHeight((self.yrange[1] - self.yrange[0]) * h)
p.translate(0, br.y())
p.scale(1.0, br.height())
p.setPen(self.pen)
p.drawPath(self.path)
| VTickGroup |
python | ray-project__ray | python/ray/train/xgboost/config.py | {
"start": 474,
"end": 1859
} | class ____(BackendConfig):
"""Configuration for xgboost collective communication setup.
Ray Train will set up the necessary coordinator processes and environment
variables for your workers to communicate with each other.
Additional configuration options can be passed into the
`xgboost.collective.CommunicatorContext` that wraps your own `xgboost.train` code.
See the `xgboost.collective` module for more information:
https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/collective.py
Args:
xgboost_communicator: The backend to use for collective communication for
distributed xgboost training. For now, only "rabit" is supported.
"""
xgboost_communicator: str = "rabit"
@property
def train_func_context(self):
@contextmanager
def collective_communication_context():
with CommunicatorContext(**_get_xgboost_args()):
yield
return collective_communication_context
@property
def backend_cls(self):
if self.xgboost_communicator == "rabit":
return (
_XGBoostRabitBackend
if Version(xgboost.__version__) >= Version("2.1.0")
else _XGBoostRabitBackend_pre_xgb210
)
raise NotImplementedError(f"Unsupported backend: {self.xgboost_communicator}")
| XGBoostConfig |
python | numpy__numpy | numpy/lib/tests/test_type_check.py | {
"start": 3287,
"end": 4051
} | class ____:
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(y, np.real(y))
y = np.array(1)
out = np.real(y)
assert_array_equal(y, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.real(y)
assert_equal(y, out)
assert_(not isinstance(out, np.ndarray))
def test_cmplx(self):
y = np.random.rand(10,) + 1j * np.random.rand(10,)
assert_array_equal(y.real, np.real(y))
y = np.array(1 + 1j)
out = np.real(y)
assert_array_equal(y.real, out)
assert_(isinstance(out, np.ndarray))
y = 1 + 1j
out = np.real(y)
assert_equal(1.0, out)
assert_(not isinstance(out, np.ndarray))
| TestReal |
python | ApeWorX__ape | src/ape_console/config.py | {
"start": 92,
"end": 301
} | class ____(PluginConfig):
plugins: list[str] = []
"""Additional IPython plugins to include in your session."""
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_CONSOLE_")
| ConsoleConfig |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 6438,
"end": 6552
} | class ____(GQLResult):
node: Optional[RegistryFragmentArtifactTypesEdgesNode]
| RegistryFragmentArtifactTypesEdges |
python | scikit-image__scikit-image | benchmarks/benchmark_morphology.py | {
"start": 6584,
"end": 8419
} | class ____:
# skip rectangle as roughly equivalent to square
param_names = ["shape", "dtype"]
params = [
((10, 10), (64, 64), (1200, 1200), (96, 96, 96)),
(np.uint8, np.float32, np.float64),
]
def setup(self, shape, dtype):
rng = np.random.default_rng(123)
# make an image that is mostly True, with a few isolated False areas
rvals = rng.integers(1, 255, size=shape).astype(dtype=dtype)
roi1 = tuple(slice(s // 4, s // 2) for s in rvals.shape)
roi2 = tuple(slice(s // 2 + 1, (3 * s) // 4) for s in rvals.shape)
seed = np.full(rvals.shape, 1, dtype=dtype)
seed[roi1] = rvals[roi1]
seed[roi2] = rvals[roi2]
# create a mask with a couple of square regions set to seed maximum
mask = np.full(seed.shape, 1, dtype=dtype)
mask[roi1] = 255
mask[roi2] = 255
self.seed = seed
self.mask = mask
def time_reconstruction(self, shape, dtype):
morphology.reconstruction(self.seed, self.mask)
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_reconstruction(self, shape, dtype):
morphology.reconstruction(self.seed, self.mask)
| GrayReconstruction |
python | tox-dev__tox | src/tox/util/spinner.py | {
"start": 1310,
"end": 1379
} | class ____(NamedTuple):
ok: str
fail: str
skip: str
| Outcome |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datastore.py | {
"start": 1588,
"end": 6215
} | class ____(GoogleCloudBaseOperator):
"""
Export entities from Google Cloud Datastore to Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreExportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
:param bucket: name of the cloud storage bucket to back up data
:param namespace: optional namespace path in the specified Cloud Storage bucket
to back up data. If this namespace does not exist in GCS, it will be created.
:param datastore_conn_id: the name of the Datastore connection id to use
:param cloud_storage_conn_id: the name of the cloud storage connection id to
force-write backup
:param entity_filter: description of what data from the project is included in the
export, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:param labels: client-assigned labels for cloud storage
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:param overwrite_existing: if the storage bucket + namespace is not empty, it will be
emptied prior to exports. This enables overwriting existing backups.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"namespace",
"entity_filter",
"labels",
"impersonation_chain",
)
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
bucket: str,
namespace: str | None = None,
datastore_conn_id: str = "google_cloud_default",
cloud_storage_conn_id: str = "google_cloud_default",
entity_filter: dict | None = None,
labels: dict | None = None,
polling_interval_in_seconds: int = 10,
overwrite_existing: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.cloud_storage_conn_id = cloud_storage_conn_id
self.bucket = bucket
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.overwrite_existing = overwrite_existing
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
self.log.info("Exporting data to Cloud Storage bucket %s", self.bucket)
if self.overwrite_existing and self.namespace:
gcs_hook = GCSHook(self.cloud_storage_conn_id, impersonation_chain=self.impersonation_chain)
objects = gcs_hook.list(self.bucket, prefix=self.namespace)
for obj in objects:
gcs_hook.delete(self.bucket, obj)
ds_hook = DatastoreHook(
gcp_conn_id=self.datastore_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.export_to_storage_bucket(
bucket=self.bucket,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result["name"]
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result["metadata"]["common"]["state"]
if state != "SUCCESSFUL":
raise AirflowException(f"Operation failed: result={result}")
StorageLink.persist(
context=context,
uri=f"{self.bucket}/{result['response']['outputUrl'].split('/')[3]}",
project_id=self.project_id or ds_hook.project_id,
)
return result
| CloudDatastoreExportEntitiesOperator |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 95279,
"end": 97498
} | class ____(PallasBaseTest):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu_at_least(4):
self.skipTest('DMAs not supported on TPU generations <= 3')
def test_simple_tile_aligned_dynamic_size_dma(self):
def kernel(size_smem_ref, x_hbm_ref, _, o_hbm_ref, sem):
size = size_smem_ref[0]
pltpu.async_copy(
x_hbm_ref.at[pl.ds(0, size)],
o_hbm_ref.at[pl.ds(0, size)], sem).wait()
x = jnp.tile(jnp.arange(8, dtype=jnp.int32)[:, None, None], [1, 8, 128])
o = jnp.zeros((8, 8, 128), dtype=jnp.int32)
size = jnp.array([4], dtype=jnp.int32)
out = pl.pallas_call(
kernel,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM),
pl.BlockSpec(memory_space=pltpu.ANY),
pl.BlockSpec(memory_space=pltpu.ANY)],
out_specs=pl.BlockSpec(memory_space=pltpu.ANY),
scratch_shapes=[pltpu.SemaphoreType.DMA]
),
out_shape=o,
input_output_aliases={2: 0},
)(size, x, o)
expected = o.at[:4].set(x.at[:4].get())
np.testing.assert_array_equal(out, expected)
def test_simple_dynamic_size_dma(self):
self.skipTest("doesn't work yet.")
def kernel(size_smem_ref, x_hbm_ref, _, o_hbm_ref, sem):
size = size_smem_ref[0]
pltpu.async_copy(
x_hbm_ref.at[pl.ds(0, size)],
o_hbm_ref.at[pl.ds(0, size)], sem).wait()
x = jnp.arange(8, dtype=jnp.int32)
o = jnp.zeros(8, dtype=jnp.int32)
size = jnp.array([4], dtype=jnp.int32)
out = pl.pallas_call(
kernel,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM),
pl.BlockSpec(memory_space=pltpu.ANY),
pl.BlockSpec(memory_space=pltpu.ANY)],
out_specs=pl.BlockSpec(memory_space=pltpu.ANY),
scratch_shapes=[pltpu.SemaphoreType.DMA]
),
out_shape=o,
input_output_aliases={2: 0},
)(size, x, o)
expected = o.at[:4].set(x.at[:4].get())
np.testing.assert_array_equal(out, expected)
| PallasCallDynamicDMATest |
python | kamyu104__LeetCode-Solutions | Python/graph-connectivity-with-threshold.py | {
"start": 867,
"end": 1498
} | class ____(object):
def areConnected(self, n, threshold, queries):
"""
:type n: int
:type threshold: int
:type queries: List[List[int]]
:rtype: List[bool]
"""
union_find = UnionFind(n)
for i in xrange(threshold+1, n+1):
# https://stackoverflow.com/questions/25905118/finding-big-o-of-the-harmonic-series
# sum of harmonic series is O(logn)
for j in xrange(2*i, n+1, i): # step by i
union_find.union_set(i-1, j-1)
return [union_find.find_set(q[0]-1) == union_find.find_set(q[1]-1) for q in queries]
| Solution |
python | pandas-dev__pandas | pandas/tests/series/methods/test_sort_values.py | {
"start": 7984,
"end": 8975
} | class ____:
def test_sort_values_key(self):
series = Series(np.array(["Hello", "goodbye"]))
result = series.sort_values(axis=0)
expected = series
tm.assert_series_equal(result, expected)
result = series.sort_values(axis=0, key=lambda x: x.str.lower())
expected = series[::-1]
tm.assert_series_equal(result, expected)
def test_sort_values_key_nan(self):
series = Series(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = series.sort_values(axis=0)
expected = series.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_series_equal(result, expected)
result = series.sort_values(axis=0, key=lambda x: x + 5)
expected = series.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_series_equal(result, expected)
result = series.sort_values(axis=0, key=lambda x: -x, ascending=False)
expected = series.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_series_equal(result, expected)
| TestSeriesSortingKey |
python | getsentry__sentry | src/sentry/migrations/0999_add_extrapolation_mode_to_snuba_query.py | {
"start": 155,
"end": 1633
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0998_add_prebuilt_id_to_dashboards"),
]
operations = [
migrations.AddField(
model_name="snubaquery",
name="extrapolation_mode",
field=models.IntegerField(db_default=0, default=0),
),
migrations.AddField(
model_name="snubaquery",
name="query_snapshot",
field=models.JSONField(null=True),
),
]
| Migration |
python | huggingface__transformers | src/transformers/utils/auto_docstring.py | {
"start": 2077,
"end": 6971
} | class ____:
images = {
"description": """
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
""",
"shape": None,
}
videos = {
"description": """
Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
""",
"shape": None,
}
do_resize = {
"description": """
Whether to resize the image.
""",
"shape": None,
}
size = {
"description": """
Describes the maximum input dimensions to the model.
""",
"shape": None,
}
size_divisor = {
"description": """
The size by which to make sure both the height and width can be divided.
""",
"shape": None,
}
default_to_square = {
"description": """
Whether to default to a square image when resizing, if size is an int.
""",
"shape": None,
}
resample = {
"description": """
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
""",
"shape": None,
}
do_center_crop = {
"description": """
Whether to center crop the image.
""",
"shape": None,
}
crop_size = {
"description": """
Size of the output image after applying `center_crop`.
""",
"shape": None,
}
do_pad = {
"description": """
Whether to pad the image. Padding is done either to the largest size in the batch
or to a fixed square size per image. The exact padding strategy depends on the model.
""",
"shape": None,
}
pad_size = {
"description": """
The size in `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch. Applied only when `do_pad=True.`
""",
"shape": None,
}
do_rescale = {
"description": """
Whether to rescale the image.
""",
"shape": None,
}
rescale_factor = {
"description": """
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
""",
"shape": None,
}
do_normalize = {
"description": """
Whether to normalize the image.
""",
"shape": None,
}
image_mean = {
"description": """
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
""",
"shape": None,
}
image_std = {
"description": """
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
""",
"shape": None,
}
do_convert_rgb = {
"description": """
Whether to convert the image to RGB.
""",
"shape": None,
}
return_tensors = {
"description": """
Returns stacked tensors if set to `pt, otherwise returns a list of tensors.
""",
"shape": None,
}
data_format = {
"description": """
Only `ChannelDimension.FIRST` is supported. Added for compatibility with slow processors.
""",
"shape": None,
}
input_data_format = {
"description": """
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
""",
"shape": None,
}
device = {
"description": """
The device to process the images on. If unset, the device is inferred from the input images.
""",
"shape": None,
}
disable_grouping = {
"description": """
Whether to disable grouping of images by size to process them individually and not in batches.
If None, will be set to True if the images are on CPU, and False otherwise. This choice is based on
empirical observations, as detailed here: https://github.com/huggingface/transformers/pull/38157
""",
"shape": None,
}
image_seq_length = {
"description": """
The number of image tokens to be used for each image in the input.
Added for backward compatibility but this should be set as a processor attribute in future models.
""",
"shape": None,
}
| ImageProcessorArgs |
python | ray-project__ray | python/ray/data/_internal/logical/operators/map_operator.py | {
"start": 612,
"end": 2988
} | class ____(AbstractOneToOne):
"""Abstract class for logical operators that should be converted to physical
MapOperator.
"""
def __init__(
self,
name: str,
input_op: Optional[LogicalOperator] = None,
num_outputs: Optional[int] = None,
*,
min_rows_per_bundled_input: Optional[int] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
compute: Optional[ComputeStrategy] = None,
):
"""Initialize an ``AbstractMap`` logical operator that will later
be converted into a physical ``MapOperator``.
Args:
name: Name for this operator. This is the name that will appear when
inspecting the logical plan of a Dataset.
input_op: The operator preceding this operator in the plan DAG. The
outputs of ``input_op`` will be the inputs to this operator.
num_outputs: Number of outputs for this operator.
min_rows_per_bundled_input: Minimum number of rows a single bundle of
blocks passed on to the task must possess.
ray_remote_args: Args to provide to :func:`ray.remote`.
ray_remote_args_fn: A function that returns a dictionary of remote
args passed to each map worker. The purpose of this argument is
to generate dynamic arguments for each actor/task, and it will
be called each time prior to initializing the worker. Args
returned from this dict always override the args in
``ray_remote_args``. Note: this is an advanced, experimental
feature.
compute: The compute strategy, either ``TaskPoolStrategy`` (default)
to use Ray tasks, or ``ActorPoolStrategy`` to use an
autoscaling actor pool.
"""
super().__init__(name, input_op, num_outputs)
self._min_rows_per_bundled_input = min_rows_per_bundled_input
self._ray_remote_args = ray_remote_args or {}
self._ray_remote_args_fn = ray_remote_args_fn
self._compute = compute or TaskPoolStrategy()
self._per_block_limit = None
def set_per_block_limit(self, per_block_limit: int):
self._per_block_limit = per_block_limit
| AbstractMap |
python | kamyu104__LeetCode-Solutions | Python/count-substrings-divisible-by-last-digit.py | {
"start": 2902,
"end": 3452
} | class ____(object):
def countSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
dp = [[0]*10 for _ in xrange(10)]
for i in xrange(1, len(s)+1):
new_dp = [[0]*10 for _ in xrange(10)]
x = ord(s[i-1])-ord('0')
for d in xrange(1, 9+1):
new_dp[d][x%d] += 1
for r in xrange(d):
new_dp[d][(r*10+x)%d] += dp[d][r]
dp = new_dp
result += dp[x][0]
return result
| Solution3 |
python | doocs__leetcode | solution/0800-0899/0804.Unique Morse Code Words/Solution.py | {
"start": 0,
"end": 713
} | class ____:
def uniqueMorseRepresentations(self, words: List[str]) -> int:
codes = [
".-",
"-...",
"-.-.",
"-..",
".",
"..-.",
"--.",
"....",
"..",
".---",
"-.-",
".-..",
"--",
"-.",
"---",
".--.",
"--.-",
".-.",
"...",
"-",
"..-",
"...-",
".--",
"-..-",
"-.--",
"--..",
]
s = {''.join([codes[ord(c) - ord('a')] for c in word]) for word in words}
return len(s)
| Solution |
python | pytorch__pytorch | test/distributed/tensor/test_experimental_ops.py | {
"start": 439,
"end": 7978
} | class ____(DTensorTestBase):
@property
def world_size(self) -> int:
# hard code world size to 2
return 2
@with_comms
def test_slice(self):
device_mesh = self.build_device_mesh()
shard_spec = [Replicate()]
input_list = torch.rand(ITER_TIME, 1024, 10)
grad_output_list = torch.rand(ITER_TIME, 1024, 5) * 1e-3
for i in range(ITER_TIME):
inp = input_list[i].to(self.device_type).requires_grad_()
grad_output = grad_output_list[i].to(self.device_type)
# droppath with dtensor
inp_dtensor = distribute_tensor(inp, device_mesh, shard_spec)
grad_output_dtensor = distribute_tensor(
grad_output, device_mesh, shard_spec
)
output = inp_dtensor[:, :5]
output.backward(grad_output_dtensor)
# nll with plain tensor
output_gt = inp[:, :5]
output_gt.backward(grad_output)
output_diff_abs = output.to_local() - output_gt
output_diff_rel = output_diff_abs / (torch.abs(output_gt) + 1e-8)
output_mse_abs = torch.mean(output_diff_abs * output_diff_abs).item()
output_mse_rel = torch.mean(output_diff_rel * output_diff_rel).item()
grad_diff_abs = inp_dtensor.grad.to_local() - inp.grad
grad_diff_rel = grad_diff_abs / (torch.abs(inp.grad) + 1e-8)
grad_mse_abs = torch.mean(grad_diff_abs * grad_diff_abs).item()
grad_mse_rel = torch.mean(grad_diff_rel * grad_diff_rel).item()
self.assertTrue(
output_mse_abs <= 1e-6,
f"Too large absolute mse for output, expected less equal 1e-6, got {output_mse_abs}",
)
self.assertTrue(
output_mse_rel <= 1e-6,
f"Too large relative mse for output, expected less equal 1e-6, got {output_mse_rel}",
)
self.assertTrue(
grad_mse_abs <= 1e-6,
f"Too large absolute mse for gradient, expected less equal 1e-6, got {grad_mse_abs}",
)
self.assertTrue(
grad_mse_rel <= 1e-6,
f"Too large relative mse for gradient, expected less equal 1e-6, got {grad_mse_rel}",
)
@with_comms
def test_bernoulli(self):
rank = dist.get_rank()
device_mesh = self.build_device_mesh()
shard_spec = [Replicate()]
input_list = torch.rand(ITER_TIME, 1024, 10)
grad_output_list = torch.rand(ITER_TIME, 1024, 10) * 1e-3
for i in range(ITER_TIME):
inp = input_list[i].to(self.device_type).requires_grad_()
grad_output = grad_output_list[i].to(self.device_type)
# bernoulli with dtensor
inp_dtensor = distribute_tensor(inp, device_mesh, shard_spec)
grad_output_dtensor = distribute_tensor(
grad_output, device_mesh, shard_spec
)
output = torch.bernoulli(inp_dtensor)
output.backward(grad_output_dtensor)
send_output_tensor = output.to_local()
recv_output_tensor = torch.zeros_like(send_output_tensor)
send_grad_tensor = inp_dtensor.grad.to_local()
recv_grad_tensor = torch.zeros_like(send_grad_tensor)
send_op_1 = dist.P2POp(dist.isend, send_output_tensor, 1 ^ rank)
send_op_2 = dist.P2POp(dist.isend, send_grad_tensor, 1 ^ rank)
recv_op_1 = dist.P2POp(dist.irecv, recv_output_tensor, 1 ^ rank)
recv_op_2 = dist.P2POp(dist.irecv, recv_grad_tensor, 1 ^ rank)
reqs = dist.batch_isend_irecv([send_op_1, send_op_2, recv_op_1, recv_op_2])
for req in reqs:
req.wait()
output_diff_abs = send_output_tensor - recv_output_tensor
output_diff_rel = output_diff_abs / (torch.abs(recv_output_tensor) + 1e-8)
output_mse_abs = torch.mean(output_diff_abs * output_diff_abs).item()
output_mse_rel = torch.mean(output_diff_rel * output_diff_rel).item()
grad_diff_abs = send_grad_tensor - recv_grad_tensor
grad_diff_rel = grad_diff_abs / (torch.abs(recv_grad_tensor) + 1e-8)
grad_mse_abs = torch.mean(grad_diff_abs * grad_diff_abs).item()
grad_mse_rel = torch.mean(grad_diff_rel * grad_diff_rel).item()
self.assertTrue(
output_mse_abs <= 1e-6,
f"Too large absolute mse for output, expected less equal 1e-6, got {output_mse_abs}",
)
self.assertTrue(
output_mse_rel <= 1e-6,
f"Too large relative mse for output, expected less equal 1e-6, got {output_mse_rel}",
)
self.assertTrue(
grad_mse_abs <= 1e-6,
f"Too large absolute mse for gradient, expected less equal 1e-6, got {grad_mse_abs}",
)
self.assertTrue(
grad_mse_rel <= 1e-6,
f"Too large relative mse for gradient, expected less equal 1e-6, got {grad_mse_rel}",
)
@with_comms
def test_nll(self):
device_mesh = self.build_device_mesh()
shard_spec = [Replicate()]
pred_list = torch.rand(ITER_TIME, 1024, 10)
target_list = torch.randint(0, 10, (ITER_TIME, 1024), dtype=torch.long)
criterion = torch.nn.CrossEntropyLoss()
for i in range(ITER_TIME):
pred = pred_list[i].to(self.device_type).requires_grad_()
target = target_list[i].to(self.device_type)
# nll with dtensor
pred_dtensor = distribute_tensor(pred, device_mesh, shard_spec)
target_dtensor = distribute_tensor(target, device_mesh, shard_spec)
loss = criterion(pred_dtensor, target_dtensor)
loss.backward()
# nll with plain tensor
loss_gt = criterion(pred, target)
loss_gt.backward()
loss_diff_abs = loss.to_local() - loss_gt
loss_diff_rel = loss_diff_abs / (torch.abs(loss_gt) + 1e-8)
loss_mse_abs = torch.mean(loss_diff_abs * loss_diff_abs).item()
loss_mse_rel = torch.mean(loss_diff_rel * loss_diff_rel).item()
grad_diff_abs = pred_dtensor.grad.to_local() - pred.grad
grad_diff_rel = grad_diff_abs / (torch.abs(pred.grad) + 1e-8)
grad_mse_abs = torch.mean(grad_diff_abs * grad_diff_abs).item()
grad_mse_rel = torch.mean(grad_diff_rel * grad_diff_rel).item()
self.assertTrue(
loss_mse_abs <= 1e-6,
f"Too large absolute mse for loss, expected less equal 1e-6, got {loss_mse_abs}",
)
self.assertTrue(
loss_mse_rel <= 1e-6,
f"Too large relative mse for loss, expected less equal 1e-6, got {loss_mse_rel}",
)
self.assertTrue(
grad_mse_abs <= 1e-6,
f"Too large absolute mse for gradient, expected less equal 1e-6, got {grad_mse_abs}",
)
self.assertTrue(
grad_mse_rel <= 1e-6,
f"Too large relative mse for gradient, expected less equal 1e-6, got {grad_mse_rel}",
)
DistOtherOpsTestWithLocalTensor = create_local_tensor_test_class(
DistOtherOpsTest,
# Send / recv ops are not supported
skipped_tests=["test_bernoulli"],
)
if __name__ == "__main__":
run_tests()
| DistOtherOpsTest |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_lat_lon_in_timezone.py | {
"start": 623,
"end": 2358
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.lat_lon_in_timezone"
condition_value_keys = ("timezone",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, timezone, **kwargs):
def is_in_timezone(point, timezone):
try:
tf = TimezoneFinder()
detected_timezone = tf.timezone_at(lat=point[0], lng=point[1])
return detected_timezone == timezone
except ValueError:
return False
return column.apply(lambda x: is_in_timezone(x, timezone))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, timezone, **kwargs):
def is_in_timezone(point, timezone):
try:
tf = TimezoneFinder()
detected_timezone = tf.timezone_at(lat=point[0], lng=point[1])
return detected_timezone == timezone
except ValueError:
return False
tz_udf = F.udf(lambda x: is_in_timezone(x, timezone), pyspark.types.BooleanType())
return tz_udf(column)
# This class defines the Expectation itself
| ColumnValuesLatLonInTimezone |
python | huggingface__transformers | src/transformers/models/persimmon/configuration_persimmon.py | {
"start": 878,
"end": 6216
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PersimmonModel`]. It is used to instantiate an
Persimmon model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[adept/persimmon-8b-base](https://huggingface.co/adept/persimmon-8b-base).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 262144):
Vocabulary size of the Persimmon model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`PersimmonModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 16384):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
qk_layernorm (`bool`, *optional*, default to `True`):
Whether or not to normalize the Queries and Keys after projecting the hidden states
hidden_dropout (`float`, *optional*, default to 0.0):
The dropout ratio after applying the MLP to the hidden states.
attention_dropout (`float`, *optional*, default to 0.0):
The dropout ratio after computing the attention scores.
Example:
```python
>>> from transformers import PersimmonModel, PersimmonConfig
>>> # Initializing a Persimmon persimmon-7b style configuration
>>> configuration = PersimmonConfig()
```"""
model_type = "persimmon"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: Optional[int] = 262144,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 16384,
num_hidden_layers: Optional[int] = 36,
num_attention_heads: Optional[int] = 64,
hidden_act: Optional[str] = "relu2",
max_position_embeddings: Optional[int] = 16384,
initializer_range: Optional[float] = 0.02,
layer_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
qk_layernorm: Optional[bool] = True,
hidden_dropout: Optional[float] = 0.0,
attention_dropout: Optional[float] = 0.0,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.qk_layernorm = qk_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["PersimmonConfig"]
| PersimmonConfig |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gridlines07.py | {
"start": 315,
"end": 2365
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gridlines07.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test XlsxWriter gridlines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [59313152, 59364096]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
chart.set_x_axis(
{
"major_gridlines": {"visible": 1},
"minor_gridlines": {"visible": 1},
}
)
chart.set_y_axis(
{
"major_gridlines": {"visible": 1},
"minor_gridlines": {"visible": 1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/feature_column/sequence_feature_column_test.py | {
"start": 13459,
"end": 15020
} | class ____(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self, expected, self.evaluate(id_weight_pair.id_tensor))
| SequenceCategoricalColumnWithVocabularyListTest |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/kueue.py | {
"start": 1396,
"end": 4208
} | class ____(BaseOperator):
"""
Installs a Kubernetes Kueue.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:KubernetesInstallKueueOperator`
:param kueue_version: The Kubernetes Kueue version to install.
:param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
for the Kubernetes cluster.
"""
template_fields: Sequence[str] = (
"kueue_version",
"kubernetes_conn_id",
)
def __init__(self, kueue_version: str, kubernetes_conn_id: str = "kubernetes_default", *args, **kwargs):
super().__init__(*args, **kwargs)
self.kubernetes_conn_id = kubernetes_conn_id
self.kueue_version = kueue_version
self._kueue_yaml_url = (
f"https://github.com/kubernetes-sigs/kueue/releases/download/{self.kueue_version}/manifests.yaml"
)
@cached_property
def hook(self) -> KubernetesHook:
return KubernetesHook(conn_id=self.kubernetes_conn_id)
def execute(self, context):
yaml_objects = self.hook.get_yaml_content_from_file(kueue_yaml_url=self._kueue_yaml_url)
try:
self.hook.apply_from_yaml_file(yaml_objects=yaml_objects)
except FailToCreateError as ex:
error_bodies = []
for e in ex.api_exceptions:
try:
if e.body:
error_bodies.append(json.loads(e.body))
else:
# If no body content, use reason as the message
reason = getattr(e, "reason", "Unknown")
error_bodies.append({"message": reason, "reason": reason})
except (json.JSONDecodeError, ValueError, TypeError):
# If the body is a string (e.g., in a 429 error), it can't be parsed as JSON.
# Use the body directly as the message instead.
error_bodies.append({"message": e.body, "reason": getattr(e, "reason", "Unknown")})
if next((e for e in error_bodies if e.get("reason") == "AlreadyExists"), None):
self.log.info("Kueue is already enabled for the cluster")
if errors := [e for e in error_bodies if e.get("reason") != "AlreadyExists"]:
error_message = "\n".join(
e.get("message") or e.get("body") or f"Unknown error: {e.get('reason', 'Unknown')}"
for e in errors
)
raise AirflowException(error_message)
return
self.hook.check_kueue_deployment_running(name="kueue-controller-manager", namespace="kueue-system")
self.log.info("Kueue installed successfully!")
| KubernetesInstallKueueOperator |
python | kamyu104__LeetCode-Solutions | Python/shortest-subarray-with-sum-at-least-k.py | {
"start": 50,
"end": 893
} | class ____(object):
def shortestSubarray(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
accumulated_sum = [0]*(len(A)+1)
for i in xrange(len(A)):
accumulated_sum[i+1] = accumulated_sum[i]+A[i]
result = float("inf")
mono_increasing_q = collections.deque()
for i, curr in enumerate(accumulated_sum):
while mono_increasing_q and curr <= \
accumulated_sum[mono_increasing_q[-1]]:
mono_increasing_q.pop()
while mono_increasing_q and \
curr-accumulated_sum[mono_increasing_q[0]] >= K:
result = min(result, i-mono_increasing_q.popleft())
mono_increasing_q.append(i)
return result if result != float("inf") else -1
| Solution |
python | huggingface__transformers | src/transformers/models/seed_oss/modeling_seed_oss.py | {
"start": 10352,
"end": 12146
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: SeedOssConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = SeedOssAttention(config=config, layer_idx=layer_idx)
self.mlp = SeedOssMLP(config)
self.input_layernorm = SeedOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = SeedOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| SeedOssDecoderLayer |
python | encode__httpx | tests/client/test_auth.py | {
"start": 829,
"end": 2474
} | class ____:
def __init__(
self,
algorithm: str = "SHA-256",
send_response_after_attempt: int = 1,
qop: str = "auth",
regenerate_nonce: bool = True,
) -> None:
self.algorithm = algorithm
self.send_response_after_attempt = send_response_after_attempt
self.qop = qop
self._regenerate_nonce = regenerate_nonce
self._response_count = 0
def __call__(self, request: httpx.Request) -> httpx.Response:
if self._response_count < self.send_response_after_attempt:
return self.challenge_send(request)
data = {"auth": request.headers.get("Authorization")}
return httpx.Response(200, json=data)
def challenge_send(self, request: httpx.Request) -> httpx.Response:
self._response_count += 1
nonce = (
hashlib.sha256(os.urandom(8)).hexdigest()
if self._regenerate_nonce
else "ee96edced2a0b43e4869e96ebe27563f369c1205a049d06419bb51d8aeddf3d3"
)
challenge_data = {
"nonce": nonce,
"qop": self.qop,
"opaque": (
"ee6378f3ee14ebfd2fff54b70a91a7c9390518047f242ab2271380db0e14bda1"
),
"algorithm": self.algorithm,
"stale": "FALSE",
}
challenge_str = ", ".join(
'{}="{}"'.format(key, value)
for key, value in challenge_data.items()
if value
)
headers = {
"www-authenticate": f'Digest realm="httpx@example.org", {challenge_str}',
}
return httpx.Response(401, headers=headers)
| DigestApp |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 16423,
"end": 16489
} | class ____:
embeddings: Embeddings
fetch: int
@dataclass
| KNN |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 22948,
"end": 23770
} | class ____(DagsterTypeLoader):
def __init__(self, inner_dagster_type):
self._inner_dagster_type = check.inst_param(
inner_dagster_type, "inner_dagster_type", DagsterType
)
check.param_invariant(inner_dagster_type.loader, "inner_dagster_type")
self._schema_type = Array(inner_dagster_type.loader.schema_type)
@property
def schema_type(self):
return self._schema_type
def construct_from_config_value(self, context, config_value):
convert_item = partial(
self._inner_dagster_type.loader.construct_from_config_value, context
)
return list(map(convert_item, config_value))
def _create_list_input_schema(inner_type):
if not inner_type.loader:
return None
return ListInputSchema(inner_type)
| ListInputSchema |
python | pyca__cryptography | tests/hazmat/primitives/test_rsa.py | {
"start": 57805,
"end": 59311
} | class ____:
def test_calculate_max_pss_salt_length(self):
with pytest.raises(TypeError):
padding.calculate_max_pss_salt_length(
object(), # type:ignore[arg-type]
hashes.SHA256(),
)
def test_invalid_salt_length_not_integer(self):
with pytest.raises(TypeError):
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=b"not_a_length", # type:ignore[arg-type]
)
def test_invalid_salt_length_negative_integer(self):
with pytest.raises(ValueError):
padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=-1)
def test_valid_pss_parameters(self):
algorithm = hashes.SHA256()
salt_length = algorithm.digest_size
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=salt_length)
assert pss._mgf == mgf
assert pss._salt_length == salt_length
def test_valid_pss_parameters_maximum(self):
algorithm = hashes.SHA256()
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=padding.PSS.MAX_LENGTH)
assert pss._mgf == mgf
assert pss._salt_length == padding.PSS.MAX_LENGTH
def test_mgf_property(self):
algorithm = hashes.SHA256()
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=padding.PSS.MAX_LENGTH)
assert pss.mgf == mgf
assert pss.mgf == pss._mgf
| TestPSS |
python | doocs__leetcode | solution/0700-0799/0755.Pour Water/Solution.py | {
"start": 0,
"end": 540
} | class ____:
def pourWater(self, heights: List[int], volume: int, k: int) -> List[int]:
for _ in range(volume):
for d in (-1, 1):
i = j = k
while 0 <= i + d < len(heights) and heights[i + d] <= heights[i]:
if heights[i + d] < heights[i]:
j = i + d
i += d
if j != k:
heights[j] += 1
break
else:
heights[k] += 1
return heights
| Solution |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 102310,
"end": 102731
} | class ____(Literal):
"""
An empty token, will always match.
"""
def __init__(self, match_string="", *, matchString="") -> None:
super().__init__("")
self._may_return_empty = True
self.mayIndexError = False
def _generateDefaultName(self) -> str:
return "Empty"
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
return loc, []
| Empty |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_index.py | {
"start": 16683,
"end": 17882
} | class ____(unittest.TestCase):
def test_indices_ivfpq(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nlist = 10
M = 4
nbits = 8
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xb_indices_base = np.arange(nb, dtype=np.int64)
# Force values to not be representable in int32
xb_indices = (xb_indices_base + 4294967296).astype('int64')
config = faiss.GpuIndexIVFPQConfig()
idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
faiss.METRIC_L2, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
# invalid k (should be > 0)
k = -5
idx.nprobe = 3
self.assertRaises(AssertionError, idx.search, xb[10:20], k)
# nprobe is unsigned now, so this is caught before reaching C++
# k = 5
# idx.nprobe = -3
# self.assertRaises(RuntimeError, idx.search, xb[10:20], k)
# valid params
k = 5
idx.nprobe = 3
_, I = idx.search(xb[10:20], k)
self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))
| TestInvalidParams |
python | huggingface__transformers | tests/models/stablelm/test_modeling_stablelm.py | {
"start": 1266,
"end": 1391
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = StableLmModelTester
@require_torch
| StableLmModelTest |
python | getsentry__sentry | src/sentry/incidents/models/alert_rule.py | {
"start": 1667,
"end": 1863
} | class ____(models.TextChoices):
STATIC = "static", gettext_lazy("Static")
PERCENT = "percent", gettext_lazy("Percent")
DYNAMIC = "dynamic", gettext_lazy("Dynamic")
| AlertRuleDetectionType |
python | astropy__astropy | astropy/coordinates/distances.py | {
"start": 432,
"end": 9287
} | class ____(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized by providing one of the following:
* Distance ``value`` (array or float) and a ``unit``
* |Quantity| object with dimensionality of length
* Redshift and (optionally) a `~astropy.cosmology.Cosmology`
* Distance modulus
* Parallax
Parameters
----------
value : scalar or `~astropy.units.Quantity` ['length']
The value of this distance.
unit : `~astropy.units.UnitBase` ['length']
The unit for this distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : `~astropy.cosmology.Cosmology` or None
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : angle-like
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
Whether to allow negative distances (which are possible in some
cosmologies). Default: `False`.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a length unit.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``cosmology`` is provided when ``z`` is *not* given.
If either none or more than one of ``value``, ``z``, ``distmod``,
or ``parallax`` were given.
Examples
--------
>>> from astropy import units as u
>>> from astropy.cosmology import WMAP5
>>> Distance(10, u.Mpc)
<Distance 10. Mpc>
>>> Distance(40*u.pc, unit=u.kpc)
<Distance 0.04 kpc>
>>> Distance(z=0.23) # doctest: +FLOAT_CMP
<Distance 1184.01657566 Mpc>
>>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP
<Distance 1147.78831918 Mpc>
>>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP
<Distance 783.42964277 kpc>
>>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP
<Distance 46.86035614 pc>
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(
cls,
value=None,
unit=None,
z=None,
cosmology=None,
distmod=None,
parallax=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
allow_negative=False,
):
n_not_none = sum(x is not None for x in [value, z, distmod, parallax])
if n_not_none == 0:
raise ValueError(
"none of `value`, `z`, `distmod`, or `parallax` "
"were given to Distance constructor"
)
if n_not_none > 1:
raise ValueError(
"more than one of `value`, `z`, `distmod`, or "
"`parallax` were given to Distance constructor"
)
if value is None:
# If something else but `value` was provided then a new array will
# be created anyways and there is no need to copy that.
copy = False
if z is not None:
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
elif cosmology is not None:
raise ValueError(
"a `cosmology` was given but `z` was not "
"provided in Distance constructor"
)
elif distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the output unit is not specified, convert `value`
# based on the mean of the log of the distance.
# Leaving `unit=None` is fine for the `super().__new__()` call below.
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
value <<= u.Mpc
elif meanlogval > 3:
value <<= u.kpc
elif meanlogval < -3: # ~200 AU
value <<= u.AU
elif parallax is not None:
parallax = u.Quantity(parallax, copy=COPY_IF_NEEDED, subok=True)
value = parallax.to(unit or u.pc, equivalencies=u.parallax())
if np.any(parallax < 0):
if not allow_negative:
raise ValueError(
"some parallaxes are negative, which are not "
"interpretable as distances. See the discussion in "
"this paper: https://arxiv.org/abs/1507.02105 . You "
"can convert negative parallaxes to NaN distances by "
"providing the `allow_negative=True` argument."
)
warnings.warn(
"negative parallaxes are converted to NaN distances even when"
" `allow_negative=True`, because negative parallaxes cannot be"
" transformed into distances. See the discussion in this paper:"
" https://arxiv.org/abs/1507.02105",
AstropyWarning,
)
allow_negative = True # No need to check twice.
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls,
value,
unit,
dtype=dtype,
copy=copy,
order=order,
subok=subok,
ndmin=ndmin,
)
if not allow_negative and np.any(distance.value < 0):
raise ValueError(
"distance must be >= 0. Use the argument "
"`allow_negative=True` to allow negative values."
)
return distance
@property
def z(self):
"""Short for ``self.compute_z()``."""
return self.compute_z()
def compute_z(self, cosmology=None, **atzkw):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` or None
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
z : `~astropy.units.Quantity`
The redshift of this distance given the provided ``cosmology``.
Warnings
--------
This method can be slow for large arrays.
The redshift is determined using :func:`astropy.cosmology.z_at_value`,
which handles vector inputs (e.g. an array of distances) by
element-wise calling of :func:`scipy.optimize.minimize_scalar`.
For faster results consider using an interpolation table;
:func:`astropy.cosmology.z_at_value` provides details.
See Also
--------
:func:`astropy.cosmology.z_at_value` : Find the redshift corresponding to a
:meth:`astropy.cosmology.FLRW.luminosity_distance`.
"""
from astropy.cosmology import z_at_value
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
atzkw.setdefault("ztol", 1.0e-10)
return z_at_value(cosmology.luminosity_distance, self, **atzkw)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`."""
val = 5.0 * np.log10(self.to_value(u.pc)) - 5.0
return u.Quantity(val, u.mag, copy=COPY_IF_NEEDED)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.0), u.pc, copy=COPY_IF_NEEDED)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object."""
return Angle(self.to(u.milliarcsecond, u.parallax()))
| Distance |
python | pytorch__pytorch | torch/backends/mkldnn/__init__.py | {
"start": 3464,
"end": 4324
} | class ____(PropModule):
def is_available(self):
return is_available()
enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
deterministic = ContextProp(
torch._C._get_mkldnn_deterministic, torch._C._set_mkldnn_deterministic
)
allow_tf32 = ContextProp(
torch._C._get_onednn_allow_tf32, torch._C._set_onednn_allow_tf32
)
matmul = _FP32Precision("mkldnn", "matmul")
conv = _FP32Precision("mkldnn", "conv")
rnn = _FP32Precision("mkldnn", "rnn")
fp32_precision = ContextProp(
_get_fp32_precision_getter("mkldnn", "all"),
_set_fp32_precision_setter("generic", "all"),
)
if TYPE_CHECKING:
enabled: ContextProp
deterministic: ContextProp
allow_tf32: ContextProp
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
| MkldnnModule |
python | encode__httpx | httpx/_urlparse.py | {
"start": 4871,
"end": 18546
} | class ____(typing.NamedTuple):
scheme: str
userinfo: str
host: str
port: int | None
path: str
query: str | None
fragment: str | None
@property
def authority(self) -> str:
return "".join(
[
f"{self.userinfo}@" if self.userinfo else "",
f"[{self.host}]" if ":" in self.host else self.host,
f":{self.port}" if self.port is not None else "",
]
)
@property
def netloc(self) -> str:
return "".join(
[
f"[{self.host}]" if ":" in self.host else self.host,
f":{self.port}" if self.port is not None else "",
]
)
def copy_with(self, **kwargs: str | None) -> ParseResult:
if not kwargs:
return self
defaults = {
"scheme": self.scheme,
"authority": self.authority,
"path": self.path,
"query": self.query,
"fragment": self.fragment,
}
defaults.update(kwargs)
return urlparse("", **defaults)
def __str__(self) -> str:
authority = self.authority
return "".join(
[
f"{self.scheme}:" if self.scheme else "",
f"//{authority}" if authority else "",
self.path,
f"?{self.query}" if self.query is not None else "",
f"#{self.fragment}" if self.fragment is not None else "",
]
)
def urlparse(url: str = "", **kwargs: str | None) -> ParseResult:
# Initial basic checks on allowable URLs.
# ---------------------------------------
# Hard limit the maximum allowable URL length.
if len(url) > MAX_URL_LENGTH:
raise InvalidURL("URL too long")
# If a URL includes any ASCII control characters including \t, \r, \n,
# then treat it as invalid.
if any(char.isascii() and not char.isprintable() for char in url):
char = next(char for char in url if char.isascii() and not char.isprintable())
idx = url.find(char)
error = (
f"Invalid non-printable ASCII character in URL, {char!r} at position {idx}."
)
raise InvalidURL(error)
# Some keyword arguments require special handling.
# ------------------------------------------------
# Coerce "port" to a string, if it is provided as an integer.
if "port" in kwargs:
port = kwargs["port"]
kwargs["port"] = str(port) if isinstance(port, int) else port
# Replace "netloc" with "host and "port".
if "netloc" in kwargs:
netloc = kwargs.pop("netloc") or ""
kwargs["host"], _, kwargs["port"] = netloc.partition(":")
# Replace "username" and/or "password" with "userinfo".
if "username" in kwargs or "password" in kwargs:
username = quote(kwargs.pop("username", "") or "", safe=USERNAME_SAFE)
password = quote(kwargs.pop("password", "") or "", safe=PASSWORD_SAFE)
kwargs["userinfo"] = f"{username}:{password}" if password else username
# Replace "raw_path" with "path" and "query".
if "raw_path" in kwargs:
raw_path = kwargs.pop("raw_path") or ""
kwargs["path"], seperator, kwargs["query"] = raw_path.partition("?")
if not seperator:
kwargs["query"] = None
# Ensure that IPv6 "host" addresses are always escaped with "[...]".
if "host" in kwargs:
host = kwargs.get("host") or ""
if ":" in host and not (host.startswith("[") and host.endswith("]")):
kwargs["host"] = f"[{host}]"
# If any keyword arguments are provided, ensure they are valid.
# -------------------------------------------------------------
for key, value in kwargs.items():
if value is not None:
if len(value) > MAX_URL_LENGTH:
raise InvalidURL(f"URL component '{key}' too long")
# If a component includes any ASCII control characters including \t, \r, \n,
# then treat it as invalid.
if any(char.isascii() and not char.isprintable() for char in value):
char = next(
char for char in value if char.isascii() and not char.isprintable()
)
idx = value.find(char)
error = (
f"Invalid non-printable ASCII character in URL {key} component, "
f"{char!r} at position {idx}."
)
raise InvalidURL(error)
# Ensure that keyword arguments match as a valid regex.
if not COMPONENT_REGEX[key].fullmatch(value):
raise InvalidURL(f"Invalid URL component '{key}'")
# The URL_REGEX will always match, but may have empty components.
url_match = URL_REGEX.match(url)
assert url_match is not None
url_dict = url_match.groupdict()
# * 'scheme', 'authority', and 'path' may be empty strings.
# * 'query' may be 'None', indicating no trailing "?" portion.
# Any string including the empty string, indicates a trailing "?".
# * 'fragment' may be 'None', indicating no trailing "#" portion.
# Any string including the empty string, indicates a trailing "#".
scheme = kwargs.get("scheme", url_dict["scheme"]) or ""
authority = kwargs.get("authority", url_dict["authority"]) or ""
path = kwargs.get("path", url_dict["path"]) or ""
query = kwargs.get("query", url_dict["query"])
frag = kwargs.get("fragment", url_dict["fragment"])
# The AUTHORITY_REGEX will always match, but may have empty components.
authority_match = AUTHORITY_REGEX.match(authority)
assert authority_match is not None
authority_dict = authority_match.groupdict()
# * 'userinfo' and 'host' may be empty strings.
# * 'port' may be 'None'.
userinfo = kwargs.get("userinfo", authority_dict["userinfo"]) or ""
host = kwargs.get("host", authority_dict["host"]) or ""
port = kwargs.get("port", authority_dict["port"])
# Normalize and validate each component.
# We end up with a parsed representation of the URL,
# with components that are plain ASCII bytestrings.
parsed_scheme: str = scheme.lower()
parsed_userinfo: str = quote(userinfo, safe=USERINFO_SAFE)
parsed_host: str = encode_host(host)
parsed_port: int | None = normalize_port(port, scheme)
has_scheme = parsed_scheme != ""
has_authority = (
parsed_userinfo != "" or parsed_host != "" or parsed_port is not None
)
validate_path(path, has_scheme=has_scheme, has_authority=has_authority)
if has_scheme or has_authority:
path = normalize_path(path)
parsed_path: str = quote(path, safe=PATH_SAFE)
parsed_query: str | None = None if query is None else quote(query, safe=QUERY_SAFE)
parsed_frag: str | None = None if frag is None else quote(frag, safe=FRAG_SAFE)
# The parsed ASCII bytestrings are our canonical form.
# All properties of the URL are derived from these.
return ParseResult(
parsed_scheme,
parsed_userinfo,
parsed_host,
parsed_port,
parsed_path,
parsed_query,
parsed_frag,
)
def encode_host(host: str) -> str:
if not host:
return ""
elif IPv4_STYLE_HOSTNAME.match(host):
# Validate IPv4 hostnames like #.#.#.#
#
# From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
#
# IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
try:
ipaddress.IPv4Address(host)
except ipaddress.AddressValueError:
raise InvalidURL(f"Invalid IPv4 address: {host!r}")
return host
elif IPv6_STYLE_HOSTNAME.match(host):
# Validate IPv6 hostnames like [...]
#
# From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
#
# "A host identified by an Internet Protocol literal address, version 6
# [RFC3513] or later, is distinguished by enclosing the IP literal
# within square brackets ("[" and "]"). This is the only place where
# square bracket characters are allowed in the URI syntax."
try:
ipaddress.IPv6Address(host[1:-1])
except ipaddress.AddressValueError:
raise InvalidURL(f"Invalid IPv6 address: {host!r}")
return host[1:-1]
elif host.isascii():
# Regular ASCII hostnames
#
# From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
#
# reg-name = *( unreserved / pct-encoded / sub-delims )
WHATWG_SAFE = '"`{}%|\\'
return quote(host.lower(), safe=SUB_DELIMS + WHATWG_SAFE)
# IDNA hostnames
try:
return idna.encode(host.lower()).decode("ascii")
except idna.IDNAError:
raise InvalidURL(f"Invalid IDNA hostname: {host!r}")
def normalize_port(port: str | int | None, scheme: str) -> int | None:
# From https://tools.ietf.org/html/rfc3986#section-3.2.3
#
# "A scheme may define a default port. For example, the "http" scheme
# defines a default port of "80", corresponding to its reserved TCP
# port number. The type of port designated by the port number (e.g.,
# TCP, UDP, SCTP) is defined by the URI scheme. URI producers and
# normalizers should omit the port component and its ":" delimiter if
# port is empty or if its value would be the same as that of the
# scheme's default."
if port is None or port == "":
return None
try:
port_as_int = int(port)
except ValueError:
raise InvalidURL(f"Invalid port: {port!r}")
# See https://url.spec.whatwg.org/#url-miscellaneous
default_port = {"ftp": 21, "http": 80, "https": 443, "ws": 80, "wss": 443}.get(
scheme
)
if port_as_int == default_port:
return None
return port_as_int
def validate_path(path: str, has_scheme: bool, has_authority: bool) -> None:
"""
Path validation rules that depend on if the URL contains
a scheme or authority component.
See https://datatracker.ietf.org/doc/html/rfc3986.html#section-3.3
"""
if has_authority:
# If a URI contains an authority component, then the path component
# must either be empty or begin with a slash ("/") character."
if path and not path.startswith("/"):
raise InvalidURL("For absolute URLs, path must be empty or begin with '/'")
if not has_scheme and not has_authority:
# If a URI does not contain an authority component, then the path cannot begin
# with two slash characters ("//").
if path.startswith("//"):
raise InvalidURL("Relative URLs cannot have a path starting with '//'")
# In addition, a URI reference (Section 4.1) may be a relative-path reference,
# in which case the first path segment cannot contain a colon (":") character.
if path.startswith(":"):
raise InvalidURL("Relative URLs cannot have a path starting with ':'")
def normalize_path(path: str) -> str:
"""
Drop "." and ".." segments from a URL path.
For example:
normalize_path("/path/./to/somewhere/..") == "/path/to"
"""
# Fast return when no '.' characters in the path.
if "." not in path:
return path
components = path.split("/")
# Fast return when no '.' or '..' components in the path.
if "." not in components and ".." not in components:
return path
# https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4
output: list[str] = []
for component in components:
if component == ".":
pass
elif component == "..":
if output and output != [""]:
output.pop()
else:
output.append(component)
return "/".join(output)
def PERCENT(string: str) -> str:
return "".join([f"%{byte:02X}" for byte in string.encode("utf-8")])
def percent_encoded(string: str, safe: str) -> str:
"""
Use percent-encoding to quote a string.
"""
NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe
# Fast path for strings that don't need escaping.
if not string.rstrip(NON_ESCAPED_CHARS):
return string
return "".join(
[char if char in NON_ESCAPED_CHARS else PERCENT(char) for char in string]
)
def quote(string: str, safe: str) -> str:
"""
Use percent-encoding to quote a string, omitting existing '%xx' escape sequences.
See: https://www.rfc-editor.org/rfc/rfc3986#section-2.1
* `string`: The string to be percent-escaped.
* `safe`: A string containing characters that may be treated as safe, and do not
need to be escaped. Unreserved characters are always treated as safe.
See: https://www.rfc-editor.org/rfc/rfc3986#section-2.3
"""
parts = []
current_position = 0
for match in re.finditer(PERCENT_ENCODED_REGEX, string):
start_position, end_position = match.start(), match.end()
matched_text = match.group(0)
# Add any text up to the '%xx' escape sequence.
if start_position != current_position:
leading_text = string[current_position:start_position]
parts.append(percent_encoded(leading_text, safe=safe))
# Add the '%xx' escape sequence.
parts.append(matched_text)
current_position = end_position
# Add any text after the final '%xx' escape sequence.
if current_position != len(string):
trailing_text = string[current_position:]
parts.append(percent_encoded(trailing_text, safe=safe))
return "".join(parts)
| ParseResult |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-accepted-invitations.py | {
"start": 4901,
"end": 5925
} | class ____(object):
def maximumInvitations(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def augment(grid, u, lookup, match):
for v in xrange(V):
if not get_grid(u, v) or v in lookup:
continue
lookup.add(v)
if v not in match or augment(grid, match[v], lookup, match):
match[v] = u # greedily match
return True
return False
def hungarian(grid):
match = {}
for i in xrange(U):
augment(grid, i, set(), match)
return len(match)
U, V = min(len(grid), len(grid[0])), max(len(grid), len(grid[0]))
get_grid = (lambda x, y: grid[x][y]) if len(grid) < len(grid[0]) else (lambda x, y: grid[y][x])
return hungarian(grid)
# Time: O(|V| * |E|) = O(min(m, n) * (m * n))
# Space: O(|E|) = O(m * n)
import collections
# Hungarian bipartite matching
| Solution2 |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1099532,
"end": 1100259
} | class ____(ValueChannelMixin, core.PositionValueDef):
"""
YValue schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
| YValue |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 1787,
"end": 2626
} | class ____(Predicate):
"""
Invertible matrix predicate.
Explanation
===========
``Q.invertible(x)`` is true iff ``x`` is an invertible matrix.
A square matrix is called invertible only if its determinant is 0.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.invertible(X*Y), Q.invertible(X))
False
>>> ask(Q.invertible(X*Z), Q.invertible(X) & Q.invertible(Z))
True
>>> ask(Q.invertible(X), Q.fullrank(X) & Q.square(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Invertible_matrix
"""
name = 'invertible'
handler = Dispatcher("InvertibleHandler", doc="Handler for Q.invertible.")
| InvertiblePredicate |
python | fabric__fabric | tests/config.py | {
"start": 324,
"end": 6992
} | class ____:
def defaults_to_merger_of_global_defaults(self):
# I.e. our global_defaults + Invoke's global_defaults
c = Config()
# From invoke's global_defaults
assert c.run.warn is False
# From ours
assert c.port == 22
def our_global_defaults_can_override_invokes(self):
"our global_defaults can override Invoke's key-by-key"
with patch.object(
Config,
"global_defaults",
return_value={
"run": {"warn": "nope lol"},
# NOTE: Config requires these to be present to instantiate
# happily
"load_ssh_configs": True,
"ssh_config_path": None,
},
):
# If our global_defaults didn't win, this would still
# resolve to False.
assert Config().run.warn == "nope lol"
def has_various_Fabric_specific_default_keys(self):
c = Config()
assert c.port == 22
assert c.user == get_local_user()
assert c.forward_agent is False
assert c.connect_kwargs == {}
assert c.timeouts.connect is None
assert c.ssh_config_path is None
assert c.inline_ssh_env is True
def overrides_some_Invoke_defaults(self):
config = Config()
assert config.tasks.collection_name == "fabfile"
def amends_Invoke_runners_map(self):
config = Config()
assert config.runners == dict(
remote=Remote, remote_shell=RemoteShell, local=Local
)
def uses_Fabric_prefix(self):
# NOTE: see also the integration-esque tests in tests/main.py; this
# just tests the underlying data/attribute driving the behavior.
assert Config().prefix == "fabric"
class from_v1:
def setup(self):
self.env = faux_v1_env()
def _conf(self, **kwargs):
self.env.update(kwargs)
return Config.from_v1(self.env)
def must_be_given_explicit_env_arg(self):
config = Config.from_v1(
env=Lexicon(self.env, sudo_password="sikrit")
)
assert config.sudo.password == "sikrit"
class additional_kwargs:
def forwards_arbitrary_kwargs_to_init(self):
config = Config.from_v1(
self.env,
# Vanilla Invoke
overrides={"some": "value"},
# Fabric
system_ssh_path="/what/ever",
)
assert config.some == "value"
assert config._system_ssh_path == "/what/ever"
def subservient_to_runtime_overrides(self):
env = self.env
env.sudo_password = "from-v1"
config = Config.from_v1(
env, overrides={"sudo": {"password": "runtime"}}
)
assert config.sudo.password == "runtime"
def connect_kwargs_also_merged_with_imported_values(self):
self.env["key_filename"] = "whatever"
conf = Config.from_v1(
self.env, overrides={"connect_kwargs": {"meh": "effort"}}
)
assert conf.connect_kwargs["key_filename"] == "whatever"
assert conf.connect_kwargs["meh"] == "effort"
class var_mappings:
def always_use_pty(self):
# Testing both due to v1-didn't-use-None-default issues
config = self._conf(always_use_pty=True)
assert config.run.pty is True
config = self._conf(always_use_pty=False)
assert config.run.pty is False
def forward_agent(self):
config = self._conf(forward_agent=True)
assert config.forward_agent is True
def gateway(self):
config = self._conf(gateway="bastion.host")
assert config.gateway == "bastion.host"
class key_filename:
def base(self):
config = self._conf(key_filename="/some/path")
assert (
config.connect_kwargs["key_filename"] == "/some/path"
)
def is_not_set_if_None(self):
config = self._conf(key_filename=None)
assert "key_filename" not in config.connect_kwargs
def no_agent(self):
config = self._conf()
assert config.connect_kwargs.allow_agent is True
config = self._conf(no_agent=True)
assert config.connect_kwargs.allow_agent is False
class password:
def set_just_to_connect_kwargs_if_sudo_password_set(self):
# NOTE: default faux env has sudo_password set already...
config = self._conf(password="screaming-firehawks")
passwd = config.connect_kwargs.password
assert passwd == "screaming-firehawks"
def set_to_both_password_fields_if_necessary(self):
config = self._conf(password="sikrit", sudo_password=None)
assert config.connect_kwargs.password == "sikrit"
assert config.sudo.password == "sikrit"
def ssh_config_path(self):
self.env.ssh_config_path = "/where/ever"
config = Config.from_v1(self.env, lazy=True)
assert config.ssh_config_path == "/where/ever"
def sudo_password(self):
config = self._conf(sudo_password="sikrit")
assert config.sudo.password == "sikrit"
def sudo_prompt(self):
config = self._conf(sudo_prompt="password???")
assert config.sudo.prompt == "password???"
def timeout(self):
config = self._conf(timeout=15)
assert config.timeouts.connect == 15
def use_ssh_config(self):
# Testing both due to v1-didn't-use-None-default issues
config = self._conf(use_ssh_config=True)
assert config.load_ssh_configs is True
config = self._conf(use_ssh_config=False)
assert config.load_ssh_configs is False
def warn_only(self):
# Testing both due to v1-didn't-use-None-default issues
config = self._conf(warn_only=True)
assert config.run.warn is True
config = self._conf(warn_only=False)
assert config.run.warn is False
| Config_ |
python | apache__airflow | airflow-core/src/airflow/cli/commands/task_command.py | {
"start": 11217,
"end": 19429
} | class ____(Protocol):
def post_mortem(self) -> None: ...
def set_trace(self) -> None: ...
SUPPORTED_DEBUGGER_MODULES = [
"pudb",
"web_pdb",
"pdbr",
"ipdb",
"pdb",
]
def _guess_debugger() -> _SupportedDebugger:
"""
Try to guess the debugger used by the user.
When it doesn't find any user-installed debugger, returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `pdbr <https://github.com/cansarigol/pdbr>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
exc: Exception
for mod_name in SUPPORTED_DEBUGGER_MODULES:
try:
return cast("_SupportedDebugger", importlib.import_module(mod_name))
except ImportError as e:
exc = e
raise exc
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
@providers_configuration_loaded
@provide_session
def task_states_for_dag_run(args, session: Session = NEW_SESSION) -> None:
"""Get the status of all task instances in a DagRun."""
dag_run, _ = fetch_dag_run_from_run_id_or_logical_date_string(
dag_id=args.dag_id,
value=args.logical_date_or_run_id,
session=session,
)
if dag_run is None:
raise DagRunNotFound(
f"DagRun for {args.dag_id} with run_id or logical_date of {args.logical_date_or_run_id!r} "
"not found"
)
has_mapped_instances = any(ti.map_index >= 0 for ti in dag_run.task_instances)
def format_task_instance(ti: TaskInstance) -> dict[str, str]:
data = {
"dag_id": ti.dag_id,
"logical_date": dag_run.logical_date.isoformat() if dag_run.logical_date else "",
"task_id": ti.task_id,
"state": ti.state or "",
"start_date": ti.start_date.isoformat() if ti.start_date else "",
"end_date": ti.end_date.isoformat() if ti.end_date else "",
}
if has_mapped_instances:
data["map_index"] = str(ti.map_index) if ti.map_index is not None and ti.map_index >= 0 else ""
return data
AirflowConsole().print_as(data=dag_run.task_instances, output=args.output, mapper=format_task_instance)
@cli_utils.action_cli(check_db=False)
def task_test(args, dag: DAG | None = None) -> None:
"""Test task for a given dag_id."""
# We want to log output from operators etc to show up here. Normally
# airflow.task would redirect to a file, but here we want it to propagate
# up to the normal airflow handler.
from airflow.sdk._shared.secrets_masker import SecretsMasker
SecretsMasker.enable_log_masking()
handlers = logging.getLogger("airflow.task").handlers
already_has_stream_handler = False
for handler in handlers:
already_has_stream_handler = isinstance(handler, logging.StreamHandler)
if already_has_stream_handler:
break
if not already_has_stream_handler:
logging.getLogger("airflow.task").propagate = True
env_vars = {"AIRFLOW_TEST_MODE": "True"}
if args.env_vars:
env_vars.update(args.env_vars)
os.environ.update(env_vars)
if dag:
sdk_dag = dag
scheduler_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
else:
sdk_dag = get_bagged_dag(args.bundle_name, args.dag_id)
scheduler_dag = get_db_dag(args.bundle_name, args.dag_id)
sdk_task = sdk_dag.get_task(args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
sdk_task.params.update(passed_in_params)
if sdk_task.params and isinstance(sdk_task.params, ParamsDict):
sdk_task.params.validate()
ti, dr_created = _get_ti(
scheduler_dag.get_task(args.task_id),
args.map_index,
logical_date_or_run_id=args.logical_date_or_run_id,
create_if_necessary="db",
)
try:
# TODO: move bulk of this logic into the SDK: http://github.com/apache/airflow/issues/54658
from airflow.sdk._shared.secrets_masker import RedactedIO
with redirect_stdout(RedactedIO()):
_run_task(ti=ti, task=sdk_task, run_triggerer=True)
if ti.state == State.FAILED and args.post_mortem:
debugger = _guess_debugger()
debugger.set_trace()
finally:
if not already_has_stream_handler:
# Make sure to reset back to normal. When run for CLI this doesn't
# matter, but it does for test suite
logging.getLogger("airflow.task").propagate = False
if dr_created:
with create_session() as session:
session.delete(ti.dag_run)
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
@providers_configuration_loaded
def task_render(args, dag: DAG | None = None) -> None:
"""Render and displays templated fields for a given task."""
if not dag:
dag = get_bagged_dag(args.bundle_name, args.dag_id)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
ti, _ = _get_ti(
serialized_dag.get_task(task_id=args.task_id),
args.map_index,
logical_date_or_run_id=args.logical_date_or_run_id,
create_if_necessary="memory",
)
with create_session() as session:
context = ti.get_template_context(session=session)
task = dag.get_task(args.task_id)
# TODO (GH-52141): After sdk separation, ti.get_template_context() would
# contain serialized operators, but we need the real operators for
# rendering. This does not make sense and eventually we should rewrite
# this entire function so "ti" is a RuntimeTaskInstance instead, but for
# now we'll just manually fix it to contain the right objects.
context["task"] = context["ti"].task = task
task.render_template_fields(context)
for attr in context["task"].template_fields:
print(
textwrap.dedent(
f"""\
# ----------------------------------------------------------
# property: {attr}
# ----------------------------------------------------------
"""
)
+ str(getattr(context["task"], attr)) # This shouldn't be dedented.
)
@cli_utils.action_cli(check_db=False)
@providers_configuration_loaded
def task_clear(args) -> None:
"""Clear all task instances or only those matched by regex for a DAG(s)."""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
if args.dag_id and not args.bundle_name and not args.dag_regex and not args.task_regex:
dags = [get_dag_by_file_location(args.dag_id)]
else:
# todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
# Reading from_db because clear method still not implemented in Task SDK DAG
dags = get_dags(args.bundle_name, args.dag_id, use_regex=args.dag_regex, from_db=True)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.partial_subset(
task_ids=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
if not args.yes:
tis = SerializedDAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
dry_run=True,
)
if not tis:
return
if not ask_yesno(f"You are about to delete these {len(tis)} tasks:\n{tis}\n\nAre you sure? [y/n]"):
print("Cancelled, nothing was cleared.")
return
SerializedDAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
)
| _SupportedDebugger |
python | getsentry__sentry | src/sentry/analytics/events/sentryapp_issue_webhooks.py | {
"start": 247,
"end": 359
} | class ____(SentryAppIssueEvent):
pass
@analytics.eventclass("sentry_app.issue.created")
| SentryAppIssueAssigned |
python | realpython__materials | python-311/scientists.py | {
"start": 802,
"end": 1241
} | class ____(NamedTuple):
name: str
life_span: tuple
def dict_to_person(info):
"""Convert a dictionary to a Person object"""
return Person(
name=f"{info['name']['first']} {info['name']['last']}",
life_span=(info["birth"]["year"], info["death"]["year"]),
)
def convert_pair(first, second):
"""Convert two dictionaries to Person objects"""
return dict_to_person(first), dict_to_person(second)
| Person |
python | getsentry__sentry | src/sentry/rules/conditions/base.py | {
"start": 280,
"end": 464
} | class ____(TypedDict):
# the ID in the rules registry that maps to a condition class
# e.g. "sentry.rules.conditions.every_event.EveryEventCondition"
id: str
| GenericCondition |
python | davidhalter__jedi | jedi/api/classes.py | {
"start": 26551,
"end": 27437
} | class ____(Name):
"""
These signatures are returned by :meth:`BaseName.get_signatures`
calls.
"""
def __init__(self, inference_state, signature):
super().__init__(inference_state, signature.name)
self._signature = signature
@property
def params(self):
"""
Returns definitions for all parameters that a signature defines.
This includes stuff like ``*args`` and ``**kwargs``.
:rtype: list of :class:`.ParamName`
"""
return [ParamName(self._inference_state, n)
for n in self._signature.get_param_names(resolve_stars=True)]
def to_string(self):
"""
Returns a text representation of the signature. This could for example
look like ``foo(bar, baz: int, **kwargs)``.
:rtype: str
"""
return self._signature.to_string()
| BaseSignature |
python | pytorch__pytorch | test/distributed/checkpoint/test_pg_transport.py | {
"start": 21319,
"end": 22702
} | class ____(TestCase):
def setUp(self):
self.device = torch.device("cpu")
self.pg = MagicMock()
self.timeout = timedelta(seconds=10)
# Mock Work object
self.mock_work = MagicMock()
self.mock_work.wait = MagicMock()
# Setup process group mock to return mock_work
self.pg.send = MagicMock(return_value=self.mock_work)
self.pg.recv = MagicMock(return_value=self.mock_work)
@unittest.skipIf(not HAS_ACCELERATOR, "No accelerator")
def test_send_checkpoint_with_cpu_tensors(self):
"""Test send_checkpoint with CPU tensors when device is accelerator."""
device = torch.device(f"{device_type}:0")
# Create a state dict with CPU tensors
state_dict = {
"cpu_tensor1": torch.randn(2, 3),
"cpu_tensor2": torch.randn(3, 4),
}
# Create transport with accelerator device
transport = PGTransport(self.pg, self.timeout, device)
# Call send_checkpoint
transport.send_checkpoint([1], state_dict)
# Check that send was called
self.assertGreaterEqual(
self.pg.send.call_count, 4
) # len_t, buf_t, and 2 tensors
# Check that wait was called
self.assertGreaterEqual(self.mock_work.wait.call_count, 4)
if __name__ == "__main__":
run_tests()
| TestPGTransportEdgeCases |
python | realpython__materials | oop-in-java-vs-python/car.py | {
"start": 108,
"end": 334
} | class ____:
"""The Vehicle class is the parent for all vehicles."""
def __init__(self, color, model):
"""Define the color and model of our vehicle"""
self.color = color
self.model = model
| Vehicle |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/UIGraphicsItem.py | {
"start": 104,
"end": 4376
} | class ____(GraphicsObject):
"""
Base class for graphics items with boundaries relative to a GraphicsView or ViewBox.
The purpose of this class is to allow the creation of GraphicsItems which live inside
a scalable view, but whose boundaries will always stay fixed relative to the view's boundaries.
For example: GridItem, InfiniteLine
The view can be specified on initialization or it can be automatically detected when the item is painted.
NOTE: Only the item's boundingRect is affected; the item is not transformed in any way. Use viewRangeChanged
to respond to changes in the view.
"""
#sigViewChanged = QtCore.Signal(object) ## emitted whenever the viewport coords have changed
def __init__(self, bounds=None, parent=None):
"""
============== =============================================================================
**Arguments:**
bounds QRectF with coordinates relative to view box. The default is QRectF(0,0,1,1),
which means the item will have the same bounds as the view.
============== =============================================================================
"""
GraphicsObject.__init__(self, parent)
self.setFlag(self.GraphicsItemFlag.ItemSendsScenePositionChanges)
if bounds is None:
self._bounds = QtCore.QRectF(0, 0, 1, 1)
else:
self._bounds = bounds
self._boundingRect = None
self._updateView()
def paint(self, *args):
## check for a new view object every time we paint.
#self.updateView()
pass
def itemChange(self, change, value):
ret = GraphicsObject.itemChange(self, change, value)
if change == self.GraphicsItemChange.ItemScenePositionHasChanged:
self.setNewBounds()
return ret
#def updateView(self):
### called to see whether this item has a new view to connect to
### check for this item's current viewbox or view widget
#view = self.getViewBox()
#if view is None:
##print " no view"
#return
#if self._connectedView is not None and view is self._connectedView():
##print " already have view", view
#return
### disconnect from previous view
#if self._connectedView is not None:
#cv = self._connectedView()
#if cv is not None:
##print "disconnect:", self
#cv.sigRangeChanged.disconnect(self.viewRangeChanged)
### connect to new view
##print "connect:", self
#view.sigRangeChanged.connect(self.viewRangeChanged)
#self._connectedView = weakref.ref(view)
#self.setNewBounds()
def boundingRect(self):
if self._boundingRect is None:
br = self.viewRect()
if br is None:
return QtCore.QRectF()
else:
self._boundingRect = br
return QtCore.QRectF(self._boundingRect)
def dataBounds(self, axis, frac=1.0, orthoRange=None):
"""Called by ViewBox for determining the auto-range bounds.
By default, UIGraphicsItems are excluded from autoRange."""
return None
@QtCore.Slot()
def viewRangeChanged(self):
"""Called when the view widget/viewbox is resized/rescaled"""
self.setNewBounds()
self.update()
def setNewBounds(self):
"""Update the item's bounding rect to match the viewport"""
self._boundingRect = None ## invalidate bounding rect, regenerate later if needed.
self.prepareGeometryChange()
def setPos(self, *args):
GraphicsObject.setPos(self, *args)
self.setNewBounds()
def mouseShape(self):
"""Return the shape of this item after expanding by 2 pixels"""
shape = self.shape()
ds = self.mapToDevice(shape)
stroker = QtGui.QPainterPathStroker()
stroker.setWidh(2)
ds2 = stroker.createStroke(ds).united(ds)
return self.mapFromDevice(ds2)
| UIGraphicsItem |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 91944,
"end": 92161
} | class ____(_PrintableStructure):
_fields_ = [
('schedulerPolicy', c_uint),
('enableARRMode', c_uint),
('schedulerParams', c_nvmlVgpuSchedulerSetParams_t),
]
| c_nvmlVgpuSchedulerSetState_t |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 83580,
"end": 83748
} | class ____(ExecutionControlCommandBase):
"Execute until function returns to a caller."
invoke = dont_suppress_errors(ExecutionControlCommandBase.finish)
| PyFinish |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 20773,
"end": 23410
} | class ____(BaseGroup):
"""
Feature: The .get method allows access to objects and metadata
"""
def test_get_default(self):
""" Object is returned, or default if it doesn't exist """
name = make_name()
default = object()
out = self.f.get('mongoose', default)
self.assertIs(out, default)
grp = self.f.create_group(name)
out = self.f.get(name.encode('utf8'))
self.assertEqual(out, grp)
def test_get_class(self):
""" Object class is returned with getclass option """
foo = make_name("foo")
bar = make_name("bar")
baz = make_name("baz")
self.f.create_group(foo)
out = self.f.get(foo, getclass=True)
self.assertEqual(out, Group)
self.f.create_dataset(bar, (4,))
out = self.f.get(bar, getclass=True)
self.assertEqual(out, Dataset)
self.f[baz] = np.dtype('|S10')
out = self.f.get(baz, getclass=True)
self.assertEqual(out, Datatype)
def test_get_link_class(self):
""" Get link classes """
hard = make_name("hard")
soft = make_name("soft")
external = make_name("external")
default = object()
sl = SoftLink('/mongoose')
el = ExternalLink('somewhere.hdf5', 'mongoose')
self.f.create_group(hard)
self.f[soft] = sl
self.f[external] = el
out_hl = self.f.get(hard, default, getlink=True, getclass=True)
out_sl = self.f.get(soft, default, getlink=True, getclass=True)
out_el = self.f.get(external, default, getlink=True, getclass=True)
self.assertEqual(out_hl, HardLink)
self.assertEqual(out_sl, SoftLink)
self.assertEqual(out_el, ExternalLink)
def test_get_link(self):
""" Get link values """
hard = make_name("hard")
soft = make_name("soft")
external = make_name("external")
sl = SoftLink('/mongoose')
el = ExternalLink('somewhere.hdf5', 'mongoose')
self.f.create_group(hard)
self.f[soft] = sl
self.f[external] = el
out_hl = self.f.get(hard, getlink=True)
out_sl = self.f.get(soft, getlink=True)
out_el = self.f.get(external, getlink=True)
#TODO: redo with SoftLink/ExternalLink built-in equality
self.assertIsInstance(out_hl, HardLink)
self.assertIsInstance(out_sl, SoftLink)
self.assertEqual(out_sl._path, sl._path)
self.assertIsInstance(out_el, ExternalLink)
self.assertEqual(out_el._path, el._path)
self.assertEqual(out_el._filename, el._filename)
| TestGet |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/2D_car/DDPG.py | {
"start": 1359,
"end": 4017
} | class ____(object):
def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
self.sess = sess
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.contrib.layers.xavier_initializer()
init_b = tf.constant_initializer(0.001)
net = tf.layers.dense(s, 100, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1',
trainable=trainable)
net = tf.layers.dense(net, 20, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2',
trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
def learn(self, s): # batch update
self.sess.run(self.train_op, feed_dict={S: s})
if self.t_replace_counter % self.t_replace_iter == 0:
self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
self.t_replace_counter += 1
def choose_action(self, s):
s = s[np.newaxis, :] # single state
return self.sess.run(self.a, feed_dict={S: s})[0] # single action
def add_grad_to_graph(self, a_grads):
with tf.variable_scope('policy_grads'):
self.policy_grads = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)
with tf.variable_scope('A_train'):
opt = tf.train.RMSPropOptimizer(-self.lr) # (- learning rate) for ascent policy
self.train_op = opt.apply_gradients(zip(self.policy_grads, self.e_params))
| Actor |
python | lxml__lxml | src/lxml/html/_difflib.py | {
"start": 29497,
"end": 69233
} | class ____:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; the underlying SequenceMatcher class has
an adaptive notion of "noise" lines that's better than any static
definition the author has ever been able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writelines()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
yield from g
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
yield from g
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# Don't synch up unless the lines have a similarity score above
# cutoff. Previously only the smallest pair was handled here,
# and if there are many pairs with the best ratio, recursion
# could grow very deep, and runtime cubic. See:
# https://github.com/python/cpython/issues/119105
#
# Later, more pathological cases prompted removing recursion
# entirely.
cutoff = 0.74999
cruncher = SequenceMatcher(self.charjunk)
crqr = cruncher.real_quick_ratio
cqr = cruncher.quick_ratio
cr = cruncher.ratio
WINDOW = 10
best_i = best_j = None
dump_i, dump_j = alo, blo # smallest indices not yet resolved
for j in range(blo, bhi):
cruncher.set_seq2(b[j])
# Search the corresponding i's within WINDOW for rhe highest
# ratio greater than `cutoff`.
aequiv = alo + (j - blo)
arange = range(max(aequiv - WINDOW, dump_i),
min(aequiv + WINDOW + 1, ahi))
if not arange: # likely exit if `a` is shorter than `b`
break
best_ratio = cutoff
for i in arange:
cruncher.set_seq1(a[i])
# Ordering by cheapest to most expensive ratio is very
# valuable, most often getting out early.
if (crqr() > best_ratio
and cqr() > best_ratio
and cr() > best_ratio):
best_i, best_j, best_ratio = i, j, cr()
if best_i is None:
# found nothing to synch on yet - move to next j
continue
# pump out straight replace from before this synch pair
yield from self._fancy_helper(a, dump_i, best_i,
b, dump_j, best_j)
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if aelt != belt:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
else:
# the synch pair is identical
yield ' ' + aelt
dump_i, dump_j = best_i + 1, best_j + 1
best_i = best_j = None
# pump out straight replace from after the last synch pair
yield from self._fancy_helper(a, dump_i, ahi,
b, dump_j, bhi)
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._plain_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
yield from g
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
atags = _keep_original_ws(aline, atags).rstrip()
btags = _keep_original_ws(bline, btags).rstrip()
yield "- " + aline
if atags:
yield f"? {atags}\n"
yield "+ " + bline
if btags:
yield f"? {btags}\n"
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
def IS_LINE_JUNK(line, pat=None):
r"""
Return True for ignorable line: if `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
if pat is None:
# Default: match '#' or the empty string
return line.strip() in '#'
# Previous versions used the undocumented parameter 'pat' as a
# match function. Retain this behaviour for compatibility.
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return True for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def _check_types(a, b, *args):
# Checking types is weird, but the alternative is garbled output when
# someone passes mixed bytes and str to {unified,context}_diff(). E.g.
# without this check, passing filenames as bytes results in output like
# --- b'oldfile.txt'
# +++ b'newfile.txt'
# because of how str.format() incorporates bytes objects.
if a and not isinstance(a[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(a[0]).__name__, a[0]))
if b and not isinstance(b[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(b[0]).__name__, b[0]))
if isinstance(a, str):
raise TypeError('input must be a sequence of strings, not %s' %
type(a).__name__)
if isinstance(b, str):
raise TypeError('input must be a sequence of strings, not %s' %
type(b).__name__)
for arg in args:
if not isinstance(arg, str):
raise TypeError('all arguments must be str, not: %r' % (arg,))
def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
r"""
Compare `a` and `b`, two sequences of lines represented as bytes rather
than str. This is a wrapper for `dfunc`, which is typically either
unified_diff() or context_diff(). Inputs are losslessly converted to
strings so that `dfunc` only has to worry about strings, and encoded
back to bytes on return. This is necessary to compare files with
unknown or inconsistent encoding. All other inputs (except `n`) must be
bytes rather than str.
"""
def decode(s):
try:
return s.decode('ascii', 'surrogateescape')
except AttributeError as err:
msg = ('all arguments must be bytes, not %s (%r)' %
(type(s).__name__, s))
raise TypeError(msg) from err
a = list(map(decode, a))
b = list(map(decode, b))
fromfile = decode(fromfile)
tofile = decode(tofile)
fromfiledate = decode(fromfiledate)
tofiledate = decode(tofiledate)
lineterm = decode(lineterm)
lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm)
for line in lines:
yield line.encode('ascii', 'surrogateescape')
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions, or can be None:
- linejunk: A function that should accept a single string argument and
return true iff the string is junk. The default is None, and is
recommended; the underlying SequenceMatcher class has an adaptive
notion of "noise" lines.
- charjunk: A function that accepts a character (string of length
1), and returns true iff the character is junk. The default is
the module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: it's a bad idea to
include newline in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
side -- indice into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in reversed(sub_info):
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
# marks with what the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
lines.append(next(diff_lines_iterator, 'X'))
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
return
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching pair from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
try:
from_line, to_line, found_diff = next(line_iterator)
except StopIteration:
return
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
yield from line_pair_iterator
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
try:
from_line, to_line, found_diff = next(line_pair_iterator)
except StopIteration:
return
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
try:
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
except StopIteration:
# Catch exception from next() and return normally
return
_file_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="%(charset)s">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Diff comparison</title>
<style>%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
:root {color-scheme: light dark}
table.diff {
font-family: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace;
border: medium;
}
.diff_header {
background-color: #e0e0e0;
font-weight: bold;
}
td.diff_header {
text-align: right;
padding: 0 8px;
}
.diff_next {
background-color: #c0c0c0;
padding: 4px 0;
}
.diff_add {background-color:palegreen}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
table.diff[summary="Legends"] {
margin-top: 20px;
border: 1px solid #ccc;
}
table.diff[summary="Legends"] th {
background-color: #e0e0e0;
padding: 4px 8px;
}
table.diff[summary="Legends"] td {
padding: 4px 8px;
}
@media (prefers-color-scheme: dark) {
.diff_header {background-color:#666}
.diff_next {background-color:#393939}
.diff_add {background-color:darkgreen}
.diff_chg {background-color:#847415}
.diff_sub {background-color:darkred}
table.diff[summary="Legends"] {border-color:#555}
table.diff[summary="Legends"] th{background-color:#666}
}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
| Differ |
python | django__django | django/utils/translation/__init__.py | {
"start": 5654,
"end": 8878
} | class ____(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_language(locale):
"""Turn a locale name (en_US) into a language name (en-us)."""
p = locale.find("_")
if p >= 0:
return locale[:p].lower() + "-" + locale[p + 1 :].lower()
else:
return locale.lower()
def to_locale(language):
"""Turn a language name (en-us) into a locale name (en_US)."""
lang, _, country = language.lower().partition("-")
if not country:
return language[:3].lower() + language[3:]
# A language with > 2 characters after the dash only has its first
# character after the dash capitalized; e.g. sr-latn becomes sr_Latn.
# A language with 2 characters after the dash has both characters
# capitalized; e.g. en-us becomes en_US.
country, _, tail = country.partition("-")
country = country.title() if len(country) > 2 else country.upper()
if tail:
country += "-" + tail
return lang + "_" + country
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def get_supported_language_variant(lang_code, *, strict=False):
return _trans.get_supported_language_variant(lang_code, strict)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if "fallback" in lang_info and "name" not in lang_info:
info = get_language_info(lang_info["fallback"][0])
else:
info = lang_info
except KeyError:
if "-" not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split("-")[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError(
"Unknown language code %s and %s." % (lang_code, generic_lang_code)
)
if info:
info["name_translated"] = gettext_lazy(info["name"])
return info
trim_whitespace_re = _lazy_re_compile(r"\s*\n\s*")
def trim_whitespace(s):
return trim_whitespace_re.sub(" ", s.strip())
def round_away_from_one(value):
return int(Decimal(value - 1).quantize(Decimal("0"), rounding=ROUND_UP)) + 1
| override |
python | Pylons__pyramid | tests/test_urldispatch.py | {
"start": 18419,
"end": 24979
} | class ____(unittest.TestCase):
def matches(self, pattern, path, expected):
from pyramid.urldispatch import _compile_route
matcher = _compile_route(pattern)[0]
result = matcher(path)
self.assertEqual(result, expected)
def generates(self, pattern, dict, result):
from pyramid.urldispatch import _compile_route
self.assertEqual(_compile_route(pattern)[1](dict), result)
def test_matcher_functional_notdynamic(self):
self.matches('/', '', None)
self.matches('', '', None)
self.matches('/', '/foo', None)
self.matches('/foo/', '/foo', None)
self.matches('', '/', {})
self.matches('/', '/', {})
def test_matcher_functional_newstyle(self):
self.matches('/{x}', '', None)
self.matches('/{x}', '/', None)
self.matches('/abc/{def}', '/abc/', None)
self.matches('/{x}', '/a', {'x': 'a'})
self.matches('zzz/{x}', '/zzz/abc', {'x': 'abc'})
self.matches(
'zzz/{x}*traverse', '/zzz/abc', {'x': 'abc', 'traverse': ()}
)
self.matches(
'zzz/{x}*traverse',
'/zzz/abc/def/g',
{'x': 'abc', 'traverse': ('def', 'g')},
)
self.matches('*traverse', '/zzz/abc', {'traverse': ('zzz', 'abc')})
self.matches('*traverse', '/zzz/ abc', {'traverse': ('zzz', ' abc')})
# '/La%20Pe%C3%B1a'
self.matches(
'{x}',
text_(b'/La Pe\xc3\xb1a', 'utf-8'),
{'x': text_(b'La Pe\xc3\xb1a', 'utf-8')},
)
# '/La%20Pe%C3%B1a/x'
self.matches(
'*traverse',
text_(b'/La Pe\xc3\xb1a/x'),
{'traverse': (text_(b'La Pe\xc3\xb1a'), 'x')},
)
self.matches('/foo/{id}.html', '/foo/bar.html', {'id': 'bar'})
self.matches(
'/{num:[0-9]+}/*traverse',
'/555/abc/def',
{'num': '555', 'traverse': ('abc', 'def')},
)
self.matches(
'/{num:[0-9]*}/*traverse',
'/555/abc/def',
{'num': '555', 'traverse': ('abc', 'def')},
)
self.matches('zzz/{_}', '/zzz/abc', {'_': 'abc'})
self.matches('zzz/{_abc}', '/zzz/abc', {'_abc': 'abc'})
self.matches('zzz/{abc_def}', '/zzz/abc', {'abc_def': 'abc'})
def test_matcher_functional_oldstyle(self):
self.matches('/:x', '', None)
self.matches('/:x', '/', None)
self.matches('/abc/:def', '/abc/', None)
self.matches('/:x', '/a', {'x': 'a'})
self.matches('zzz/:x', '/zzz/abc', {'x': 'abc'})
self.matches(
'zzz/:x*traverse', '/zzz/abc', {'x': 'abc', 'traverse': ()}
)
self.matches(
'zzz/:x*traverse',
'/zzz/abc/def/g',
{'x': 'abc', 'traverse': ('def', 'g')},
)
self.matches('*traverse', '/zzz/abc', {'traverse': ('zzz', 'abc')})
self.matches('*traverse', '/zzz/ abc', {'traverse': ('zzz', ' abc')})
# '/La%20Pe%C3%B1a'
# pattern, path, expected
self.matches(
':x',
text_(b'/La Pe\xc3\xb1a', 'utf-8'),
{'x': text_(b'La Pe\xc3\xb1a', 'utf-8')},
)
# '/La%20Pe%C3%B1a/x'
self.matches(
'*traverse',
text_(b'/La Pe\xc3\xb1a/x', 'utf-8'),
{'traverse': (text_(b'La Pe\xc3\xb1a', 'utf-8'), 'x')},
)
self.matches('/foo/:id.html', '/foo/bar.html', {'id': 'bar'})
self.matches('/foo/:id_html', '/foo/bar_html', {'id_html': 'bar_html'})
self.matches('zzz/:_', '/zzz/abc', {'_': 'abc'})
self.matches('zzz/:_abc', '/zzz/abc', {'_abc': 'abc'})
self.matches('zzz/:abc_def', '/zzz/abc', {'abc_def': 'abc'})
def test_generator_functional_notdynamic(self):
self.generates('', {}, '/')
self.generates('/', {}, '/')
def test_generator_functional_newstyle(self):
self.generates('/{x}', {'x': ''}, '/')
self.generates('/{x}', {'x': 'a'}, '/a')
self.generates('/{x}', {'x': 'a/b/c'}, '/a/b/c')
self.generates('/{x}', {'x': ':@&+$,'}, '/:@&+$,')
self.generates('zzz/{x}', {'x': 'abc'}, '/zzz/abc')
self.generates(
'zzz/{x}*traverse', {'x': 'abc', 'traverse': ''}, '/zzz/abc'
)
self.generates(
'zzz/{x}*traverse',
{'x': 'abc', 'traverse': '/def/g'},
'/zzz/abc/def/g',
)
self.generates(
'zzz/{x}*traverse',
{'x': ':@&+$,', 'traverse': '/:@&+$,'},
'/zzz/:@&+$,/:@&+$,',
)
self.generates(
'/{x}',
{'x': text_(b'/La Pe\xc3\xb1a', 'utf-8')},
'//La%20Pe%C3%B1a',
)
self.generates(
'/{x}*y',
{'x': text_(b'/La Pe\xc3\xb1a', 'utf-8'), 'y': '/rest/of/path'},
'//La%20Pe%C3%B1a/rest/of/path',
)
self.generates(
'*traverse',
{'traverse': ('a', text_(b'La Pe\xf1a'))},
'/a/La%20Pe%C3%B1a',
)
self.generates('/foo/{id}.html', {'id': 'bar'}, '/foo/bar.html')
self.generates('/foo/{_}', {'_': '20'}, '/foo/20')
self.generates('/foo/{_abc}', {'_abc': '20'}, '/foo/20')
self.generates('/foo/{abc_def}', {'abc_def': '20'}, '/foo/20')
def test_generator_functional_oldstyle(self):
self.generates('/:x', {'x': ''}, '/')
self.generates('/:x', {'x': 'a'}, '/a')
self.generates('zzz/:x', {'x': 'abc'}, '/zzz/abc')
self.generates(
'zzz/:x*traverse', {'x': 'abc', 'traverse': ''}, '/zzz/abc'
)
self.generates(
'zzz/:x*traverse',
{'x': 'abc', 'traverse': '/def/g'},
'/zzz/abc/def/g',
)
self.generates(
'/:x',
{'x': text_(b'/La Pe\xc3\xb1a', 'utf-8')},
'//La%20Pe%C3%B1a',
)
self.generates(
'/:x*y',
{'x': text_(b'/La Pe\xc3\xb1a', 'utf-8'), 'y': '/rest/of/path'},
'//La%20Pe%C3%B1a/rest/of/path',
)
self.generates(
'*traverse',
{'traverse': ('a', text_(b'La Pe\xf1a'))},
'/a/La%20Pe%C3%B1a',
)
self.generates('/foo/:id.html', {'id': 'bar'}, '/foo/bar.html')
self.generates('/foo/:_', {'_': '20'}, '/foo/20')
self.generates('/foo/:_abc', {'_abc': '20'}, '/foo/20')
self.generates('/foo/:abc_def', {'abc_def': '20'}, '/foo/20')
| TestCompileRouteFunctional |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 41835,
"end": 42989
} | class ____(FlowRunAction):
"""Resumes a paused or suspended flow run associated with the trigger"""
type: Literal["resume-flow-run"] = "resume-flow-run"
async def act(self, triggered_action: "TriggeredAction") -> None:
flow_run_id = await self.flow_run(triggered_action)
self._resulting_related_resources.append(
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
"prefect.resource.role": "target",
}
)
)
logger.debug(
"Resuming flow run",
extra={
"flow_run_id": str(flow_run_id),
**self.logging_context(triggered_action),
},
)
async with await self.orchestration_client(triggered_action) as orchestration:
result = await orchestration.resume_flow_run(flow_run_id)
if not isinstance(result.details, StateAcceptDetails):
raise ActionFailed(
f"Failed to resume flow run: {result.details.reason}"
)
| ResumeFlowRun |
python | kamyu104__LeetCode-Solutions | Python/sort-an-array.py | {
"start": 841,
"end": 2344
} | class ____(object):
def sortArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def nth_element(nums, left, n, right, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target):
i = left
while i <= right:
if compare(nums[i], target):
nums[i], nums[left] = nums[left], nums[i]
left += 1
i += 1
elif compare(target, nums[i]):
nums[i], nums[right] = nums[right], nums[i]
right -= 1
else:
i += 1
return left, right
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx])
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def quickSort(left, right, nums):
if left > right:
return
mid = left + (right-left)//2
nth_element(nums, left, mid, right)
quickSort(left, mid-1, nums)
quickSort(mid+1, right, nums)
quickSort(0, len(nums)-1, nums)
return nums
| Solution2 |
python | eventlet__eventlet | eventlet/corolocal.py | {
"start": 349,
"end": 1382
} | class ____:
__slots__ = '_local__args', '_local__greens'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
return self
def _patch(thrl):
greens = object.__getattribute__(thrl, '_local__greens')
# until we can store the localdict on greenlets themselves,
# we store it in _local__greens on the local object
cur = greenthread.getcurrent()
if cur not in greens:
# must be the first time we've seen this greenlet, call __init__
greens[cur] = {}
cls = type(thrl)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(thrl, '_local__args')
thrl.__init__(*args, **kw)
object.__setattr__(thrl, '__dict__', greens[cur])
| _localbase |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 117709,
"end": 118124
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
cluster_id: Optional[str] = Field(
None, description="Unique identifier for the cluster."
)
library_statuses: Optional[List[LibraryFullStatus]] = Field(
None, description="Status of all libraries on the cluster."
)
| ClusterLibraryStatuses |
python | aio-libs__aiohttp | aiohttp/web_protocol.py | {
"start": 1950,
"end": 2092
} | class ____(Exception):
"""Payload was accessed after response was sent."""
_PAYLOAD_ACCESS_ERROR = PayloadAccessError()
| PayloadAccessError |
python | django__django | tests/test_client_regress/tests.py | {
"start": 55744,
"end": 56840
} | class ____(SimpleTestCase):
"""Regression tests for #15929."""
# These tests are checking that certain middleware don't change certain
# global state. Alternatively, from the point of view of a test, they are
# ensuring test isolation behavior. So, unusually, it doesn't make sense to
# run the tests individually, and if any are failing it is confusing to run
# them with any other set of tests.
def common_test_that_should_always_pass(self):
request = RequestFactory().get("/")
request.session = {}
self.assertFalse(hasattr(request, "user"))
def test_request(self):
self.common_test_that_should_always_pass()
def test_request_after_client(self):
# apart from the next line the three tests are identical
self.client.get("/")
self.common_test_that_should_always_pass()
def test_request_after_client_2(self):
# This test is executed after the previous one
self.common_test_that_should_always_pass()
@override_settings(ROOT_URLCONF="test_client_regress.urls")
| RequestFactoryStateTest |
python | google__pytype | pytype/datatypes_test.py | {
"start": 9829,
"end": 11013
} | class ____(unittest.TestCase):
"""Test parser wrapper."""
def test_group(self):
parser = argparse.ArgumentParser()
wrapper = datatypes.ParserWrapper(parser)
wrapper.add_argument("--foo", dest="foo")
group = wrapper.add_argument_group("test1")
group.add_argument("--bar", dest="bar")
subgroup = wrapper.add_argument_group("test2")
subgroup.add_argument("--baz", dest="baz")
self.assertSetEqual(set(wrapper.actions), {"foo", "bar", "baz"})
def test_only(self):
parser = argparse.ArgumentParser()
wrapper = datatypes.ParserWrapper(parser)
with wrapper.add_only({"--foo", "--bar", "--baz", "--unused"}):
wrapper.add_argument("--foo", dest="foo")
wrapper.add_argument("--quux", dest="quux")
group = wrapper.add_argument_group("test1")
group.add_argument("-b", "--bar", dest="bar")
group.add_argument("--hello", dest="hello")
subgroup = group.add_argument_group("test2")
subgroup.add_argument("--baz", dest="baz")
subgroup.add_argument("--world", dest="world")
self.assertSetEqual(set(wrapper.actions), {"foo", "bar", "baz"})
if __name__ == "__main__":
unittest.main()
| ParserWrapperTest |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_new_york_state_zip.py | {
"start": 1797,
"end": 4184
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid New York state zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_new_york_state_zip": ["14652", "14701", "14711", "13739"],
"invalid_new_york_state_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_new_york_state_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_new_york_state_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_new_york_state_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidNewYorkStateZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidNewYorkStateZip |
python | pytorch__pytorch | torch/_inductor/remote_cache.py | {
"start": 9954,
"end": 11046
} | class ____(RemoteCache[JsonDataTy]):
def __init__(self, cache_id: str) -> None:
# Special test handling: If we're just going to override the backend
# anyway don't require redis
if self.__class__.backend_override_cls:
# This is totally bogus but it works for now...
backend = typing.cast(RemoteCacheBackend[bytes], None)
else:
backend = RedisRemoteCacheBackend(cache_id)
serde = RemoteCacheJsonSerde()
super().__init__(backend, serde)
version = 1 # consistency between various types of keys
self._key_fmt = f"pt2:{cache_id}::{{key}}:c{version}"
def _get_key(self, key: str) -> str:
return self._key_fmt.format(key=key)
@override
def _get(self, key: str, sample: Optional[Sample]) -> Optional[JsonDataTy]:
key = self._get_key(key)
return super()._get(key, sample)
@override
def _put(self, key: str, value: JsonDataTy, sample: Optional[Sample]) -> None:
key = self._get_key(key)
super()._put(key, value, sample)
| RedisRemoteCache |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 9072,
"end": 9516
} | class ____(ValueWrapper):
"""``Generator.__next__`` ``dict.values`` methods and so on."""
api_type = 'function'
def __init__(self, value, method, builtin_func):
super().__init__(builtin_func)
self._value = value
self._method = method
def py__call__(self, arguments):
# TODO add TypeError if params are given/or not correct.
return self._method(self._value, arguments)
| _BuiltinMappedMethod |
python | coleifer__peewee | tests/schema.py | {
"start": 1353,
"end": 1793
} | class ____(TestModel):
name = TextField(unique=True)
timestamp = TimestampField()
status = IntegerField()
flags = IntegerField()
Article.add_index(Article.timestamp.desc(), Article.status)
idx = (Article
.index(Article.name, Article.timestamp, Article.flags.bin_and(4))
.where(Article.status == 1))
Article.add_index(idx)
Article.add_index(SQL('CREATE INDEX "article_foo" ON "article" ("flags" & 3)'))
| Article |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/autoVariance4.py | {
"start": 855,
"end": 1085
} | class ____[T](Parent_Contravariant[T]):
pass
c1: ShouldBeContravariant[int] = ShouldBeContravariant[float]()
# This should generate an error.
c2: ShouldBeContravariant[float] = ShouldBeContravariant[int]()
| ShouldBeContravariant |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 153911,
"end": 156261
} | class ____:
def test_describe(self):
assert self.locale.describe("now", only_distance=True) == "nå nettopp"
assert self.locale.describe("now", only_distance=False) == "nå nettopp"
def test_plurals(self):
assert self.locale._format_timeframe("now", 0) == "nå nettopp"
assert self.locale._format_timeframe("second", 1) == "ett sekund"
assert self.locale._format_timeframe("seconds", 30) == "30 sekunder"
assert self.locale._format_timeframe("minute", 1) == "ett minutt"
assert self.locale._format_timeframe("minutes", 40) == "40 minutter"
assert self.locale._format_timeframe("hour", 1) == "en time"
assert self.locale._format_timeframe("hours", 23) == "23 timer"
assert self.locale._format_timeframe("day", 1) == "en dag"
assert self.locale._format_timeframe("days", 12) == "12 dager"
assert self.locale._format_timeframe("week", 1) == "en uke"
assert self.locale._format_timeframe("weeks", 38) == "38 uker"
assert self.locale._format_timeframe("month", 1) == "en måned"
assert self.locale._format_timeframe("months", 11) == "11 måneder"
assert self.locale._format_timeframe("year", 1) == "ett år"
assert self.locale._format_timeframe("years", 12) == "12 år"
def test_ordinal_number(self):
assert self.locale.ordinal_number(0) == "0."
assert self.locale.ordinal_number(1) == "1."
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 timer"
assert self.locale._format_timeframe("hour", 0) == "en time"
def test_format_relative_now(self):
result = self.locale._format_relative("nå nettopp", "now", 0)
assert result == "nå nettopp"
def test_format_relative_past(self):
result = self.locale._format_relative("en time", "hour", 1)
assert result == "om en time"
def test_format_relative_future(self):
result = self.locale._format_relative("en time", "hour", -1)
assert result == "for en time siden"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "lørdag"
assert self.locale.day_abbreviation(dt.isoweekday()) == "lø"
@pytest.mark.usefixtures("lang_locale")
| TestNorwegianLocale |
python | django__django | tests/introspection/models.py | {
"start": 3830,
"end": 4085
} | class ____(models.Model):
fk_db_set_default = models.ForeignKey(
Country, on_delete=models.DB_SET_DEFAULT, db_default=models.Value(1)
)
class Meta:
required_db_features = {"supports_on_delete_db_default"}
| DbOnDeleteSetDefaultModel |
python | PrefectHQ__prefect | tests/cli/test_deploy.py | {
"start": 121528,
"end": 147139
} | class ____:
@pytest.mark.usefixtures("project_dir")
async def test_deploy_all(self, prefect_client: PrefectClient, work_pool: WorkPool):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
"An important name/test-name-2",
],
expected_output_does_not_contain=[
"You have passed options to the deploy command, but you are"
" creating or updating multiple deployments. These options"
" will be ignored."
],
)
# Check if deployments were created correctly
deployment1 = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment2 = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment1.name == "test-name-1"
assert deployment1.work_pool_name == work_pool.name
assert deployment2.name == "test-name-2"
assert deployment2.work_pool_name == work_pool.name
@pytest.mark.usefixtures("project_dir")
async def test_deploy_all_schedules_remain_inactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"schedule": {"interval": 60.0, "active": True},
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"schedule": {"interval": 60.0, "active": False},
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
"An important name/test-name-2",
],
expected_output_does_not_contain=[
"You have passed options to the deploy command, but you are"
" creating or updating multiple deployments. These options"
" will be ignored."
],
)
deployment1 = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment2 = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment1.name == "test-name-1"
assert deployment1.schedules[0].active is True
assert deployment2.name == "test-name-2"
assert deployment2.schedules[0].active is False
async def test_deploy_selected_deployments(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"enforce_parameter_schema": True,
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-3",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy only two deployments by name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --name test-name-1 --name test-name-2",
expected_code=0,
expected_output_contains=[
(
"Deployment 'An important name/test-name-1' successfully created"
" with id"
),
(
"Deployment 'An important name/test-name-2' successfully created"
" with id"
),
],
expected_output_does_not_contain=[
(
"Deployment 'An important name/test-name-3' successfully created"
" with id"
),
(
"You have passed options to the deploy command, but you are"
" creating or updating multiple deployments. These options"
" will be ignored."
),
],
)
# Check if the two deployments were created correctly
deployment1 = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment2 = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment1.name == "test-name-1"
assert deployment1.work_pool_name == work_pool.name
assert deployment1.enforce_parameter_schema is True
assert deployment2.name == "test-name-2"
assert deployment2.work_pool_name == work_pool.name
assert deployment2.enforce_parameter_schema
# Check if the third deployment was not created
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-3"
)
async def test_deploy_single_with_cron_schedule(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy a single deployment with a cron schedule
cron_schedule = "0 * * * *"
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy --name test-name-1 --cron '{cron_schedule}'",
expected_code=0,
expected_output_contains=[
(
"Deployment 'An important name/test-name-1' successfully created"
" with id"
),
],
)
# Check if the deployment was created correctly
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
assert deployment.name == "test-name-1"
assert deployment.work_pool_name == work_pool.name
assert len(deployment.schedules) == 1
assert deployment.schedules[0].schedule == CronSchedule(cron="0 * * * *")
# Check if the second deployment was not created
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
@pytest.mark.parametrize(
"deployment_selector_options", ["--all", "-n test-name-1 -n test-name-2"]
)
async def test_deploy_multiple_with_cli_options(
self,
project_dir: Path,
prefect_client: PrefectClient,
work_pool: WorkPool,
deployment_selector_options: str,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy multiple deployments with CLI options
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy {deployment_selector_options} --cron '0 * * * *'",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
"An important name/test-name-2",
(
"You have passed options to the deploy command, but you are"
" creating or updating multiple deployments. These options will be"
" ignored."
),
],
)
# Check if deployments were created correctly and without the provided CLI options
deployment1 = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment2 = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment1.name == "test-name-1"
assert deployment1.work_pool_name == work_pool.name
assert len(deployment1.schedules) == 0
assert deployment2.name == "test-name-2"
assert deployment2.work_pool_name == work_pool.name
assert len(deployment2.schedules) == 0
async def test_deploy_with_cli_option_name(
self, project_dir, prefect_client, work_pool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy --name from-cli-name --pool"
f" {work_pool.name} ./flows/hello.py:my_flow"
),
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/from-cli-name' successfully created"
" with id"
],
)
# Check name from deployment.yaml was not used
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/from-cli-name"
)
deployment.name = "from-cli-name"
@pytest.mark.usefixtures("project_dir")
async def test_deploy_without_name_in_prefect_yaml(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
{
"entrypoint": "./flows/hello.py:my_flow",
# Missing name
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
expected_output_contains=["Discovered unnamed deployment. Skipping..."],
)
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_without_name_in_prefect_yaml_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
{
"entrypoint": "./flows/hello.py:my_flow",
# Missing name
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
user_input=(
# accept naming deployment
readchar.key.ENTER
# enter deployment name
+ "test-name-2"
+ readchar.key.ENTER
# decline remote storage
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=["Discovered unnamed deployment."],
)
assert await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_without_name_in_prefect_yaml_interactive_user_skips(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
# Create multiple deployments
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
{
"entrypoint": "./flows/hello.py:my_flow",
# Missing name
"work_pool": {"name": work_pool.name},
"schedule": {"interval": 3600},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
user_input=(
# decline remote storage
"n"
+ readchar.key.ENTER
# reject saving configuration
+ "n"
+ readchar.key.ENTER
# reject naming deployment
+ "n"
+ readchar.key.ENTER
),
expected_output_contains=[
"Discovered unnamed deployment.",
"Would you like to give this deployment a name and deploy it?",
"Skipping unnamed deployment.",
],
)
assert len(await prefect_client.read_deployments()) == 1
async def test_deploy_with_name_not_in_prefect_yaml(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
},
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-2",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Attempt to deploy all
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy -n test-name-2 -n test-name-3",
expected_code=0,
expected_output_contains=[
(
"The following deployment(s) could not be found and will not be"
" deployed: test-name-3"
),
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-2"
)
assert deployment.name == "test-name-2"
assert deployment.work_pool_name == work_pool.name
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-3"
)
async def test_deploy_with_single_deployment_with_name_in_file(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name-1",
"work_pool": {"name": work_pool.name},
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy -n test-name-1",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
],
)
# Check if the deployment was created correctly
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
assert deployment.name == "test-name-1"
assert deployment.work_pool_name == work_pool.name
async def test_deploy_errors_with_empty_deployments_list_and_no_cli_options(
self, project_dir
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = []
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy",
expected_code=1,
expected_output_contains=[
"An entrypoint must be provided:",
],
)
async def test_deploy_single_allows_options_override(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name -p"
f" {work_pool.name} --version 1.0.0 -jv env=prod -t foo-bar"
),
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created with id"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.name == "test-name"
assert deployment.work_pool_name == work_pool.name
assert deployment.version == "1.0.0"
assert deployment.tags == ["foo-bar"]
assert deployment.job_variables == {"env": "prod"}
async def test_deploy_single_deployment_with_name_in_cli(
self, project_dir: Path, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
{
"name": "test-name-2",
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy -n test-name-1",
expected_code=0,
expected_output_contains=[
"An important name/test-name-1",
],
)
# Check if the deployment was created correctly
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
assert deployment.name == "test-name-1"
assert deployment.work_pool_name == work_pool.name
@pytest.mark.parametrize(
"deploy_names",
[
("my-flow/test-name-1", "test-name-3"),
("my-flow/test-name-1", "my-flow/test-name-3"),
("test-name-1", "my-flow/test-name-3"),
("test-name-1", "test-name-3"),
],
)
async def test_deploy_existing_deployment_and_nonexistent_deployment_deploys_former(
self,
deploy_names: tuple[str, str],
project_dir: Path,
prefect_client: PrefectClient,
work_pool: WorkPool,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
{
"name": "test-name-2",
"entrypoint": "./flows/hello.py:my_flow",
"work_pool": {"name": work_pool.name},
},
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
# Deploy the deployment with a name
deploy_command = f"deploy -n '{deploy_names[0]}' -n '{deploy_names[1]}'"
await run_sync_in_worker_thread(
invoke_and_assert,
command=deploy_command,
expected_code=0,
expected_output_contains=[
(
"The following deployment(s) could not be found and will not be"
f" deployed: {deploy_names[1].split('/')[-1]}"
),
"An important name/test-name-1",
],
expected_output_does_not_contain=[
"An important name/test-name-3",
],
)
# Check if the deployment was created correctly
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name-1"
)
assert deployment.name == "test-name-1"
assert deployment.work_pool_name == work_pool.name
with pytest.raises(ObjectNotFound):
await prefect_client.read_deployment_by_name(
"An important name/test-name-3"
)
| TestMultiDeploy |
python | scipy__scipy | scipy/linalg/tests/test_decomp_update.py | {
"start": 47635,
"end": 47698
} | class ____(BaseQRinsert):
dtype = np.dtype('d')
| TestQRinsert_d |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 3889,
"end": 4747
} | class ____(Exception):
def __init__(
self,
key: Key,
state: TaskStateState,
story: list[tuple],
):
self.key = key
self.state = state
self.story = story
def __reduce__(self) -> tuple[Callable, tuple]:
return type(self), (self.key, self.state, self.story)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}: {self.key!r} :: {self.state}"
+ "\n"
+ " Story:\n "
+ "\n ".join(map(str, self.story))
)
__str__ = __repr__
def to_event(self) -> tuple[str, dict[str, Any]]:
return (
"invalid-worker-task-state",
{
"key": self.key,
"state": self.state,
"story": self.story,
},
)
| InvalidTaskState |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 4245,
"end": 7686
} | class ____(MutableSet):
"""Provides access to an element's class attribute as a set-like collection.
Usage::
>>> el = fromstring('<p class="hidden large">Text</p>')
>>> classes = el.classes # or: classes = Classes(el.attrib)
>>> classes |= ['block', 'paragraph']
>>> el.get('class')
'hidden large block paragraph'
>>> classes.toggle('hidden')
False
>>> el.get('class')
'large block paragraph'
>>> classes -= ('some', 'classes', 'block')
>>> el.get('class')
'large paragraph'
"""
def __init__(self, attributes):
self._attributes = attributes
self._get_class_value = partial(attributes.get, 'class', '')
def add(self, value):
"""
Add a class.
This has no effect if the class is already present.
"""
if not value or re.search(r'\s', value):
raise ValueError("Invalid class name: %r" % value)
classes = self._get_class_value().split()
if value in classes:
return
classes.append(value)
self._attributes['class'] = ' '.join(classes)
def discard(self, value):
"""
Remove a class if it is currently present.
If the class is not present, do nothing.
"""
if not value or re.search(r'\s', value):
raise ValueError("Invalid class name: %r" % value)
classes = [name for name in self._get_class_value().split()
if name != value]
if classes:
self._attributes['class'] = ' '.join(classes)
elif 'class' in self._attributes:
del self._attributes['class']
def remove(self, value):
"""
Remove a class; it must currently be present.
If the class is not present, raise a KeyError.
"""
if not value or re.search(r'\s', value):
raise ValueError("Invalid class name: %r" % value)
super().remove(value)
def __contains__(self, name):
classes = self._get_class_value()
return name in classes and name in classes.split()
def __iter__(self):
return iter(self._get_class_value().split())
def __len__(self):
return len(self._get_class_value().split())
# non-standard methods
def update(self, values):
"""
Add all names from 'values'.
"""
classes = self._get_class_value().split()
extended = False
for value in values:
if value not in classes:
classes.append(value)
extended = True
if extended:
self._attributes['class'] = ' '.join(classes)
def toggle(self, value):
"""
Add a class name if it isn't there yet, or remove it if it exists.
Returns true if the class was added (and is now enabled) and
false if it was removed (and is now disabled).
"""
if not value or re.search(r'\s', value):
raise ValueError("Invalid class name: %r" % value)
classes = self._get_class_value().split()
try:
classes.remove(value)
enabled = False
except ValueError:
classes.append(value)
enabled = True
if classes:
self._attributes['class'] = ' '.join(classes)
else:
del self._attributes['class']
return enabled
| Classes |
python | django__django | tests/i18n/tests.py | {
"start": 86662,
"end": 88326
} | class ____(SimpleTestCase):
"""
A language non present in default Django languages can still be
installed/used by a Django project.
"""
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
("xxx", "Somelanguage"),
],
LANGUAGE_CODE="xxx",
LOCALE_PATHS=[os.path.join(here, "commands", "locale")],
)
def test_non_django_language(self):
self.assertEqual(get_language(), "xxx")
self.assertEqual(gettext("year"), "reay")
@override_settings(USE_I18N=True)
def test_check_for_language(self):
with tempfile.TemporaryDirectory() as app_dir:
os.makedirs(os.path.join(app_dir, "locale", "dummy_Lang", "LC_MESSAGES"))
open(
os.path.join(
app_dir, "locale", "dummy_Lang", "LC_MESSAGES", "django.mo"
),
"w",
).close()
app_config = AppConfig("dummy_app", AppModuleStub(__path__=[app_dir]))
with mock.patch(
"django.apps.apps.get_app_configs", return_value=[app_config]
):
self.assertIs(check_for_language("dummy-lang"), True)
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
# xyz language has no locale files
("xyz", "XYZ"),
],
)
@translation.override("xyz")
def test_plural_non_django_language(self):
self.assertEqual(get_language(), "xyz")
self.assertEqual(ngettext("year", "years", 2), "years")
@override_settings(USE_I18N=True)
| NonDjangoLanguageTests |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-very-strict_ini/metaclass_args.py | {
"start": 292,
"end": 549
} | class ____(BaseModel, validate_by_name=True):
i: int = Field(alias='j')
MetaclassArgumentsNoDefault(i=None)
# MYPY: error: Argument "i" to "MetaclassArgumentsNoDefault" has incompatible type "None"; expected "int" [arg-type]
| MetaclassArgumentsNoDefault |
python | getsentry__sentry | tests/apidocs/endpoints/releases/test_project_release_commits.py | {
"start": 230,
"end": 1609
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
project = self.create_project(name="foo")
release = self.create_release(project=project, version="1")
release.add_project(project)
repo = self.create_repo(project=project, name=project.name)
commit = Commit.objects.create(
organization_id=project.organization_id, repository_id=repo.id, key="a" * 40
)
commit2 = Commit.objects.create(
organization_id=project.organization_id, repository_id=repo.id, key="b" * 40
)
ReleaseCommit.objects.create(
organization_id=project.organization_id, release=release, commit=commit, order=1
)
ReleaseCommit.objects.create(
organization_id=project.organization_id, release=release, commit=commit2, order=0
)
self.url = reverse(
"sentry-api-0-project-release-commits",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| ProjectReleaseCommitsListDocsTest |
python | getsentry__sentry | src/sentry/sentry_apps/utils/webhooks.py | {
"start": 791,
"end": 1130
} | class ____(SentryAppActionType):
ROOT_CAUSE_STARTED = "root_cause_started"
ROOT_CAUSE_COMPLETED = "root_cause_completed"
SOLUTION_STARTED = "solution_started"
SOLUTION_COMPLETED = "solution_completed"
CODING_STARTED = "coding_started"
CODING_COMPLETED = "coding_completed"
PR_CREATED = "pr_created"
| SeerActionType |
python | gevent__gevent | src/greentest/3.14/test_smtplib.py | {
"start": 59143,
"end": 59854
} | class ____(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does those, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
| SimSMTPAUTHInitialResponseChannel |
python | getsentry__sentry | src/sentry/uptime/models.py | {
"start": 1329,
"end": 4439
} | class ____(BaseRemoteSubscription, DefaultFieldsModelExisting):
# TODO: This should be included in export/import, but right now it has no relation to
# any projects/orgs. Will fix this in a later pr
__relocation_scope__ = RelocationScope.Excluded
class SupportedHTTPMethods(models.TextChoices):
GET = "GET", "GET"
POST = "POST", "POST"
HEAD = "HEAD", "HEAD"
PUT = "PUT", "PUT"
DELETE = "DELETE", "DELETE"
PATCH = "PATCH", "PATCH"
OPTIONS = "OPTIONS", "OPTIONS"
class IntervalSeconds(models.IntegerChoices):
ONE_MINUTE = 60, "1 minute"
FIVE_MINUTES = 300, "5 minutes"
TEN_MINUTES = 600, "10 minutes"
TWENTY_MINUTES = 1200, "20 minutes"
THIRTY_MINUTES = 1800, "30 minutes"
ONE_HOUR = 3600, "1 hour"
# The url to check
url = models.CharField(max_length=255)
# The domain of the url, extracted via TLDExtract
url_domain = models.CharField(max_length=255, default="", db_default="")
# The suffix of the url, extracted via TLDExtract. This can be a public
# suffix, such as com, gov.uk, com.au, or a private suffix, such as vercel.dev
url_domain_suffix = models.CharField(max_length=255, default="", db_default="")
# A unique identifier for the provider hosting the domain
host_provider_id = models.CharField(max_length=255, db_index=True, null=True)
# The name of the provider hosting this domain
host_provider_name = models.CharField(max_length=255, db_index=True, null=True)
# How frequently to run the check in seconds
interval_seconds: models.IntegerField[IntervalSecondsLiteral, IntervalSecondsLiteral] = (
models.IntegerField(choices=IntervalSeconds)
)
# How long to wait for a response from the url before we assume a timeout
timeout_ms = models.IntegerField()
# HTTP method to perform the check with
method: models.CharField[SupportedHTTPMethodsLiteral, SupportedHTTPMethodsLiteral] = (
models.CharField(max_length=20, choices=SupportedHTTPMethods, db_default="GET")
)
# TODO(mdtro): This field can potentially contain sensitive data, encrypt when field available
# HTTP headers to send when performing the check
headers = models.JSONField(db_default=[])
# HTTP body to send when performing the check
# TODO(mdtro): This field can potentially contain sensitive data, encrypt when field available
body = models.TextField(null=True)
# How to sample traces for this monitor. Note that we always send a trace_id, so any errors will
# be associated, this just controls the span sampling.
trace_sampling = models.BooleanField(default=False, db_default=False)
objects: ClassVar[BaseManager[Self]] = BaseManager(
cache_fields=["pk", "subscription_id"],
cache_ttl=int(timedelta(hours=1).total_seconds()),
)
class Meta:
app_label = "uptime"
db_table = "uptime_uptimesubscription"
indexes = [
models.Index(fields=("url_domain_suffix", "url_domain")),
]
@region_silo_model
| UptimeSubscription |
python | pennersr__django-allauth | allauth/socialaccount/providers/ynab/views.py | {
"start": 181,
"end": 1047
} | class ____(OAuth2Adapter):
provider_id = "ynab"
access_token_url = "https://app.youneedabudget.com/oauth/token" # nosec
authorize_url = "https://app.youneedabudget.com/oauth/authorize"
profile_url = "https://api.youneedabudget.com/v1/user"
def complete_login(self, request, app, token, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(
self.profile_url,
headers={"Authorization": "Bearer {}".format(token.token)},
)
)
resp.raise_for_status()
extra_data = resp.json()
login = self.get_provider().sociallogin_from_response(request, extra_data)
return login
oauth2_login = OAuth2LoginView.adapter_view(YNABOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(YNABOAuth2Adapter)
| YNABOAuth2Adapter |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_match_json_schema.py | {
"start": 526,
"end": 2494
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.match_json_schema"
condition_value_keys = ("json_schema",)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, json_schema, **kwargs):
def matches_json_schema(val):
try:
val_json = json.loads(val)
jsonschema.validate(val_json, json_schema)
# jsonschema.validate raises an error if validation fails.
# So if we make it this far, we know that the validation succeeded.
return True
except jsonschema.ValidationError:
return False
except jsonschema.SchemaError:
raise
except:
raise
return column.map(matches_json_schema)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, json_schema, **kwargs):
# This step insures that Spark UDF defined can be pickled; otherwise, pickle serialization exceptions may occur. # noqa: E501 # FIXME CoP
json_schema = convert_to_json_serializable(data=json_schema)
def matches_json_schema(val):
if val is None:
return False
try:
val_json = json.loads(val)
jsonschema.validate(instance=val_json, schema=json_schema)
# jsonschema.validate raises an error if validation fails.
# So if we make it this far, we know that the validation succeeded.
return True
except jsonschema.ValidationError:
return False
except jsonschema.SchemaError:
raise
except:
raise
matches_json_schema_udf = F.udf(
lambda val: matches_json_schema(val=val), pyspark.types.BooleanType()
)
return matches_json_schema_udf(column)
| ColumnValuesMatchJsonSchema |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_stock01.py | {
"start": 315,
"end": 2046
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_stock01.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [40522880, 40524416]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.