language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 9809,
"end": 11300
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = SplinterAttention(config)
self.intermediate = SplinterIntermediate(config)
self.output = SplinterOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.align.modeling_align.AlignTextEncoder with AlignText->Splinter
| SplinterLayer |
python | modin-project__modin | modin/core/computation/ops.py | {
"start": 8740,
"end": 13345
} | class ____(Op):
"""
Hold a binary operator and its operands.
Parameters
----------
op : str
lhs : Term or Op
rhs : Term or Op
"""
def __init__(self, op: str, lhs, rhs) -> None:
super().__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError as err:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError(
f"Invalid binary operator {repr(op)}, valid operators are {keys}"
) from err
def __call__(self, env):
"""
Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
"""
Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == "python":
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
right = self.rhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
from modin.core.computation.eval import eval
res = eval(self, local_dict=env, engine=engine, parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self) -> None:
"""
Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
encoder: Callable
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pandas.Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pandas.Timestamp(ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
rhs = self.rhs
lhs = self.lhs
# GH#24883 unwrap dtype if necessary to ensure we have a type object
rhs_rt = rhs.return_type
rhs_rt = getattr(rhs_rt, "type", rhs_rt)
lhs_rt = lhs.return_type
lhs_rt = getattr(lhs_rt, "type", lhs_rt)
if (
(lhs.is_scalar or rhs.is_scalar)
and self.op in _bool_ops_dict
and (
not (
issubclass(rhs_rt, (bool, np.bool_))
and issubclass(lhs_rt, (bool, np.bool_))
)
)
):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype) -> bool:
return issubclass(np.dtype(dtype).type, np.number)
UNARY_OPS_SYMS = ("+", "-", "~", "not")
_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert)
_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs))
| BinOp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-linnworks/source_linnworks/streams.py | {
"start": 1654,
"end": 2355
} | class ____(ABC):
# https://apps.linnworks.net/Api/Class/linnworks-spa-commondata-Generic-GenericPagedResult
@abstractmethod
def paged_result(self, response: requests.Response) -> Mapping[str, Any]:
pass
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
result = self.paged_result(response)
if result["PageNumber"] < result["TotalPages"]:
return {
"PageNumber": result["PageNumber"] + 1,
"EntriesPerPage": result["EntriesPerPage"],
"TotalEntries": result["TotalEntries"],
"TotalPages": result["TotalPages"],
}
| LinnworksGenericPagedResult |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 42472,
"end": 43058
} | class ____(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Django/Jinja'
aliases = ['xml+django', 'xml+jinja']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+django', 'application/xml+jinja']
def __init__(self, **options):
super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
| XmlDjangoLexer |
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-to-make-character-frequencies-unique.py | {
"start": 63,
"end": 505
} | class ____(object):
def minDeletions(self, s):
"""
:type s: str
:rtype: int
"""
count = collections.Counter(s)
result = 0
lookup = set()
for c in string.ascii_lowercase:
for i in reversed(xrange(1, count[c]+1)):
if i not in lookup:
lookup.add(i)
break
result += 1
return result
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 216303,
"end": 224338
} | class ____(TestCase):
class RaiseOnBool:
def __bool__(self):
raise ValueError
# true_vals = [True, np._CopyMode.ALWAYS, np.True_]
# false_vals = [False, np._CopyMode.IF_NEEDED, np.False_]
true_vals = [True, 1, np.True_]
false_vals = [False, 0, np.False_]
def test_scalars(self):
# Test both numpy and python scalars
for dtype in np.typecodes["All"]:
arr = np.zeros((), dtype=dtype)
scalar = arr[()]
pyscalar = arr.item(0)
# Test never-copy raises error:
assert_raises(ValueError, np.array, scalar, copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar, copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar, copy=self.RaiseOnBool())
# Casting with a dtype (to unsigned integers) can be special:
with pytest.raises(ValueError):
np.array(pyscalar, dtype=np.int64, copy=np._CopyMode.NEVER)
@xfail # TODO: handle `_CopyMode` properly in torch._numpy
def test_compatible_cast(self):
# Some types are compatible even though they are different, no
# copy is necessary for them. This is mostly true for some integers
def int_types(byteswap=False):
int_types = np.typecodes["Integer"] + np.typecodes["UnsignedInteger"]
for int_type in int_types:
yield np.dtype(int_type)
if byteswap:
yield np.dtype(int_type).newbyteorder()
for int1 in int_types():
for int2 in int_types(True):
arr = np.arange(10, dtype=int1)
for copy in self.true_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
if int1 == int2:
# Casting is not necessary, base check is sufficient here
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is arr or res.base is arr
res = np.array(arr, copy=np._CopyMode.NEVER, dtype=int2)
assert res is arr or res.base is arr
else:
# Casting is necessary, assert copy works:
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
assert_raises(
ValueError, np.array, arr, copy=np._CopyMode.NEVER, dtype=int2
)
assert_raises(ValueError, np.array, arr, copy=None, dtype=int2)
def test_buffer_interface(self):
# Buffer interface gives direct memory access (no copy)
arr = np.arange(10)
view = memoryview(arr)
# Checking bases is a bit tricky since numpy creates another
# memoryview, so use may_share_memory.
for copy in self.true_vals:
res = np.array(view, copy=copy)
assert not np.may_share_memory(arr, res)
for copy in self.false_vals:
res = np.array(view, copy=copy)
assert np.may_share_memory(arr, res)
res = np.array(view, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res)
def test_array_interfaces(self):
# Array interface gives direct memory access (much like a memoryview)
base_arr = np.arange(10)
class ArrayLike:
__array_interface__ = base_arr.__array_interface__
arr = ArrayLike()
for copy, val in [
(True, None),
(np._CopyMode.ALWAYS, None),
(False, arr),
(np._CopyMode.IF_NEEDED, arr),
(np._CopyMode.NEVER, arr),
]:
res = np.array(arr, copy=copy)
assert res.base is val
def test___array__(self):
base_arr = np.arange(10)
class ArrayLike:
def __array__(self):
# __array__ should return a copy, numpy cannot know this
# however.
return base_arr
arr = ArrayLike()
for copy in self.true_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
# An additional copy is currently forced by numpy in this case,
# you could argue, numpy does not trust the ArrayLike. This
# may be open for change:
assert res is not base_arr
for copy in self.false_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
assert res is base_arr # numpy trusts the ArrayLike
with pytest.raises(ValueError):
np.array(arr, copy=np._CopyMode.NEVER)
@parametrize("arr", [np.ones(()), np.arange(81).reshape((9, 9))])
@parametrize("order1", ["C", "F", None])
@parametrize("order2", ["C", "F", "A", "K"])
def test_order_mismatch(self, arr, order1, order2):
# The order is the main (python side) reason that can cause
# a never-copy to fail.
# Prepare C-order, F-order and non-contiguous arrays:
arr = arr.copy(order1)
if order1 == "C":
assert arr.flags.c_contiguous
elif order1 == "F":
assert arr.flags.f_contiguous
elif arr.ndim != 0:
# Make array non-contiguous
arr = arr[::2, ::2]
assert not arr.flags.forc
# Whether a copy is necessary depends on the order of arr:
if order2 == "C":
no_copy_necessary = arr.flags.c_contiguous
elif order2 == "F":
no_copy_necessary = arr.flags.f_contiguous
else:
# Keeporder and Anyorder are OK with non-contiguous output.
# This is not consistent with the `astype` behaviour which
# enforces contiguity for "A". It is probably historic from when
# "K" did not exist.
no_copy_necessary = True
# Test it for both the array and a memoryview
for view in [arr, memoryview(arr)]:
for copy in self.true_vals:
res = np.array(view, copy=copy, order=order2)
assert res is not arr and res.flags.owndata
assert_array_equal(arr, res)
if no_copy_necessary:
for copy in self.false_vals:
res = np.array(view, copy=copy, order=order2)
# res.base.obj refers to the memoryview
if not IS_PYPY:
assert res is arr or res.base.obj is arr
res = np.array(view, copy=np._CopyMode.NEVER, order=order2)
if not IS_PYPY:
assert res is arr or res.base.obj is arr
else:
for copy in self.false_vals:
res = np.array(arr, copy=copy, order=order2)
assert_array_equal(arr, res)
assert_raises(
ValueError, np.array, view, copy=np._CopyMode.NEVER, order=order2
)
assert_raises(ValueError, np.array, view, copy=None, order=order2)
def test_striding_not_ok(self):
arr = np.array([[1, 2, 4], [3, 4, 5]])
assert_raises(ValueError, np.array, arr.T, copy=np._CopyMode.NEVER, order="C")
assert_raises(
ValueError,
np.array,
arr.T,
copy=np._CopyMode.NEVER,
order="C",
dtype=np.int64,
)
assert_raises(ValueError, np.array, arr, copy=np._CopyMode.NEVER, order="F")
assert_raises(
ValueError,
np.array,
arr,
copy=np._CopyMode.NEVER,
order="F",
dtype=np.int64,
)
| TestArrayCreationCopyArgument |
python | mahmoud__glom | glom/core.py | {
"start": 28618,
"end": 33060
} | class ____:
"""Coalesce objects specify fallback behavior for a list of
subspecs.
Subspecs are passed as positional arguments, and keyword arguments
control defaults. Each subspec is evaluated in turn, and if none
match, a :exc:`CoalesceError` is raised, or a default is returned,
depending on the options used.
.. note::
This operation may seem very familar if you have experience with
`SQL`_ or even `C# and others`_.
In practice, this fallback behavior's simplicity is only surpassed
by its utility:
>>> target = {'c': 'd'}
>>> glom(target, Coalesce('a', 'b', 'c'))
'd'
glom tries to get ``'a'`` from ``target``, but gets a
KeyError. Rather than raise a :exc:`~glom.PathAccessError` as usual,
glom *coalesces* into the next subspec, ``'b'``. The process
repeats until it gets to ``'c'``, which returns our value,
``'d'``. If our value weren't present, we'd see:
>>> target = {}
>>> glom(target, Coalesce('a', 'b'))
Traceback (most recent call last):
...
CoalesceError: no valid values found. Tried ('a', 'b') and got (PathAccessError, PathAccessError) ...
Same process, but because ``target`` is empty, we get a
:exc:`CoalesceError`.
.. note::
Coalesce is a *branching* specifier type, so as of v20.7.0, its
exception messages feature an error tree. See
:ref:`branched-exceptions` for details on how to interpret these
exceptions.
If we want to avoid an exception, and we know which value we want
by default, we can set *default*:
>>> target = {}
>>> glom(target, Coalesce('a', 'b', 'c'), default='d-fault')
'd-fault'
``'a'``, ``'b'``, and ``'c'`` weren't present so we got ``'d-fault'``.
Args:
subspecs: One or more glommable subspecs
default: A value to return if no subspec results in a valid value
default_factory: A callable whose result will be returned as a default
skip: A value, tuple of values, or predicate function
representing values to ignore
skip_exc: An exception or tuple of exception types to catch and
move on to the next subspec. Defaults to :exc:`GlomError`, the
parent type of all glom runtime exceptions.
If all subspecs produce skipped values or exceptions, a
:exc:`CoalesceError` will be raised. For more examples, check out
the :doc:`tutorial`, which makes extensive use of Coalesce.
.. _SQL: https://en.wikipedia.org/w/index.php?title=Null_(SQL)&oldid=833093792#COALESCE
.. _C# and others: https://en.wikipedia.org/w/index.php?title=Null_coalescing_operator&oldid=839493322#C#
"""
def __init__(self, *subspecs, **kwargs):
self.subspecs = subspecs
self._orig_kwargs = dict(kwargs)
self.default = kwargs.pop('default', _MISSING)
self.default_factory = kwargs.pop('default_factory', _MISSING)
if self.default and self.default_factory:
raise ValueError('expected one of "default" or "default_factory", not both')
self.skip = kwargs.pop('skip', _MISSING)
if self.skip is _MISSING:
self.skip_func = lambda v: False
elif callable(self.skip):
self.skip_func = self.skip
elif isinstance(self.skip, tuple):
self.skip_func = lambda v: v in self.skip
else:
self.skip_func = lambda v: v == self.skip
self.skip_exc = kwargs.pop('skip_exc', GlomError)
if kwargs:
raise TypeError(f'unexpected keyword args: {sorted(kwargs.keys())!r}')
def glomit(self, target, scope):
skipped = []
for subspec in self.subspecs:
try:
ret = scope[glom](target, subspec, scope)
if not self.skip_func(ret):
break
skipped.append(ret)
except self.skip_exc as e:
skipped.append(e)
continue
else:
if self.default is not _MISSING:
ret = arg_val(target, self.default, scope)
elif self.default_factory is not _MISSING:
ret = self.default_factory()
else:
raise CoalesceError(self, skipped, scope[Path])
return ret
def __repr__(self):
cn = self.__class__.__name__
return format_invocation(cn, self.subspecs, self._orig_kwargs, repr=bbrepr)
| Coalesce |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 7768,
"end": 8879
} | class ____(TypedDict):
"""represent the reflected IDENTITY structure of a column, corresponding
to the :class:`_schema.Identity` construct.
The :class:`.ReflectedIdentity` structure is part of the
:class:`.ReflectedColumn` structure, which is returned by the
:meth:`.Inspector.get_columns` method.
"""
always: bool
"""type of identity column"""
on_null: bool
"""indicates ON NULL"""
start: int
"""starting index of the sequence"""
increment: int
"""increment value of the sequence"""
minvalue: int
"""the minimum value of the sequence."""
maxvalue: int
"""the maximum value of the sequence."""
nominvalue: bool
"""no minimum value of the sequence."""
nomaxvalue: bool
"""no maximum value of the sequence."""
cycle: bool
"""allows the sequence to wrap around when the maxvalue
or minvalue has been reached."""
cache: Optional[int]
"""number of future values in the
sequence which are calculated in advance."""
order: bool
"""if true, renders the ORDER keyword."""
| ReflectedIdentity |
python | ansible__ansible | test/units/executor/test_task_result.py | {
"start": 846,
"end": 6517
} | class ____(unittest.TestCase):
def test_task_result_basic(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test loading a result with a dict
tr = _RawTaskResult(mock_host, mock_task, {}, {})
def test_task_result_is_changed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no changed in result
tr = _RawTaskResult(mock_host, mock_task, {}, {})
self.assertFalse(tr.is_changed())
# test with changed in the result
tr = _RawTaskResult(mock_host, mock_task, dict(changed=True), {})
self.assertTrue(tr.is_changed())
# test with multiple results but none changed
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]), {})
self.assertFalse(tr.is_changed())
# test with multiple results and one changed
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]), {})
self.assertTrue(tr.is_changed())
def test_task_result_is_skipped(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no skipped in result
tr = _RawTaskResult(mock_host, mock_task, dict(), {})
self.assertFalse(tr.is_skipped())
# test with skipped in the result
tr = _RawTaskResult(mock_host, mock_task, dict(skipped=True), {})
self.assertTrue(tr.is_skipped())
# test with multiple results but none skipped
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]), {})
self.assertFalse(tr.is_skipped())
# test with multiple results and one skipped
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]), {})
self.assertFalse(tr.is_skipped())
# test with multiple results and all skipped
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]), {})
self.assertTrue(tr.is_skipped())
# test with multiple squashed results (list of strings)
# first with the main result having skipped=False
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False), {})
self.assertFalse(tr.is_skipped())
# then with the main result having skipped=True
tr = _RawTaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True), {})
self.assertTrue(tr.is_skipped())
def test_task_result_is_unreachable(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no unreachable in result
tr = _RawTaskResult(mock_host, mock_task, {}, {})
self.assertFalse(tr.is_unreachable())
# test with unreachable in the result
tr = _RawTaskResult(mock_host, mock_task, dict(unreachable=True), {})
self.assertTrue(tr.is_unreachable())
# test with multiple results but none unreachable
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]), {})
self.assertFalse(tr.is_unreachable())
# test with multiple results and one unreachable
mock_task.loop = 'foo'
tr = _RawTaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]), {})
self.assertTrue(tr.is_unreachable())
def test_task_result_is_failed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no failed in result
tr = _RawTaskResult(mock_host, mock_task, dict(), {})
self.assertFalse(tr.is_failed())
# test failed result with rc values (should not matter)
tr = _RawTaskResult(mock_host, mock_task, dict(rc=0), {})
self.assertFalse(tr.is_failed())
tr = _RawTaskResult(mock_host, mock_task, dict(rc=1), {})
self.assertFalse(tr.is_failed())
# test with failed in result
tr = _RawTaskResult(mock_host, mock_task, dict(failed=True), {})
self.assertTrue(tr.is_failed())
# test with failed_when in result
tr = _RawTaskResult(mock_host, mock_task, dict(failed_when_result=True), {})
self.assertTrue(tr.is_failed())
def test_task_result_no_log(self):
mock_host = MagicMock()
mock_task = MagicMock()
# no_log should remove secrets
tr = _RawTaskResult(mock_host, mock_task, dict(_ansible_no_log=True, secret='DONTSHOWME'), {})
clean = tr.as_callback_task_result()
self.assertTrue('secret' not in clean.result)
def test_task_result_no_log_preserve(self):
mock_host = MagicMock()
mock_task = MagicMock()
# no_log should not remove preserved keys
tr = _RawTaskResult(
mock_host,
mock_task,
dict(
_ansible_no_log=True,
retries=5,
attempts=5,
changed=False,
foo='bar',
),
task_fields={},
)
clean = tr.as_callback_task_result()
self.assertTrue('retries' in clean.result)
self.assertTrue('attempts' in clean.result)
self.assertTrue('changed' in clean.result)
self.assertTrue('foo' not in clean.result)
| TestRawTaskResult |
python | doocs__leetcode | solution/0900-0999/0998.Maximum Binary Tree II/Solution.py | {
"start": 192,
"end": 476
} | class ____:
def insertIntoMaxTree(
self, root: Optional[TreeNode], val: int
) -> Optional[TreeNode]:
if root is None or root.val < val:
return TreeNode(val, root)
root.right = self.insertIntoMaxTree(root.right, val)
return root
| Solution |
python | pydata__xarray | xarray/tests/test_groupby.py | {
"start": 118057,
"end": 137199
} | class ____:
def test_season_to_month_tuple(self):
assert season_to_month_tuple(["JF", "MAM", "JJAS", "OND"]) == (
(1, 2),
(3, 4, 5),
(6, 7, 8, 9),
(10, 11, 12),
)
assert season_to_month_tuple(["DJFM", "AM", "JJAS", "ON"]) == (
(12, 1, 2, 3),
(4, 5),
(6, 7, 8, 9),
(10, 11),
)
def test_season_grouper_raises_error_if_months_are_not_valid_or_not_continuous(
self,
):
calendar = "standard"
time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
with pytest.raises(KeyError, match="IN"):
da.groupby(time=SeasonGrouper(["INVALID_SEASON"]))
with pytest.raises(KeyError, match="MD"):
da.groupby(time=SeasonGrouper(["MDF"]))
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_season_grouper_with_months_spanning_calendar_year_using_same_year(
self, calendar
):
time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
# fmt: off
data = np.array(
[
1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
]
)
# fmt: on
da = DataArray(data, dims="time", coords={"time": time})
da["year"] = da.time.dt.year
actual = da.groupby(
year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"])
).mean()
# Expected if the same year "ND" is used for seasonal grouping
expected = xr.DataArray(
data=np.array([[1.38, 1.616667], [1.51, 1.5]]),
dims=["year", "season"],
coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]},
)
assert_allclose(expected, actual)
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_season_grouper_with_partial_years(self, calendar):
time = date_range("2001-01-01", "2002-06-30", freq="MS", calendar=calendar)
# fmt: off
data = np.array(
[
1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
1.95, 1.05, 1.3, 1.55, 1.8, 1.15,
]
)
# fmt: on
da = DataArray(data, dims="time", coords={"time": time})
da["year"] = da.time.dt.year
actual = da.groupby(
year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"])
).mean()
# Expected if partial years are handled correctly
expected = xr.DataArray(
data=np.array([[1.38, 1.616667], [1.43333333, 1.5]]),
dims=["year", "season"],
coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]},
)
assert_allclose(expected, actual)
@pytest.mark.parametrize("calendar", ["standard"])
def test_season_grouper_with_single_month_seasons(self, calendar):
time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
# fmt: off
data = np.array(
[
1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
]
)
# fmt: on
da = DataArray(data, dims="time", coords={"time": time})
da["year"] = da.time.dt.year
# TODO: Consider supporting this if needed
# It does not work without flox, because the group labels are not unique,
# and so the stack/unstack approach does not work.
with pytest.raises(ValueError):
da.groupby(
year=UniqueGrouper(),
time=SeasonGrouper(
["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"]
),
).mean()
# Expected if single month seasons are handled correctly
# expected = xr.DataArray(
# data=np.array(
# [
# [1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7],
# [1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75],
# ]
# ),
# dims=["year", "season"],
# coords={
# "year": [2001, 2002],
# "season": ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"],
# },
# )
# assert_allclose(expected, actual)
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_season_grouper_with_months_spanning_calendar_year_using_previous_year(
self, calendar
):
time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
# fmt: off
data = np.array(
[
1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
]
)
# fmt: on
da = DataArray(data, dims="time", coords={"time": time})
gb = da.resample(time=SeasonResampler(["NDJFM", "AMJ"], drop_incomplete=False))
actual = gb.mean()
# fmt: off
new_time_da = xr.DataArray(
dims="time",
data=pd.DatetimeIndex(
[
"2000-11-01", "2001-04-01", "2001-11-01", "2002-04-01", "2002-11-01"
]
),
)
# fmt: on
if calendar != "standard":
new_time_da = new_time_da.convert_calendar(
calendar=calendar, align_on="date"
)
new_time = new_time_da.time.variable
# Expected if the previous "ND" is used for seasonal grouping
expected = xr.DataArray(
data=np.array([1.25, 1.616667, 1.49, 1.5, 1.625]),
dims="time",
coords={"time": new_time},
)
assert_allclose(expected, actual)
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_season_grouper_simple(self, calendar) -> None:
time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
expected = da.groupby("time.season").mean()
# note season order matches expected
actual = da.groupby(
time=SeasonGrouper(
["DJF", "JJA", "MAM", "SON"], # drop_incomplete=False
)
).mean()
assert_identical(expected, actual)
@pytest.mark.parametrize("seasons", [["JJA", "MAM", "SON", "DJF"]])
def test_season_resampling_raises_unsorted_seasons(self, seasons):
calendar = "standard"
time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
with pytest.raises(ValueError, match="sort"):
da.resample(time=SeasonResampler(seasons))
@pytest.mark.parametrize(
"use_cftime", [pytest.param(True, marks=requires_cftime), False]
)
@pytest.mark.parametrize("drop_incomplete", [True, False])
@pytest.mark.parametrize(
"seasons",
[
pytest.param(["DJF", "MAM", "JJA", "SON"], id="standard"),
pytest.param(["NDJ", "FMA", "MJJ", "ASO"], id="nov-first"),
pytest.param(["MAM", "JJA", "SON", "DJF"], id="standard-diff-order"),
pytest.param(["JFM", "AMJ", "JAS", "OND"], id="december-same-year"),
pytest.param(["DJF", "MAM", "JJA", "ON"], id="skip-september"),
pytest.param(["JJAS"], id="jjas-only"),
],
)
def test_season_resampler(
self, seasons: list[str], drop_incomplete: bool, use_cftime: bool
) -> None:
calendar = "standard"
time = date_range(
"2001-01-01",
"2002-12-30",
freq="D",
calendar=calendar,
use_cftime=use_cftime,
)
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
counts = da.resample(time="ME").count()
seasons_as_ints = season_to_month_tuple(seasons)
month = counts.time.dt.month.data
year = counts.time.dt.year.data
for season, as_ints in zip(seasons, seasons_as_ints, strict=True):
if "DJ" in season:
for imonth in as_ints[season.index("D") + 1 :]:
year[month == imonth] -= 1
counts["time"] = (
"time",
[pd.Timestamp(f"{y}-{m}-01") for y, m in zip(year, month, strict=True)],
)
if has_cftime:
counts = counts.convert_calendar(calendar, "time", align_on="date")
expected_vals = []
expected_time = []
for year in [2001, 2002, 2003]:
for season, as_ints in zip(seasons, seasons_as_ints, strict=True):
out_year = year
if "DJ" in season:
out_year = year - 1
if out_year == 2003:
# this is a dummy year added to make sure we cover 2002-DJF
continue
available = [
counts.sel(time=f"{out_year}-{month:02d}").data for month in as_ints
]
if any(len(a) == 0 for a in available) and drop_incomplete:
continue
output_label = pd.Timestamp(f"{out_year}-{as_ints[0]:02d}-01")
expected_time.append(output_label)
# use concatenate to handle empty array when dec value does not exist
expected_vals.append(np.concatenate(available).sum())
expected = (
# we construct expected in the standard calendar
xr.DataArray(expected_vals, dims="time", coords={"time": expected_time})
)
if has_cftime:
# and then convert to the expected calendar,
expected = expected.convert_calendar(
calendar, align_on="date", use_cftime=use_cftime
)
# and finally sort since DJF will be out-of-order
expected = expected.sortby("time")
rs = SeasonResampler(seasons, drop_incomplete=drop_incomplete)
# through resample
actual = da.resample(time=rs).sum()
assert_identical(actual, expected)
@requires_cftime
def test_season_resampler_errors(self):
time = date_range("2001-01-01", "2002-12-30", freq="D", calendar="360_day")
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
# non-datetime array
with pytest.raises(ValueError):
DataArray(np.ones(5), dims="time").groupby(time=SeasonResampler(["DJF"]))
# ndim > 1 array
with pytest.raises(ValueError):
DataArray(
np.ones((5, 5)), dims=("t", "x"), coords={"x": np.arange(5)}
).groupby(x=SeasonResampler(["DJF"]))
# overlapping seasons
with pytest.raises(ValueError):
da.groupby(time=SeasonResampler(["DJFM", "MAMJ", "JJAS", "SOND"])).sum()
@requires_cftime
def test_season_resampler_groupby_identical(self):
time = date_range("2001-01-01", "2002-12-30", freq="D")
da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
# through resample
resampler = SeasonResampler(["DJF", "MAM", "JJA", "SON"])
rs = da.resample(time=resampler).sum()
# through groupby
gb = da.groupby(time=resampler).sum()
assert_identical(rs, gb)
@pytest.mark.parametrize(
"chunk",
[
pytest.param(
True, marks=pytest.mark.skipif(not has_dask, reason="requires dask")
),
False,
],
)
def test_datetime_mean(chunk, use_cftime):
ds = xr.Dataset(
{
"var1": (
("time",),
xr.date_range(
"2021-10-31", periods=10, freq="D", use_cftime=use_cftime
),
),
"var2": (("x",), list(range(10))),
}
)
if chunk:
ds = ds.chunk()
assert "var1" in ds.groupby("x").mean("time")
assert "var1" in ds.mean("x")
def test_mean_with_mixed_types():
"""Test that mean correctly handles datasets with mixed types including strings"""
ds = xr.Dataset(
{
"numbers": (("x",), [1.0, 2.0, 3.0, 4.0]),
"integers": (("x",), [10, 20, 30, 40]),
"strings": (("x",), ["a", "b", "c", "d"]),
"datetime": (
("x",),
pd.date_range("2021-01-01", periods=4, freq="D"),
),
"timedelta": (
("x",),
pd.timedelta_range("1 day", periods=4, freq="D"),
),
}
)
# Direct mean should exclude strings but include datetime/timedelta
result = ds.mean()
assert "numbers" in result.data_vars
assert "integers" in result.data_vars
assert "strings" not in result.data_vars
assert "datetime" in result.data_vars
assert "timedelta" in result.data_vars
# Also test mean with specific dimension
result_dim = ds.mean("x")
assert "numbers" in result_dim.data_vars
assert "integers" in result_dim.data_vars
assert "strings" not in result_dim.data_vars
assert "datetime" in result_dim.data_vars
assert "timedelta" in result_dim.data_vars
def test_mean_with_string_coords():
"""Test that mean works when strings are in coordinates, not data vars"""
ds = xr.Dataset(
{
"temperature": (("city", "time"), np.random.rand(3, 4)),
"humidity": (("city", "time"), np.random.rand(3, 4)),
},
coords={
"city": ["New York", "London", "Tokyo"],
"time": pd.date_range("2021-01-01", periods=4, freq="D"),
},
)
# Mean across string coordinate should work
result = ds.mean("city")
assert result.sizes == {"time": 4}
assert "temperature" in result.data_vars
assert "humidity" in result.data_vars
# Groupby with string coordinate should work
grouped = ds.groupby("city")
result_grouped = grouped.mean()
assert "temperature" in result_grouped.data_vars
assert "humidity" in result_grouped.data_vars
def test_mean_datetime_edge_cases():
"""Test mean with datetime edge cases like NaT"""
# Test with NaT values
dates_with_nat = pd.date_range("2021-01-01", periods=4, freq="D")
dates_with_nat_array = dates_with_nat.values.copy()
dates_with_nat_array[1] = np.datetime64("NaT")
ds = xr.Dataset(
{
"dates": (("x",), dates_with_nat_array),
"values": (("x",), [1.0, 2.0, 3.0, 4.0]),
}
)
# Mean should handle NaT properly (skipna behavior)
result = ds.mean()
assert "dates" in result.data_vars
assert "values" in result.data_vars
# The mean should skip NaT and compute mean of the other 3 dates
assert not result.dates.isnull().item()
# Test with timedelta
timedeltas = pd.timedelta_range("1 day", periods=4, freq="D")
ds_td = xr.Dataset(
{
"timedeltas": (("x",), timedeltas),
"values": (("x",), [1.0, 2.0, 3.0, 4.0]),
}
)
result_td = ds_td.mean()
assert "timedeltas" in result_td.data_vars
assert result_td["timedeltas"].values == np.timedelta64(
216000000000000, "ns"
) # 2.5 days
@requires_cftime
def test_mean_with_cftime_objects():
"""Test mean with cftime objects (issue #5897)"""
ds = xr.Dataset(
{
"var1": (
("time",),
xr.date_range("2021-10-31", periods=10, freq="D", use_cftime=True),
),
"var2": (("x",), list(range(10))),
}
)
# Test averaging over time dimension - var1 should be included
result_time = ds.mean("time")
assert "var1" in result_time.data_vars
assert "var2" not in result_time.dims
# Test averaging over x dimension - should work normally
result_x = ds.mean("x")
assert "var2" in result_x.data_vars
assert "var1" in result_x.data_vars
assert result_x.var2.item() == 4.5 # mean of 0-9
# Test that mean preserves object arrays containing datetime-like objects
import cftime
dates = np.array(
[cftime.DatetimeNoLeap(2021, i, 1) for i in range(1, 5)], dtype=object
)
ds2 = xr.Dataset(
{
"cftime_dates": (("x",), dates),
"numbers": (("x",), [1.0, 2.0, 3.0, 4.0]),
"object_strings": (("x",), np.array(["a", "b", "c", "d"], dtype=object)),
}
)
# Mean should include cftime dates but not string objects
result = ds2.mean()
assert "cftime_dates" in result.data_vars
assert "numbers" in result.data_vars
assert "object_strings" not in result.data_vars
@requires_dask
@requires_cftime
def test_mean_with_cftime_objects_dask():
"""Test mean with cftime objects using dask backend (issue #5897)"""
ds = xr.Dataset(
{
"var1": (
("time",),
xr.date_range("2021-10-31", periods=10, freq="D", use_cftime=True),
),
"var2": (("x",), list(range(10))),
}
)
# Test with dask backend
dsc = ds.chunk({})
result_time_dask = dsc.mean("time")
assert "var1" in result_time_dask.data_vars
result_x_dask = dsc.mean("x")
assert "var2" in result_x_dask.data_vars
assert result_x_dask.var2.compute().item() == 4.5
def test_groupby_bins_datetime_mean():
"""Test groupby_bins with datetime mean (issue #6995)"""
times = pd.date_range("2020-01-01", "2020-02-01", freq="1h")
index = np.arange(len(times))
bins = np.arange(0, len(index), 5)
ds = xr.Dataset(
{"time": ("index", times), "float": ("index", np.linspace(0, 1, len(index)))},
coords={"index": index},
)
# The time variable should be preserved and averaged
result = ds.groupby_bins("index", bins).mean()
assert "time" in result.data_vars
assert "float" in result.data_vars
assert result.time.dtype == np.dtype("datetime64[ns]")
def test_groupby_bins_mean_time_series():
"""Test groupby_bins mean on time series data (issue #10217)"""
ds = xr.Dataset(
{
"measurement": ("trial", np.arange(0, 100, 10)),
"time": ("trial", pd.date_range("20240101T1500", "20240101T1501", 10)),
}
)
# Time variable should be preserved in the aggregation
ds_agged = ds.groupby_bins("trial", 5).mean()
assert "time" in ds_agged.data_vars
assert "measurement" in ds_agged.data_vars
assert ds_agged.time.dtype == np.dtype("datetime64[ns]")
# TODO: Possible property tests to add to this module
# 1. lambda x: x
# 2. grouped-reduce on unique coords is identical to array
# 3. group_over == groupby-reduce along other dimensions
# 4. result is equivalent for transposed input
| TestSeasonGrouperAndResampler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 3213,
"end": 3613
} | class ____[T1 = str, *Ts2 = Unpack[tuple[T1, ...]]]: ...
tc1 = ClassTC()
reveal_type(tc1, expected_text="ClassTC[str, *tuple[str, ...]]")
tc2 = ClassTC[int]()
reveal_type(tc2, expected_text="ClassTC[int, *tuple[int, ...]]")
tc3 = ClassTC[int, *tuple[()]]()
reveal_type(tc3, expected_text="ClassTC[int]")
tc4 = ClassTC[int, *tuple[None]]()
reveal_type(tc4, expected_text="ClassTC[int, None]")
| ClassTC |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 23546,
"end": 23671
} | class ____(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
| LinkifyHandler |
python | django-extensions__django-extensions | tests/management/commands/test_syncdata.py | {
"start": 396,
"end": 2125
} | class ____(TestCase):
"""Tests for SyncData command exceptions."""
def test_should_return_SyncDataError_when_unknown_fixture_format(self):
with pytest.raises(
CommandError,
match="Problem installing fixture 'foo': jpeg is not a known serialization format.",
):
call_command("syncdata", "foo.jpeg", verbosity=2)
def test_should_return_SyncDataError_when_file_not_contains_valid_fixture_data(
self,
):
with pytest.raises(
CommandError,
match=r"No fixture data found for 'invalid_fixture'. \(File format may be invalid.\)",
):
call_command("syncdata", "invalid_fixture.xml", verbosity=2)
def test_should_return_SyncDataError_when_file_has_non_existent_field_in_fixture_data(
self,
):
with pytest.raises(
CommandError,
match=r"Problem installing fixture '.+fixture_with_nonexistent_field.json'",
):
call_command("syncdata", "fixture_with_nonexistent_field.json", verbosity=1)
with pytest.raises(
CommandError,
match="django.core.exceptions.FieldDoesNotExist: User has no field named 'non_existent_field'",
):
call_command("syncdata", "fixture_with_nonexistent_field.json", verbosity=1)
def test_should_return_SyncDataError_when_multiple_fixtures(self):
with pytest.raises(
CommandError,
match="Multiple fixtures named 'users' in '{}'. Aborting.".format(
TEST_FIXTURE_DIR
),
):
call_command("syncdata", "users", verbosity=2)
@override_settings(FIXTURE_DIRS=[TEST_FIXTURE_DIR])
| SyncDataExceptionsTests |
python | h5py__h5py | h5py/tests/test_vds/test_lowlevel_vds.py | {
"start": 3074,
"end": 4133
} | class ____:
FEM_PIXELS_PER_CHIP_X = 256
FEM_PIXELS_PER_CHIP_Y = 256
FEM_CHIPS_PER_STRIPE_X = 8
FEM_CHIPS_PER_STRIPE_Y = 1
FEM_STRIPES_PER_MODULE = 2
@property
def sensor_module_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y * self.FEM_STRIPES_PER_MODULE
return y_pixels, x_pixels,
@property
def fem_stripe_dimensions(self):
x_pixels = self.FEM_PIXELS_PER_CHIP_X * self.FEM_CHIPS_PER_STRIPE_X
y_pixels = self.FEM_PIXELS_PER_CHIP_Y * self.FEM_CHIPS_PER_STRIPE_Y
return y_pixels, x_pixels,
def generate_sensor_module_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.sensor_module_dimensions, dtype=dtype)
dset.fill(value)
return dset
def generate_fem_stripe_image(self, value, dtype='uint16'):
dset = np.empty(shape=self.fem_stripe_dimensions, dtype=dtype)
dset.fill(value)
return dset
| ExcaliburData |
python | Pylons__pyramid | tests/test_predicates.py | {
"start": 865,
"end": 2098
} | class ____(unittest.TestCase):
def _makeOne(self, val):
from pyramid.predicates import RequestMethodPredicate
return RequestMethodPredicate(val, None)
def test_ctor_get_but_no_head(self):
inst = self._makeOne('GET')
self.assertEqual(inst.val, ('GET', 'HEAD'))
def test___call___true_single(self):
inst = self._makeOne('GET')
request = Dummy()
request.method = 'GET'
result = inst(None, request)
self.assertTrue(result)
def test___call___true_multi(self):
inst = self._makeOne(('GET', 'HEAD'))
request = Dummy()
request.method = 'GET'
result = inst(None, request)
self.assertTrue(result)
def test___call___false(self):
inst = self._makeOne(('GET', 'HEAD'))
request = Dummy()
request.method = 'POST'
result = inst(None, request)
self.assertFalse(result)
def test_text(self):
inst = self._makeOne(('HEAD', 'GET'))
self.assertEqual(inst.text(), 'request_method = GET,HEAD')
def test_phash(self):
inst = self._makeOne(('HEAD', 'GET'))
self.assertEqual(inst.phash(), 'request_method = GET,HEAD')
| TestRequestMethodPredicate |
python | walkccc__LeetCode | solutions/654. Maximum Binary Tree/654.py | {
"start": 0,
"end": 417
} | class ____:
def constructMaximumBinaryTree(self, nums: list[int]) -> TreeNode | None:
def build(i: int, j: int) -> TreeNode | None:
if i > j:
return None
maxNum = max(nums[i:j + 1])
maxIndex = nums.index(maxNum)
root = TreeNode(maxNum)
root.left = build(i, maxIndex - 1)
root.right = build(maxIndex + 1, j)
return root
return build(0, len(nums) - 1)
| Solution |
python | openai__openai-python | src/openai/resources/realtime/calls.py | {
"start": 30690,
"end": 31293
} | class ____:
def __init__(self, calls: Calls) -> None:
self._calls = calls
self.create = _legacy_response.to_raw_response_wrapper(
calls.create,
)
self.accept = _legacy_response.to_raw_response_wrapper(
calls.accept,
)
self.hangup = _legacy_response.to_raw_response_wrapper(
calls.hangup,
)
self.refer = _legacy_response.to_raw_response_wrapper(
calls.refer,
)
self.reject = _legacy_response.to_raw_response_wrapper(
calls.reject,
)
| CallsWithRawResponse |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_kubernetes_engine.py | {
"start": 48567,
"end": 49806
} | class ____:
def test_template_fields(self):
assert set(GKEDeleteJobOperator.template_fields) == set(GKEOperatorMixin.template_fields) | set(
KubernetesDeleteJobOperator.template_fields
)
def test_gcp_conn_id_required(self):
with pytest.raises(AirflowException):
GKEDeleteJobOperator(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_name=GKE_CLUSTER_NAME,
name=K8S_JOB_NAME,
namespace=K8S_NAMESPACE,
task_id=TEST_TASK_ID,
gcp_conn_id=None,
)
def test_config_file_throws_error(self):
expected_error_message = "config_file is not an allowed parameter for the GKEDeleteJobOperator."
with pytest.raises(AirflowException, match=expected_error_message):
GKEDeleteJobOperator(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_name=GKE_CLUSTER_NAME,
name=K8S_JOB_NAME,
namespace=K8S_NAMESPACE,
task_id=TEST_TASK_ID,
config_file="/path/to/alternative/kubeconfig",
)
| TestGKEDeleteJobOperator |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py | {
"start": 19088,
"end": 19174
} | class ____(Sam2VideoMaskDownSamplerLayer):
pass
| Sam3TrackerVideoMaskDownSamplerLayer |
python | keras-team__keras | keras/src/layers/rnn/gru.py | {
"start": 13820,
"end": 28784
} | class ____(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or backend-native)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the cuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation
when using the TensorFlow backend.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. `reset_after` is `True`
7. Inputs, if use masking, are strictly right-padded.
8. Eager execution is enabled in the outermost context.
There are two variants of the GRU implementation. The default one is based
on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
hidden state before matrix multiplication. The other one is based on
[original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
For example:
>>> inputs = np.random.random((32, 10, 8))
>>> gru = keras.layers.GRU(4)
>>> output = gru(inputs)
>>> output.shape
(32, 4)
>>> gru = keras.layers.GRU(4, return_sequences=True, return_state=True)
>>> whole_sequence_output, final_state = gru(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
seed: Random seed for dropout.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition
to the output. Default: `False`.
go_backwards: Boolean (default `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default: `False`). If `True`, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default: `False`).
If `True`, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). `False` is `"before"`,
`True` is `"after"` (default and cuDNN compatible).
use_cudnn: Whether to use a cuDNN-backed implementation. `"auto"` will
attempt to use cuDNN when feasible, and will fallback to the
default implementation if not.
Call arguments:
inputs: A 3D tensor, with shape `(batch, timesteps, feature)`.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked (optional).
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the
corresponding timestep should be ignored. Defaults to `None`.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the
cell when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional). Defaults to `None`.
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, `None` causes creation
of zero-filled initial state tensors). Defaults to `None`.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=True,
use_cudnn="auto",
**kwargs,
):
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
reset_after=reset_after,
dtype=kwargs.get("dtype", None),
trainable=kwargs.get("trainable", True),
name="gru_cell",
seed=seed,
implementation=kwargs.pop("implementation", 2),
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
activity_regularizer=activity_regularizer,
**kwargs,
)
self.input_spec = InputSpec(ndim=3)
if use_cudnn not in ("auto", True, False):
raise ValueError(
"Invalid valid received for argument `use_cudnn`. "
"Expected one of {'auto', True, False}. "
f"Received: use_cudnn={use_cudnn}"
)
self.use_cudnn = use_cudnn
if (
backend.backend() == "tensorflow"
and backend.cudnn_ok(
cell.activation,
cell.recurrent_activation,
self.unroll,
cell.use_bias,
reset_after=reset_after,
)
and use_cudnn in (True, "auto")
):
self.supports_jit = False
def inner_loop(self, sequences, initial_state, mask, training=False):
if tree.is_nested(initial_state):
initial_state = initial_state[0]
if tree.is_nested(mask):
mask = mask[0]
if self.use_cudnn in ("auto", True):
if not self.recurrent_dropout:
try:
if training and self.dropout:
dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :])
dp_mask = ops.expand_dims(dp_mask, axis=1)
dp_mask = ops.broadcast_to(
dp_mask, ops.shape(sequences)
)
dp_sequences = sequences * dp_mask
else:
dp_sequences = sequences
# Backends are allowed to specify (optionally) optimized
# implementation of the inner GRU loop. In the case of
# TF for instance, it will leverage cuDNN when feasible, and
# it will raise NotImplementedError otherwise.
out = backend.gru(
dp_sequences,
initial_state,
mask,
kernel=self.cell.kernel,
recurrent_kernel=self.cell.recurrent_kernel,
bias=self.cell.bias,
activation=self.cell.activation,
recurrent_activation=self.cell.recurrent_activation,
return_sequences=self.return_sequences,
go_backwards=self.go_backwards,
unroll=self.unroll,
reset_after=self.cell.reset_after,
)
# We disable jit_compile for the model in this case,
# since cuDNN ops aren't XLA compatible.
if backend.backend() == "tensorflow":
self.supports_jit = False
return out
except NotImplementedError:
pass
if self.use_cudnn is True:
raise ValueError(
"use_cudnn=True was specified, "
"but cuDNN is not supported for this layer configuration "
"with this backend. Pass use_cudnn='auto' to fallback "
"to a non-cuDNN implementation."
)
return super().inner_loop(
sequences, initial_state, mask=mask, training=training
)
def call(self, sequences, initial_state=None, mask=None, training=False):
return super().call(
sequences, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"reset_after": self.reset_after,
"seed": self.cell.seed,
}
base_config = super().get_config()
del base_config["cell"]
return {**base_config, **config}
@classmethod
def from_config(cls, config):
return cls(**config)
| GRU |
python | sympy__sympy | sympy/sets/fancysets.py | {
"start": 923,
"end": 2079
} | class ____(Set, metaclass=Singleton):
"""
Represents the rational numbers. This set is also available as
the singleton ``S.Rationals``.
Examples
========
>>> from sympy import S
>>> S.Half in S.Rationals
True
>>> iterable = iter(S.Rationals)
>>> [next(iterable) for i in range(12)]
[0, 1, -1, 1/2, 2, -1/2, -2, 1/3, 3, -1/3, -3, 2/3]
"""
is_iterable = True
_inf = S.NegativeInfinity
_sup = S.Infinity
is_empty = False
is_finite_set = False
def _contains(self, other):
if not isinstance(other, Expr):
return S.false
return tfn[other.is_rational]
def __iter__(self):
yield S.Zero
yield S.One
yield S.NegativeOne
d = 2
while True:
for n in range(d):
if igcd(n, d) == 1:
yield Rational(n, d)
yield Rational(d, n)
yield Rational(-n, d)
yield Rational(-d, n)
d += 1
@property
def _boundary(self):
return S.Reals
def _kind(self):
return SetKind(NumberKind)
| Rationals |
python | paramiko__paramiko | tests/test_client.py | {
"start": 3774,
"end": 8714
} | class ____(unittest.TestCase):
def setUp(self):
self.sockl = socket.socket()
self.sockl.bind(("localhost", 0))
self.sockl.listen(1)
self.addr, self.port = self.sockl.getsockname()
self.connect_kwargs = dict(
hostname=self.addr,
port=self.port,
username="slowdive",
look_for_keys=False,
)
self.event = threading.Event()
self.kill_event = threading.Event()
def tearDown(self):
# Shut down client Transport
if hasattr(self, "tc"):
self.tc.close()
# Shut down shared socket
if hasattr(self, "sockl"):
# Signal to server thread that it should shut down early; it checks
# this immediately after accept(). (In scenarios where connection
# actually succeeded during the test, this becomes a no-op.)
self.kill_event.set()
# Forcibly connect to server sock in case the server thread is
# hanging out in its accept() (e.g. if the client side of the test
# fails before it even gets to connecting); there's no other good
# way to force an accept() to exit.
put_a_sock_in_it = socket.socket()
put_a_sock_in_it.connect((self.addr, self.port))
put_a_sock_in_it.close()
# Then close "our" end of the socket (which _should_ cause the
# accept() to bail out, but does not, for some reason. I blame
# threading.)
self.sockl.close()
def _run(
self,
allowed_keys=None,
delay=0,
public_blob=None,
kill_event=None,
server_name=None,
):
if allowed_keys is None:
allowed_keys = FINGERPRINTS.keys()
self.socks, addr = self.sockl.accept()
# If the kill event was set at this point, it indicates an early
# shutdown, so bail out now and don't even try setting up a Transport
# (which will just verbosely die.)
if kill_event and kill_event.is_set():
self.socks.close()
return
self.ts = paramiko.Transport(self.socks)
if server_name is not None:
self.ts.local_version = server_name
keypath = _support("rsa.key")
host_key = paramiko.RSAKey.from_private_key_file(keypath)
self.ts.add_server_key(host_key)
keypath = _support("ecdsa-256.key")
host_key = paramiko.ECDSAKey.from_private_key_file(keypath)
self.ts.add_server_key(host_key)
server = NullServer(allowed_keys=allowed_keys, public_blob=public_blob)
if delay:
time.sleep(delay)
self.ts.start_server(self.event, server)
def _test_connection(self, **kwargs):
"""
(Most) kwargs get passed directly into SSHClient.connect().
The exceptions are ``allowed_keys``/``public_blob``/``server_name``
which are stripped and handed to the ``NullServer`` used for testing.
"""
run_kwargs = {"kill_event": self.kill_event}
for key in ("allowed_keys", "public_blob", "server_name"):
run_kwargs[key] = kwargs.pop(key, None)
# Server setup
threading.Thread(target=self._run, kwargs=run_kwargs).start()
host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
public_host_key = paramiko.RSAKey(data=host_key.asbytes())
# Client setup
self.tc = SSHClient()
self.tc.get_host_keys().add(
f"[{self.addr}]:{self.port}", "ssh-rsa", public_host_key
)
# Actual connection
self.tc.connect(**dict(self.connect_kwargs, **kwargs))
# Authentication successful?
self.event.wait(1.0)
self.assertTrue(self.event.is_set())
self.assertTrue(self.ts.is_active())
self.assertEqual(
self.connect_kwargs["username"], self.ts.get_username()
)
self.assertEqual(True, self.ts.is_authenticated())
self.assertEqual(False, self.tc.get_transport().gss_kex_used)
# Command execution functions?
stdin, stdout, stderr = self.tc.exec_command("yes")
schan = self.ts.accept(1.0)
# Nobody else tests the API of exec_command so let's do it here for
# now. :weary:
assert isinstance(stdin, paramiko.ChannelStdinFile)
assert isinstance(stdout, paramiko.ChannelFile)
assert isinstance(stderr, paramiko.ChannelStderrFile)
schan.send("Hello there.\n")
schan.send_stderr("This is on stderr.\n")
schan.close()
self.assertEqual("Hello there.\n", stdout.readline())
self.assertEqual("", stdout.readline())
self.assertEqual("This is on stderr.\n", stderr.readline())
self.assertEqual("", stderr.readline())
# Cleanup
stdin.close()
stdout.close()
stderr.close()
| ClientTest |
python | patrys__httmock | tests.py | {
"start": 5596,
"end": 6293
} | class ____(unittest.TestCase):
@all_requests
def response_content(self, url, request):
return {'status_code': 200, 'content': 'Oh hai'}
def test_all_requests_response(self):
with HTTMock(self.response_content):
r = requests.get('https://example.com/')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.content, b'Oh hai')
@all_requests
def string_response_content(self, url, request):
return 'Hello'
def test_all_str_response(self):
with HTTMock(self.string_response_content):
r = requests.get('https://example.com/')
self.assertEqual(r.content, b'Hello')
| AllRequestsMethodDecoratorTest |
python | django__django | django/contrib/gis/gdal/field.py | {
"start": 4809,
"end": 4884
} | class ____(Field):
pass
# OFTDate, OFTTime, OFTDateTime fields.
| OFTBinary |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/api_fastapi/datamodels/roles.py | {
"start": 924,
"end": 1040
} | class ____(BaseModel):
"""Outgoing representation of an action (permission name)."""
name: str
| ActionResponse |
python | pytest-dev__pytest-cov | src/pytest_cov/engine.py | {
"start": 9621,
"end": 11238
} | class ____(CovController):
"""Implementation for centralised operation."""
@_ensure_topdir
def start(self):
self.cov = coverage.Coverage(
source=self.cov_source,
branch=self.cov_branch,
data_suffix=True,
config_file=self.cov_config,
)
if self.cov.config.dynamic_context == 'test_function':
message = (
'Detected dynamic_context=test_function in coverage configuration. '
'This is unnecessary as this plugin provides the more complete --cov-context option.'
)
warnings.warn(CentralCovContextWarning(message), stacklevel=1)
self.combining_cov = coverage.Coverage(
source=self.cov_source,
branch=self.cov_branch,
data_suffix=f'{filename_suffix(True)}.combine',
data_file=os.path.abspath(self.cov.config.data_file), # noqa: PTH100
config_file=self.cov_config,
)
# Erase or load any previous coverage data and start coverage.
if not self.cov_append:
self.cov.erase()
self.cov.start()
super().start()
@_ensure_topdir
def finish(self):
"""Stop coverage, save data to file and set the list of coverage objects to report on."""
super().finish()
self.cov.stop()
self.cov.save()
self.cov = self.combining_cov
self.cov.load()
self.cov.combine()
self.cov.save()
node_desc = self.get_node_desc(sys.platform, sys.version_info)
self.node_descs.add(node_desc)
| Central |
python | bokeh__bokeh | src/bokeh/embed/util.py | {
"start": 2162,
"end": 6114
} | class ____:
''' This class merely provides a non-None default value for ``theme``
arguments, since ``None`` itself is a meaningful value for users to pass.
'''
pass
@contextmanager
def OutputDocumentFor(objs: Sequence[Model], apply_theme: Theme | type[FromCurdoc] | None = None,
always_new: bool = False) -> Iterator[Document]:
''' Find or create a (possibly temporary) Document to use for serializing
Bokeh content.
Typical usage is similar to:
.. code-block:: python
with OutputDocumentFor(models):
(docs_json, [render_item]) = standalone_docs_json_and_render_items(models)
Inside the context manager, the models will be considered to be part of a single
Document, with any theme specified, which can thus be serialized as a unit. Where
possible, OutputDocumentFor attempts to use an existing Document. However, this is
not possible in three cases:
* If passed a series of models that have no Document at all, a new Document will
be created, and all the models will be added as roots. After the context manager
exits, the new Document will continue to be the models' document.
* If passed a subset of Document.roots, then OutputDocumentFor temporarily "re-homes"
the models in a new bare Document that is only available inside the context manager.
* If passed a list of models that have different documents, then OutputDocumentFor
temporarily "re-homes" the models in a new bare Document that is only available
inside the context manager.
OutputDocumentFor will also perform document validation before yielding, if
``settings.perform_document_validation()`` is True.
objs (seq[Model]) :
a sequence of Models that will be serialized, and need a common document
apply_theme (Theme or FromCurdoc or None, optional):
Sets the theme for the doc while inside this context manager. (default: None)
If None, use whatever theme is on the document that is found or created
If FromCurdoc, use curdoc().theme, restoring any previous theme afterwards
If a Theme instance, use that theme, restoring any previous theme afterwards
always_new (bool, optional) :
Always return a new document, even in cases where it is otherwise possible
to use an existing document on models.
Yields:
Document
'''
# Note: Comms handling relies on the fact that the new_doc returned
# has models with the same IDs as they were started with
if not isinstance(objs, Sequence) or len(objs) == 0 or not all(isinstance(x, Model) for x in objs):
raise ValueError("OutputDocumentFor expects a non-empty sequence of Models")
def finish() -> None:
pass
docs = {obj.document for obj in objs if obj.document is not None}
if always_new:
def finish() -> None:
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
else:
if len(docs) == 0:
doc = _new_doc()
for model in objs:
doc.add_root(model)
# handle a single shared document
elif len(docs) == 1:
doc = docs.pop()
# we are not using all the roots, make a quick clone for outputting purposes
if set(objs) != set(doc.roots):
def finish() -> None:
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
# we are using all the roots of a single doc, just use doc as-is
pass
# models have mixed docs, just make a quick clone
else:
def finish():
_dispose_temp_doc(objs)
doc = _create_temp_doc(objs)
if settings.perform_document_validation():
doc.validate()
_set_temp_theme(doc, apply_theme)
yield doc
_unset_temp_theme(doc)
finish()
| FromCurdoc |
python | pypa__setuptools | setuptools/tests/test_namespaces.py | {
"start": 107,
"end": 4515
} | class ____:
def test_mixed_site_and_non_site(self, tmpdir):
"""
Installing two packages sharing the same namespace, one installed
to a site dir and the other installed just to a path on PYTHONPATH
should leave the namespace in tact and both packages reachable by
import.
"""
pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
site_packages = tmpdir / 'site-packages'
path_packages = tmpdir / 'path-packages'
targets = site_packages, path_packages
# use pip to install to the target directory
install_cmd = [
sys.executable,
'-m',
'pip.__main__',
'install',
str(pkg_A),
'-t',
str(site_packages),
]
subprocess.check_call(install_cmd)
namespaces.make_site_dir(site_packages)
install_cmd = [
sys.executable,
'-m',
'pip.__main__',
'install',
str(pkg_B),
'-t',
str(path_packages),
]
subprocess.check_call(install_cmd)
try_import = [
sys.executable,
'-c',
'import myns.pkgA; import myns.pkgB',
]
with paths_on_pythonpath(map(str, targets)):
subprocess.check_call(try_import)
def test_pkg_resources_import(self, tmpdir):
"""
Ensure that a namespace package doesn't break on import
of pkg_resources.
"""
pkg = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
target = tmpdir / 'packages'
target.mkdir()
install_cmd = [
sys.executable,
'-m',
'pip',
'install',
'-t',
str(target),
str(pkg),
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(install_cmd)
namespaces.make_site_dir(target)
try_import = [
sys.executable,
'-c',
'import pkg_resources',
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(try_import)
def test_namespace_package_installed_and_cwd(self, tmpdir):
"""
Installing a namespace packages but also having it in the current
working directory, only one version should take precedence.
"""
pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
target = tmpdir / 'packages'
# use pip to install to the target directory
install_cmd = [
sys.executable,
'-m',
'pip.__main__',
'install',
str(pkg_A),
'-t',
str(target),
]
subprocess.check_call(install_cmd)
namespaces.make_site_dir(target)
# ensure that package imports and pkg_resources imports
pkg_resources_imp = [
sys.executable,
'-c',
'import pkg_resources; import myns.pkgA',
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(pkg_resources_imp, cwd=str(pkg_A))
def test_packages_in_the_same_namespace_installed_and_cwd(self, tmpdir):
"""
Installing one namespace package and also have another in the same
namespace in the current working directory, both of them must be
importable.
"""
pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
target = tmpdir / 'packages'
# use pip to install to the target directory
install_cmd = [
sys.executable,
'-m',
'pip.__main__',
'install',
str(pkg_A),
'-t',
str(target),
]
subprocess.check_call(install_cmd)
namespaces.make_site_dir(target)
# ensure that all packages import and pkg_resources imports
pkg_resources_imp = [
sys.executable,
'-c',
'import pkg_resources; import myns.pkgA; import myns.pkgB',
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(pkg_resources_imp, cwd=str(pkg_B))
| TestNamespaces |
python | walkccc__LeetCode | solutions/1918. Kth Smallest Subarray Sum/1918.py | {
"start": 0,
"end": 412
} | class ____:
def kthSmallestSubarraySum(self, nums: list[int], k: int) -> int:
def numSubarrayLessThan(m: int) -> int:
res = 0
summ = 0
l = 0
for r, num in enumerate(nums):
summ += num
while summ > m:
summ -= nums[l]
l += 1
res += r - l + 1
return res
return bisect.bisect_left(range(sum(nums)), k, key=numSubarrayLessThan)
| Solution |
python | walkccc__LeetCode | solutions/2746. Decremental String Concatenation/2746.py | {
"start": 0,
"end": 706
} | class ____:
def minimizeConcatenatedLength(self, words: list[str]) -> int:
@functools.lru_cache(None)
def dp(i: int, first: str, last: str) -> int:
"""
Returns the minimum concatenated length of the first i words starting with
`first` and ending in `last`.
"""
if i == len(words):
return 0
nextFirst = words[i][0]
nextLast = words[i][-1]
return len(words[i]) + min(
# join(words[i - 1], words[i])
dp(i + 1, first, nextLast) - (last == nextFirst),
# join(words[i], words[i - 1])
dp(i + 1, nextFirst, last) - (first == nextLast)
)
return len(words[0]) + dp(1, words[0][0], words[0][-1])
| Solution |
python | django__django | tests/template_tests/filter_tests/test_slugify.py | {
"start": 206,
"end": 1000
} | class ____(SimpleTestCase):
"""
Running slugify on a pre-escaped string leads to odd behavior,
but the result is still safe.
"""
@setup(
{
"slugify01": (
"{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}"
)
}
)
def test_slugify01(self):
output = self.engine.render_to_string(
"slugify01", {"a": "a & b", "b": mark_safe("a & b")}
)
self.assertEqual(output, "a-b a-amp-b")
@setup({"slugify02": "{{ a|slugify }} {{ b|slugify }}"})
def test_slugify02(self):
output = self.engine.render_to_string(
"slugify02", {"a": "a & b", "b": mark_safe("a & b")}
)
self.assertEqual(output, "a-b a-amp-b")
| SlugifyTests |
python | getsentry__sentry | src/sentry/notifications/types.py | {
"start": 2154,
"end": 2390
} | class ____(StrEnum):
ALERTS = "alerts"
APPROVAL = "approval"
DEPLOY = "deploy"
EMAIL = "email"
QUOTA = "quota"
REPORTS = "reports"
WORKFLOW = "workflow"
SPIKE_PROTECTION = "spikeProtection"
| FineTuningAPIKey |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramInference1.py | {
"start": 906,
"end": 1206
} | class ____:
pass
Undefined = _Undefined()
def func4(a=1, b=None, c=Undefined, d=lambda x: x):
reveal_type(a, expected_text="int")
reveal_type(b, expected_text="Unknown | None")
reveal_type(c, expected_text="_Undefined | Unknown")
reveal_type(d, expected_text="Unknown")
| _Undefined |
python | getsentry__sentry | tests/sentry/issue_detection/test_large_http_payload_detector.py | {
"start": 714,
"end": 14525
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(self, event: dict[str, Any]) -> list[PerformanceProblem]:
detector = LargeHTTPPayloadDetector(self._settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def test_detects_large_http_payload_issue(self) -> None:
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/endpoint1",
"hash1",
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
),
]
event = create_event(spans)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1015-5e5543895c0f1f12c2d468da8c7f2d9e4dca81dc",
op="http",
desc="GET /api/0/organizations/endpoint1",
type=PerformanceLargeHTTPPayloadGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
"op": "http",
},
evidence_display=[],
)
]
def test_respects_project_option(self) -> None:
project = self.create_project()
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/endpoint1",
"hash1",
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
event["project_id"] = project.id
settings = get_detection_settings(project.id)
detector = LargeHTTPPayloadDetector(settings, event)
assert detector.is_creation_allowed_for_project(project)
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"large_http_payload_detection_enabled": False},
)
settings = get_detection_settings(project.id)
detector = LargeHTTPPayloadDetector(settings, event)
assert not detector.is_creation_allowed_for_project(project)
def test_does_not_issue_if_url_is_not_an_http_span(self) -> None:
spans = [
create_span(
"resource.script",
desc="https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.js",
duration=1000.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == []
def test_does_not_issue_if_url_is_not_a_json_asset(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.mp3",
duration=1000.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == []
def test_issues_if_url_is_a_json_asset(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json",
duration=1000.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1015-707544115c386d60b7b550634d582d8e47d9c5dd",
op="http",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json",
type=PerformanceLargeHTTPPayloadGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
"op": "http",
},
evidence_display=[],
)
]
def test_ignores_query_parameters(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json?foo=bar",
duration=1000.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1015-707544115c386d60b7b550634d582d8e47d9c5dd",
op="http",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json",
type=PerformanceLargeHTTPPayloadGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
"op": "http",
},
evidence_display=[],
)
]
def test_ignores_query_parameters_with_trailing_slash(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json/?foo=bar",
duration=1000.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1015-e84e3f3951f80edcd72d5a0a08adae09e333e2ea",
op="http",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json",
type=PerformanceLargeHTTPPayloadGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
"op": "http",
},
evidence_display=[],
)
]
def test_does_not_trigger_detection_for_http_span_lower_than_100_ms_duration(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json/?foo=bar",
duration=1.0,
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert self.find_problems(event) == []
def test_handles_string_payload_size_threshold(self) -> None:
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/endpoint1",
"hash2",
data={
"http.response_transfer_size": "50_000_000",
"http.response_content_length": "50_000_000",
"http.decoded_response_content_length": "50_000_000",
},
),
]
event = create_event(spans)
assert self.find_problems(event) == [
PerformanceProblem(
fingerprint="1-1015-5e5543895c0f1f12c2d468da8c7f2d9e4dca81dc",
op="http",
desc="GET /api/0/organizations/endpoint1",
type=PerformanceLargeHTTPPayloadGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=["bbbbbbbbbbbbbbbb"],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": ["bbbbbbbbbbbbbbbb"],
"op": "http",
},
evidence_display=[],
)
]
def test_does_not_trigger_detection_for_prefetch_spans(self) -> None:
spans = [
create_span(
"http.client",
hash="hash1",
desc="GET https://s1.sentry-cdn.com/_static/dist/sentry/entrypoints/app.json/?foo=bar",
duration=1000.0,
data={
"http.request.prefetch": True,
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
)
]
event = create_event(spans)
assert len(self.find_problems(event)) == 0
@with_feature("organizations:large-http-payload-detector-improvements")
def test_does_not_trigger_detection_for_filtered_paths(self) -> None:
project = self.create_project()
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"large_http_payload_filtered_paths": "/api/0/organizations/download/"},
)
settings = get_detection_settings(project.id, organization=self.organization)
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/download/endpoint1",
"hash1",
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
),
]
event = create_event(spans)
detector = LargeHTTPPayloadDetector(settings, event)
run_detector_on_data(detector, event)
assert len(detector.stored_problems) == 0
@with_feature("organizations:large-http-payload-detector-improvements")
def test_does_not_trigger_detection_for_filtered_paths_without_trailing_slash(self) -> None:
project = self.create_project()
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"large_http_payload_filtered_paths": "/api/0/organizations/user"},
)
settings = get_detection_settings(project.id, organization=self.organization)
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/users/100",
"hash1",
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
),
]
event = create_event(spans)
detector = LargeHTTPPayloadDetector(settings, event)
run_detector_on_data(detector, event)
assert len(detector.stored_problems) == 1
spans = [
create_span(
"http.client",
1000,
"GET /api/0/organizations/user/100",
"hash1",
data={
"http.response_transfer_size": 50_000_000,
"http.response_content_length": 50_000_000,
"http.decoded_response_content_length": 50_000_000,
},
),
]
event = create_event(spans)
detector = LargeHTTPPayloadDetector(settings, event)
run_detector_on_data(detector, event)
assert len(detector.stored_problems) == 0
| LargeHTTPPayloadDetectorTest |
python | milvus-io__pymilvus | tests/test_bulk_writer_stage.py | {
"start": 691,
"end": 6013
} | class ____:
"""Test stage RESTful API functions."""
@pytest.fixture
def mock_response(self) -> Mock:
"""Create a mock response object."""
response = Mock(spec=requests.Response)
response.status_code = 200
response.json.return_value = {"code": 0, "message": "success", "data": {}}
return response
@pytest.fixture
def api_params(self) -> Dict[str, str]:
"""Common API parameters."""
return {
"url": "https://api.cloud.zilliz.com",
"api_key": "test_api_key",
}
@patch("pymilvus.bulk_writer.stage_restful.requests.get")
def test_list_stages_success(
self, mock_get: Mock, mock_response: Mock, api_params: Dict[str, str]
) -> None:
"""Test successful list_stages call."""
mock_get.return_value = mock_response
mock_response.json.return_value = {
"code": 0,
"message": "success",
"data": {"stages": ["stage1", "stage2"]},
}
response = list_stages(
**api_params,
project_id="test_project",
current_page=1,
page_size=10,
)
assert response.status_code == 200
assert response.json()["data"]["stages"] == ["stage1", "stage2"]
mock_get.assert_called_once()
@patch("pymilvus.bulk_writer.stage_restful.requests.get")
def test_list_stages_failure(
self, mock_get: Mock, mock_response: Mock, api_params: Dict[str, str]
) -> None:
"""Test failed list_stages call."""
mock_response.json.return_value = {
"code": 1001,
"message": "Invalid API key",
"data": {},
}
mock_get.return_value = mock_response
with pytest.raises(MilvusException, match="Invalid API key"):
list_stages(**api_params, project_id="test_project")
@patch("pymilvus.bulk_writer.stage_restful.requests.post")
def test_create_stage_success(
self, mock_post: Mock, mock_response: Mock, api_params: Dict[str, str]
) -> None:
"""Test successful create_stage call."""
mock_post.return_value = mock_response
mock_response.json.return_value = {
"code": 0,
"message": "success",
"data": {"stageId": "stage123"},
}
response = create_stage(
**api_params,
project_id="test_project",
region_id="us-west-2",
stage_name="test_stage",
)
assert response.status_code == 200
assert response.json()["data"]["stageId"] == "stage123"
mock_post.assert_called_once()
@patch("pymilvus.bulk_writer.stage_restful.requests.delete")
def test_delete_stage_success(
self, mock_delete: Mock, mock_response: Mock, api_params: Dict[str, str]
) -> None:
"""Test successful delete_stage call."""
mock_delete.return_value = mock_response
response = delete_stage(**api_params, stage_name="test_stage")
assert response.status_code == 200
mock_delete.assert_called_once()
@patch("pymilvus.bulk_writer.stage_restful.requests.post")
def test_apply_stage_success(
self, mock_post: Mock, mock_response: Mock, api_params: Dict[str, str]
) -> None:
"""Test successful apply_stage call."""
mock_post.return_value = mock_response
mock_response.json.return_value = {
"code": 0,
"message": "success",
"data": {
"stageName": "test_stage",
"stagePrefix": "prefix/",
"endpoint": "s3.amazonaws.com",
"bucketName": "test-bucket",
"region": "us-west-2",
"cloud": "aws",
"condition": {"maxContentLength": 1073741824},
"credentials": {
"tmpAK": "test_access_key",
"tmpSK": "test_secret_key",
"sessionToken": "test_token",
"expireTime": "2024-12-31T23:59:59Z",
},
},
}
response = apply_stage(
**api_params,
stage_name="test_stage",
path="data/",
)
assert response.status_code == 200
data = response.json()["data"]
assert data["stageName"] == "test_stage"
assert data["endpoint"] == "s3.amazonaws.com"
mock_post.assert_called_once()
@patch("pymilvus.bulk_writer.stage_restful.requests.get")
def test_http_error_handling(
self, mock_get: Mock, api_params: Dict[str, str]
) -> None:
"""Test HTTP error handling."""
mock_get.return_value.status_code = 404
with pytest.raises(MilvusException, match="status code: 404"):
list_stages(**api_params, project_id="test_project")
@patch("pymilvus.bulk_writer.stage_restful.requests.get")
def test_network_error_handling(
self, mock_get: Mock, api_params: Dict[str, str]
) -> None:
"""Test network error handling."""
mock_get.side_effect = requests.exceptions.ConnectionError("Network error")
with pytest.raises(MilvusException, match="Network error"):
list_stages(**api_params, project_id="test_project")
| TestStageRestful |
python | pandas-dev__pandas | asv_bench/benchmarks/rolling.py | {
"start": 9975,
"end": 10461
} | class ____:
# https://github.com/pandas-dev/pandas/issues/38038
# specific example where the rolling operation on a larger dataframe
# is relatively cheap (few but large groups), but creation of
# MultiIndex of result can be expensive
def setup(self):
N = 100000
self.df = pd.DataFrame({"A": [1, 2] * (N // 2), "B": np.random.randn(N)})
def time_rolling_multiindex_creation(self):
self.df.groupby("A").rolling(3).mean()
| GroupbyLargeGroups |
python | tensorflow__tensorflow | tensorflow/python/eager/context.py | {
"start": 83694,
"end": 106860
} | class ____(object):
"""Context-manager forcing placement of ops and Tensors on a device."""
__slots__ = ["_device_name", "_ctx", "_stack"]
def __init__(self, ctx, device_name):
self._device_name = device_name
self._ctx = ctx
self._stack = []
# TODO(b/189233748): Consolidate the device string parsing logic with
# tensorflow/core/util/device_name_utils.cc.
def __enter__(self):
ctx = self._ctx
old_device_name = ctx.device_name
old_device_spec = ctx.device_spec
new_device_name = self._device_name
cache_key = (old_device_name, new_device_name)
try:
new_device_name, new_device_spec = _device_parsing_cache[cache_key]
except TypeError as exc:
# Error while trying to compute the cache key.
raise ValueError(
"Expecting a string device name. Got %s(%s)"
% (type(new_device_name), new_device_name)
) from exc
except KeyError as exc:
# Handle a cache miss.
if new_device_name is not None:
if not isinstance(new_device_name, str):
raise ValueError(
"Expecting a string device name. Got %s(%s)"
% (type(new_device_name), new_device_name)
) from exc
device_spec = pydev.DeviceSpec.from_string(new_device_name)
if old_device_name:
new_device_spec = copy.copy(old_device_spec)
else:
ctx.ensure_initialized()
new_device_spec = pydev.DeviceSpec.from_string(
ctx._context_devices[0]
) # pylint: disable=protected-access
new_device_spec = new_device_spec.make_merged_spec(device_spec)
else:
new_device_spec = pydev.DeviceSpec.from_string("")
new_device_name = new_device_spec.to_string()
_device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access
self._stack.append((old_device_name, old_device_spec, new_device_spec))
def __exit__(self, *ex_info):
ctx = self._ctx
old_device_name, old_device_spec, new_device_spec = self._stack[-1]
if ctx.device_spec is not new_device_spec:
raise RuntimeError("Exiting device scope without proper scope nesting")
del self._stack[-1]
ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access
# Do not change directly.
_context = None
_context_lock = threading.Lock()
def _set_context_locked(ctx):
global _context
pywrap_tfe.TFE_Py_SetEagerContext(ctx)
ctx.mark_as_global_context()
_context = ctx
def _set_context(ctx):
with _context_lock:
_set_context_locked(ctx)
def _create_context():
with _context_lock:
if _context is None:
ctx = Context()
_set_context_locked(ctx)
def _reset_context():
"""Clears and re-initializes the singleton context.
Should only be used for testing.
"""
global _context
global _device_parsing_cache
# Garbage collect and clear scalar cache to avoid Tensor from current context
# polluting next context.
gc.collect()
pywrap_tfe.TFE_ClearScalarCache()
with _context_lock:
if _context is not None:
_context._clear_caches()
_context = None
_create_context()
_device_parsing_cache = {}
def _reset_jit_compiler_flags():
"""Clears and re-initializes the TF JIT compiler flags.
Should only be used for testing.
"""
pywrap_tfe.TF_ResetJitCompilerFlags()
def context():
"""Returns a singleton context object."""
if _context is None:
_create_context()
return _context
def context_safe():
"""Returns current context (or None if one hasn't been initialized)."""
return _context
def ensure_initialized():
"""Initialize the context."""
context().ensure_initialized()
def initialize_logical_devices():
"""Initialize the virtual devices."""
context()._initialize_logical_devices() # pylint: disable=protected-access
def set_global_seed(seed):
"""Sets the eager mode seed."""
context()._set_global_seed(seed) # pylint: disable=protected-access
def global_seed():
"""Returns the eager mode seed."""
return context()._seed # pylint: disable=protected-access
def internal_operation_seed():
"""Returns the operation seed generated based on global seed."""
return context()._internal_operation_seed() # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
"""Checks whether the current thread has eager execution enabled.
Eager execution is enabled by default and this API returns `True`
in most of cases. However, this API might return `False` in the following use
cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
ctx = context_safe()
if ctx is None:
return default_execution_mode == EAGER_MODE
return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
"""Checks whether the current thread has eager execution enabled.
Eager execution is typically enabled via
`tf.compat.v1.enable_eager_execution`, but may also be enabled within the
context of a Python function via tf.contrib.eager.py_func.
When eager execution is enabled, returns `True` in most cases. However,
this API might return `False` in the following use cases.
* Executing inside `tf.function`, unless under `tf.init_scope` or
`tf.config.run_functions_eagerly(True)` is previously called.
* Executing inside a transformation function for `tf.dataset`.
* `tf.compat.v1.disable_eager_execution()` is called.
>>> tf.compat.v1.enable_eager_execution()
General case:
>>> print(tf.executing_eagerly())
True
Inside `tf.function`:
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
False
Inside `tf.function`
after `tf.config.run_functions_eagerly(True)` is called:
>>> tf.config.run_functions_eagerly(True)
>>> @tf.function
... def fn():
... with tf.init_scope():
... print(tf.executing_eagerly())
... print(tf.executing_eagerly())
>>> fn()
True
True
>>> tf.config.run_functions_eagerly(False)
Inside a transformation function for `tf.dataset`:
>>> def data_fn(x):
... print(tf.executing_eagerly())
... return x
>>> dataset = tf.data.Dataset.range(100)
>>> dataset = dataset.map(data_fn)
False
Returns:
`True` if the current thread has eager execution enabled.
"""
return executing_eagerly()
def in_eager_mode():
"""Use executing_eagerly() instead. This function will be removed."""
return executing_eagerly()
def anonymous_name():
"""Returns the anonymous shared name.
In eager mode we create anonymous resources to avoid spurious sharing issues.
The runtime generates a unique name on our behalf when the reserved
anonymous shared name is used as a shared name.
Returns:
The anonymous shared name.
"""
# The magic value is defined as
# `tensorflow::ResourceHandle::ANONYMOUS_NAME` in C++.
return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
"""Context-manager to disable eager execution for the current thread."""
return context()._mode(GRAPH_MODE) # pylint: disable=protected-access
# Used by b/167638505 for keras backend API and Lambda layer.
@tf_export("__internal__.eager_context.eager_mode", v1=[])
def eager_mode():
"""Context-manager to enable eager execution for the current thread."""
return context()._mode(EAGER_MODE) # pylint: disable=protected-access
def scope_name():
"""Name of the current scope."""
return context().scope_name
def device(name):
"""Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to perform
automatic placement.
Returns:
Context manager for setting the device.
"""
ensure_initialized()
return context().device(name)
# Expose some properties of Context as internally public APIs (b/160348781).
@tf_export("__internal__.eager_context.get_config", v1=[])
def get_config():
"""Get the ConfigProto of Context.
Returns:
The ConfigProto of Context.
"""
return context().config
@tf_export("__internal__.eager_context.get_device_name", v1=[])
def get_device_name():
"""Get the device name for the current thread.
Returns:
The device name for the current thread.
"""
return context().device_name
@tf_export("__internal__.eager_context.set_soft_device_placement", v1=[])
def set_soft_device_placement(enabled):
"""Set if soft device placements should be allowed.
Args:
enabled: Whether to enable soft device placement.
"""
context().soft_device_placement = enabled
@tf_export("__internal__.eager_context.get_executor", v1=[])
def get_executor():
"""Get the Executor of the current thread.
Returns:
The Executor of the current thread.
"""
return context().executor
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
"""Get if device placements are logged.
Returns:
If device placements are logged.
"""
return context().log_device_placement
@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
"""Turns logging for device placement decisions on or off.
Operations execute on a particular device, producing and consuming tensors on
that device. This may change the performance of the operation or require
TensorFlow to copy data to or from an accelerator, so knowing where operations
execute is useful for debugging performance issues.
For more advanced profiling, use the [TensorFlow
profiler](https://www.tensorflow.org/guide/profiler).
Device placement for operations is typically controlled by a `tf.device`
scope, but there are exceptions, for example operations on a `tf.Variable`
which follow the initial placement of the variable. Turning off soft device
placement (with `tf.config.set_soft_device_placement`) provides more explicit
control.
>>> tf.debugging.set_log_device_placement(True)
>>> tf.ones([])
>>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:GPU:0
>>> with tf.device("CPU"):
... tf.ones([])
>>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:CPU:0
>>> tf.debugging.set_log_device_placement(False)
Turning on `tf.debugging.set_log_device_placement` also logs the placement of
ops inside `tf.function` when the function is called.
Args:
enabled: Whether to enabled device placement logging.
"""
context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
"""Context manager for setting device placement policy for current thread."""
ctx = context()
old_policy = ctx.device_policy
try:
ctx.device_policy = policy
yield
finally:
ctx.device_policy = old_policy
def set_execution_mode(mode):
"""Sets execution mode for the current thread."""
context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
"""Context manager for setting execution mode for current thread."""
if mode is None:
yield
else:
ctx = context()
executor_new = executor.new_executor(mode == ASYNC)
executor_old = ctx.executor
try:
executor_old.wait()
ctx.executor = executor_new
yield
finally:
ctx.executor = executor_old
executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
"""Context manager for changing executor for current thread.
Args:
e: A Executor to execute eager ops under this scope. Setting it to None will
switch back to use the default executor for the context.
Yields:
Context manager for setting the executor for current thread.
"""
ctx = context()
executor_old = ctx.executor
try:
ctx.executor = e
yield
finally:
ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
"""Context manager for setting the executor of eager defined functions.
Eager defined functions are functions decorated by tf.contrib.eager.defun.
Args:
executor_type: a string for the name of the executor to be used to execute
functions defined by tf.contrib.eager.defun.
Yields:
Context manager for setting the executor of eager defined functions.
"""
current_options = context().function_call_options
old_options = copy.copy(current_options)
try:
current_options.executor_type = executor_type
yield
finally:
context().function_call_options = old_options
def is_async():
"""Returns true if current thread is in async mode."""
return context().is_async()
def num_gpus():
"""Get the number of available GPU devices.
Returns:
The number of available GPU devices.
"""
return context().num_gpus()
def enable_run_metadata():
"""Enables tracing of op execution via RunMetadata.
To retrieve the accumulated metadata call context.export_run_metadata()
and to stop tracing call context.disable_run_metadata().
"""
context().enable_run_metadata()
def disable_run_metadata():
"""Disables tracing of op execution via RunMetadata."""
context().disable_run_metadata()
def enable_graph_collection():
"""Enables graph collection of executed functions.
To retrieve the accumulated graphs call context.export_run_metadata()
and to stop collecting graphs call context.disable_graph_collection().
"""
context().enable_graph_collection()
def disable_graph_collection():
"""Disables graph collection of executed functions."""
context().disable_graph_collection()
def export_run_metadata():
"""Returns a RunMetadata proto with accumulated information.
The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.
Returns:
A RunMetadata protocol buffer.
"""
return context().export_run_metadata()
@contextlib.contextmanager
def collect_graphs(optimized=True):
"""Collects a flat list of pre- or post-optimization graphs.
The collected graphs include device placements, which can be useful for
testing.
Usage:
```
@def_function.function
def f(x):
return x + constant_op.constant(1.)
with context.collect_graphs() as graphs:
with ops.device("CPU:0"):
f(constant_op.constant(1.))
graph, = graphs # `graph` contains a single GraphDef for inspection
```
Args:
optimized: whether to collect optimized graphs or non-optimized graphs
Yields:
A list of GraphDefs, populated when the context manager exits.
"""
ctx = context()
ctx.enable_graph_collection()
try:
graphs = []
yield graphs
metadata = ctx.export_run_metadata()
finally:
ctx.disable_graph_collection()
for graph in metadata.function_graphs:
if optimized:
graphs.append(graph.post_optimization_graph)
else:
graphs.append(graph.pre_optimization_graph)
def get_server_def():
return context().get_server_def()
def set_server_def(server_def):
context().set_server_def(server_def)
def set_server_def_retries(retries):
"""Set the number of retries to use when calling SetServerDef.
In cases where many servers run in high-preemption environments, jobs could
be preempted during startup and initial connection via SetServerDef. Retries
allow for more robust connection in these environments.
Args:
retries: int specifying the number of connection retries before failing.
Retries follow an exponential backoff waiting period with min value 1ms,
max value 10s, and exponent 1.3.
"""
context().set_server_def_retries(retries)
def update_server_def(server_def):
context().update_server_def(server_def)
def check_alive(worker_name):
return context().check_alive(worker_name)
@tf_export("experimental.async_scope")
@tf_contextlib.contextmanager
def async_scope():
"""Context manager for grouping async operations.
Ops/function calls inside the scope can return before finishing the actual
execution. When exiting the async scope, a synchronization barrier will be
automatically added to ensure the completion of all async op and function
execution, potentially raising exceptions if async execution results in
an error state.
Users may write the following code to asynchronously invoke `train_step_fn`
and log the `loss` metric for every `num_steps` steps in a training loop.
`train_step_fn` internally consumes data using `iterator.get_next()`, and may
throw OutOfRangeError when running out of data. In the case:
```
try:
with tf.experimental.async_scope():
for _ in range(num_steps):
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
logging.info('loss = %s', loss.numpy())
```
Yields:
Context manager for grouping async operations.
"""
# TODO(haoyuzhang): replace env var once we have a config method to turn on
# and off async streaming RPC
remote_async_env_var = "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"
old_policy = os.environ.get(remote_async_env_var)
try:
os.environ[remote_async_env_var] = str(True)
yield
# Note: sync local and remote executors iff the async block does not raise
# an exception. Triggering sync after an exception may lead to derived
# runtime errors and unexpected exception types.
context().sync_executors()
finally:
if old_policy is None:
del os.environ[remote_async_env_var]
else:
os.environ[remote_async_env_var] = old_policy
def async_wait():
"""Sync all async operations and raise any errors during execution.
In async execution mode, an op/function call can return before finishing the
actual execution. Calling this method creates a synchronization barrier for
all async op and function execution. It only returns when all pending nodes
are finished, potentially raising exceptions if async execution results in
an error state. It is a no-op if the context is not initialized.
"""
disable_async_executor_env_var = "TF_PS_DISABLE_ASYNC_EXECUTOR_GLOBALLY"
if os.environ.get(disable_async_executor_env_var) == str(True):
return
if context()._context_handle is not None: # pylint: disable=protected-access
context().sync_executors()
@tf_export("experimental.async_clear_error")
def async_clear_error():
"""Clear pending operations and error statuses in async execution.
In async execution mode, an error in op/function execution can lead to errors
in subsequent ops/functions that are scheduled but not yet executed. Calling
this method clears all pending operations and reset the async execution state.
Example:
```
while True:
try:
# Step function updates the metric `loss` internally
train_step_fn()
except tf.errors.OutOfRangeError:
tf.experimental.async_clear_error()
break
logging.info('loss = %s', loss.numpy())
```
"""
context().clear_executor_errors()
def add_c_function(c_func):
"""Add a C API TF_Function to the context."""
context().add_c_function(c_func)
def get_c_function(name):
"""Get a C API TF_Function from the context."""
return context().get_c_function(name)
def remove_function(name):
"""Remove a function from the context."""
context().remove_function(name)
def get_function_def(name):
return context().get_function_def(name)
def is_custom_device(device_name):
"""Calls TFE_IsCustomDevice.
Enables using C extensions specifying a custom device from Python. See the
experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
details.
Args:
device_name: A string indicating the name to check whether it is a
registered custom device.
Returns:
A boolean.
"""
return context().is_custom_device(device_name)
def register_custom_device(device_capsule, device_name, device_info_capsule):
"""Calls TFE_RegisterCustomDevice to register a custom device with Python.
Enables using C extensions specifying a custom device from Python. See the
experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
details.
Note that custom devices are not currently supported inside `tf.function`s.
Args:
device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'
containing a pointer to a TFE_CustomDevice struct. The capsule retains
ownership of the memory.
device_name: A string indicating the name to register the custom device
under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may
subsequently be passed to `with tf.device(...):`.
device_info_capsule: A PyCapsule with the name set to
'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific
struct with the initial state of the custom device (the void* device_info
argument to TFE_RegisterCustomDevice). This method takes ownership of the
memory and clears the capsule destructor.
"""
context().register_custom_device(
device_capsule, device_name, device_info_capsule
)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
if context_safe() is None:
# Context not yet initialized. Assume graph mode following the
# default implementation in `is_in_graph_mode`.
return True
return not executing_eagerly()
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| _EagerDeviceContext |
python | joke2k__faker | faker/providers/company/ro_RO/__init__.py | {
"start": 45,
"end": 687
} | class ____(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{last_name}} {{last_name}} {{company_suffix}}",
"{{last_name}}",
)
company_prefixes = ("S.C.", "S.S.I.", "A.D.")
company_suffixes = (
"SRL",
"SA",
"SCA",
"SNC",
"SCS",
"AFJ",
"ASF",
"CON",
"CRL",
"INC",
"LOC",
"OC1",
"OC2",
"OC3",
"PFA",
"RA",
"SCS",
"SPI",
"URL",
)
def company_suffix(self) -> str:
return self.random_element(self.company_suffixes)
| Provider |
python | sphinx-doc__sphinx | sphinx/util/inspect.py | {
"start": 22480,
"end": 38488
} | class ____(Mapping[str, Any]):
"""Pseudo namespace class for :confval:`autodoc_type_aliases`.
Useful for looking up nested objects via ``namespace.foo.bar.Class``.
"""
def __init__(self, mapping: Mapping[str, str]) -> None:
super().__init__()
self.__mapping = mapping
def __getitem__(self, key: object) -> Any:
if not isinstance(key, str):
raise KeyError
if key in self.__mapping:
# exactly matched
return TypeAliasForwardRef(self.__mapping[key])
else:
prefix = key + '.'
nested = {k: v for k, v in self.__mapping.items() if k.startswith(prefix)}
if nested:
# sub modules or classes found
return TypeAliasModule(key, nested)
else:
raise KeyError
def __contains__(self, key: object) -> bool:
if not isinstance(key, str):
return False
ns = self.__mapping
prefix = f'{key}.'
return key in ns or any(k.startswith(prefix) for k in ns)
def __iter__(self) -> Iterator[str]:
for k in self.__mapping:
yield k
for i in range(k.count('.')):
yield k.rsplit('.', i + 1)[0]
def __len__(self) -> int:
return sum(k.count('.') + 1 for k in self.__mapping)
def _should_unwrap(subject: _SignatureType) -> bool:
"""Check the function should be unwrapped on getting signature."""
__globals__ = getglobals(subject)
# contextmanger should be unwrapped
return (
__globals__.get('__name__') == 'contextlib'
and __globals__.get('__file__') == contextlib.__file__
)
# Python 3.14 uses deferred evaluation of annotations by default.
# Using annotationlib's FORWARDREF format gives us more robust handling
# of forward references in type annotations.
signature_kwds: dict[str, Any] = {}
if sys.version_info[:2] >= (3, 14):
import annotationlib # type: ignore[import-not-found]
signature_kwds['annotation_format'] = annotationlib.Format.FORWARDREF
def signature(
subject: _SignatureType,
bound_method: bool = False,
type_aliases: Mapping[str, str] | None = None,
) -> Signature:
"""Return a Signature object for the given *subject*.
:param bound_method: Specify *subject* is a bound method or not.
When *subject* is a built-in callable, *bound_method* is ignored.
"""
if type_aliases is None:
type_aliases = {}
try:
if _should_unwrap(subject):
signature = inspect.signature(subject, **signature_kwds) # type: ignore[arg-type]
else:
signature = inspect.signature(
subject, # type: ignore[arg-type]
follow_wrapped=True,
**signature_kwds,
)
except ValueError:
# follow built-in wrappers up (ex. functools.lru_cache)
signature = inspect.signature(subject, **signature_kwds) # type: ignore[arg-type]
parameters = list(signature.parameters.values())
return_annotation = signature.return_annotation
try:
# Resolve annotations using ``get_type_hints()`` and type_aliases.
localns = TypeAliasNamespace(type_aliases)
annotations = typing.get_type_hints(subject, None, localns, include_extras=True)
for i, param in enumerate(parameters):
if param.name in annotations:
annotation = annotations[param.name]
if isinstance(annotation, TypeAliasForwardRef):
annotation = annotation.name
parameters[i] = param.replace(annotation=annotation)
if 'return' in annotations:
if isinstance(annotations['return'], TypeAliasForwardRef):
return_annotation = annotations['return'].name
else:
return_annotation = annotations['return']
except Exception:
# ``get_type_hints()`` does not support some kind of objects like partial,
# ForwardRef and so on.
pass
# For built-in objects, we use the signature that was specified
# by the extension module even if we detected the subject to be
# a possible bound method.
if bound_method and not inspect.isbuiltin(subject):
if inspect.ismethod(subject):
# ``inspect.signature()`` considers the subject is a bound method and removes
# first argument from signature. Therefore no skips are needed here.
pass
else:
if len(parameters) > 0:
parameters.pop(0)
# To allow to create signature object correctly for pure python functions,
# pass an internal parameter __validate_parameters__=False to Signature
#
# For example, this helps a function having a default value `inspect._empty`.
# See: https://github.com/sphinx-doc/sphinx/issues/7935
return Signature(
parameters, return_annotation=return_annotation, __validate_parameters__=False
)
def evaluate_signature(
sig: Signature,
globalns: dict[str, Any] | None = None,
localns: Mapping[str, Any] | None = None,
) -> Signature:
"""Evaluate unresolved type annotations in a signature object."""
if globalns is None:
globalns = {}
if localns is None:
localns = globalns
parameters = list(sig.parameters.values())
for i, param in enumerate(parameters):
if param.annotation:
annotation = _evaluate(param.annotation, globalns, localns)
parameters[i] = param.replace(annotation=annotation)
return_annotation = sig.return_annotation
if return_annotation:
return_annotation = _evaluate(return_annotation, globalns, localns)
return sig.replace(parameters=parameters, return_annotation=return_annotation)
def _evaluate_forwardref(
ref: ForwardRef,
globalns: dict[str, Any] | None,
localns: Mapping[str, Any] | None,
) -> Any:
"""Evaluate a forward reference."""
if sys.version_info[:2] >= (3, 14):
# https://docs.python.org/dev/library/annotationlib.html#annotationlib.ForwardRef.evaluate
# https://docs.python.org/dev/library/typing.html#typing.evaluate_forward_ref
return typing.evaluate_forward_ref(ref, globals=globalns, locals=localns)
if sys.version_info >= (3, 12, 4):
# ``type_params`` were added in 3.13 and the signature of _evaluate()
# is not backward-compatible (it was backported to 3.12.4, so anything
# before 3.12.4 still has the old signature).
#
# See: https://github.com/python/cpython/pull/118104.
return ref._evaluate( # pyright: ignore[reportDeprecated]
globalns, localns, type_params=(), recursive_guard=frozenset()
) # type: ignore[call-arg]
return ref._evaluate(globalns, localns, recursive_guard=frozenset())
def _evaluate(
annotation: Any,
globalns: dict[str, Any],
localns: Mapping[str, Any],
) -> Any:
"""Evaluate unresolved type annotation."""
try:
if isinstance(annotation, str):
ref = ForwardRef(annotation)
annotation = _evaluate_forwardref(ref, globalns, localns)
if isinstance(annotation, ForwardRef):
annotation = _evaluate_forwardref(ref, globalns, localns)
elif isinstance(annotation, str):
# might be a ForwardRef'ed annotation in overloaded functions
ref = ForwardRef(annotation)
annotation = _evaluate_forwardref(ref, globalns, localns)
except (NameError, TypeError):
# failed to evaluate type. skipped.
pass
return annotation
def stringify_signature(
sig: Signature,
show_annotation: bool = True,
show_return_annotation: bool = True,
unqualified_typehints: bool = False,
short_literals: bool = False,
) -> str:
"""Stringify a :class:`~inspect.Signature` object.
:param show_annotation: If enabled, show annotations on the signature
:param show_return_annotation: If enabled, show annotation of the return value
:param unqualified_typehints: If enabled, show annotations as unqualified
(ex. io.StringIO -> StringIO)
:param short_literals: If enabled, use short literal types.
"""
args, retann = _stringify_signature_to_parts(
sig=sig,
show_annotation=show_annotation,
show_return_annotation=show_return_annotation,
unqualified_typehints=unqualified_typehints,
short_literals=short_literals,
)
if retann:
return f'{args} -> {retann}'
return str(args)
def _stringify_signature_to_parts(
sig: Signature,
show_annotation: bool = True,
show_return_annotation: bool = True,
unqualified_typehints: bool = False,
short_literals: bool = False,
) -> tuple[str, str]:
mode: _StringifyMode
if unqualified_typehints:
mode = 'smart'
else:
mode = 'fully-qualified'
EMPTY = Parameter.empty
args = []
last_kind = None
for param in sig.parameters.values():
if (
param.kind != Parameter.POSITIONAL_ONLY
and last_kind == Parameter.POSITIONAL_ONLY
):
# PEP-570: Separator for Positional Only Parameter: /
args.append('/')
if param.kind == Parameter.KEYWORD_ONLY and last_kind in {
Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY,
None,
}:
# PEP-3102: Separator for Keyword Only Parameter: *
args.append('*')
arg = StringIO()
if param.kind is Parameter.VAR_POSITIONAL:
arg.write('*' + param.name)
elif param.kind is Parameter.VAR_KEYWORD:
arg.write('**' + param.name)
else:
arg.write(param.name)
if show_annotation and param.annotation is not EMPTY:
arg.write(': ')
arg.write(
stringify_annotation(
param.annotation, mode, short_literals=short_literals
)
)
if param.default is not EMPTY:
if show_annotation and param.annotation is not EMPTY:
arg.write(' = ')
else:
arg.write('=')
arg.write(object_description(param.default))
args.append(arg.getvalue())
last_kind = param.kind
if last_kind is Parameter.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
args.append('/')
concatenated_args = ', '.join(args)
concatenated_args = f'({concatenated_args})'
if (
sig.return_annotation is EMPTY
or not show_annotation
or not show_return_annotation
):
retann = ''
else:
retann = stringify_annotation(
sig.return_annotation, mode, short_literals=short_literals
)
return concatenated_args, retann
def signature_from_str(signature: str) -> Signature:
"""Create a :class:`~inspect.Signature` object from a string."""
code = 'def func' + signature + ': pass'
module = ast.parse(code)
function = typing.cast('ast.FunctionDef', module.body[0])
return signature_from_ast(function, code)
def signature_from_ast(node: ast.FunctionDef, code: str = '') -> Signature:
"""Create a :class:`~inspect.Signature` object from an AST node."""
EMPTY = Parameter.empty
args: ast.arguments = node.args
defaults: tuple[ast.expr | None, ...] = tuple(args.defaults)
pos_only_offset = len(args.posonlyargs)
defaults_offset = pos_only_offset + len(args.args) - len(defaults)
# The sequence ``D = args.defaults`` contains non-None AST expressions,
# so we can use ``None`` as a sentinel value for that to indicate that
# there is no default value for a specific parameter.
#
# Let *p* be the number of positional-only and positional-or-keyword
# arguments. Note that ``0 <= len(D) <= p`` and ``D[0]`` is the default
# value corresponding to a positional-only *or* a positional-or-keyword
# argument. Since a non-default argument cannot follow a default argument,
# the sequence *D* can be completed on the left by adding None sentinels
# so that ``len(D) == p`` and ``D[i]`` is the *i*-th default argument.
defaults = (None,) * defaults_offset + defaults
# construct the parameter list
params: list[Parameter] = []
# positional-only arguments (introduced in Python 3.8)
for arg, defexpr in zip(args.posonlyargs, defaults, strict=False):
params.append(_define(Parameter.POSITIONAL_ONLY, arg, code, defexpr=defexpr))
# normal arguments
for arg, defexpr in zip(args.args, defaults[pos_only_offset:], strict=False):
params.append(
_define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr)
)
# variadic positional argument (no possible default expression)
if args.vararg:
params.append(
_define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None)
)
# keyword-only arguments
for arg, defexpr in zip(args.kwonlyargs, args.kw_defaults, strict=False):
params.append(_define(Parameter.KEYWORD_ONLY, arg, code, defexpr=defexpr))
# variadic keyword argument (no possible default expression)
if args.kwarg:
params.append(_define(Parameter.VAR_KEYWORD, args.kwarg, code, defexpr=None))
return_annotation = ast_unparse(node.returns, code) or EMPTY
return Signature(params, return_annotation=return_annotation)
def _define(
kind: _ParameterKind,
arg: ast.arg,
code: str,
*,
defexpr: ast.expr | None,
) -> Parameter:
EMPTY = Parameter.empty
default = EMPTY if defexpr is None else DefaultValue(ast_unparse(defexpr, code))
annotation = ast_unparse(arg.annotation, code) or EMPTY
return Parameter(arg.arg, kind, default=default, annotation=annotation)
def getdoc(
obj: Any,
attrgetter: _AttrGetter = safe_getattr,
allow_inherited: bool = False,
cls: Any = None,
name: str | None = None,
) -> str | None:
"""Get the docstring for the object.
This tries to obtain the docstring for some kind of objects additionally:
* partial functions
* inherited docstring
* inherited decorated methods
"""
if cls and name and is_classmethod_like(obj, cls, name):
for basecls in getmro(cls):
meth = basecls.__dict__.get(name)
if not meth:
continue
# Built-in class methods do not have '__func__'
# but they may have a docstring.
if hasattr(meth, '__func__') or is_classmethod_descriptor(meth):
doc: str | None = getdoc(getattr(meth, '__func__', meth))
if doc is not None or not allow_inherited:
return doc
doc = _getdoc_internal(obj)
if ispartial(obj) and doc == obj.__class__.__doc__:
return getdoc(obj.func)
elif doc is None and allow_inherited:
if cls and name:
# Check a docstring of the attribute or method from super classes.
for basecls in getmro(cls):
meth = safe_getattr(basecls, name, None)
if meth is not None:
doc = _getdoc_internal(meth)
if doc is not None:
break
if doc is None:
# retry using `inspect.getdoc()`
for basecls in getmro(cls):
meth = safe_getattr(basecls, name, None)
if meth is not None:
doc = inspect.getdoc(meth)
if doc is not None:
break
if doc is None:
doc = inspect.getdoc(obj)
return doc
def _getdoc_internal(obj: Any, /) -> str | None:
doc = safe_getattr(obj, '__doc__', None)
if isinstance(doc, str):
return doc
return None
| TypeAliasNamespace |
python | sphinx-doc__sphinx | sphinx/pycode/parser.py | {
"start": 5959,
"end": 7631
} | class ____(TokenProcessor):
"""Python source code parser to pick up comments after assignments.
This parser takes code which starts with an assignment statement,
and returns the comment for the variable if one exists.
"""
def __init__(self, lines: list[str]) -> None:
super().__init__(lines)
self.comment: str | None = None
def fetch_rvalue(self) -> list[Token]:
"""Fetch right-hand value of assignment."""
tokens = []
while current := self.fetch_token():
tokens.append(current)
if current == [OP, '(']:
tokens += self.fetch_until([OP, ')'])
elif current == [OP, '{']:
tokens += self.fetch_until([OP, '}'])
elif current == [OP, '[']:
tokens += self.fetch_until([OP, ']'])
elif current == INDENT:
tokens += self.fetch_until(DEDENT)
elif current == [OP, ';']: # NoQA: SIM114
break
elif current and current.kind not in {OP, NAME, NUMBER, STRING}:
break
return tokens
def parse(self) -> None:
"""Parse the code and obtain comment after assignment."""
# skip lvalue (or whole of AnnAssign)
while (tok := self.fetch_token()) and not tok.match(
[OP, '='], NEWLINE, COMMENT
):
assert tok
assert tok is not None
# skip rvalue (if exists)
if tok == [OP, '=']:
self.fetch_rvalue()
tok = self.current
assert tok is not None
if tok == COMMENT:
self.comment = tok.value
| AfterCommentParser |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_slice_op_test.py | {
"start": 1157,
"end": 17862
} | class ____(test.TestCase):
def _SparseTensor_4x6(self, val_dtype=np.int64):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1,
4], [2, 0],
[2, 3], [2, 5], [3, 0], [3, 2], [3, 3], [3, 5]]).astype(
np.int64)
val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(
val_dtype)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
# [ |11| |13|14| |16]
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
# ['a0'| |'b0'| ]
# [ |'c0'| |'d0']
# [ | |'e0'| ]
# slice(:,:, 1)
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
[1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0], [2, 2,
1]]).astype(
np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
return sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_3x4x2())
def _SparseTensor_4x6_empty(self, val_dtype=np.int64):
ind = np.empty(shape=(0, 2), dtype=np.int64)
val = np.array([]).astype(val_dtype)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
@test_util.run_deprecated_v1
def testSliceMatrixRows(self):
with self.session():
sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
self.assertAllEqual(
sp_tensor0.indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensor0.dense_shape, [2, 6])
self.assertAllEqual(
sp_tensor1.indices,
[[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
self.assertAllEqual(sp_tensor1.values, [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape, [2, 6])
@test_util.run_deprecated_v1
def testSliceMatrixUnevenCols(self):
with self.session():
sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 3])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 3], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])
self.assertAllEqual(
sp_tensor0.indices,
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
self.assertAllEqual(sp_tensor0.values, [0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensor0.dense_shape, [5, 3])
self.assertAllEqual(sp_tensor1.indices,
[[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor1.values, [4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensor1.dense_shape, [5, 2])
self.assertAllEqual(sp_tensor2.indices,
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor2.values, [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensor2.dense_shape, [5, 2])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
self.assertAllEqual(sp_tensor0.indices,
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensor0.values, [0, 11, 20, 30, 41])
self.assertAllEqual(sp_tensor0.dense_shape, [5, 2])
self.assertAllEqual(sp_tensor1.indices,
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sp_tensor1.values, [2, 13, 23, 32, 33])
self.assertAllEqual(sp_tensor1.dense_shape, [5, 2])
self.assertAllEqual(sp_tensor2.indices,
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
self.assertAllEqual(sp_tensor2.values, [4, 5, 14, 25, 35, 44])
self.assertAllEqual(sp_tensor2.dense_shape, [5, 2])
self.assertAllEqual(sp_tensor3.indices, [[1, 0], [4, 0]])
self.assertAllEqual(sp_tensor3.values, [16, 46])
self.assertAllEqual(sp_tensor3.dense_shape, [5, 1])
@test_util.run_deprecated_v1
def testSliceMatrixUnevenRows(self):
with self.session():
sp_input = self._SparseTensor_5x7()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
self.assertAllEqual(sp_tensor0.indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
self.assertAllEqual(sp_tensor0.values,
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensor0.dense_shape, [3, 7])
self.assertAllEqual(
sp_tensor1.indices,
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensor1.values, [30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensor1.dense_shape, [2, 7])
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
self.assertAllEqual(
sp_tensor0.indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensor0.dense_shape, [2, 7])
self.assertAllEqual(sp_tensor1.values, [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensor1.dense_shape, [2, 7])
self.assertAllEqual(sp_tensor2.indices, [[0, 1], [0, 4], [0, 6]])
self.assertAllEqual(sp_tensor2.values, [41, 44, 46])
self.assertAllEqual(sp_tensor2.dense_shape, [1, 7])
return
@test_util.run_deprecated_v1
def testSliceAllRows(self):
with self.session():
sp_input = self._SparseTensor_4x6()
sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [1, 6])
sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
self.assertAllEqual(sp_tensor0.indices, [[0, 0], [0, 2], [0, 4], [0, 5]])
self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5])
self.assertAllEqual(sp_tensor0.dense_shape, [1, 6])
self.assertAllEqual(sp_tensor1.indices, [[0, 1], [0, 3], [0, 4]])
self.assertAllEqual(sp_tensor1.values, [11, 13, 14])
self.assertAllEqual(sp_tensor1.dense_shape, [1, 6])
self.assertAllEqual(sp_tensor2.indices, [[0, 0], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensor2.values, [20, 23, 25])
self.assertAllEqual(sp_tensor2.dense_shape, [1, 6])
self.assertAllEqual(sp_tensor3.indices, [[0, 0], [0, 2], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensor3.values, [30, 32, 33, 35])
self.assertAllEqual(sp_tensor3.dense_shape, [1, 6])
@test_util.run_deprecated_v1
def testSliceColumns(self):
with self.session():
sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 2])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])
self.assertAllEqual(sparse_tensor0.indices,
[[0, 0], [1, 1], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor0.values, [0, 11, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape, [4, 2])
self.assertAllEqual(sparse_tensor1.indices,
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sparse_tensor1.values, [2, 13, 23, 32, 33])
self.assertAllEqual(sparse_tensor1.dense_shape, [4, 2])
self.assertAllEqual(sparse_tensor2.indices,
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
self.assertAllEqual(sparse_tensor2.values, [4, 5, 14, 25, 35])
self.assertAllEqual(sparse_tensor2.dense_shape, [4, 2])
@test_util.run_deprecated_v1
def testSliceAllColumns(self):
with self.session():
sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 1])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 1], [4, 1])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 2], [4, 1])
sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
self.assertAllEqual(sparse_tensor0.indices, [[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor0.values, [0, 20, 30])
self.assertAllEqual(sparse_tensor0.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor1.indices, [[1, 0]])
self.assertAllEqual(sparse_tensor1.values, [11])
self.assertAllEqual(sparse_tensor1.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor2.indices, [[0, 0], [3, 0]])
self.assertAllEqual(sparse_tensor2.values, [2, 32])
self.assertAllEqual(sparse_tensor2.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor3.indices, [[1, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor3.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor3.values, [13, 23, 33])
self.assertAllEqual(sparse_tensor4.indices, [[0, 0], [1, 0]])
self.assertAllEqual(sparse_tensor4.values, [4, 14])
self.assertAllEqual(sparse_tensor4.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor5.indices, [[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensor5.values, [5, 25, 35])
self.assertAllEqual(sparse_tensor5.dense_shape, [4, 1])
def testSliceEmpty(self):
with test_util.use_gpu():
sp_empty = self._SparseTensor_4x6_empty()
sp_input = self._SparseTensor_4x6()
sparse_tensor0 = sparse_ops.sparse_slice(sp_empty, [0, 0], [4, 1])
sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 1], [0, 0])
sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 1], [2, 1])
empty_inds = np.empty(shape=(0, 2), dtype=np.int64)
self.assertAllEqual(sparse_tensor0.indices, empty_inds)
self.assertAllEqual(sparse_tensor0.values, [])
self.assertAllEqual(sparse_tensor0.dense_shape, [4, 1])
self.assertAllEqual(sparse_tensor1.indices, empty_inds)
self.assertAllEqual(sparse_tensor1.values, [])
self.assertAllEqual(sparse_tensor1.dense_shape, [0, 0])
self.assertAllEqual(sparse_tensor2.indices, empty_inds)
self.assertAllEqual(sparse_tensor2.values, [])
self.assertAllEqual(sparse_tensor2.dense_shape, [2, 1])
@test_util.run_deprecated_v1
def testGradients(self):
sp_input = self._SparseTensor_4x6(val_dtype=np.float32)
start_and_size = [([0, 0], [4, 2]),
([0, 2], [5, 2]),
([0, 4], [5, 3])]
with self.session():
for start, size in start_and_size:
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
nnz_in = len(self.evaluate(sp_input.values))
nnz_out = len(self.evaluate(sp_output.values))
err = gradient_checker.compute_gradient_error(
[sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,))
self.assertLess(err, 1e-3)
def testGradientsExplicit(self):
sp_input = self._SparseTensor_4x6()
start, size = [0, 0], [4, 1]
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
input_grad_vals = sparse_ops.sparse_slice_grad(sp_output.values,
sp_input.indices, start,
sp_output.indices)
# pyformat: disable
self.assertAllEqual(input_grad_vals, [0, 0, 0, 0,
0, 0, 0,
20, 0, 0,
30, 0, 0, 0])
# pyformat: enable
start, size = [0, 1], [4, 1]
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
input_grad_vals = sparse_ops.sparse_slice_grad(sp_output.values,
sp_input.indices, start,
sp_output.indices)
# pyformat: disable
self.assertAllEqual(input_grad_vals, [0, 0, 0, 0,
11, 0, 0,
0, 0, 0,
0, 0, 0, 0])
# pyformat: enable
start, size = [1, 3], [3, 1]
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
input_grad_vals = sparse_ops.sparse_slice_grad(sp_output.values,
sp_input.indices, start,
sp_output.indices)
# pyformat: disable
self.assertAllEqual(input_grad_vals, [0, 0, 0, 0,
0, 13, 0,
0, 23, 0,
0, 0, 33, 0])
# pyformat: enable
# Test empty slice of non-empty input.
start, size = [2, 1], [2, 1]
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
input_grad_vals = sparse_ops.sparse_slice_grad(sp_output.values,
sp_input.indices, start,
sp_output.indices)
# pyformat: disable
self.assertAllEqual(input_grad_vals, [0, 0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0, 0])
# pyformat: enable
sp_input = self._SparseTensor_4x6_empty()
start, size = [0, 0], [4, 1]
sp_output = sparse_ops.sparse_slice(sp_input, start, size)
input_grad_vals = sparse_ops.sparse_slice_grad(sp_output.values,
sp_input.indices, start,
sp_output.indices)
self.assertAllEqual(input_grad_vals, [])
def testNegativeSize(self):
with self.session(use_gpu=False):
with self.assertRaises(errors.InvalidArgumentError):
res = sparse_ops.gen_sparse_ops.sparse_slice(
indices=[[0, 0]],
values=[0],
shape=[1, 1],
start=[10, 10],
size=[-100, 100])
self.evaluate(res)
def testLargeSize(self):
with self.session(use_gpu=False):
# Confirm potential integer overflow due to size is handled by op.
res = sparse_ops.gen_sparse_ops.sparse_slice(
indices=[[0, 0]],
values=[0],
shape=[1, 1],
start=[2**62, -1],
size=[2**62, 2**62])
self.evaluate(res)
def testInvalidSparseInput(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
'Number of elements .* do not match',
):
self.evaluate(
gen_sparse_ops.sparse_slice(
indices=[[0, 0, 0]],
values=[0, 1, 2],
shape=[3, 3],
start=[0, 0],
size=[1, 1],
)
)
if __name__ == '__main__':
test.main()
| SparseSliceOpTest |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cache_errors.py | {
"start": 3025,
"end": 3885
} | class ____(StreamlitAPIException):
def __init__(
self,
cache_type: CacheType,
cached_func: Callable[..., Any],
) -> None:
func_name = get_cached_func_name_md(cached_func)
decorator_name = get_decorator_api_name(cache_type)
msg = (
f"""
While running {func_name}, a streamlit element is called on some layout block
created outside the function. This is incompatible with replaying the cached
effect of that element, because the referenced block might not exist when
the replay happens.
How to fix this:
* Move the creation of $THING inside {func_name}.
* Move the call to the streamlit element outside of {func_name}.
* Remove the `@st.{decorator_name}` decorator from {func_name}.
"""
).strip("\n")
super().__init__(msg)
R = TypeVar("R")
| CacheReplayClosureError |
python | getsentry__sentry | tests/sentry/overwatch/endpoints/test_overwatch_rpc.py | {
"start": 1056,
"end": 10899
} | class ____(APITestCase):
def _auth_header_for_get(self, url: str, params: dict[str, str], secret: str) -> str:
# For GET we sign an empty JSON array body per Rpcsignature rpc0
message = b"[]"
signature = hmac.new(secret.encode(), message, hashlib.sha256).hexdigest()
return f"Rpcsignature rpc0:{signature}"
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_requires_auth(self):
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
# Missing auth
resp = self.client.get(url, {"sentryOrgId": "123"})
assert resp.status_code == 403
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_missing_sentry_org_id_returns_400(self):
"""Test that missing sentryOrgId parameter returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
auth = self._auth_header_for_get(url, {}, "test-secret")
resp = self.client.get(url, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "sentryOrgId" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_invalid_sentry_org_id_returns_400(self):
"""Test that invalid sentryOrgId (non-integer) returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {"sentryOrgId": "not-a-number", "gitOrgName": "test-org", "provider": "github"}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "must be a valid integer" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_negative_sentry_org_id_returns_400(self):
"""Test that negative sentryOrgId returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {"sentryOrgId": "-123", "gitOrgName": "test-org", "provider": "github"}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "must be a positive integer" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_zero_sentry_org_id_returns_400(self):
"""Test that zero sentryOrgId returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {"sentryOrgId": "0", "gitOrgName": "test-org", "provider": "github"}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "must be a positive integer" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_missing_git_org_name_returns_400(self):
"""Test that missing gitOrgName parameter returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {"sentryOrgId": "123"}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "gitOrgName" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_missing_provider_returns_400(self):
"""Test that missing provider parameter returns 400."""
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {"sentryOrgId": "123", "gitOrgName": "test-org"}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 400
assert "provider" in resp.data["detail"]
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_returns_default_when_no_config(self):
"""Test that default config is returned when no configuration exists."""
org = self.create_organization()
git_org_name = "test-github-org"
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_integration(
organization=org,
provider="github",
name=git_org_name,
external_id=f"github:{git_org_name}",
status=ObjectStatus.ACTIVE,
)
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {
"sentryOrgId": str(org.id),
"gitOrgName": git_org_name,
"provider": "github",
}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 200
assert resp.data == PREVENT_AI_CONFIG_DEFAULT
assert resp.data["organization"] == {}
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_returns_config_when_exists(self):
"""Test that saved configuration is returned when it exists."""
org = self.create_organization()
git_org_name = "test-github-org"
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=org,
provider="github",
name=git_org_name,
external_id=f"github:{git_org_name}",
status=ObjectStatus.ACTIVE,
)
PreventAIConfiguration.objects.create(
organization_id=org.id,
integration_id=integration.id,
data=VALID_ORG_CONFIG,
)
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {
"sentryOrgId": str(org.id),
"gitOrgName": git_org_name,
"provider": "github",
}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 200
assert resp.data["organization"] == VALID_ORG_CONFIG
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_returns_404_when_integration_not_found(self):
"""Test that 404 is returned when GitHub integration doesn't exist."""
org = self.create_organization()
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {
"sentryOrgId": str(org.id),
"gitOrgName": "nonexistent-org",
"provider": "github",
}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 404
assert resp.data["detail"] == "GitHub integration not found"
@patch(
"sentry.overwatch.endpoints.overwatch_rpc.settings.OVERWATCH_RPC_SHARED_SECRET",
["test-secret"],
)
def test_config_with_repo_overrides(self):
"""Test that configuration with repo overrides is properly retrieved."""
org = self.create_organization()
git_org_name = "test-github-org"
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=org,
provider="github",
name=git_org_name,
external_id=f"github:{git_org_name}",
status=ObjectStatus.ACTIVE,
)
config_with_overrides = deepcopy(VALID_ORG_CONFIG)
config_with_overrides["repo_overrides"] = {
"my-repo": {
"bug_prediction": {
"enabled": True,
"sensitivity": "high",
"triggers": {"on_command_phrase": True, "on_ready_for_review": False},
},
"test_generation": {
"enabled": True,
"triggers": {"on_command_phrase": True, "on_ready_for_review": True},
},
"vanilla": {
"enabled": False,
"sensitivity": "low",
"triggers": {"on_command_phrase": False, "on_ready_for_review": False},
},
}
}
PreventAIConfiguration.objects.create(
organization_id=org.id,
integration_id=integration.id,
data=config_with_overrides,
)
url = reverse("sentry-api-0-prevent-pr-review-configs-resolved")
params = {
"sentryOrgId": str(org.id),
"gitOrgName": git_org_name,
"provider": "github",
}
auth = self._auth_header_for_get(url, params, "test-secret")
resp = self.client.get(url, params, HTTP_AUTHORIZATION=auth)
assert resp.status_code == 200
assert (
resp.data["organization"]["repo_overrides"]["my-repo"]["bug_prediction"]["sensitivity"]
== "high"
)
| TestPreventPrReviewResolvedConfigsEndpoint |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 35711,
"end": 36299
} | class ____(FieldValues):
"""
Valid and invalid values for `IntegerField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
}
invalid_inputs = {
0: ['Ensure this value is greater than or equal to 1.'],
4: ['Ensure this value is less than or equal to 3.'],
'0': ['Ensure this value is greater than or equal to 1.'],
'4': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.IntegerField(min_value=1, max_value=3)
| TestMinMaxIntegerField |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol40.py | {
"start": 315,
"end": 362
} | class ____(P1Parent[S], Protocol[S]): ...
| P1Child |
python | PyCQA__pylint | doc/data/messages/i/invalid-slots-object/good.py | {
"start": 0,
"end": 50
} | class ____:
__slots__ = ("name", "surname")
| Person |
python | pytorch__pytorch | torch/_inductor/ops_handler.py | {
"start": 25421,
"end": 26139
} | class ____(DefaultHandler):
name = "NoopHandler"
def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
return None
@staticmethod
def masked(mask, body, other) -> None:
return None
@staticmethod
def frexp(x) -> tuple[None, None]:
return (None, None)
@staticmethod
def scan(dtypes, combine_fn, values) -> tuple[None, ...]:
return (None,) * len(values)
@staticmethod
def sort(dtypes, values, stable, descending) -> tuple[None, ...]:
return (None,) * len(values)
@staticmethod
def indirect_indexing(index_var, size, check=True, wrap_neg=True) -> sympy.Symbol:
return sympy.S.Zero
| NoopHandler |
python | doocs__leetcode | solution/0400-0499/0475.Heaters/Solution.py | {
"start": 0,
"end": 808
} | class ____:
def findRadius(self, houses: List[int], heaters: List[int]) -> int:
houses.sort()
heaters.sort()
def check(r):
m, n = len(houses), len(heaters)
i = j = 0
while i < m:
if j >= n:
return False
mi = heaters[j] - r
mx = heaters[j] + r
if houses[i] < mi:
return False
if houses[i] > mx:
j += 1
else:
i += 1
return True
left, right = 0, int(1e9)
while left < right:
mid = (left + right) >> 1
if check(mid):
right = mid
else:
left = mid + 1
return left
| Solution |
python | protocolbuffers__protobuf | python/google/protobuf/internal/unknown_fields_test.py | {
"start": 13177,
"end": 16921
} | class ____(unittest.TestCase):
def setUp(self):
self.descriptor = missing_enum_values_pb2.TestEnumValues.DESCRIPTOR
self.message = missing_enum_values_pb2.TestEnumValues()
# TestEnumValues.ZERO = 0, but does not exist in the other NestedEnum.
self.message.optional_nested_enum = (
missing_enum_values_pb2.TestEnumValues.ZERO)
self.message.repeated_nested_enum.extend([
missing_enum_values_pb2.TestEnumValues.ZERO,
missing_enum_values_pb2.TestEnumValues.ONE,
])
self.message.packed_nested_enum.extend([
missing_enum_values_pb2.TestEnumValues.ZERO,
missing_enum_values_pb2.TestEnumValues.ONE,
])
self.message_data = self.message.SerializeToString()
self.missing_message = missing_enum_values_pb2.TestMissingEnumValues()
self.missing_message.ParseFromString(self.message_data)
# CheckUnknownField() is an additional Pure Python check which checks
# a detail of unknown fields. It cannot be used by the C++
# implementation because some protect members are called.
# The test is added for historical reasons. It is not necessary as
# serialized string is checked.
def CheckUnknownField(self, name, expected_value):
field_descriptor = self.descriptor.fields_by_name[name]
unknown_field_set = unknown_fields.UnknownFieldSet(self.missing_message)
self.assertIsInstance(unknown_field_set, unknown_fields.UnknownFieldSet)
count = 0
for field in unknown_field_set:
if field.field_number == field_descriptor.number:
count += 1
if field_descriptor.is_repeated:
self.assertIn(field.data, expected_value)
else:
self.assertEqual(expected_value, field.data)
if field_descriptor.is_repeated:
self.assertEqual(count, len(expected_value))
else:
self.assertEqual(count, 1)
def testUnknownParseMismatchEnumValue(self):
just_string = missing_enum_values_pb2.JustString()
just_string.dummy = 'blah'
missing = missing_enum_values_pb2.TestEnumValues()
# The parse is invalid, storing the string proto into the set of
# unknown fields.
missing.ParseFromString(just_string.SerializeToString())
# Fetching the enum field shouldn't crash, instead returning the
# default value.
self.assertEqual(missing.optional_nested_enum, 0)
def testUnknownEnumValue(self):
self.assertFalse(self.missing_message.HasField('optional_nested_enum'))
self.assertEqual(self.missing_message.optional_nested_enum, 2)
# Clear does not do anything.
serialized = self.missing_message.SerializeToString()
self.missing_message.ClearField('optional_nested_enum')
self.assertEqual(self.missing_message.SerializeToString(), serialized)
def testUnknownRepeatedEnumValue(self):
self.assertEqual([], self.missing_message.repeated_nested_enum)
def testUnknownPackedEnumValue(self):
self.assertEqual([], self.missing_message.packed_nested_enum)
def testCheckUnknownFieldValueForEnum(self):
unknown_field_set = unknown_fields.UnknownFieldSet(self.missing_message)
self.assertEqual(len(unknown_field_set), 5)
self.CheckUnknownField('optional_nested_enum',
self.message.optional_nested_enum)
self.CheckUnknownField('repeated_nested_enum',
self.message.repeated_nested_enum)
self.CheckUnknownField('packed_nested_enum',
self.message.packed_nested_enum)
def testRoundTrip(self):
new_message = missing_enum_values_pb2.TestEnumValues()
new_message.ParseFromString(self.missing_message.SerializeToString())
self.assertEqual(self.message, new_message)
if __name__ == '__main__':
unittest.main()
| UnknownEnumValuesTest |
python | Lightning-AI__lightning | src/lightning/pytorch/core/hooks.py | {
"start": 13127,
"end": 25213
} | class ____:
"""Hooks to be used for data related stuff."""
def __init__(self) -> None:
"""
Attributes:
prepare_data_per_node:
If True, each LOCAL_RANK=0 will call prepare data.
Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data.
allow_zero_length_dataloader_with_multiple_devices:
If True, dataloader with zero length within local rank is allowed.
Default value is False.
"""
super().__init__()
self.prepare_data_per_node: bool = True
self.allow_zero_length_dataloader_with_multiple_devices: bool = False
def prepare_data(self) -> None:
"""Use this to download and prepare data. Downloading and saving data with multiple processes (distributed
settings) will result in corrupted data. Lightning ensures this method is called only within a single process,
so you can safely add your downloading logic within.
.. warning:: DO NOT set state to the model (use ``setup`` instead)
since this is NOT called on every device
Example::
def prepare_data(self):
# good
download_data()
tokenize()
etc()
# bad
self.split = data_split
self.some_state = some_other_state()
In a distributed environment, ``prepare_data`` can be called in two ways
(using :ref:`prepare_data_per_node<common/lightning_module:prepare_data_per_node>`)
1. Once per node. This is the default and is only called on LOCAL_RANK=0.
2. Once in total. Only called on GLOBAL_RANK=0.
Example::
# DEFAULT
# called once per node on LOCAL_RANK=0 of that node
class LitDataModule(LightningDataModule):
def __init__(self):
super().__init__()
self.prepare_data_per_node = True
# call on GLOBAL_RANK=0 (great for shared file systems)
class LitDataModule(LightningDataModule):
def __init__(self):
super().__init__()
self.prepare_data_per_node = False
This is called before requesting the dataloaders:
.. code-block:: python
model.prepare_data()
initialize_distributed()
model.setup(stage)
model.train_dataloader()
model.val_dataloader()
model.test_dataloader()
model.predict_dataloader()
"""
def setup(self, stage: str) -> None:
"""Called at the beginning of fit (train + validate), validate, test, or predict. This is a good hook when you
need to build models dynamically or adjust something about them. This hook is called on every process when
using DDP.
Args:
stage: either ``'fit'``, ``'validate'``, ``'test'``, or ``'predict'``
Example::
class LitModel(...):
def __init__(self):
self.l1 = None
def prepare_data(self):
download_data()
tokenize()
# don't do this
self.something = else
def setup(self, stage):
data = load_data(...)
self.l1 = nn.Linear(28, data.num_classes)
"""
def teardown(self, stage: str) -> None:
"""Called at the end of fit (train + validate), validate, test, or predict.
Args:
stage: either ``'fit'``, ``'validate'``, ``'test'``, or ``'predict'``
"""
def train_dataloader(self) -> TRAIN_DATALOADERS:
"""An iterable or collection of iterables specifying training samples.
For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
The dataloader you return will not be reloaded unless you set
:paramref:`~lightning.pytorch.trainer.trainer.Trainer.reload_dataloaders_every_n_epochs` to
a positive integer.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`
- :meth:`prepare_data`
- :meth:`setup`
Note:
Lightning tries to add the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
"""
raise MisconfigurationException("`train_dataloader` must be implemented to be used with the Lightning Trainer")
def test_dataloader(self) -> EVAL_DATALOADERS:
r"""An iterable or collection of iterables specifying test samples.
For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`
- :meth:`prepare_data`
- :meth:`setup`
Note:
Lightning tries to add the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Note:
If you don't need a test dataset and a :meth:`test_step`, you don't need to implement
this method.
"""
raise MisconfigurationException("`test_dataloader` must be implemented to be used with the Lightning Trainer")
def val_dataloader(self) -> EVAL_DATALOADERS:
r"""An iterable or collection of iterables specifying validation samples.
For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
The dataloader you return will not be reloaded unless you set
:paramref:`~lightning.pytorch.trainer.trainer.Trainer.reload_dataloaders_every_n_epochs` to
a positive integer.
It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
- :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`
- :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`
- :meth:`prepare_data`
- :meth:`setup`
Note:
Lightning tries to add the correct sampler for distributed and arbitrary hardware
There is no need to set it yourself.
Note:
If you don't need a validation dataset and a :meth:`validation_step`, you don't need to
implement this method.
"""
raise MisconfigurationException("`val_dataloader` must be implemented to be used with the Lightning Trainer")
def predict_dataloader(self) -> EVAL_DATALOADERS:
r"""An iterable or collection of iterables specifying prediction samples.
For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.
It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
- :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`
- :meth:`prepare_data`
- :meth:`setup`
Note:
Lightning tries to add the correct sampler for distributed and arbitrary hardware
There is no need to set it yourself.
Return:
A :class:`torch.utils.data.DataLoader` or a sequence of them specifying prediction samples.
"""
raise MisconfigurationException(
"`predict_dataloader` must be implemented to be used with the Lightning Trainer"
)
def transfer_batch_to_device(self, batch: Any, device: torch.device, dataloader_idx: int) -> Any:
"""Override this hook if your :class:`~torch.utils.data.DataLoader` returns tensors wrapped in a custom data
structure.
The data types listed below (and any arbitrary nesting of them) are supported out of the box:
- :class:`torch.Tensor` or anything that implements `.to(...)`
- :class:`list`
- :class:`dict`
- :class:`tuple`
For anything else, you need to define how the data is moved to the target device (CPU, GPU, TPU, ...).
Note:
This hook should only transfer the data and not modify it, nor should it move the data to
any other device than the one passed in as argument (unless you know what you are doing).
To check the current state of execution of this hook you can use
``self.trainer.training/testing/validating/predicting`` so that you can
add different logic as per your requirement.
Args:
batch: A batch of data that needs to be transferred to a new device.
device: The target device as defined in PyTorch.
dataloader_idx: The index of the dataloader to which the batch belongs.
Returns:
A reference to the data on the new device.
Example::
def transfer_batch_to_device(self, batch, device, dataloader_idx):
if isinstance(batch, CustomBatch):
# move all tensors in your custom data structure to the device
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
elif dataloader_idx == 0:
# skip device transfer for the first dataloader or anything you wish
pass
else:
batch = super().transfer_batch_to_device(batch, device, dataloader_idx)
return batch
See Also:
- :meth:`move_data_to_device`
- :meth:`apply_to_collection`
"""
return move_data_to_device(batch, device)
def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any:
    """Alter or augment a batch *before* it is transferred to the device.

    Note:
        If you need phase-dependent behaviour, check
        ``self.trainer.training/testing/validating/predicting`` inside
        this hook and branch accordingly.

    Args:
        batch: A batch of data that needs to be altered or augmented.
        dataloader_idx: The index of the dataloader to which the batch belongs.

    Returns:
        A batch of data.

    Example::

        def on_before_batch_transfer(self, batch, dataloader_idx):
            batch['x'] = transforms(batch['x'])
            return batch

    See Also:
        - :meth:`on_after_batch_transfer`
        - :meth:`transfer_batch_to_device`
    """
    # No-op by default: subclasses override to apply CPU-side transforms.
    return batch
def on_after_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any:
    """Alter or augment a batch *after* it has been transferred to the device.

    Note:
        If you need phase-dependent behaviour, check
        ``self.trainer.training/testing/validating/predicting`` inside
        this hook and branch accordingly.

    Args:
        batch: A batch of data that needs to be altered or augmented.
        dataloader_idx: The index of the dataloader to which the batch belongs.

    Returns:
        A batch of data.

    Example::

        def on_after_batch_transfer(self, batch, dataloader_idx):
            batch['x'] = gpu_transforms(batch['x'])
            return batch

    See Also:
        - :meth:`on_before_batch_transfer`
        - :meth:`transfer_batch_to_device`
    """
    # No-op by default: subclasses override to apply on-device transforms.
    return batch
| DataHooks |
python | kamyu104__LeetCode-Solutions | Python/largest-number-after-mutating-substring.py | {
"start": 29,
"end": 525
} | class ____(object):
def maximumNumber(self, num, change):
    """
    :type num: str
    :type change: List[int]
    :rtype: str

    Greedily apply the single allowed contiguous mutation: starting at the
    leftmost digit that strictly improves, keep mutating while
    change[d] >= d, and stop at the first digit that would decrease.
    Runs in O(len(num)) time.

    Fix: the original did ``result = map(int, ...)`` and then indexed into
    ``result`` — on Python 3 ``map`` returns an iterator, so ``result[i]``
    raises TypeError. It also mixed ``str`` values into an int list. This
    version materializes a list and keeps elements homogeneous.
    """
    digits = [int(c) for c in num]  # indexable on both Python 2 and 3
    mutated = False
    for i, d in enumerate(digits):
        if change[d] < d:
            # A smaller replacement would break the contiguous substring:
            # once mutation has started, it must stop here.
            if mutated:
                break
        elif change[d] > d:
            digits[i] = change[d]  # keep ints; stringify once at the end
            mutated = True
        # change[d] == d: mutating is a no-op, so the mutated substring
        # may continue through this digit in either state.
    return "".join(map(str, digits))
| Solution |
python | django__django | django/views/generic/dates.py | {
"start": 18564,
"end": 19903
} | class ____(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""
Base view for a list of objects published on a given day.
This requires subclassing to provide a response mixin.
"""
def get_dated_items(self):
    """Return (date_list, items, extra_context) for this request."""
    # Resolve the year/month/day components via the mixin accessors
    # (YearMixin/MonthMixin/DayMixin per the class bases — their
    # implementations are not visible in this chunk).
    year = self.get_year()
    month = self.get_month()
    day = self.get_day()
    # Parse the three components into a single datetime.date using the
    # configured format strings; _date_from_string is expected to raise
    # on malformed input — TODO confirm against its definition.
    date = _date_from_string(
        year,
        self.get_year_format(),
        month,
        self.get_month_format(),
        day,
        self.get_day_format(),
    )
    return self._get_dated_items(date)
def _get_dated_items(self, date):
    """
    Do the actual heavy lifting of getting the dated items; this accepts a
    date object so that TodayArchiveView can be trivial.
    """
    # Build queryset filters restricting results to this exact date.
    lookup_kwargs = self._make_single_date_lookup(date)
    qs = self.get_dated_queryset(**lookup_kwargs)
    # First element (date_list) is None: a single-day view has no
    # sub-navigation list of dates. The context exposes the current day
    # plus previous/next day and month for pagination links.
    return (
        None,
        qs,
        {
            "day": date,
            "previous_day": self.get_previous_day(date),
            "next_day": self.get_next_day(date),
            "previous_month": self.get_previous_month(date),
            "next_month": self.get_next_month(date),
        },
    )
| BaseDayArchiveView |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver36.py | {
"start": 247,
"end": 582
} | class ____(BaseContainer[P]):
def __init__(self, obj: P) -> None:
self.item = obj
def func1(obj: BaseContainer[T]) -> T:
return obj.item
func1(Container(1))
func1(Container(1.0))
# This should generate an error because str isn't compatible with
# the bound of the TypeVar in Container.
func1(Container(""))
| Container |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_betweenness_centrality.py | {
"start": 414,
"end": 16088
} | class ____:
def test_K5(self):
"""Betweenness centrality: K5"""
G = nx.complete_graph(5)
b = nx.betweenness_centrality(G, weight=None, normalized=False)
b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_K5_endpoints(self):
"""Betweenness centrality: K5 endpoints"""
G = nx.complete_graph(5)
b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
# normalized = True case
b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_P3_normalized(self):
"""Betweenness centrality: P3 normalized"""
G = nx.path_graph(3)
b = nx.betweenness_centrality(G, weight=None, normalized=True)
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_P3(self):
"""Betweenness centrality: P3"""
G = nx.path_graph(3)
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
b = nx.betweenness_centrality(G, weight=None, normalized=False)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_sample_from_P3(self):
"""Betweenness centrality: P3 sample"""
G = nx.path_graph(3)
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1)
# python versions give different results with same seed
b_approx1 = {0: 0.0, 1: 1.0, 2: 0.0}
b_approx2 = {0: 0.0, 1: 0.5, 2: 0.0}
for n in sorted(G):
assert b[n] in (b_approx1[n], b_approx2[n])
def test_P3_endpoints(self):
"""Betweenness centrality: P3 endpoints"""
G = nx.path_graph(3)
b_answer = {0: 2.0, 1: 3.0, 2: 2.0}
b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
# normalized = True case
b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3}
b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_krackhardt_kite_graph(self):
"""Betweenness centrality: Krackhardt kite graph"""
G = nx.krackhardt_kite_graph()
b_answer = {
0: 1.667,
1: 1.667,
2: 0.000,
3: 7.333,
4: 0.000,
5: 16.667,
6: 16.667,
7: 28.000,
8: 16.000,
9: 0.000,
}
for b in b_answer:
b_answer[b] /= 2
b = nx.betweenness_centrality(G, weight=None, normalized=False)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
def test_krackhardt_kite_graph_normalized(self):
"""Betweenness centrality: Krackhardt kite graph normalized"""
G = nx.krackhardt_kite_graph()
b_answer = {
0: 0.023,
1: 0.023,
2: 0.000,
3: 0.102,
4: 0.000,
5: 0.231,
6: 0.231,
7: 0.389,
8: 0.222,
9: 0.000,
}
b = nx.betweenness_centrality(G, weight=None, normalized=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
def test_florentine_families_graph(self):
"""Betweenness centrality: Florentine families graph"""
G = nx.florentine_families_graph()
b_answer = {
"Acciaiuoli": 0.000,
"Albizzi": 0.212,
"Barbadori": 0.093,
"Bischeri": 0.104,
"Castellani": 0.055,
"Ginori": 0.000,
"Guadagni": 0.255,
"Lamberteschi": 0.000,
"Medici": 0.522,
"Pazzi": 0.000,
"Peruzzi": 0.022,
"Ridolfi": 0.114,
"Salviati": 0.143,
"Strozzi": 0.103,
"Tornabuoni": 0.092,
}
b = nx.betweenness_centrality(G, weight=None, normalized=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
def test_les_miserables_graph(self):
"""Betweenness centrality: Les Miserables graph"""
G = nx.les_miserables_graph()
b_answer = {
"Napoleon": 0.000,
"Myriel": 0.177,
"MlleBaptistine": 0.000,
"MmeMagloire": 0.000,
"CountessDeLo": 0.000,
"Geborand": 0.000,
"Champtercier": 0.000,
"Cravatte": 0.000,
"Count": 0.000,
"OldMan": 0.000,
"Valjean": 0.570,
"Labarre": 0.000,
"Marguerite": 0.000,
"MmeDeR": 0.000,
"Isabeau": 0.000,
"Gervais": 0.000,
"Listolier": 0.000,
"Tholomyes": 0.041,
"Fameuil": 0.000,
"Blacheville": 0.000,
"Favourite": 0.000,
"Dahlia": 0.000,
"Zephine": 0.000,
"Fantine": 0.130,
"MmeThenardier": 0.029,
"Thenardier": 0.075,
"Cosette": 0.024,
"Javert": 0.054,
"Fauchelevent": 0.026,
"Bamatabois": 0.008,
"Perpetue": 0.000,
"Simplice": 0.009,
"Scaufflaire": 0.000,
"Woman1": 0.000,
"Judge": 0.000,
"Champmathieu": 0.000,
"Brevet": 0.000,
"Chenildieu": 0.000,
"Cochepaille": 0.000,
"Pontmercy": 0.007,
"Boulatruelle": 0.000,
"Eponine": 0.011,
"Anzelma": 0.000,
"Woman2": 0.000,
"MotherInnocent": 0.000,
"Gribier": 0.000,
"MmeBurgon": 0.026,
"Jondrette": 0.000,
"Gavroche": 0.165,
"Gillenormand": 0.020,
"Magnon": 0.000,
"MlleGillenormand": 0.048,
"MmePontmercy": 0.000,
"MlleVaubois": 0.000,
"LtGillenormand": 0.000,
"Marius": 0.132,
"BaronessT": 0.000,
"Mabeuf": 0.028,
"Enjolras": 0.043,
"Combeferre": 0.001,
"Prouvaire": 0.000,
"Feuilly": 0.001,
"Courfeyrac": 0.005,
"Bahorel": 0.002,
"Bossuet": 0.031,
"Joly": 0.002,
"Grantaire": 0.000,
"MotherPlutarch": 0.000,
"Gueulemer": 0.005,
"Babet": 0.005,
"Claquesous": 0.005,
"Montparnasse": 0.004,
"Toussaint": 0.000,
"Child1": 0.000,
"Child2": 0.000,
"Brujon": 0.000,
"MmeHucheloup": 0.000,
}
b = nx.betweenness_centrality(G, weight=None, normalized=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
def test_ladder_graph(self):
"""Betweenness centrality: Ladder graph"""
G = nx.Graph() # ladder_graph(3)
G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667}
for b in b_answer:
b_answer[b] /= 2
b = nx.betweenness_centrality(G, weight=None, normalized=False)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
def test_disconnected_path(self):
"""Betweenness centrality: disconnected path"""
G = nx.Graph()
nx.add_path(G, [0, 1, 2])
nx.add_path(G, [3, 4, 5, 6])
b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0}
b = nx.betweenness_centrality(G, weight=None, normalized=False)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_disconnected_path_endpoints(self):
"""Betweenness centrality: disconnected path endpoints"""
G = nx.Graph()
nx.add_path(G, [0, 1, 2])
nx.add_path(G, [3, 4, 5, 6])
b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3}
b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
# normalized = True case
b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7)
def test_directed_path(self):
"""Betweenness centrality: directed path"""
G = nx.DiGraph()
nx.add_path(G, [0, 1, 2])
b = nx.betweenness_centrality(G, weight=None, normalized=False)
b_answer = {0: 0.0, 1: 1.0, 2: 0.0}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_directed_path_normalized(self):
"""Betweenness centrality: directed path normalized"""
G = nx.DiGraph()
nx.add_path(G, [0, 1, 2])
b = nx.betweenness_centrality(G, weight=None, normalized=True)
b_answer = {0: 0.0, 1: 0.5, 2: 0.0}
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
@pytest.mark.parametrize(
("normalized", "endpoints", "is_directed", "k", "expected"),
[
(True, True, True, None, {0: 1.0, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}),
(True, True, True, 1, {0: 1.0, 1: 1.0, 2: 0.25, 3: 0.25, 4: 0.25}),
(True, True, False, None, {0: 1.0, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4}),
(True, True, False, 1, {0: 1.0, 1: 1.0, 2: 0.25, 3: 0.25, 4: 0.25}),
(True, False, True, None, {0: 1.0, 1: 0, 2: 0.0, 3: 0.0, 4: 0.0}),
(True, False, True, 1, {0: 1.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}),
(True, False, False, None, {0: 1.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}),
(True, False, False, 1, {0: 1.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}),
(False, True, True, None, {0: 20.0, 1: 8.0, 2: 8.0, 3: 8.0, 4: 8.0}),
(False, True, True, 1, {0: 20.0, 1: 20.0, 2: 5.0, 3: 5.0, 4: 5.0}),
(False, True, False, None, {0: 10.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0}),
(False, True, False, 1, {0: 10.0, 1: 10.0, 2: 2.5, 3: 2.5, 4: 2.5}),
(False, False, True, None, {0: 12.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}),
(False, False, True, 1, {0: 12.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}),
(False, False, False, None, {0: 6.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}),
(False, False, False, 1, {0: 6.0, 1: math.nan, 2: 0.0, 3: 0.0, 4: 0.0}),
],
)
def test_scale_with_k_on_star_graph(
self, normalized, endpoints, is_directed, k, expected
):
# seed=1 selects node 1 as the initial node when using k=1.
# Recall node 0 is the center of the star graph.
G = nx.star_graph(4)
if is_directed:
G = G.to_directed()
b = nx.betweenness_centrality(
G, k=k, seed=1, endpoints=endpoints, normalized=normalized
)
assert b == pytest.approx(expected, nan_ok=True)
@pytest.mark.parametrize(
("normalized", "endpoints", "is_directed", "k", "expected"),
[
(
*(True, True, True, None), # Use *() splatting for better autoformat
{0: 14 / 20, 1: 14 / 20, 2: 14 / 20, 3: 14 / 20, 4: 14 / 20},
),
(
*(True, True, True, 3),
{0: 9 / 12, 1: 11 / 12, 2: 9 / 12, 3: 6 / 12, 4: 7 / 12},
),
(
*(True, True, False, None),
{0: 10 / 20, 1: 10 / 20, 2: 10 / 20, 3: 10 / 20, 4: 10 / 20},
),
(
*(True, True, False, 3),
{0: 8 / 12, 1: 7 / 12, 2: 4 / 12, 3: 4 / 12, 4: 7 / 12},
),
(
*(True, False, True, None),
{0: 6 / 12, 1: 6 / 12, 2: 6 / 12, 3: 6 / 12, 4: 6 / 12},
),
(
*(True, False, True, 3),
# Use 6 instead of 9 for denominator for source nodes 0, 1, and 4
{0: 3 / 6, 1: 5 / 6, 2: 6 / 9, 3: 3 / 9, 4: 1 / 6},
),
(
*(True, False, False, None),
{0: 2 / 12, 1: 2 / 12, 2: 2 / 12, 3: 2 / 12, 4: 2 / 12},
),
(
*(True, False, False, 3),
# Use 6 instead of 9 for denominator for source nodes 0, 1, and 4
{0: 2 / 6, 1: 1 / 6, 2: 1 / 9, 3: 1 / 9, 4: 1 / 6},
),
(False, True, True, None, {0: 14, 1: 14, 2: 14, 3: 14, 4: 14}),
(
*(False, True, True, 3),
{0: 9 * 5 / 3, 1: 11 * 5 / 3, 2: 9 * 5 / 3, 3: 6 * 5 / 3, 4: 7 * 5 / 3},
),
(False, True, False, None, {0: 5, 1: 5, 2: 5, 3: 5, 4: 5}),
(
*(False, True, False, 3),
{0: 8 * 5 / 6, 1: 7 * 5 / 6, 2: 4 * 5 / 6, 3: 4 * 5 / 6, 4: 7 * 5 / 6},
),
(False, False, True, None, {0: 6, 1: 6, 2: 6, 3: 6, 4: 6}),
(
*(False, False, True, 3),
# Use 2 instead of 3 for denominator for source nodes 0, 1, and 4
{0: 3 * 4 / 2, 1: 5 * 4 / 2, 2: 6 * 4 / 3, 3: 3 * 4 / 3, 4: 1 * 4 / 2},
),
(False, False, False, None, {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}),
(
*(False, False, False, 3),
# Use 4 instead of 6 for denominator for source nodes 0, 1, and 4
{0: 2 * 4 / 4, 1: 1 * 4 / 4, 2: 1 * 4 / 6, 3: 1 * 4 / 6, 4: 1 * 4 / 4},
),
],
)
def test_scale_with_k_on_cycle_graph(
self, normalized, endpoints, is_directed, k, expected
):
# seed=1 selects nodes 0, 1, and 4 as the initial nodes when using k=3.
G = nx.cycle_graph(5, create_using=nx.DiGraph if is_directed else nx.Graph)
b = nx.betweenness_centrality(
G, k=k, seed=1, endpoints=endpoints, normalized=normalized
)
assert b == pytest.approx(expected)
def test_k_out_of_bounds_raises(self):
G = nx.cycle_graph(4)
with pytest.raises(ValueError, match="larger"):
nx.betweenness_centrality(G, k=5)
with pytest.raises(ValueError, match="negative"):
nx.betweenness_centrality(G, k=-1)
with pytest.raises(ZeroDivisionError):
nx.betweenness_centrality(G, k=0)
with pytest.raises(ZeroDivisionError):
nx.betweenness_centrality(G, k=0, normalized=False)
# Test edge case: use full population when k == len(G)
# Should we warn or raise instead?
b1 = nx.betweenness_centrality(G, k=4, endpoints=False)
b2 = nx.betweenness_centrality(G, endpoints=False)
assert b1 == b2
| TestBetweennessCentrality |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 11752,
"end": 11884
} | class ____(_TestIDCTBase):
def setup_method(self):
    # Configuration consumed by the shared _TestIDCTBase checks
    # (base class not visible in this chunk — meanings inferred):
    self.rdt = int  # input dtype under test
    self.dec = 5  # presumably decimal places for comparisons — TODO confirm against _TestIDCTBase
    self.type = 2  # DCT type whose inverse is exercised (IDCT-II)
| TestIDCTIIInt |
python | walkccc__LeetCode | solutions/244. Shortest Word Distance II/244.py | {
"start": 0,
"end": 575
} | class ____:
def __init__(self, wordsDict: list[str]):
    """Index each word's occurrence positions for fast distance queries."""
    occurrences = collections.defaultdict(list)
    for position, word in enumerate(wordsDict):
        occurrences[word].append(position)
    # Positions are appended in increasing order, so every list is sorted.
    self.wordToIndices = occurrences
def shortest(self, word1: str, word2: str) -> int:
    """Return the minimum index distance between word1 and word2.

    Two-pointer sweep over the two (sorted) occurrence lists:
    O(m + n) time, O(1) extra space.
    """
    first = self.wordToIndices[word1]
    second = self.wordToIndices[word2]
    best = math.inf
    i = j = 0
    while i < len(first) and j < len(second):
        best = min(best, abs(first[i] - second[j]))
        # Advance the pointer sitting at the smaller index; moving the
        # larger one could only widen the gap.
        if first[i] < second[j]:
            i += 1
        else:
            j += 1
    return best
| WordDistance |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 13521,
"end": 13710
} | class ____(NotUselessSuperPy3):
def not_passing_keyword_only(self, first, *, second="second"):
return super().not_passing_keyword_only(first, second=second)
| AlsoNotUselessSuperPy3 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 12454,
"end": 12558
} | class ____(AdsInsights):
breakdowns = []
action_breakdowns = ["action_type"]
| AdsInsightsActionType |
python | django__django | tests/admin_inlines/models.py | {
"start": 1908,
"end": 1972
} | class ____(models.Model):
dummy = models.IntegerField()
| Holder |
python | huggingface__transformers | src/transformers/quantizers/base.py | {
"start": 3569,
"end": 17446
} | class ____(ABC):
"""
Abstract class of the HuggingFace quantizer. Supports for now quantizing HF transformers models for inference and/or quantization.
This class is used only for transformers.PreTrainedModel.from_pretrained and cannot be easily used outside the scope of that method
yet.
Attributes
quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
The quantization config that defines the quantization parameters of your model that you want to quantize.
modules_to_not_convert (`list[str]`, *optional*):
The list of module names to not convert when quantizing the model.
required_packages (`list[str]`, *optional*):
The list of required pip packages to install prior to using the quantizer
requires_calibration (`bool`):
Whether the quantization method requires to calibrate the model before using it.
requires_parameters_quantization (`bool`):
Whether the quantization method requires to create a new Parameter. For example, for bitsandbytes, it is
required to create a new xxxParameter in order to properly quantize the model.
"""
requires_calibration = False
required_packages = None
requires_parameters_quantization = False
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
self.quantization_config = quantization_config
# -- Handle extra kwargs below --
self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
self.pre_quantized = kwargs.pop("pre_quantized", True)
if not self.pre_quantized and self.requires_calibration:
raise ValueError(
f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
f"pass `pre_quantized=True` while knowing what you are doing."
)
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
"""
Some quantization methods require to explicitly set the dtype of the model to a
target dtype. You need to override this method in case you want to make sure that behavior is
preserved
Args:
dtype (`torch.dtype`):
The input dtype that is passed in `from_pretrained`
"""
return dtype
def update_device_map(self, device_map: dict[str, Any] | None) -> dict[str, Any] | None:
"""
Override this method if you want to pass a override the existing device map with a new
one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
passed, the device_map is set to `"auto"``
Args:
device_map (`Union[dict, str]`, *optional*):
The device_map that is passed through the `from_pretrained` method.
"""
return device_map
def adjust_target_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
"""
Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.
Args:
dtype (`torch.dtype`, *optional*):
The dtype that is used to compute the device_map.
"""
return dtype
def param_element_size(self, model: "PreTrainedModel", param_name: str, param: "torch.Tensor") -> float:
"Return the element size (in bytes) for `param_name`."
if self.param_needs_quantization(model, param_name):
from accelerate.utils import CustomDtype
mapping = {
torch.int8: 1,
CustomDtype.INT4: 0.5,
CustomDtype.FP8: 1,
CustomDtype.INT2: 0.25,
}
# The value passed is actually not used when the method is overridden
if (custom_dtype := self.adjust_target_dtype(torch.float16)) in mapping:
return mapping[custom_dtype]
return param.element_size()
def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
"""
Override this method if you want to adjust the `missing_keys`.
Args:
missing_keys (`list[str]`, *optional*):
The list of missing keys in the checkpoint compared to the state dict of the model
"""
return missing_keys
def update_expected_keys(self, model, expected_keys: list[str], loaded_keys: list[str]) -> list[str]:
"""
Override this method if you want to adjust the `update_expected_keys`.
Args:
expected_keys (`list[str]`, *optional*):
The list of the expected keys in the initialized model.
loaded_keys (`list[str]`, *optional*):
The list of the loaded keys in the checkpoint.
"""
return expected_keys
def update_unexpected_keys(self, model, unexpected_keys: list[str]) -> list[str]:
return unexpected_keys
def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
"""adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
return max_memory
def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
"""
Check whether a given param needs quantization as defined by `create_quantized_param`.
"""
return False
def create_quantized_param(self, *args, **kwargs):
"""
Take needed components from state_dict (those from which `param_needs_quantization` is True) and create
quantized param.
It usually also load the new param directly in the `model`.
Note: only applicable if requires_parameters_quantization == True.
"""
if not self.requires_parameters_quantization:
raise AttributeError(
f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
)
def validate_environment(self, *args, **kwargs):
"""
This method is used to potentially check for potential conflicts with arguments that are
passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers.
If no explicit check are needed, simply return nothing.
"""
return
def update_tp_plan(self, config):
"updates the tp plan for the scales"
return config
def update_ep_plan(self, config):
"updates the tp plan for the scales"
return config
def _process_model_before_weight_loading(self, model, **kwargs):
return model
def preprocess_model(self, model: "PreTrainedModel", config, dtype=None, checkpoint_files=None, **kwargs):
"""
Setting model attributes and/or converting model before weights loading. At this point
the model should be initialized on the meta device so you can freely manipulate the skeleton
of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_before_weight_loading`.
"""
model.is_quantized = True
model.quantization_method = self.quantization_config.quant_method
if self.pre_quantized:
self._convert_model_for_quantization(model)
self._process_model_before_weight_loading(model, **kwargs)
# We store the original dtype for quantized models as we cannot easily retrieve it
# once the weights have been quantized
# Note that once you have loaded a quantized model, you can't change its dtype so this will
# remain a single source of truth
original_dtype = dtype if dtype is not None else torch.get_default_dtype()
config._pre_quantization_dtype = original_dtype
_assign_original_dtype(model, original_dtype)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def postprocess_model(self, model: "PreTrainedModel", **kwargs):
"""
Post-process the model post weights loading.
Make sure to override the abstract method `_process_model_after_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_after_weight_loading`.
"""
return self._process_model_after_weight_loading(model, **kwargs)
def remove_quantization_config(self, model):
"""
Remove the quantization config from the model.
"""
if hasattr(model, "hf_quantizer"):
del model.hf_quantizer
if hasattr(model.config, "quantization_config"):
del model.config.quantization_config
if hasattr(model.config, "_pre_quantization_dtype"):
del model.config._pre_quantization_dtype
if hasattr(model, "quantization_method"):
del model.quantization_method
model.is_quantized = False
def dequantize(self, model):
"""
Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
Note not all quantization schemes support this.
"""
model = self._dequantize(model)
# Delete quantizer and quantization config
del model.hf_quantizer
del model.config.quantization_config
del model.config._pre_quantization_dtype
del model.quantization_method
model.is_quantized = False
return model
def get_accelerator_warm_up_factor(self):
"""
The factor to be used in `caching_allocator_warmup` to get the number of bytes to pre-allocate to warm up accelerator.
A factor of 2 means we allocate all bytes in the empty model (since we allocate in fp16), a factor of 4 means
we allocate half the memory of the weights residing in the empty model, etc...
"""
# By default we return 4, i.e. half the model size (this corresponds to the case where the model is not
# really pre-processed, i.e. we do not have the info that weights are going to be 8 bits before actual
# weight loading)
return 4
def _dequantize(self, model):
raise NotImplementedError(
f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
)
def get_param_name(self, param_name: str) -> str:
"""
Override this method if you want to adjust the `param_name`.
"""
return param_name
@staticmethod
def get_modules_to_not_convert(
model: "PreTrainedModel",
skip_modules: list[str] | None = None,
keep_in_fp32_modules: list[str] | None = None,
add_default_skips: bool = False,
):
if skip_modules is None or add_default_skips:
modules_to_not_convert = get_keys_to_not_convert(model)
else:
modules_to_not_convert = []
if skip_modules is not None:
modules_to_not_convert.extend(skip_modules)
if keep_in_fp32_modules is not None:
modules_to_not_convert.extend(keep_in_fp32_modules)
return modules_to_not_convert
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return False
@property
def is_compileable(self) -> bool:
"""Flag indicating whether the quantized model can be compiled"""
return False
def get_state_dict_and_metadata(self, model, safe_serialization=False):
"""Get state dict and metadata. Useful when we need to modify a bit the state dict due to quantization"""
return None, {}
def update_state_dict_with_metadata(self, state_dict, metadata):
"""Update state dict with metadata. Default behaviour returns state_dict"""
return state_dict
@abstractmethod
def is_serializable(self, safe_serialization=None): ...
@property
@abstractmethod
def is_trainable(self): ...
def _convert_model_for_quantization(self, model):
from accelerate import init_empty_weights
for name, module in model.named_modules():
module_class_name = module.__class__.__name__
if module_class_name in MODULES_TO_PATCH_FOR_QUANTIZATION and (
self.quantization_config.quant_method
in MODULES_TO_PATCH_FOR_QUANTIZATION[module_class_name]["quantization_methods"]
):
with init_empty_weights():
parent_module, name = get_module_from_name(model, name)
parent_module._modules[name] = MODULES_TO_PATCH_FOR_QUANTIZATION[module_class_name]["module_name"](
model.config.get_text_config()
)
def get_quantize_ops(self):
raise NotImplementedError(
f"{self.quantization_config.quant_method} is not available yet and will be supported soon."
)
def get_weight_conversions(self):
return []
| HfQuantizer |
python | python__mypy | mypy/stubutil.py | {
"start": 7715,
"end": 11718
} | class ____(TypeStrVisitor):
"""Visitor used to print existing annotations in a file.
The main difference from TypeStrVisitor is a better treatment of
unbound types.
Notes:
* This visitor doesn't add imports necessary for annotations, this is done separately
by ImportTracker.
* It can print all kinds of types, but the generated strings may not be valid (notably
callable types) since it prints the same string that reveal_type() does.
* For Instance types it prints the fully qualified names.
"""
# TODO: Generate valid string representation for callable types.
# TODO: Use short names for Instances.
def __init__(
self,
stubgen: BaseStubGenerator,
known_modules: list[str] | None = None,
local_modules: list[str] | None = None,
) -> None:
super().__init__(options=mypy.options.Options())
self.stubgen = stubgen
self.known_modules = known_modules
self.local_modules = local_modules or ["builtins"]
def visit_any(self, t: AnyType) -> str:
s = super().visit_any(t)
self.stubgen.import_tracker.require_name(s)
return s
def visit_unbound_type(self, t: UnboundType) -> str:
s = t.name
fullname = self.stubgen.resolve_name(s)
if fullname == "typing.Union":
return " | ".join([item.accept(self) for item in t.args])
if fullname == "typing.Optional":
if len(t.args) == 1:
return f"{t.args[0].accept(self)} | None"
return self.stubgen.add_name("_typeshed.Incomplete")
if fullname in TYPING_BUILTIN_REPLACEMENTS:
s = self.stubgen.add_name(TYPING_BUILTIN_REPLACEMENTS[fullname], require=True)
if self.known_modules is not None and "." in s:
# see if this object is from any of the modules that we're currently processing.
# reverse sort so that subpackages come before parents: e.g. "foo.bar" before "foo".
for module_name in self.local_modules + sorted(self.known_modules, reverse=True):
if s.startswith(module_name + "."):
if module_name in self.local_modules:
s = s[len(module_name) + 1 :]
arg_module = module_name
break
else:
arg_module = s[: s.rindex(".")]
if arg_module not in self.local_modules:
self.stubgen.import_tracker.add_import(arg_module, require=True)
elif s == "NoneType":
# when called without analysis all types are unbound, so this won't hit
# visit_none_type().
s = "None"
else:
self.stubgen.import_tracker.require_name(s)
if t.args:
s += f"[{self.args_str(t.args)}]"
elif t.empty_tuple_index:
s += "[()]"
return s
def visit_none_type(self, t: NoneType) -> str:
return "None"
def visit_type_list(self, t: TypeList) -> str:
return f"[{self.list_str(t.items)}]"
def visit_union_type(self, t: UnionType) -> str:
return " | ".join([item.accept(self) for item in t.items])
def visit_unpack_type(self, t: UnpackType) -> str:
if self.options.python_version >= (3, 11):
return f"*{t.type.accept(self)}"
return super().visit_unpack_type(t)
def args_str(self, args: Iterable[Type]) -> str:
"""Convert an array of arguments to strings and join the results with commas.
The main difference from list_str is the preservation of quotes for string
arguments
"""
types = ["builtins.bytes", "builtins.str"]
res = []
for arg in args:
arg_str = arg.accept(self)
if isinstance(arg, UnboundType) and arg.original_str_fallback in types:
res.append(f"'{arg_str}'")
else:
res.append(arg_str)
return ", ".join(res)
| AnnotationPrinter |
python | apache__airflow | providers/fab/tests/unit/fab/www/test_auth.py | {
"start": 4908,
"end": 6685
} | class ____:
def setup_method(self):
mock_call.reset_mock()
def method_test(self, _view, arg):
mock_call()
return True
@patch("airflow.providers.fab.www.auth.get_auth_manager")
def test_has_access_with_details_when_authorized(
self, mock_get_auth_manager, decorator_name, is_authorized_method_name, items, request
):
items = request.getfixturevalue(items)
auth_manager = Mock()
is_authorized_method = Mock()
is_authorized_method.return_value = True
setattr(auth_manager, is_authorized_method_name, is_authorized_method)
mock_get_auth_manager.return_value = auth_manager
result = getattr(auth, decorator_name)("GET")(self.method_test)(None, items)
mock_call.assert_called_once()
assert result is True
@pytest.mark.db_test
@patch("airflow.providers.fab.www.auth.get_auth_manager")
def test_has_access_with_details_when_unauthorized(
self, mock_get_auth_manager, app, decorator_name, is_authorized_method_name, items, request
):
items = request.getfixturevalue(items)
auth_manager = Mock()
is_authorized_method = Mock()
is_authorized_method.return_value = False
setattr(auth_manager, is_authorized_method_name, is_authorized_method)
mock_get_auth_manager.return_value = auth_manager
with app.test_request_context():
result = getattr(auth, decorator_name)("GET")(self.method_test)(None, items)
mock_call.assert_not_called()
assert result.status_code == 302
@pytest.mark.parametrize(
"dag_access_entity",
[
DagAccessEntity.XCOM,
DagAccessEntity.RUN,
DagAccessEntity.TASK_INSTANCE,
],
)
| TestHasAccessWithDetails |
python | networkx__networkx | networkx/algorithms/tests/test_cluster.py | {
"start": 4673,
"end": 6314
} | class ____:
def test_clustering(self):
G = nx.DiGraph()
assert list(nx.clustering(G).values()) == []
assert nx.clustering(G) == {}
def test_path(self):
G = nx.path_graph(10, create_using=nx.DiGraph())
assert list(nx.clustering(G).values()) == [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
assert nx.clustering(G) == {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
assert nx.clustering(G, 0) == 0
def test_k5(self):
G = nx.complete_graph(5, create_using=nx.DiGraph())
assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1]
assert nx.average_clustering(G) == 1
G.remove_edge(1, 2)
assert list(nx.clustering(G).values()) == [
11 / 12,
1,
1,
11 / 12,
11 / 12,
]
assert nx.clustering(G, [1, 4]) == {1: 1, 4: 11 / 12}
G.remove_edge(2, 1)
assert list(nx.clustering(G).values()) == [
5 / 6,
1,
1,
5 / 6,
5 / 6,
]
assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337}
assert nx.clustering(G, 4) == 5 / 6
def test_triangle_and_edge(self):
G = nx.cycle_graph(3, create_using=nx.DiGraph())
G.add_edge(0, 4)
assert nx.clustering(G)[0] == 1 / 6
| TestDirectedClustering |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 26833,
"end": 27187
} | class ____:
def foo():
some_func_call(
"xxxxxxxxxx",
"xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x "
'"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; '
"xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" ",
None,
("xxxxxxxxxxx",),
),
| A |
python | google__jax | jax/_src/pallas/cost_estimate.py | {
"start": 1854,
"end": 8360
} | class ____:
avals_in: Sequence[Any]
avals_out: Sequence[Any]
def cost_estimate_jaxpr(
jaxpr: jax_core.ClosedJaxpr,
) -> pallas_core.CostEstimate:
"""Returns the cost estimate for the given Jaxpr."""
jaxpr, _ = jaxpr.jaxpr, jaxpr.consts
total_cost = CostEstimate(flops=0, transcendentals=0, bytes_accessed=0)
for eqn in jaxpr.eqns:
rule = _cost_rules.get(eqn.primitive, None)
if rule is not None:
context = Context(avals_in=[v.aval for v in eqn.invars],
avals_out=[v.aval for v in eqn.outvars])
op_cost = rule(context, **eqn.params)
total_cost = total_cost + op_cost
return pallas_core.CostEstimate(
flops=total_cost.flops,
transcendentals=total_cost.transcendentals,
bytes_accessed=total_cost.bytes_accessed,
)
def estimate_cost(fun, *args, **kwargs) -> pallas_core.CostEstimate:
"""Computes a cost estimate for the given function.
Args:
fun: The function to compute the cost estimate for.
*args: The arguments to the function. Can be jax.ShapeDtypeStruct or
jax.Array.
**kwargs: The keyword arguments to the function.
Returns:
A pallas_core.CostEstimate object containing the cost estimate.
"""
flattened_args, treedef = tree_util.tree_flatten(args)
partial_fun = functools.partial(fun, **kwargs)
wrapped_fun, _ = api_util.flatten_fun_nokwargs(
lu.wrap_init(partial_fun,
debug_info=api_util.debug_info("cost_estimate", fun,
args, {})),
treedef)
avals = [jax_core.ShapedArray(a.shape, a.dtype) for a in flattened_args]
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, avals)
estimate = cost_estimate_jaxpr(jax_core.ClosedJaxpr(jaxpr, consts))
input_bytes = sum(
math.prod(a.shape) * a.dtype.itemsize for a in flattened_args)
output_bytes = sum(
math.prod(a.aval.shape) * a.aval.dtype.itemsize for a in jaxpr.outvars)
return pallas_core.CostEstimate(
flops=estimate.flops,
transcendentals=estimate.transcendentals,
bytes_accessed=estimate.bytes_accessed + input_bytes + output_bytes,
)
def binary_cost_rule(ctx: Context, **_) -> CostEstimate:
aval_out, = ctx.avals_out
out_flops = math.prod(aval_out.shape)
return CostEstimate(
flops=out_flops,
transcendentals=0,
bytes_accessed=0,
)
BINARY_OPS = [
lax.add_p,
lax.mul_p,
lax.sub_p,
lax.div_p,
lax.min_p,
lax.max_p,
lax.or_p,
lax.and_p,
lax.xor_p,
]
for op in BINARY_OPS:
register_cost_rule(op, binary_cost_rule)
def unary_cost_rule(transcendental: bool):
def cost_rule(ctx: Context, **_) -> CostEstimate:
x_aval, = ctx.avals_in
new_flops = 0
new_transcendentals = 0
if transcendental:
new_transcendentals += math.prod(x_aval.shape)
else:
new_flops += math.prod(x_aval.shape)
return CostEstimate(
flops=new_flops,
transcendentals=new_transcendentals,
bytes_accessed=0,
)
return cost_rule
UN_OPS = [
lax.neg_p,
lax.floor_p,
lax.ceil_p,
lax.round_p,
lax.not_p,
]
for op in UN_OPS:
register_cost_rule(op, unary_cost_rule(transcendental=False))
TRANSCENDENTAL_OPS = [
lax.cos_p,
lax.sin_p,
lax.tan_p,
lax.sinh_p,
lax.cosh_p,
lax.tanh_p,
lax.acos_p,
lax.asin_p,
lax.atan_p,
lax.exp_p,
lax.log_p,
lax.logistic_p,
lax.sqrt_p,
]
for op in TRANSCENDENTAL_OPS:
register_cost_rule(op, unary_cost_rule(transcendental=True))
def _integer_pow_cost_rule(ctx: Context, *, y: int) -> CostEstimate:
x_aval, = ctx.avals_in
num_elements = math.prod(x_aval.shape)
if y == 0 or y == 1:
# No flops, the result is 0 or a copy of the input.
cost_per_element = 0
else:
# We assume integer pow is implemented using repeated squaring.
# The cost is log(y) squarings, plus one multiply per non-zero bit.
highest_bit = math.floor(math.log(y, 2))
cost_per_element = highest_bit + y.bit_count()
return CostEstimate(
flops=num_elements * cost_per_element,
transcendentals=0,
bytes_accessed=0,
)
register_cost_rule(lax.integer_pow_p, _integer_pow_cost_rule)
def dot_general_cost_rule(ctx: Context,
dimension_numbers: lax.DotDimensionNumbers,
**_) -> CostEstimate:
x_aval, y_aval = ctx.avals_in
x_shape, y_shape = x_aval.shape, y_aval.shape
(lhs_contracting_dims, rhs_contracting_dims), (
lhs_batch_dims, rhs_batch_dims) = dimension_numbers
assert len(lhs_contracting_dims) == len(rhs_contracting_dims)
assert len(lhs_batch_dims) == len(rhs_batch_dims)
flops = 1
# Flops along a contracting dim is 2*dim (addition and multiplication)
for i in range(len(lhs_contracting_dims)):
lhs_dim, rhs_dim = lhs_contracting_dims[i], rhs_contracting_dims[i]
assert x_shape[lhs_dim] == y_shape[rhs_dim]
flops *= 2 * x_shape[lhs_dim]
# Now we handle all other dimensions.
for i, lhs_dim in enumerate(x_shape):
if i in lhs_contracting_dims:
continue
flops *= lhs_dim
for i, rhs_dim in enumerate(y_shape):
if i in rhs_contracting_dims:
continue
# Don't double-count batch dims (we already counted for LHS)
if i in rhs_batch_dims:
continue
flops *= rhs_dim
return CostEstimate(
flops=flops,
transcendentals=0,
bytes_accessed=0,
)
register_cost_rule(lax.dot_general_p, dot_general_cost_rule)
# Higher-order primitives
def _pjit_cost_rule(ctx, *, jaxpr: jax_core.ClosedJaxpr, **_):
del ctx
inner_cost = cost_estimate_jaxpr(jaxpr)
return CostEstimate(
flops=inner_cost.flops,
transcendentals=inner_cost.transcendentals,
bytes_accessed=inner_cost.bytes_accessed,
)
register_cost_rule(pjit.jit_p, _pjit_cost_rule)
def _custom_vjp_rule(ctx, *, call_jaxpr: jax_core.ClosedJaxpr, **_):
del ctx
inner_cost = cost_estimate_jaxpr(call_jaxpr)
return CostEstimate(
flops=inner_cost.flops,
transcendentals=inner_cost.transcendentals,
bytes_accessed=inner_cost.bytes_accessed,
)
register_cost_rule(custom_derivatives.custom_vjp_call_p, _custom_vjp_rule)
def _run_state_rule(*_, jaxpr: jax_core.Jaxpr, **_2):
inner_cost = cost_estimate_jaxpr(pe.close_jaxpr(jaxpr))
return CostEstimate(
flops=inner_cost.flops,
transcendentals=inner_cost.transcendentals,
bytes_accessed=inner_cost.bytes_accessed,
)
register_cost_rule(discharge.run_state_p, _run_state_rule)
| Context |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 152238,
"end": 152831
} | class ____(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
# F-strings
#
#-------------------------------------------------------------------
| RawCNameExprNode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/unit_tests/integration/test_source.py | {
"start": 1046,
"end": 5944
} | class ____(TestCase):
def setUp(self) -> None:
self._config = ConfigBuilder().client_id(_CLIENT_ID).client_secret(_CLIENT_SECRET).refresh_token(_REFRESH_TOKEN).build()
self._source = SourceSalesforce(
CatalogBuilder().with_stream(_STREAM_NAME, SyncMode.full_refresh).build(), self._config, StateBuilder().build()
)
self._http_mocker = HttpMocker()
self._http_mocker.__enter__()
def tearDown(self) -> None:
self._http_mocker.__exit__(None, None, None)
def test_given_transient_error_fetching_schema_when_streams_then_retry(self) -> None:
given_authentication(self._http_mocker, _CLIENT_ID, _CLIENT_SECRET, _REFRESH_TOKEN, _INSTANCE_URL)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/sobjects"),
HttpResponse(json.dumps({"sobjects": [{"name": _STREAM_NAME, "queryable": True}]})),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/sobjects/{_STREAM_NAME}/describe"),
[HttpResponse("", status_code=406), SalesforceDescribeResponseBuilder().field("a_field_name").build()],
)
streams = self._source.streams(self._config)
assert len(streams) == 2 # _STREAM_NAME and Describe which is always added
assert _FIELD_NAME in next(filter(lambda stream: stream.name == _STREAM_NAME, streams)).get_json_schema()["properties"]
def test_given_errors_fetching_schema_when_streams_then_raise_exception(self) -> None:
given_authentication(self._http_mocker, _CLIENT_ID, _CLIENT_SECRET, _REFRESH_TOKEN, _INSTANCE_URL)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/sobjects"),
HttpResponse(json.dumps({"sobjects": [{"name": _STREAM_NAME, "queryable": True}]})),
)
self._http_mocker.get(
HttpRequest(f"{_BASE_URL}/sobjects/{_STREAM_NAME}/describe"),
HttpResponse("", status_code=406),
)
with pytest.raises(AirbyteTracedException) as exception:
self._source.streams(self._config)
assert exception.value.failure_type == FailureType.transient_error # 406 is considered a transient error
def test_read_stream_with_malformed_json_response_error_then_raise_exception(self) -> None:
mock_response = Mock()
mock_response.json.side_effect = exceptions.JSONDecodeError("Expecting value", "<html>Error</html>", 0)
mock_response.url = _BASE_URL
http_error = exceptions.HTTPError(response=mock_response)
with patch(
"airbyte_cdk.sources.concurrent_source.concurrent_source_adapter.ConcurrentSourceAdapter._read_stream"
) as mock_read_stream:
mock_read_stream.side_effect = http_error
with pytest.raises(exceptions.HTTPError) as exception:
list(self._source._read_stream(Mock(), Mock(), Mock(), Mock(), Mock()))
assert type(exception.value.__cause__) == exceptions.JSONDecodeError
assert exception.value.response.url == _BASE_URL
assert type(exception.value) == exceptions.HTTPError
assert exception.value == http_error
def test_read_stream_with_correct_json_response_error_then_raise_exception(self) -> None:
mock_response = Mock()
mock_response.json.return_value = [{"errorCode": "REQUEST_LIMIT_EXCEEDED"}]
mock_response.url = _BASE_URL
http_error = exceptions.HTTPError(response=mock_response)
with patch(
"airbyte_cdk.sources.concurrent_source.concurrent_source_adapter.ConcurrentSourceAdapter._read_stream"
) as mock_read_stream:
mock_read_stream.side_effect = http_error
with pytest.raises(exceptions.HTTPError) as exception:
list(self._source._read_stream(Mock(), Mock(), Mock(), Mock(), Mock()))
assert exception.value.response.json()[0]["errorCode"] == "REQUEST_LIMIT_EXCEEDED"
assert exception.value.response.url == _BASE_URL
assert exception.value == http_error
assert type(exception.value) == exceptions.HTTPError
def test_read_stream_with_forbidden_and_limit_exceeded_error_code_then_raise_exception(self) -> None:
mock_response = Mock()
mock_response.json.return_value = [{"errorCode": "REQUEST_LIMIT_EXCEEDED"}]
mock_response.url = _BASE_URL
mock_response.status_code = 403
http_error = exceptions.HTTPError(response=mock_response)
with patch(
"airbyte_cdk.sources.concurrent_source.concurrent_source_adapter.ConcurrentSourceAdapter._read_stream"
) as mock_read_stream:
mock_read_stream.side_effect = http_error
with pytest.raises(AirbyteStopSync) as exception:
list(self._source._read_stream(Mock(), Mock(), Mock(), Mock(), Mock()))
assert type(exception.value) == AirbyteStopSync
| StreamGenerationTest |
python | pydata__xarray | asv_bench/benchmarks/indexing.py | {
"start": 5252,
"end": 5616
} | class ____:
# https://github.com/pydata/xarray/issues/2227
def setup(self):
self.ds = xr.Dataset(
{"a": ("time", np.arange(10_000_000))},
coords={"time": np.arange(10_000_000)},
)
self.time_filter = self.ds.time > 50_000
def time_indexing(self):
self.ds.isel(time=self.time_filter)
| BooleanIndexing |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 444295,
"end": 444833
} | class ____(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super().__init__(pos, key_value_pairs=items)
| DefaultsKwDictNode |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 5209,
"end": 6421
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
task = CloudMemorystoreExportInstanceOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
output_config=TEST_OUTPUT_CONFIG,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.export_instance.assert_called_once_with(
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
output_config=TEST_OUTPUT_CONFIG,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreExportInstanceOperator |
python | django__django | django/contrib/contenttypes/management/__init__.py | {
"start": 134,
"end": 4665
} | class ____(migrations.RunPython):
def __init__(self, app_label, old_model, new_model):
self.app_label = app_label
self.old_model = old_model
self.new_model = new_model
super().__init__(self.rename_forward, self.rename_backward)
def _rename(self, apps, schema_editor, old_model, new_model):
ContentType = apps.get_model("contenttypes", "ContentType")
db = schema_editor.connection.alias
if not router.allow_migrate_model(db, ContentType):
return
try:
content_type = ContentType.objects.db_manager(db).get_by_natural_key(
self.app_label, old_model
)
except ContentType.DoesNotExist:
pass
else:
content_type.model = new_model
try:
with transaction.atomic(using=db):
content_type.save(using=db, update_fields={"model"})
except IntegrityError:
# Gracefully fallback if a stale content type causes a
# conflict as remove_stale_contenttypes will take care of
# asking the user what should be done next.
content_type.model = old_model
else:
# Clear the cache as the `get_by_natural_key()` call will cache
# the renamed ContentType instance by its old model name.
ContentType.objects.clear_cache()
def rename_forward(self, apps, schema_editor):
self._rename(apps, schema_editor, self.old_model, self.new_model)
def rename_backward(self, apps, schema_editor):
self._rename(apps, schema_editor, self.new_model, self.old_model)
def inject_rename_contenttypes_operations(
plan=None, apps=global_apps, using=DEFAULT_DB_ALIAS, **kwargs
):
"""
Insert a `RenameContentType` operation after every planned `RenameModel`
operation.
"""
if plan is None:
return
# Determine whether or not the ContentType model is available.
try:
ContentType = apps.get_model("contenttypes", "ContentType")
except LookupError:
available = False
else:
if not router.allow_migrate_model(using, ContentType):
return
available = True
for migration, backward in plan:
if (migration.app_label, migration.name) == ("contenttypes", "0001_initial"):
# There's no point in going forward if the initial contenttypes
# migration is unapplied as the ContentType model will be
# unavailable from this point.
if backward:
break
else:
available = True
continue
# The ContentType model is not available yet.
if not available:
continue
inserts = []
for index, operation in enumerate(migration.operations):
if isinstance(operation, migrations.RenameModel):
operation = RenameContentType(
migration.app_label,
operation.old_name_lower,
operation.new_name_lower,
)
inserts.append((index + 1, operation))
for inserted, (index, operation) in enumerate(inserts):
migration.operations.insert(inserted + index, operation)
def create_contenttypes(
app_config,
verbosity=2,
interactive=True,
using=DEFAULT_DB_ALIAS,
apps=global_apps,
**kwargs,
):
"""
Create content types for models in the given app.
"""
if not app_config.models_module:
return
try:
app_config = apps.get_app_config(app_config.label)
ContentType = apps.get_model("contenttypes", "ContentType")
except LookupError:
return
if not router.allow_migrate_model(using, ContentType):
return
all_model_names = {model._meta.model_name for model in app_config.get_models()}
if not all_model_names:
return
ContentType.objects.clear_cache()
existing_model_names = set(
ContentType.objects.using(using)
.filter(app_label=app_config.label)
.values_list("model", flat=True)
)
cts = [
ContentType(app_label=app_config.label, model=model_name)
for model_name in sorted(all_model_names - existing_model_names)
]
ContentType.objects.using(using).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print(f"Adding content type '{ct.app_label} | {ct.model}'")
| RenameContentType |
python | huggingface__transformers | src/transformers/models/deepseek_v3/modeling_deepseek_v3.py | {
"start": 24235,
"end": 25268
} | class ____(PreTrainedModel):
config: DeepseekV3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["DeepseekV3DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": DeepseekV3DecoderLayer,
"attentions": DeepseekV3Attention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, DeepseekV3TopkRouter):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, DeepseekV3NaiveMoe):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
| DeepseekV3PreTrainedModel |
python | lazyprogrammer__machine_learning_examples | unsupervised_class3/dcgan_tf.py | {
"start": 3672,
"end": 4543
} | class ____(object):
def __init__(self, name, M1, M2, apply_batch_norm, f=tf.nn.relu):
self.W = tf.get_variable(
"W_%s" % name,
shape=(M1, M2),
initializer=tf.random_normal_initializer(stddev=0.02),
)
self.b = tf.get_variable(
"b_%s" % name,
shape=(M2,),
initializer=tf.zeros_initializer(),
)
self.f = f
self.name = name
self.apply_batch_norm = apply_batch_norm
self.params = [self.W, self.b]
def forward(self, X, reuse, is_training):
a = tf.matmul(X, self.W) + self.b
# apply batch normalization
if self.apply_batch_norm:
a = tf.contrib.layers.batch_norm(
a,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
reuse=reuse,
scope=self.name,
)
return self.f(a)
| DenseLayer |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 67746,
"end": 71873
} | class ____(CStructOrUnionDefNode, BlockNode):
# name string
# cname string or None
# visibility "extern"
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# base_classes [CBaseTypeNode]
# templates [(string, bool)] or None
# decorators [DecoratorNode] or None
decorators = None
def declare(self, env):
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
num_optional_templates = sum(not required for _, required in self.templates)
if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]):
error(self.pos, "Required template parameters must precede optional template parameters.")
self.entry = env.declare_cpp_class(
self.name, None, self.pos, self.cname,
base_classes=[], visibility=self.visibility, templates=template_types)
def analyse_declarations(self, env):
if not env.is_cpp():
warning(self.pos,
"Using 'cppclass' while Cython is not in c++ mode",
level=1)
if self.templates is None:
template_types = template_names = None
else:
template_names = [template_name for template_name, _ in self.templates]
template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
for template_name, required in self.templates]
scope = None
if self.attributes is not None:
scope = CppClassScope(self.name, env, templates=template_names)
def base_ok(base_class):
if base_class.is_cpp_class or base_class.is_struct:
return True
else:
error(self.pos, "Base class '%s' not a struct or class." % base_class)
base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
self.entry = env.declare_cpp_class(
self.name, scope, self.pos,
self.cname, base_class_types, visibility=self.visibility, templates=template_types)
if self.entry is None:
return
self.entry.is_cpp_class = 1
if scope is not None:
scope.type = self.entry.type
defined_funcs = []
def func_attributes(attributes):
for attr in attributes:
if isinstance(attr, CFuncDefNode):
yield attr
elif isinstance(attr, CompilerDirectivesNode):
yield from func_attributes(attr.body.stats)
elif isinstance(attr, CppClassNode) and attr.attributes is not None:
yield from func_attributes(attr.attributes)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
declare = getattr(attr, 'declare', None)
if declare:
attr.declare(scope)
attr.analyse_declarations(scope)
for func in func_attributes(self.attributes):
defined_funcs.append(func)
if self.templates is not None:
func.template_declaration = "template <typename %s>" % ", typename ".join(template_names)
self.body = StatListNode(self.pos, stats=defined_funcs)
self.scope = scope
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(self.entry.type.scope)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(self.entry.type.scope, code)
def generate_execution_code(self, code):
self.body.generate_execution_code(code)
def annotate(self, code):
self.body.annotate(code)
| CppClassNode |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 31022,
"end": 31201
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ALPHABETICAL", "TAG_COMMIT_DATE")
| RefOrderField |
python | RaRe-Technologies__gensim | gensim/models/rpmodel.py | {
"start": 1362,
"end": 6020
} | class ____(interfaces.TransformationABC):
def __init__(self, corpus, id2word=None, num_topics=300):
"""
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional
Mapping `token_id` -> `token`, will be determine from corpus if `id2word == None`.
num_topics : int, optional
Number of topics.
"""
self.id2word = id2word
self.num_topics = num_topics
if corpus is not None:
self.initialize(corpus)
self.add_lifecycle_event("created", msg=f"created {self}")
def __str__(self):
return "%s<num_terms=%s, num_topics=%s>" % (self.__class__.__name__, self.num_terms, self.num_topics)
def initialize(self, corpus):
"""Initialize the random projection matrix.
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
"""
if self.id2word is None:
logger.info("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif self.id2word:
self.num_terms = 1 + max(self.id2word)
else:
self.num_terms = 0
shape = self.num_topics, self.num_terms
logger.info("constructing %s random matrix", str(shape))
# Now construct the projection matrix itself.
# Here i use a particular form, derived in "Achlioptas: Database-friendly random projection",
# and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).
randmat = 1 - 2 * np.random.binomial(1, 0.5, shape) # convert from 0/1 to +1/-1
# convert from int32 to floats, for faster multiplications
self.projection = np.asfortranarray(randmat, dtype=np.float32)
# TODO: check whether the Fortran-order shenanigans still make sense. In the original
# code (~2010), this made a BIG difference for np BLAS implementations; perhaps now the wrappers
# are smarter and this is no longer needed?
def __getitem__(self, bow):
"""Get random-projection representation of the input vector or corpus.
Parameters
----------
bow : {list of (int, int), iterable of list of (int, int)}
Input document or corpus.
Returns
-------
list of (int, float)
if `bow` is document OR
:class:`~gensim.interfaces.TransformedCorpus`
if `bow` is corpus.
Examples
----------
.. sourcecode:: pycon
>>> from gensim.models import RpModel
>>> from gensim.corpora import Dictionary
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(text) for text in common_texts] # convert texts to BoW format
>>>
>>> model = RpModel(corpus, id2word=dictionary) # fit model
>>>
>>> # apply model to document, result is vector in BoW format, i.e. [(1, 0.3), ... ]
>>> result = model[corpus[0]]
"""
# if the input vector is in fact a corpus, return a transformed corpus as result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
if getattr(self, 'freshly_loaded', False):
# This is a hack to work around a bug in np, where a FORTRAN-order array
# unpickled from disk segfaults on using it.
self.freshly_loaded = False
self.projection = self.projection.copy('F') # simply making a fresh copy fixes the broken array
vec = matutils.sparse2full(bow, self.num_terms).reshape(self.num_terms, 1) / np.sqrt(self.num_topics)
vec = np.asfortranarray(vec, dtype=np.float32)
topic_dist = np.dot(self.projection, vec) # (k, d) * (d, 1) = (k, 1)
return [
(topicid, float(topicvalue)) for topicid, topicvalue in enumerate(topic_dist.flat)
if np.isfinite(topicvalue) and not np.allclose(topicvalue, 0.0)
]
def __setstate__(self, state):
"""Sets the internal state and updates freshly_loaded to True, called when unpicked.
Parameters
----------
state : dict
State of the class.
"""
self.__dict__ = state
self.freshly_loaded = True
| RpModel |
python | celery__celery | t/smoke/tests/quorum_queues/test_quorum_queues.py | {
"start": 1035,
"end": 1669
} | class ____:
def test_signature(self, celery_setup: CeleryTestSetup):
sig = identity.si("test_signature").set(queue=celery_setup.worker.worker_queue)
assert sig.delay().get(timeout=RESULT_TIMEOUT) == "test_signature"
def test_group(self, celery_setup: CeleryTestSetup):
sig = group(
group(add.si(1, 1), add.si(2, 2)),
group([add.si(1, 1), add.si(2, 2)]),
group(s for s in [add.si(1, 1), add.si(2, 2)]),
)
res = sig.apply_async(queue=celery_setup.worker.worker_queue)
assert res.get(timeout=RESULT_TIMEOUT) == [2, 4, 2, 4, 2, 4]
| test_quorum_queues |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 16974,
"end": 17794
} | class ____(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
``name`` is not present by ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes"
)
default_args = ("x",)
| W12 |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/random.py | {
"start": 4394,
"end": 11767
} | class ____(HypothesisRandom):
VERSION = 10**6
def __init__(self, *, note_method_calls: bool, data: ConjectureData) -> None:
super().__init__(note_method_calls=note_method_calls)
self.__data = data
self.__state = RandomState()
def __repr__(self) -> str:
return "HypothesisRandom(generated data)"
def __copy__(self) -> "ArtificialRandom":
result = ArtificialRandom(
note_method_calls=self._note_method_calls,
data=self.__data,
)
result.setstate(self.getstate())
return result
def __convert_result(self, method, kwargs, result):
if method == "choice":
return kwargs.get("seq")[result]
if method in ("choices", "sample"):
seq = kwargs["population"]
return [seq[i] for i in result]
if method == "shuffle":
seq = kwargs["x"]
original = list(seq)
for i, i2 in enumerate(result):
seq[i] = original[i2]
return None
return result
def _hypothesis_do_random(self, method, kwargs):
if method == "choices":
key = (method, len(kwargs["population"]), kwargs.get("k"))
elif method == "choice":
key = (method, len(kwargs["seq"]))
elif method == "shuffle":
key = (method, len(kwargs["x"]))
else:
key = (method, *sorted(kwargs))
try:
result, self.__state = self.__state.next_states[key]
except KeyError:
pass
else:
return self.__convert_result(method, kwargs, result)
if method == "_randbelow":
result = self.__data.draw_integer(0, kwargs["n"] - 1)
elif method == "random":
# See https://github.com/HypothesisWorks/hypothesis/issues/4297
# for numerics/bounds of "random" and "betavariate"
result = self.__data.draw(floats(0, 1, exclude_max=True))
elif method == "betavariate":
result = self.__data.draw(floats(0, 1))
elif method == "uniform":
a = normalize_zero(kwargs["a"])
b = normalize_zero(kwargs["b"])
result = self.__data.draw(floats(a, b))
elif method in ("weibullvariate", "gammavariate"):
result = self.__data.draw(floats(min_value=0.0, allow_infinity=False))
elif method in ("gauss", "normalvariate"):
mu = kwargs["mu"]
result = mu + self.__data.draw(
floats(allow_nan=False, allow_infinity=False)
)
elif method == "vonmisesvariate":
result = self.__data.draw(floats(0, 2 * math.pi))
elif method == "randrange":
if kwargs["stop"] is None:
stop = kwargs["start"]
start = 0
else:
start = kwargs["start"]
stop = kwargs["stop"]
step = kwargs["step"]
if start == stop:
raise ValueError(f"empty range for randrange({start}, {stop}, {step})")
if step != 1:
endpoint = (stop - start) // step
if (start - stop) % step == 0:
endpoint -= 1
i = self.__data.draw_integer(0, endpoint)
result = start + i * step
else:
result = self.__data.draw_integer(start, stop - 1)
elif method == "randint":
result = self.__data.draw_integer(kwargs["a"], kwargs["b"])
# New in Python 3.12, so not taken by our coverage job
elif method == "binomialvariate": # pragma: no cover
result = self.__data.draw_integer(0, kwargs["n"])
elif method == "choice":
seq = kwargs["seq"]
result = self.__data.draw_integer(0, len(seq) - 1)
elif method == "choices":
k = kwargs["k"]
result = self.__data.draw(
lists(
integers(0, len(kwargs["population"]) - 1),
min_size=k,
max_size=k,
)
)
elif method == "sample":
k = kwargs["k"]
seq = kwargs["population"]
if k > len(seq) or k < 0:
raise ValueError(
f"Sample size {k} not in expected range 0 <= k <= {len(seq)}"
)
if k == 0:
result = []
else:
result = self.__data.draw(
lists(
sampled_from(range(len(seq))),
min_size=k,
max_size=k,
unique=True,
)
)
elif method == "getrandbits":
result = self.__data.draw_integer(0, 2 ** kwargs["n"] - 1)
elif method == "triangular":
low = normalize_zero(kwargs["low"])
high = normalize_zero(kwargs["high"])
mode = normalize_zero(kwargs["mode"])
if mode is None:
result = self.__data.draw(floats(low, high))
elif self.__data.draw_boolean(0.5):
result = self.__data.draw(floats(mode, high))
else:
result = self.__data.draw(floats(low, mode))
elif method in ("paretovariate", "expovariate", "lognormvariate"):
result = self.__data.draw(floats(min_value=0.0))
elif method == "shuffle":
result = self.__data.draw(permutations(range(len(kwargs["x"]))))
elif method == "randbytes":
n = int(kwargs["n"])
result = self.__data.draw_bytes(min_size=n, max_size=n)
else:
raise NotImplementedError(method)
new_state = RandomState()
self.__state.next_states[key] = (result, new_state)
self.__state = new_state
return self.__convert_result(method, kwargs, result)
def seed(self, seed):
self.__state = state_for_seed(self.__data, seed)
def getstate(self):
if self.__state.state_id is not None:
return self.__state.state_id
if self.__data.states_for_ids is None:
self.__data.states_for_ids = {}
states_for_ids = self.__data.states_for_ids
self.__state.state_id = len(states_for_ids)
states_for_ids[self.__state.state_id] = self.__state
return self.__state.state_id
def setstate(self, state):
self.__state = self.__data.states_for_ids[state]
DUMMY_RANDOM = Random(0)
def convert_kwargs(name, kwargs):
kwargs = dict(kwargs)
signature = sig_of(name)
params = signature.parameters
bound = signature.bind(DUMMY_RANDOM, **kwargs)
bound.apply_defaults()
for k in list(kwargs):
if (
kwargs[k] is params[k].default
or params[k].kind != inspect.Parameter.KEYWORD_ONLY
):
kwargs.pop(k)
arg_names = list(params)[1:]
args = []
for a in arg_names:
if params[a].kind == inspect.Parameter.KEYWORD_ONLY:
break
args.append(bound.arguments[a])
kwargs.pop(a, None)
while args:
name = arg_names[len(args) - 1]
if args[-1] is params[name].default:
args.pop()
else:
break
return (args, kwargs)
| ArtificialRandom |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeature.py | {
"start": 1113,
"end": 1258
} | class ____(AbstractMetaFeature):
def __init__(self):
super(MetaFeature, self).__init__()
self.type_ = "METAFEATURE"
| MetaFeature |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread.py | {
"start": 700,
"end": 1058
} | class ____(BaseModel):
reason: Optional[str] = None
"""Reason that the thread was closed. Defaults to null when no reason is recorded."""
type: Literal["closed"]
"""Status discriminator that is always `closed`."""
Status: TypeAlias = Annotated[Union[StatusActive, StatusLocked, StatusClosed], PropertyInfo(discriminator="type")]
| StatusClosed |
python | ray-project__ray | python/ray/tune/tests/execution/test_controller_callback_integration.py | {
"start": 721,
"end": 2142
} | class ____(Callback):
CKPT_FILE_TMPL = "test-callback-state-{}.json"
def __init__(self):
self.counter = 0
def on_trial_result(self, iteration, trials, trial, result, **info):
self.counter += 1
def get_state(self) -> Optional[Dict]:
return {"counter": self.counter}
def set_state(self, state: Dict):
self.counter = state["counter"]
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_callback_save_restore(
ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir
):
"""Check that callback state is restored correctly.
Legacy test: test_trial_runner_3.py::TrialRunnerTest::testCallbackSaveRestore
"""
storage = mock_storage_context()
runner = TuneController(callbacks=[StatefulCallback()], storage=storage)
runner.add_trial(Trial(MOCK_TRAINABLE_NAME, stub=True, storage=storage))
for i in range(3):
runner._callbacks.on_trial_result(
iteration=i, trials=None, trial=None, result=None
)
runner.checkpoint(force=True, wait=True)
callback = StatefulCallback()
runner2 = TuneController(callbacks=[callback], storage=storage)
assert callback.counter == 0
runner2.resume(resume_config=ResumeConfig())
assert callback.counter == 3
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| StatefulCallback |
python | python-poetry__poetry | tests/installation/test_installer.py | {
"start": 1759,
"end": 1958
} | class ____(InstalledRepository):
@classmethod
def load(
cls, env: Env, with_dependencies: bool = False
) -> CustomInstalledRepository:
return cls()
| CustomInstalledRepository |
python | readthedocs__readthedocs.org | readthedocs/payments/tests/test_utils.py | {
"start": 174,
"end": 723
} | class ____(PaymentMixin, TestCase):
@requests_mock.Mocker(kw="mock_request")
def test_cancel_subscription(self, mock_request):
subscription_id = "sub_1234567890"
delete_request = mock_request.delete(
f"https://api.stripe.com/v1/subscriptions/{subscription_id}",
json={
"id": subscription_id,
"object": "subscription",
"status": "canceled",
},
)
cancel_subscription(subscription_id)
assert delete_request.called
| TestUtils |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph_path_exists/test_path_exists.py | {
"start": 18,
"end": 843
} | class ____(unittest.TestCase):
def test_path_exists(self):
nodes = []
graph = GraphPathExists()
for id in range(0, 6):
nodes.append(graph.add_node(id))
graph.add_edge(0, 1, 5)
graph.add_edge(0, 4, 3)
graph.add_edge(0, 5, 2)
graph.add_edge(1, 3, 5)
graph.add_edge(1, 4, 4)
graph.add_edge(2, 1, 6)
graph.add_edge(3, 2, 7)
graph.add_edge(3, 4, 8)
self.assertEqual(graph.path_exists(nodes[0], nodes[2]), True)
self.assertEqual(graph.path_exists(nodes[0], nodes[0]), True)
self.assertEqual(graph.path_exists(nodes[4], nodes[5]), False)
print('Success: test_path_exists')
def main():
test = TestPathExists()
test.test_path_exists()
if __name__ == '__main__':
main()
| TestPathExists |
python | huggingface__transformers | tests/models/qwen2/test_modeling_qwen2.py | {
"start": 2002,
"end": 54148
} | class ____(unittest.TestCase):
@slow
def test_model_450m_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B", device_map="auto")
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-1.9537, -1.6193, -1.4123, -1.4673, -1.8511, -1.9309, -1.9826, -2.1776]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([3.2025, 7.1265, 4.6058, 3.6423, 1.6357, 3.9265, 5.1883, 5.8760, 2.7942, 4.4823, 3.2571, 2.1063, 3.4275, 4.2028, 1.9767, 5.2115, 6.6756, 6.3999, 6.0483, 5.7378, 5.6660, 5.2298, 5.4103, 5.1248, 5.4376, 2.4570, 2.6107, 5.4039, 2.8077, 4.7777]) # fmt: skip
print(out[0, 0, :30])
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
gc.collect()
@slow
def test_model_450m_generation(self):
EXPECTED_TEXT_COMPLETION = (
"""My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking and I"""
)
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B", use_fast=False)
model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
@require_bitsandbytes
@slow
@require_flash_attn
@pytest.mark.flash_attn_test
def test_model_450m_long_prompt(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = Qwen2ForCausalLM.from_pretrained(
"Qwen/Qwen2-0.5B",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
attn_implementation="flash_attention_2",
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
# Assisted generation
assistant_model = model
assistant_model.generation_config.num_assistant_tokens = 2
assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
del assistant_model
del model
backend_empty_cache(torch_device)
gc.collect()
@slow
def test_model_450m_long_prompt_sdpa(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B", device_map="auto", attn_implementation="sdpa")
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
# Assisted generation
assistant_model = model
assistant_model.generation_config.num_assistant_tokens = 2
assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
generated_ids = assistant_model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
del assistant_model
backend_empty_cache(torch_device)
gc.collect()
EXPECTED_TEXT_COMPLETION = (
"My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking and I"
)
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B", use_fast=False)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
def test_speculative_generation(self):
EXPECTED_TEXT_COMPLETION = (
"My favourite condiment is 100% natural and organic, and I love to use it to make my own sauces."
)
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B", use_fast=False)
model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B", device_map="auto", dtype=torch.float16)
assistant_model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-0.5B", device_map="auto", dtype=torch.float16)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
set_seed(0)
generated_ids = model.generate(
input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model
)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
@pytest.mark.torch_export_test
@slow
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
qwen_model = "Qwen/Qwen2-0.5B"
tokenizer = AutoTokenizer.from_pretrained(qwen_model, pad_token="</s>", padding_side="right")
expected_text_completions = Expectations({
("cuda", None): [
"My favourite condiment is 100% natural, organic, gluten free, vegan, and free from preservatives. I"
],
("cuda", 8): [
"My favourite condiment is 100% natural, organic, gluten free, vegan, and vegetarian. I love to use"
],
("rocm", (9, 4)): [
"My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking, but"
],
("rocm", (9, 5)): [
"My favourite condiment is 100% natural, organic, gluten free, vegan, and vegetarian. I love to use"
]
}) # fmt: off
EXPECTED_TEXT_COMPLETION = expected_text_completions.get_expectation()
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = Qwen2ForCausalLM.from_pretrained(
qwen_model,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompt = ["My favourite condiment is "]
prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
strict = version.parse(torch.__version__) != version.parse(
"2.7.0"
) # Due to https://github.com/pytorch/pytorch/issues/150994
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
strict=strict,
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
@pytest.mark.flash_attn_test
@require_flash_attn
@slow
def test_3b_generation(self):
model_id = "Qwen/Qwen2.5-3B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = Qwen2ForCausalLM.from_pretrained(
model_id, use_sliding_window=True, max_window_layers=28, sliding_window=2048, dtype=torch.float16
).to(torch_device)
# we need a long text to test sliding window
# fmt: off
LONG_TEXT = """The Warring States period in Chinese history (c. 475 – 221 BC) comprises the final centuries of the Zhou dynasty (c. 1046 – 256 BC), which were characterized by warfare, bureaucratic and military reform, and political consolidation. It followed the Spring and Autumn period and concluded with the wars of conquest that saw the state of Qin annex each of the other contender states by 221 BC and found the Qin dynasty, the first imperial dynastic state in East Asian history.
While scholars have identified several different dates as marking the beginning of the Warring States period, Sima Qian's choice of 475 BC is the most often cited. The era largely corresponds to the second half of the Eastern Zhou period, where the king of Zhou formally ruled as Chinese sovereign, but had lost political power and functioned in practice as a figurehead. This dynamic served as the backdrop for the machinations of the eponymous Warring States. The label "Warring States period" derives from the Record of the Warring States, a work of history compiled during the early Han dynasty (202 BC – 220 AD).
Geography
The political geography of the era was dominated by the Seven Warring States, namely:
Besides these seven major states other smaller states survived into the period. They include:
Periodisation
The eastward flight of the Zhou court in 771 BC marks the start of the Spring and Autumn period. No one single incident or starting point inaugurated the Warring States era. The political situation of the period represented a culmination of historical trends of conquest and annexation which also characterised the Spring and Autumn period. As a result, there is some controversy as to the beginning of the era. Proposed starting points include:
History
Background and formation
The Eastern Zhou dynasty began its fall around 5th century BC. As their influence waned, they had to rely on armies in allied states rather than their own military force. Hundreds of smaller polities coalesced into seven major states which included: Chu, Han, Qin, Wei, Yan, Qi and Zhao. However, there eventually was a shift in alliances because each state's ruler wanted independence. This caused hundreds of wars between 535 and 286 BC. The victorious state would have overall rule and control in China.
The system of feudal states created by the Western Zhou dynasty underwent enormous changes after 771 BC with the flight of the Zhou court to modern-day Luoyang and the diminution of its relevance and power. The Spring and Autumn period led to a few states gaining power at the expense of many others, the latter no longer able to depend on central authority for legitimacy or protection. During the Warring States period, many rulers claimed the Mandate of Heaven to justify their conquest of other states and spread their influence.
The struggle for hegemony eventually created a state system dominated by several large states, such as Jin, Chu, Qin, Yan, and Qi, while the smaller states of the Central Plain tended to be their satellites and tributaries. Other major states also existed, such as Wu and Yue in the southeast. The last decades of the Spring and Autumn era were marked by increased stability, as the result of peace negotiations between Jin and Chu which established their respective spheres of influence. This situation ended with the partition of Jin, whereby the state was divided between the houses of Han, Zhao and Wei, leading to the seven major warring states.
Partition of Jin (453–403 BC)
The rulers of Jin had steadily lost political powers since the middle of the 6th century BC to their nominally subordinate nobles and military commanders, a situation arising from the traditions of the Jin which forbade the enfeoffment of relatives of the ducal house. This allowed other clans to gain fiefs and military authority, and decades of internecine struggle led to the establishment of four major families, the Han, Zhao, Wei and Zhi.
The Battle of Jinyang saw the allied Han, Zhao and Wei destroy the Zhi family (453 BC) and their lands were distributed among them. With this, they became the de facto rulers of most of Jin's territory, though this situation would not be officially recognised until half a century later. The Jin division created a political vacuum that enabled during the first 50 years expansion of Chu and Yue northward and Qi southward. Qin increased its control of the local tribes and began its expansion southwest to Sichuan.
Early Warring States
The three Jins recognized (403–364 BC)
In 403 BC, the court of King Weilie of Zhou officially recognized Zhao, Wei and Han as immediate vassals, thereby raising them to the same rank as the other warring states.
From before 405 until 383 BC the three Jins were united under the leadership of Wei and expanded in all directions. The most important figure was Marquess Wen of Wei (445–396 BC). In 408–406 BC he conquered the State of Zhongshan to the northeast on the other side of Zhao. At the same time he pushed west across the Yellow River to the Luo River taking the area of Xihe (literally 'west of the river').
The growing power of Wei caused Zhao to back away from the alliance. In 383 BC it moved its capital to Handan and attacked the small state of Wey. Wey appealed to Wei which attacked Zhao on the western side. Being in danger, Zhao called in Chu. As usual, Chu used this as a pretext to annex territory to its north, but the diversion allowed Zhao to occupy a part of Wei. This conflict marked the end of the power of the united Jins and the beginning a period of shifting alliances and wars on several fronts.
In 376 BC, the states of Han, Wei and Zhao deposed Duke Jing of Jin and divided the last remaining Jin territory between themselves, which marked the final end of the Jin state.
In 370 BC, Marquess Wu of Wei died without naming a successor, which led to a war of succession. After three years of civil war, Zhao from the north and Han from the south invaded Wei. On the verge of conquering Wei, the leaders of Zhao and Han fell into disagreement about what to do with Wei, and both armies abruptly retreated. As a result, King Hui of Wei (still a Marquess at the time) was able to ascend the throne of Wei.
Zhao extended from the Shanxi plateau across the plain to the borders of Qi. Wei reached east to Qi, Lu, and Song. To the south, the weaker state of Han held the east–west part of the Yellow River valley, surrounded the Zhou royal domain at Luoyang and held an area north of Luoyang called Shangdang.
Qi resurgence under Tian (379–340 BC)
Duke Kang of Qi died in 379 BC with no heir from the house of Jiang, which had ruled Qi since the state's founding. The throne instead passed to the future King Wei, from the house of Tian. The Tian had been very influential at court towards the end of Jiang rule, and now openly assumed power.
The new ruler set about reclaiming territories that had been lost to other states. He launched a successful campaign against Zhao, Wey and Wei, once again extending Qi territory to the Great Wall. Sima Qian writes that the other states were so awestruck that nobody dared attack Qi for more than 20 years. The demonstrated military prowess also had a calming effect on Qi's own population, which experienced great domestic tranquility during Wei's reign.
By the end of King Wei's reign, Qi had become the strongest of the states and proclaimed itself "king"; establishing independence from the Zhou dynasty (see below).
Wars of Wei
King Hui of Wei (370–319 BC) set about restoring the state. In 362–359 BC he exchanged territories with Han and Zhao in order to make the boundaries of the three states more rational.
In 364 BC, Wei was defeated by Qin at the Battle of Shimen and was only saved by the intervention of Zhao. Qin won another victory in 362 BC. In 361 BC the Wei capital was moved east to Daliang to be out of the reach of Qin.
In 354 BC, King Hui of Wei started a large-scale attack on Zhao. By 353 BC, Zhao was losing badly and its capital, Handan, was under siege. The state of Qi intervened. The famous Qi strategist, Sun Bin the great-great-great-grandson of Sun Tzu, the author of the Art of War, proposed to attack the Wei capital while the Wei army was tied up besieging Zhao. The strategy was a success; the Wei army hastily moved south to protect its capital, was caught on the road and decisively defeated at the Battle of Guiling. The battle is remembered in the second of the Thirty-Six Stratagems, "besiege Wei, save Zhao"—meaning to attack a vulnerable spot to relieve pressure at another point.
Domestically, King Hui patronized philosophy and the arts, and is perhaps best remembered for hosting the Confucian philosopher Mencius at his court; their conversations form the first two chapters of the book which bears Meng Zi's name.
Dukes become kings
Qi and Wei became kingdoms (344 BC)
The title of king (wang, 王) was held by figurehead rulers of the Zhou dynasty, while the rulers of most states held the title of duke (gong, 公) or marquess (hou, 侯). A major exception was Chu, whose rulers were called kings since King Wu of Chu started using the title c. 703 BC.
In 344 BC the rulers of Qi and Wei mutually recognized each other as kings: King Wei of Qi and King Hui of Wei, in effect declaring their independence from the Zhou court. This marked a major turning point: unlike those in the Spring and Autumn period, the new generation of rulers ascending the thrones in the Warring States period would not entertain even the pretence of being vassals of the Zhou dynasty, instead proclaiming themselves fully independent kingdoms.
Shang Yang reforms Qin (356–338 BC)
During the early Warring States period Qin generally avoided conflicts with the other states. This changed during the reign of Duke Xiao, when prime minister Shang Yang made centralizing and authoritarian reforms in accordance with his Legalist philosophy between the years 356 and 338 BC.
Shang introduced land reforms, privatized land, rewarded farmers who exceeded harvest quotas, enslaved farmers who failed to meet quotas, and used enslaved subjects as rewards for those who met government policies. As manpower was short in Qin relative to the other states at the time, Shang enacted policies to increase its manpower. As Qin peasants were recruited into the military, he encouraged active immigration of peasants from other states into Qin as a replacement workforce; this policy simultaneously increased the manpower of Qin and weakened the manpower of Qin's rivals.
Shang made laws forcing citizens to marry at a young age and passed tax laws to encourage raising multiple children. He also enacted policies to free convicts who worked in opening wastelands for agriculture. Shang abolished primogeniture and created a double tax on households that had more than one son living in the household, to break up large clans into nuclear families. Shang also moved the capital to reduce the influence of nobles on the administration.
The rise of Qin was recognized by the royal court, and in 343 BC the king conferred the title of Count (伯 Bó) on Duke Xiao. As was customary, a conference was hosted which the feudal lords attended, and during which the Son of Heaven bestowed the title.
After the reforms Qin became much more aggressive. In 340 Qin took land from Wèi after it had been defeated by Qi. In 316 Qin conquered Shu and Ba in Sichuan to the southwest. Development of this area took a long time but slowly added greatly to Qin's wealth and power.
Qin defeats Wei (341–340 BC)
In 341 BC, Wei attacked Han. Qi allowed Han to be nearly defeated and then intervened. The generals from the Battle of Guiling met again (Sun Bin and Tian Ji versus Pang Juan), using the same tactic, attacking Wei's capital. Sun Bin feigned a retreat and then turned on the overconfident Wei troops and decisively defeated them at the Battle of Maling. After the battle all three of the Jin successor states appeared before King Xuan of Qi, pledging their loyalty.
In the following year Qin attacked the weakened Wei. Wei was devastatingly defeated and ceded a large part of its territory in return for truce. With Wei severely weakened, Qi and Qin became the dominant states in China.
Wei came to rely on Qi for protection, with King Hui of Wei meeting King Xuan of Qi on two occasions. After Hui's death, his successor King Xiang also established a good relationship with his Qi counterpart, with both promising to recognize the other as "king".
Chu conquers Yue (334 BC)
Early in the Warring States period, Chu was one of the strongest states in China. The state rose to a new level of power around 389 BC when King Dao of Chu (楚悼王) named the famous reformer Wu Qi as his chancellor.
Chu rose to its peak in 334 BC, when it conquered Yue to its east on the Pacific coast. The series of events leading up to this began when Yue prepared to attack Qi to its north. The King of Qi sent an emissary who persuaded the King of Yue to attack Chu instead. Yue initiated a large-scale attack at Chu but was defeated by Chu's counter-attack. Chu then proceeded to conquer Yue.
Qin, Han and Yan became kingdoms (325–323 BC)
King Xian of Zhou had attempted to use what little royal prerogative he had left by appointing the dukes Xian (384–362 BC), Xiao (361–338 BC) and Hui (338–311 BC) of Qin as hegemons, thereby in theory making Qin the chief ally of the court.
However, in 325 the confidence of Duke Hui grew so great that he proclaimed himself "king" of Qin; adopting the same title as the king of Zhou and thereby effectively proclaiming independence from the Zhou dynasty. King Hui of Qin was guided by his prime minister Zhang Yi, a prominent representative of the School of Diplomacy.
He was followed in 323 BC by King Xuanhui of Han and King Yi of Yan, as well as King Cuo of the minor state Zhongshan. In 318 BC even the ruler of Song, a relatively minor state, declared himself king. Uniquely, while King Wuling of Zhao had joined the other kings in declaring himself king, he retracted this order in 318 BC, after Zhao suffered a great defeat at the hands of Qin.
Partition of Zhou (314 BC)
King Kao of Zhou had enfeoffed his younger brother as Duke Huan of Henan. Three generations later, this cadet branch of the royal house began calling themselves "dukes of East Zhou".
Upon the ascension of King Nan in 314, East Zhou became an independent state. The king came to reside in what became known as West Zhou.
Horizontal and vertical alliances (334–249 BC)
Towards the end of the Warring States period, the state of Qin became disproportionately powerful compared with the other six states. As a result, the policies of the six states became overwhelmingly oriented towards dealing with the Qin threat, with two opposing schools of thought. One school advocated a 'vertical' or north–south alliance called hezong (合縱) in which the states would ally with each other to repel Qin. The other advocated a 'horizontal' or east–west alliance called lianheng (連橫{), in which a state would ally with Qin to participate in its ascendancy.
There were some initial successes in hezong, though mutual suspicions between allied states led to the breakdown of such alliances. Qin repeatedly exploited the horizontal alliance strategy to defeat the states one by one. During this period, many philosophers and tacticians travelled around the states, recommending that the rulers put their respective ideas into use. These "lobbyists", such as Su Qin, who advocated vertical alliances, and Zhang Yi, who advocated horizontal alliances, were famous for their tact and intellect, and were collectively known as the School of Diplomacy, whose Chinese name (縱橫家 'the school of the vertical and horizontal') was derived from the two opposing ideas.
Su Qin and the first vertical alliance (334–300 BC)
Beginning in 334 BC the diplomat Su Qin spent years visiting the courts of Yan, Zhao, Han, Wei, Qi and Chu and persuaded them to form a united front against Qin. In 318 BC all states except Qi launched a joint attack on Qin, which was not successful.
King Hui of Qin died in 311 BC, followed by prime minister Zhang Yi one year later. The new monarch, King Wu, reigned only four years before dying without legitimate heirs. Some damaging turbulence ensued throughout 307 BC before a son of King Hui by a concubine (i.e. a younger half-brother of King Wu) could be established as King Zhao, who in stark contrast to his predecessor went on to rule for an unprecedented 53 years.
After the failure of the first vertical alliance, Su Qin eventually came to live in Qi, where he was favored by King Xuan and drew the envy of the ministers. An assassination attempt in 300 BC left Su mortally wounded but not dead. Sensing death approaching, he advised the newly crowned King Min have him publicly executed to draw out the assassins. King Min complied with Su's request and killed him, putting an end to the first generation of Vertical alliance thinkers.
The first horizontal alliance (300–287 BC)
King Min of Qi came to be highly influenced by Lord Mengchang, a grandson of the former King Wei of Qi. Lord Mengchang made a westward alliance with the states of Wei and Han. In the far west, Qin, which had been weakened by a succession struggle in 307, yielded to the new coalition and appointed Lord Mengchang its chief minister. The alliance between Qin and Qi was sealed by a Qin princess marrying King Min. This horizontal or east–west alliance might have secured peace except that it excluded the State of Zhao.
Around 299 BC, the ruler of Zhao became the last of the seven major states to proclaim himself "king".
In 298 BC, Zhao offered Qin an alliance and Lord Mengchang was driven out of Qin. The remaining three allies, Qi, Wei and Han, attacked Qin, driving up the Yellow River below Shanxi to the Hangu Pass. After 3 years of fighting they took the pass and forced Qin to return territory to Han and Wei. They next inflicted major defeats on Yan and Chu. During the 5-year administration of Lord Mengchang, Qi was the major power in China.
In 294, Lord Mengchang was implicated in a coup d'état and fled to Wei. His alliance system collapsed.
Qi and Qin made a truce and pursued their own interests. Qi moved south against the state of Song whilst the Qin General Bai Qi pushed back eastward against a Han/Wei alliance, gaining victory at the Battle of Yique.
In 288, King Zhao of Qin and King Min of Qi took the title di (帝 'emperor'), of the west and east respectively. They swore a covenant and started planning an attack on Zhao.
Su Dai and the second vertical alliance
In 287 BC the strategist Su Dai, younger brother of Su Qin and possibly an agent of Yan, persuaded King Min that the Zhao war would only benefit Qin. King Min agreed and formed a 'vertical' alliance with the other states against Qin. Qin backed off, abandoned the presumptuous title of "Di", and restored territory to Wei and Zhao. In 286 Qi annexed the state of Song.
The second horizontal alliance and fall of Qi
In 285 BC, the success of Qi had frightened the other states. Under the leadership of Lord Mengchang, who was exiled in Wei, Qin, Zhao, Wei and Yan formed an alliance. Yan had normally been a relatively weak ally of Qi and Qi feared little from this quarter. Yan's onslaught under general Yue Yi came as a devastating surprise. Simultaneously, the other allies attacked from the west. Chu declared itself an ally of Qi but contented itself with annexing some territory to its north. Qi's armies were destroyed while the territory of Qi was reduced to the two cities of Ju and Jimo. King Min himself was later captured and executed by his own followers.
King Min was succeeded by King Xiang in 283 BC. His general Tian Dan was eventually able to restore much of Qi's territory, but it never regained the influence it had under King Min.
Qin and Zhao expansion
In 278 BC, the Qin general Bai Qi attacked from Qin's new territory in Sichuan to the west of Chu. The capital of Ying was captured and Chu's western lands on the Han River were lost. The effect was to shift Chu significantly to the east.
After Chu was defeated in 278, the remaining great powers were Qin in the west and Zhao in the north-center. There was little room for diplomatic maneuver and matters were decided by wars. Zhao had been much strengthened by King Wuling of Zhao (325–299). In 307 he enlarged his cavalry by copying the northern nomads. In 306 he took more land in the northern Shanxi plateau. In 305 he defeated the north-eastern border state of Zhongshan. In 304 he pushed far to the north-west and occupied the east–west section of the Yellow River in the north of the Ordos Loop. King Huiwen of Zhao (298–266) chose able servants and expanded against the weakened Qi and Wei. In 296 his general Lian Po defeated two Qin armies.
In 269 BC Fan Sui became chief advisor to Qin. He advocated authoritarian reforms, irrevocable expansion and an alliance with distant states to attack nearby states (the twenty-third of the Thirty-Six Stratagems). His maxim "attack not only the territory, but also the people" enunciated a policy of mass slaughter that became increasingly frequent.
Qin-Zhao wars (282–257 BC)
In 265 King Zhaoxiang of Qin made the first move by attacking the weak state of Han which held the Yellow River gateway into Qin. He moved north-east across Wei territory to cut off the Han exclave of Shangdang north of Luoyang and south of Zhao. The Han king agreed to surrender Shangdang, but the local governor refused and presented it to King Xiaocheng of Zhao. Zhao sent out Lian Po who based his armies at Changping and Qin sent out general Wang He. Lian Po was too wise to risk a decisive battle with the Qin army and remained inside his fortifications. Qin could not break through and the armies were locked in stalemate for three years. The Zhao king decided that Lian Po was not aggressive enough and sent out Zhao Kuo who promised a decisive battle. At the same time Qin secretly replaced Wang He with the notoriously violent Bai Qi. When Zhao Kuo left his fortifications, Bai Qi used a Cannae maneuver, falling back in the center and surrounding the Zhao army from the sides. After being surrounded for 46 days, the starving Zhao troops surrendered in September 260 BC. It is said that Bai Qi had all the prisoners killed and that Zhao lost 400,000 men.
Qin was too exhausted to follow up its victory. Some time later it sent an army to besiege the Zhao capital but the army was destroyed when it was attacked from the rear. Zhao survived, but there was no longer a state that could resist Qin on its own. The other states could have survived if they remained united against Qin, but they did not.
In 257 BC, Qin army failed to besiege Handan and was defeated by the allied force of Zhao, Wei and Chu during the Battle of Handan.
End of Zhou dynasty (256–249 BC)
The forces of King Zhao of Qin defeated King Nan of Zhou and conquered West Zhou in 256 BC, claiming the Nine Cauldrons and thereby symbolically becoming The Son of Heaven.
King Zhao's exceptionally long reign ended in 251 BC. His son King Xiaowen, already an old man, died just three days after his coronation and was succeeded by his son King Zhuangxiang of Qin. The new Qin king proceeded to conquer East Zhou, seven years after the fall of West Zhou. Thus the 800-year Zhou dynasty, nominally China's longest-ruling regime, finally came to an end.
Sima Qian contradicts himself regarding the ultimate fate of the East Zhou court. Chapter 4 (The Annals of Zhou) concludes with the sentence "thus the sacrifices of Zhou ended", but in the following chapter 5 (The Annals of Qin) we learn that "Qin did not prohibit their sacrifices; the Lord of Zhou was allotted a patch of land in Yangren where he could continue his ancestral sacrifices".
Qin unites China (247–221 BC)
King Zhuangxiang of Qin ruled for only three years. He was succeeded by his son Zheng, who unlike the two elderly kings that preceded him was only 13 years old at his coronation. As an adult, Zheng became a brilliant commander who, in the span of just nine years, unified China.
Conquest of Han
In 230 BC, Qin conquered Han. Han, the weakest of the Seven Warring States, was adjacent to the much stronger Qin, and had suffered continuous assaults by Qin in earlier years of the Warring States period. This went on until Emperor Qin Shi Huang sent general Wang Jian to attack Zhao. King An of Han, frightened by the thought that Han would be the next target of the Qin state, immediately sent diplomats to surrender the entire kingdom without a fight, saving the Han populace from the terrible potential consequences of an unsuccessful resistance.
Conquest of Wei
In 225 BC, Qin conquered Wei. The Qin army led a direct invasion into Wei by besieging its capital Daliang but soon realized that the city walls were too tough to break into. They devised a new strategy in which they utilized the power of a local river that was linked to the Yellow River. The river was used to flood the city's walls, causing massive devastation to the city. Upon realizing the situation, King Jia of Wei hurriedly came out of the capital and surrendered it to the Qin army in order to avoid further bloodshed of his people.
Conquest of Chu
In 223 BC, Qin conquered Chu.
The first invasion was however an utter disaster when 200,000 Qin troops, led by the general, Li Xin, were defeated by 500,000 Chu troops in the unfamiliar territory of Huaiyang, modern-day northern Jiangsu and Anhui provinces. Xiang Yan, the Chu commander, had lured Qin by allowing a few initial victories, but then counterattacked and burnt two large Qin camps.
In 222 BC, Wang Jian was recalled to lead a second military invasion with 600,000 men against the Chu state. High in morale after their victory in the previous year, the Chu forces were content to sit back and defend against what they expected to be a siege of Chu. However, Wang Jian decided to weaken Chu's resolve and tricked the Chu army by appearing to be idle in his fortifications whilst secretly training his troops to fight in Chu territory. After a year, the Chu defenders decided to disband due to apparent lack of action from the Qin. Wang Jian invaded at that point, with full force, and overran Huaiyang and the remaining Chu forces. Chu lost the initiative and could only sustain local guerrilla-style resistance until it too was fully conquered with the destruction of Shouchun and the death of its last leader, Lord Changping, in 223 BC. At their peak, the combined armies of Chu and Qin are estimated to have ranged from hundreds of thousands to a million soldiers, more than those involved in the campaign of Changping between Qin and Zhao 35 years earlier.
Conquest of Zhao and Yan
In 222 BC, Qin conquered Zhao and Yan.
After the conquest of Zhao, the Qin army turned its attention towards Yan. Realizing the danger and gravity of this situation, Crown Prince Dan of Yan had sent Jing Ke to assassinate King Zheng of Qin, but this failure only helped to fuel the rage and determination of the Qin king, and he increased the number of troops to conquer the Yan state.
Conquest of Qi
In 221 BC, Qin conquered Qi, the final unconquered state. It had not previously contributed or helped other states when Qin was conquering them. As soon as Qin's intention to invade it became clear, Qi swiftly surrendered all its cities, completing the unification of China and ushering in the Qin dynasty. The last Qi king lived out his days in exile in Gong and was not given a posthumous name, therefore he is known to posterity by his personal name Jian.
Aftermath
The Qin king Ying Zheng declared himself as Qin Shi Huangdi, "The first Sovereign Emperor of Qin".
In the rule of the Qin state, the union was based solely on military power. The feudal holdings were abolished, and noble families were forced to live in the capital city Xianyang, in order to be supervised. A national road (as well as greater use of canals) allowed for faster and easier deployment and supply of the army. The peasants were given a wider range of land rights, although they were subject to taxation, creating a large amount of revenue to the state.
Military theory and practice
Increasing scale of warfare
The chariot remained a major factor in Chinese warfare long after it went out of fashion in the Middle East. Near the beginning of the Warring States period there is a shift from chariots to massed infantry, possibly associated with the invention of the crossbow. This had two major effects. First it led the dukes to weaken their chariot-riding nobility so they could get direct access to the peasantry who could be drafted as infantry. This change was associated with the shift from aristocratic to bureaucratic government. Second, it led to a massive increase in the scale of warfare. When the Zhou overthrew the Shang at the Battle of Muye they used 45,000 troops and 300 chariots. For the Warring States period the following figures for the military strengths of various states are reported:
For major battles, the following figures are reported:
Many scholars think these numbers are exaggerated (records are inadequate, they are much larger than those from similar societies, soldiers were paid by the number of enemies they killed and the Han dynasty had an interest in exaggerating the bloodiness of the age before China was unified). Regardless of exaggeration, it seems clear that warfare had become excessive during this period. The bloodshed and misery of the Warring States period goes a long way in explaining China's traditional and current preference for a united throne.
Military developments
The Warring States period saw the introduction of many innovations to the art of warfare in China, such as the use of iron and of cavalry.
Warfare in the Warring States period evolved considerably from the Spring and Autumn period, as most armies made use of infantry and cavalry in battles, and the use of chariots became less widespread. The use of massed infantry made warfare bloodier and reduced the importance of the aristocracy, which in turn made the kings more despotic. From this period onward, as the various states competed with each other by mobilizing their armies to war, nobles in China belonged to the literate class, rather than to the warrior class as had previously been the case.
The various states fielded massive armies of infantry, cavalry, and chariots. Complex logistical systems maintained by efficient government bureaucracies were needed to supply, train, and control such large forces. The size of the armies ranged from tens of thousands to several hundred thousand men.
Iron weapons became more widespread and began to replace bronze. Most armour and weapons of this period were made from iron.
The first official native Chinese cavalry unit was formed in 307 BC during the military reforms of King Wuling of Zhao, who advocated 'nomadic dress and horse archery'. But the war chariot still retained its prestige and importance, despite the tactical superiority of cavalry.
The crossbow was the preferred long-range weapon of this period, due to several reasons. The crossbow could be mass-produced easily, and mass training of crossbowmen was possible. These qualities made it a powerful weapon against the enemy.
Infantrymen deployed a variety of weapons, but the most popular was the dagger-axe. The dagger-axe came in various lengths, from 9 to 18 feet; the weapon consisted of a thrusting spear with a slashing blade appended to it. Dagger-axes were an extremely popular weapon in various kingdoms, especially for the Qin, who produced 18-foot-long pike-like weapons.
The Qiang battle spear was named as the king 'wang' of all ancient weapons. It had the biggest impact on the battlefield and was quite difficult to master. The second important weapon of that era was the double-edged battle sword Jian. The fighting methods of using the Qiang spear and Jian sword were very different from what we see in movies or re-enactment shows today. Professional warriors of that era used the military concepts of "Master" Sun Tzu and created several successful "Ge Dou" martial schools.
Military thought
The Warring States was a great period for military strategy; of the Seven Military Classics of China, four were written during this period:
Culture and society
The Warring States period was an era of warfare in ancient China, as well as bureaucratic and military reforms and consolidation; the major states, ruling over large territories, quickly sought to consolidate their powers, leading to the final erosion of the Zhou court's prestige. As a sign of this shift, the rulers of all the major states (except for Chu, which had claimed kingly title much earlier) abandoned their former feudal titles for the title of 王, or King, claiming equality with the rulers of the Zhou.
At the same time, the constant conflict and need for innovative social and political models led to the development of many philosophical doctrines, later known as the Hundred Schools of Thought. The most notable schools of thought include Mohism (expounded by Mozi), Confucianism (represented by Mencius and Xunzi), Legalism (represented by Shang Yang, Shen Buhai, Shen Dao and Han Fei) and Taoism (represented by Zhuangzi and Lao Tzu).
The many states that were competing between each other attempted to display their power not only militarily but in their courts and in state philosophy. Many differing rulers adopted the differing philosophies to their own advantage or that of their kingdom.
Mencius attempted to instate Confucianism as a state philosophy, proposing that through the governing of moral principles like benevolence and righteousness, the state would win popular support from one state and those neighboring, eliminating the need of a war altogether. Mencius had attempted to convince King Hui of Liang, although was unsuccessful since the king saw no advantage in the period of wars.
Mohism was developed by Mozi (468–376 BC) and it provided a unified moral and political philosophy based on impartiality and benevolence. Mohists had the belief that people change depending on environments around. The same was applied to rulers, which is why one must be cautious of foreign influences. Mozi was very much against warfare, although he was a great tactician in defense. He defended the small state of Song from many attempts of the Chu state.
Taoism was advocated by Laozi, and believed that human nature was good and can achieve perfection by returning to its original state. It believed that like a baby, humans are simple and innocent although with development of civilizations it lost its innocence only to be replaced by fraud and greed. Contrarily to other schools, it did not want to gain influence in the offices of states and Laozi even refused to be the minister of the state of Chu.
Legalism created by Shang Yang in 338 BC, rejected all notions of religion and practices, and believed a nation should be governed by strict law. Not only were severe punishments applied, but they would be grouped with the families and made mutually responsible for criminal act. It proposed radical reforms, and established a society based on solid ranks. Peasants were encouraged to practice agriculture as occupation, and military performance was rewarded. Laws were also applied to all ranks with no exception; even the king was not above punishment. The philosophy was adapted by the Qin state and it created it into an organized, centralized state with a bureaucracy chosen on the basis of merit.
This period is most famous for the establishment of complex bureaucracies and centralized governments, as well as a clear legal system. The developments in political and military organization were the basis of the power of the Qin state, which conquered the other states and unified them under the Qin dynasty in 221 BC.
Nobles, bureaucrats and reformers
The phenomenon of intensive warfare, based on mass formations of infantry rather than the traditional chariots, was one major trend which led to the creation of strong central bureaucracies in each of the major states. At the same time, the process of secondary feudalism which permeated the Spring and Autumn period, and led to such events as the partition of Jin and the usurpation of Qi by the Tian clan, was eventually reversed by the same process of bureaucratisation.
Under the demands of warfare, the states adopted bureaucratic reforms in the Warring States period. Wei adopted these in 445 BC, Zhao in 403 BC, Chu in 390 BC, Han in 355 BC, Qi in 357 BC and Qin in 350 BC. Power was centralised by curbing the landed aristocrats and sinecures and creating a new hierarchy based on meritorious service to the state, which were drawn from the lower rungs of society. Systematic auditing and reporting systems, and fixed salaries for officials were created.
The reforms of Shang Yang in Qin, and of Wu Qi in Chu, both centred on increased centralisation, the suppression of the nobility, and a vastly increased scope of government based on Legalist ideals, which were necessary to mobilise the large armies of the period.
Sophisticated arithmetic
A bundle of 21 bamboo slips from the Tsinghua collection dated to 305 BC are the world's earliest example of a two digit decimal multiplication table, indicating that sophisticated commercial arithmetic was already established during this period.
Rod numerals were used to represent both negative and positive integers, and rational numbers, a true positional number system, with a blank for zero dating back to the Warring States period.
The nine linked-rings puzzle, an advanced puzzle device which requires mathematical analysis to solve, was invented during the period.
Literature
An important literary achievement of the Warring States period is the Zuo Commentary on the Spring and Autumn Annals, which summarizes the preceding Spring and Autumn period. The less famous work Guoyu is thought to be by the same author.
Many sayings of Spring and Autumn philosophers, which had previously been circulated orally, were put into writing in the Warring States. These include the Analects and The Art of War.
Economic developments
The Warring States period saw the proliferation of iron working in China, replacing bronze as the dominant type of metal used in warfare. Areas such as Shu (present-day Sichuan) and Yue (present-day Zhejiang) were also brought into the Chinese cultural sphere during this time. Trade also became important, and some merchants had considerable power in politics, the most prominent of which was Lü Buwei, who rose to become Chancellor of Qin and was a key supporter of the eventual Qin Shihuang.
At the same time, the increased resources of consolidated, bureaucratic states, coupled with the logistical needs of mass levies and large-scale warfare, led to the proliferation of economic projects such as large-scale waterworks. Major examples of such waterworks include the Dujiangyan Irrigation System, which controlled the Min River in Sichuan and turned the former backwater region into a major Qin logistical base, and the Zhengguo Canal which irrigated large areas of land in the Guanzhong Plain, again increasing Qin's agricultural output.
The Guanzi is considered one of the most foundational texts of the developing political economy in the Warring States period. It addresses principles of price regulation in the context of effectively dealing with commodities that are "light" (connoting a commodity which is unimportant, non-essential, or inexpensive) or "heavy" (a commodity which is important, essential, or expensive) and how whether a commodity is "light" or "heavy" is understood in relation to other commodities.
In summary:"""
# fmt: on
input_ids = tokenizer(LONG_TEXT, return_tensors="pt").input_ids.to(torch_device)
generated_ids = model.generate(input_ids, max_new_tokens=20)[:, input_ids.shape[1] :]
torch.testing.assert_close(generated_ids.cpu(), torch.tensor([[279, 467, 19859, 4180, 4168, 572, 264, 882, 315, 2244, 2297, 304, 5616, 13, 576, 66827, 66846, 572, 304, 17704]], dtype=torch.long)) # fmt: skip
self.assertEqual(
tokenizer.decode(generated_ids[0]),
" the Warring States period was a time of great change in China. The Zhou dynasty was in decline",
)
model.config._attn_implementation = "eager"
new_generated_ids = model.generate(input_ids, max_new_tokens=20)[:, input_ids.shape[1] :]
with self.subTest("Eager matches sdpa"):
torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4)
# `flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`
# Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model.
# model.config._attn_implementation = "flex_attention"
# new_generated_ids = model.generate(input_ids, max_new_tokens=20)[:, input_ids.shape[1] :]
# with self.subTest("Eager matches Flex attention"):
# torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4)
model.config._attn_implementation = "flash_attention_2"
new_generated_ids = model.generate(input_ids, max_new_tokens=20)[:, input_ids.shape[1] :]
with self.subTest("Eager matches flash attention"):
torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4)
| Qwen2IntegrationTest |
python | ray-project__ray | python/ray/_private/test_utils.py | {
"start": 36955,
"end": 45459
} | class ____(_QueueActor):
async def get_batch(self, batch_size=None, total_timeout=None, first_timeout=None):
start = timeit.default_timer()
try:
first = await asyncio.wait_for(self.queue.get(), first_timeout)
batch = [first]
if total_timeout:
end = timeit.default_timer()
total_timeout = max(total_timeout - (end - start), 0)
except asyncio.TimeoutError:
raise Empty
if batch_size is None:
if total_timeout is None:
total_timeout = 0
while True:
try:
start = timeit.default_timer()
batch.append(
await asyncio.wait_for(self.queue.get(), total_timeout)
)
if total_timeout:
end = timeit.default_timer()
total_timeout = max(total_timeout - (end - start), 0)
except asyncio.TimeoutError:
break
else:
for _ in range(batch_size - 1):
try:
start = timeit.default_timer()
batch.append(
await asyncio.wait_for(self.queue.get(), total_timeout)
)
if total_timeout:
end = timeit.default_timer()
total_timeout = max(total_timeout - (end - start), 0)
except asyncio.TimeoutError:
break
return batch
def is_placement_group_removed(pg):
table = ray.util.placement_group_table(pg)
if "state" not in table:
return False
return table["state"] == "REMOVED"
def placement_group_assert_no_leak(pgs_created):
for pg in pgs_created:
ray.util.remove_placement_group(pg)
def wait_for_pg_removed():
for pg_entry in ray.util.placement_group_table().values():
if pg_entry["state"] != "REMOVED":
return False
return True
wait_for_condition(wait_for_pg_removed)
cluster_resources = ray.cluster_resources()
cluster_resources.pop("memory")
cluster_resources.pop("object_store_memory")
def wait_for_resource_recovered():
for resource, val in ray.available_resources().items():
if resource in cluster_resources and cluster_resources[resource] != val:
return False
if "_group_" in resource:
return False
return True
wait_for_condition(wait_for_resource_recovered)
def monitor_memory_usage(
print_interval_s: int = 30,
record_interval_s: int = 5,
warning_threshold: float = 0.9,
):
"""Run the memory monitor actor that prints the memory usage.
The monitor will run on the same node as this function is called.
Params:
interval_s: The interval memory usage information is printed
warning_threshold: The threshold where the
memory usage warning is printed.
Returns:
The memory monitor actor.
"""
assert ray.is_initialized(), "The API is only available when Ray is initialized."
@ray.remote(num_cpus=0)
class MemoryMonitorActor:
def __init__(
self,
print_interval_s: float = 20,
record_interval_s: float = 5,
warning_threshold: float = 0.9,
n: int = 10,
):
"""The actor that monitor the memory usage of the cluster.
Params:
print_interval_s: The interval where
memory usage is printed.
record_interval_s: The interval where
memory usage is recorded.
warning_threshold: The threshold where
memory warning is printed
n: When memory usage is printed,
top n entries are printed.
"""
# -- Interval the monitor prints the memory usage information. --
self.print_interval_s = print_interval_s
# -- Interval the monitor records the memory usage information. --
self.record_interval_s = record_interval_s
# -- Whether or not the monitor is running. --
self.is_running = False
# -- The used_gb/total_gb threshold where warning message omits. --
self.warning_threshold = warning_threshold
# -- The monitor that calculates the memory usage of the node. --
self.monitor = memory_monitor.MemoryMonitor()
# -- The top n memory usage of processes are printed. --
self.n = n
# -- The peak memory usage in GB during lifetime of monitor. --
self.peak_memory_usage = 0
# -- The top n memory usage of processes
# during peak memory usage. --
self.peak_top_n_memory_usage = ""
# -- The last time memory usage was printed --
self._last_print_time = 0
# -- logger. --
logging.basicConfig(level=logging.INFO)
def ready(self):
pass
async def run(self):
"""Run the monitor."""
self.is_running = True
while self.is_running:
now = time.time()
used_gb, total_gb = self.monitor.get_memory_usage()
top_n_memory_usage = memory_monitor.get_top_n_memory_usage(n=self.n)
if used_gb > self.peak_memory_usage:
self.peak_memory_usage = used_gb
self.peak_top_n_memory_usage = top_n_memory_usage
if used_gb > total_gb * self.warning_threshold:
logging.warning(
"The memory usage is high: " f"{used_gb / total_gb * 100}%"
)
if now - self._last_print_time > self.print_interval_s:
logging.info(f"Memory usage: {used_gb} / {total_gb}")
logging.info(f"Top {self.n} process memory usage:")
logging.info(top_n_memory_usage)
self._last_print_time = now
await asyncio.sleep(self.record_interval_s)
async def stop_run(self):
"""Stop running the monitor.
Returns:
True if the monitor is stopped. False otherwise.
"""
was_running = self.is_running
self.is_running = False
return was_running
async def get_peak_memory_info(self):
"""Return the tuple of the peak memory usage and the
top n process information during the peak memory usage.
"""
return self.peak_memory_usage, self.peak_top_n_memory_usage
current_node_ip = ray._private.worker.global_worker.node_ip_address
# Schedule the actor on the current node.
memory_monitor_actor = MemoryMonitorActor.options(
resources={f"node:{current_node_ip}": 0.001}
).remote(
print_interval_s=print_interval_s,
record_interval_s=record_interval_s,
warning_threshold=warning_threshold,
)
print("Waiting for memory monitor actor to be ready...")
ray.get(memory_monitor_actor.ready.remote())
print("Memory monitor actor is ready now.")
memory_monitor_actor.run.remote()
return memory_monitor_actor
def setup_tls():
"""Sets up required environment variables for tls"""
import pytest
if sys.platform == "darwin":
pytest.skip("Cryptography doesn't install in Mac build pipeline")
cert, key = generate_self_signed_tls_certs()
temp_dir = tempfile.mkdtemp("ray-test-certs")
cert_filepath = os.path.join(temp_dir, "server.crt")
key_filepath = os.path.join(temp_dir, "server.key")
with open(cert_filepath, "w") as fh:
fh.write(cert)
with open(key_filepath, "w") as fh:
fh.write(key)
os.environ["RAY_USE_TLS"] = "1"
os.environ["RAY_TLS_SERVER_CERT"] = cert_filepath
os.environ["RAY_TLS_SERVER_KEY"] = key_filepath
os.environ["RAY_TLS_CA_CERT"] = cert_filepath
return key_filepath, cert_filepath, temp_dir
def teardown_tls(key_filepath, cert_filepath, temp_dir):
os.remove(key_filepath)
os.remove(cert_filepath)
os.removedirs(temp_dir)
del os.environ["RAY_USE_TLS"]
del os.environ["RAY_TLS_SERVER_CERT"]
del os.environ["RAY_TLS_SERVER_KEY"]
del os.environ["RAY_TLS_CA_CERT"]
| _BatchQueueActor |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 165832,
"end": 167290
} | class ____:
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_allclose(lag0.c, [1], atol=1.5e-13, rtol=0)
assert_allclose(lag1.c, [-1, 1], atol=1.5e-13, rtol=0)
assert_allclose(lag2.c, array([1, -4,2]) / 2.0, atol=1.5e-13, rtol=0)
assert_allclose(lag3.c, array([-1, 9,-18,6])/6.0, atol=1.5e-13, rtol=0)
assert_allclose(lag4.c, array([1, -16,72,-96,24])/24.0,
atol=1.5e-13, rtol=0)
assert_allclose(lag5.c, array([-1, 25, -200, 600, -600, 120]) / 120.0,
atol=1.5e-13, rtol=0)
def test_genlaguerre(self):
k = 5*np.random.random() - 0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c, [1])
assert_equal(lag1.c, [-1, k + 1])
assert_allclose(lag2.c, array([1, -2 * (k + 2), (k + 1.) * (k + 2.)]) / 2.0,
atol=1.5e-7, rtol=0)
expected = array([-1,
3 * (k + 3),
-3 * (k + 2) * (k + 3),
(k + 1) * (k + 2) * (k + 3)]) / 6.0
assert_allclose(lag3.c, expected, atol=1.5e-7, rtol=0)
| TestLaguerre |
python | plotly__plotly.py | plotly/graph_objs/volume/_stream.py | {
"start": 233,
"end": 3494
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume"
_path_str = "volume.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.volume.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/key_binding/key_processor.py | {
"start": 944,
"end": 1935
} | class ____:
"""
:param key: A `Keys` instance or text (one character).
:param data: The received string on stdin. (Often vt100 escape codes.)
"""
def __init__(self, key: Keys | str, data: str | None = None) -> None:
assert isinstance(key, Keys) or len(key) == 1
if data is None:
if isinstance(key, Keys):
data = key.value
else:
data = key # 'key' is a one character string.
self.key = key
self.data = data
def __repr__(self) -> str:
return f"{self.__class__.__name__}(key={self.key!r}, data={self.data!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, KeyPress):
return False
return self.key == other.key and self.data == other.data
"""
Helper object to indicate flush operation in the KeyProcessor.
NOTE: the implementation is very similar to the VT100 parser.
"""
_Flush = KeyPress("?", data="_Flush")
| KeyPress |
python | pyca__cryptography | tests/hazmat/primitives/test_hash_vectors.py | {
"start": 3341,
"end": 3706
} | class ____:
test_b2b = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "blake2"),
["blake2b.txt"],
hashes.BLAKE2b(digest_size=64),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(
hashes.BLAKE2s(digest_size=32)
),
skip_message="Does not support BLAKE2s",
)
| TestBLAKE2b |
python | ijl__orjson | test/test_dataclass.py | {
"start": 852,
"end": 1014
} | class ____:
__slots__ = ("_c", "a", "b", "d")
a: str
b: int
_c: str
d: InitVar[str]
cls_var: ClassVar[str] = "cls"
@dataclass
| Slotsdataclass |
python | cloudpipe__cloudpickle | cloudpickle/cloudpickle.py | {
"start": 19484,
"end": 43984
} | class ____:
"""Sentinel for empty closures."""
@classmethod
def __reduce__(cls):
return cls.__name__
def _make_function(code, globals, name, argdefs, closure):
# Setting __builtins__ in globals is needed for nogil CPython.
globals["__builtins__"] = __builtins__
return types.FunctionType(code, globals, name, argdefs, closure)
def _make_empty_cell():
if False:
# trick the compiler into creating an empty cell in our lambda
cell = None
raise AssertionError("this route should not be executed")
return (lambda: cell).__closure__[0]
def _make_cell(value=_empty_cell_value):
cell = _make_empty_cell()
if value is not _empty_cell_value:
cell.cell_contents = value
return cell
def _make_skeleton_class(
type_constructor, name, bases, type_kwargs, class_tracker_id, extra
):
"""Build dynamic class with an empty __dict__ to be filled once memoized
If class_tracker_id is not None, try to lookup an existing class definition
matching that id. If none is found, track a newly reconstructed class
definition under that id so that other instances stemming from the same
class id will also reuse this class definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility shall the need arise.
"""
# We need to intern the keys of the type_kwargs dict to avoid having
# different pickles for the same dynamic class depending on whether it was
# dynamically created or reconstructed from a pickled stream.
type_kwargs = {sys.intern(k): v for k, v in type_kwargs.items()}
skeleton_class = types.new_class(
name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
)
return _lookup_class_or_track(class_tracker_id, skeleton_class)
def _make_skeleton_enum(
bases, name, qualname, members, module, class_tracker_id, extra
):
"""Build dynamic enum with an empty __dict__ to be filled once memoized
The creation of the enum class is inspired by the code of
EnumMeta._create_.
If class_tracker_id is not None, try to lookup an existing enum definition
matching that id. If none is found, track a newly reconstructed enum
definition under that id so that other instances stemming from the same
class id will also reuse this enum definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility shall the need arise.
"""
# enums always inherit from their base Enum class at the last position in
# the list of base classes:
enum_base = bases[-1]
metacls = enum_base.__class__
classdict = metacls.__prepare__(name, bases)
for member_name, member_value in members.items():
classdict[member_name] = member_value
enum_class = metacls.__new__(metacls, name, bases, classdict)
enum_class.__module__ = module
enum_class.__qualname__ = qualname
return _lookup_class_or_track(class_tracker_id, enum_class)
def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
tv = typing.TypeVar(
name,
*constraints,
bound=bound,
covariant=covariant,
contravariant=contravariant,
)
return _lookup_class_or_track(class_tracker_id, tv)
def _decompose_typevar(obj):
return (
obj.__name__,
obj.__bound__,
obj.__constraints__,
obj.__covariant__,
obj.__contravariant__,
_get_or_create_tracker_id(obj),
)
def _typevar_reduce(obj):
# TypeVar instances require the module information hence why we
# are not using the _should_pickle_by_reference directly
module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
if module_and_name is None:
return (_make_typevar, _decompose_typevar(obj))
elif _is_registered_pickle_by_value(module_and_name[0]):
return (_make_typevar, _decompose_typevar(obj))
return (getattr, module_and_name)
def _get_bases(typ):
if "__orig_bases__" in getattr(typ, "__dict__", {}):
# For generic types (see PEP 560)
# Note that simply checking `hasattr(typ, '__orig_bases__')` is not
# correct. Subclasses of a fully-parameterized generic class does not
# have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
# will return True because it's defined in the base class.
bases_attr = "__orig_bases__"
else:
# For regular class objects
bases_attr = "__bases__"
return getattr(typ, bases_attr)
def _make_dict_keys(obj, is_ordered=False):
if is_ordered:
return OrderedDict.fromkeys(obj).keys()
else:
return dict.fromkeys(obj).keys()
def _make_dict_values(obj, is_ordered=False):
if is_ordered:
return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
else:
return {i: _ for i, _ in enumerate(obj)}.values()
def _make_dict_items(obj, is_ordered=False):
if is_ordered:
return OrderedDict(obj).items()
else:
return obj.items()
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
type_kwargs = {}
if "__module__" in obj.__dict__:
type_kwargs["__module__"] = obj.__module__
__dict__ = obj.__dict__.get("__dict__", None)
if isinstance(__dict__, property):
type_kwargs["__dict__"] = __dict__
return (
type(obj),
obj.__name__,
_get_bases(obj),
type_kwargs,
_get_or_create_tracker_id(obj),
None,
)
def _enum_getnewargs(obj):
members = {e.name: e.value for e in obj}
return (
obj.__bases__,
obj.__name__,
obj.__qualname__,
members,
obj.__module__,
_get_or_create_tracker_id(obj),
None,
)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
# - Put func's dynamic attributes (stored in func.__dict__) in state. These
# attributes will be restored at unpickling time using
# f.__dict__.update(state)
# - Put func's members into slotstate. Such attributes will be restored at
# unpickling time by iterating over slotstate and calling setattr(func,
# slotname, slotvalue)
slotstate = {
# Hack to circumvent non-predictable memoization caused by string interning.
# See the inline comment in _class_setstate for details.
"__name__": "".join(func.__name__),
"__qualname__": "".join(func.__qualname__),
"__annotations__": func.__annotations__,
"__kwdefaults__": func.__kwdefaults__,
"__defaults__": func.__defaults__,
"__module__": func.__module__,
"__doc__": func.__doc__,
"__closure__": func.__closure__,
}
f_globals_ref = _extract_code_globals(func.__code__)
f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
if func.__closure__ is not None:
closure_values = list(map(_get_cell_contents, func.__closure__))
else:
closure_values = ()
# Extract currently-imported submodules used by func. Storing these modules
# in a smoke _cloudpickle_subimports attribute of the object's state will
# trigger the side effect of importing these modules at unpickling time
# (which is necessary for func to work correctly once depickled)
slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
func.__code__, itertools.chain(f_globals.values(), closure_values)
)
slotstate["__globals__"] = f_globals
# Hack to circumvent non-predictable memoization caused by string interning.
# See the inline comment in _class_setstate for details.
state = {"".join(k): v for k, v in func.__dict__.items()}
return state, slotstate
def _class_getstate(obj):
clsdict = _extract_class_dict(obj)
clsdict.pop("__weakref__", None)
if issubclass(type(obj), abc.ABCMeta):
# If obj is an instance of an ABCMeta subclass, don't pickle the
# cache/negative caches populated during isinstance/issubclass
# checks, but pickle the list of registered subclasses of obj.
clsdict.pop("_abc_cache", None)
clsdict.pop("_abc_negative_cache", None)
clsdict.pop("_abc_negative_cache_version", None)
registry = clsdict.pop("_abc_registry", None)
if registry is None:
# The abc caches and registered subclasses of a
# class are bundled into the single _abc_impl attribute
clsdict.pop("_abc_impl", None)
(registry, _, _, _) = abc._get_dump(obj)
clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
else:
# In the above if clause, registry is a set of weakrefs -- in
# this case, registry is a WeakSet
clsdict["_abc_impl"] = [type_ for type_ in registry]
if "__slots__" in clsdict:
# pickle string length optimization: member descriptors of obj are
# created automatically from obj's __slots__ attribute, no need to
# save them in obj's state
if isinstance(obj.__slots__, str):
clsdict.pop(obj.__slots__)
else:
for k in obj.__slots__:
clsdict.pop(k, None)
clsdict.pop("__dict__", None) # unpicklable property object
if sys.version_info >= (3, 14):
# PEP-649/749: __annotate_func__ contains a closure that references the class
# dict. We need to exclude it from pickling. Python will recreate it when
# __annotations__ is accessed at unpickling time.
clsdict.pop("__annotate_func__", None)
return (clsdict, {})
def _enum_getstate(obj):
clsdict, slotstate = _class_getstate(obj)
members = {e.name: e.value for e in obj}
# Cleanup the clsdict that will be passed to _make_skeleton_enum:
# Those attributes are already handled by the metaclass.
for attrname in [
"_generate_next_value_",
"_member_names_",
"_member_map_",
"_member_type_",
"_value2member_map_",
]:
clsdict.pop(attrname, None)
for member in members:
clsdict.pop(member)
# Special handling of Enum subclasses
return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""code object reducer."""
# If you are not sure about the order of arguments, take a look at help
# of the specific type from types, for example:
# >>> from types import CodeType
# >>> help(CodeType)
# Hack to circumvent non-predictable memoization caused by string interning.
# See the inline comment in _class_setstate for details.
co_name = "".join(obj.co_name)
# co_filename is not used in the constructor of code objects, so we can
# safely set it to indicate that this is dynamic code. This also makes
# the payload deterministic, independent of where the function is defined
# which is especially useful when defining classes in jupyter/ipython
# cells which do not have a deterministic filename.
co_filename = "".join("<dynamic-code>")
# Create shallow copies of these tuple to make cloudpickle payload deterministic.
# When creating a code object during load, copies of these four tuples are
# created, while in the main process, these tuples can be shared.
# By always creating copies, we make sure the resulting payload is deterministic.
co_names = tuple(name for name in obj.co_names)
co_varnames = tuple(name for name in obj.co_varnames)
co_freevars = tuple(name for name in obj.co_freevars)
co_cellvars = tuple(name for name in obj.co_cellvars)
if hasattr(obj, "co_exceptiontable"):
# Python 3.11 and later: there are some new attributes
# related to the enhanced exceptions.
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
co_names,
co_varnames,
co_filename,
co_name,
obj.co_qualname,
obj.co_firstlineno,
obj.co_linetable,
obj.co_exceptiontable,
co_freevars,
co_cellvars,
)
elif hasattr(obj, "co_linetable"):
# Python 3.10 and later: obj.co_lnotab is deprecated and constructor
# expects obj.co_linetable instead.
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
co_names,
co_varnames,
co_filename,
co_name,
obj.co_firstlineno,
obj.co_linetable,
co_freevars,
co_cellvars,
)
elif hasattr(obj, "co_nmeta"): # pragma: no cover
# "nogil" Python: modified attributes from 3.9
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_framesize,
obj.co_ndefaultargs,
obj.co_nmeta,
obj.co_flags,
obj.co_code,
obj.co_consts,
co_varnames,
co_filename,
co_name,
obj.co_firstlineno,
obj.co_lnotab,
obj.co_exc_handlers,
obj.co_jump_table,
co_freevars,
co_cellvars,
obj.co_free2reg,
obj.co_cell2reg,
)
else:
# Backward compat for 3.8 and 3.9
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
co_names,
co_varnames,
co_filename,
co_name,
obj.co_firstlineno,
obj.co_lnotab,
co_freevars,
co_cellvars,
)
return types.CodeType, args
def _cell_reduce(obj):
"""Cell (containing values of a function's free variables) reducer."""
try:
obj.cell_contents
except ValueError: # cell is empty
return _make_empty_cell, ()
else:
return _make_cell, (obj.cell_contents,)
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
"""Save a file."""
import io
if not hasattr(obj, "name") or not hasattr(obj, "mode"):
raise pickle.PicklingError(
"Cannot pickle files that do not map to an actual file"
)
if obj is sys.stdout:
return getattr, (sys, "stdout")
if obj is sys.stderr:
return getattr, (sys, "stderr")
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, "isatty") and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if "r" not in obj.mode and "+" not in obj.mode:
raise pickle.PicklingError(
"Cannot pickle files that are not opened for reading: %s" % obj.mode
)
name = obj.name
retval = io.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except OSError as e:
raise pickle.PicklingError(
"Cannot pickle file %s as it cannot be read" % name
) from e
retval.write(contents)
retval.seek(curloc)
retval.name = name
return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
if _should_pickle_by_reference(obj):
return subimport, (obj.__name__,)
else:
# Some external libraries can populate the "__builtins__" entry of a
# module's `__dict__` with unpicklable objects (see #316). For that
# reason, we do not attempt to pickle the "__builtins__" entry, and
# restore a default value for it at unpickling time.
state = obj.__dict__.copy()
state.pop("__builtins__", None)
return dynamic_subimport, (obj.__name__, state)
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
"""Save a class that can't be referenced as a module attribute.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from importable modules.
"""
if Enum is not None and issubclass(obj, Enum):
return (
_make_skeleton_enum,
_enum_getnewargs(obj),
_enum_getstate(obj),
None,
None,
_class_setstate,
)
else:
return (
_make_skeleton_class,
_class_getnewargs(obj),
_class_getstate(obj),
None,
None,
_class_setstate,
)
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj."""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _should_pickle_by_reference(obj):
return _dynamic_class_reduce(obj)
return NotImplemented
def _dict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj),)
def _dict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj),)
def _dict_items_reduce(obj):
return _make_dict_items, (dict(obj),)
def _odict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), True)
def _odict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), True)
def _odict_items_reduce(obj):
return _make_dict_items, (dict(obj), True)
def _dataclass_field_base_reduce(obj):
return _get_dataclass_field_type_sentinel, (obj.name,)
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at unpickling time.
def _function_setstate(obj, state):
"""Update the state of a dynamic function.
As __closure__ and __globals__ are readonly attributes of a function, we
cannot rely on the native setstate routine of pickle.load_build, that calls
setattr on items of the slotstate. Instead, we have to modify them inplace.
"""
state, slotstate = state
obj.__dict__.update(state)
obj_globals = slotstate.pop("__globals__")
obj_closure = slotstate.pop("__closure__")
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
slotstate.pop("_cloudpickle_submodules")
obj.__globals__.update(obj_globals)
obj.__globals__["__builtins__"] = __builtins__
if obj_closure is not None:
for i, cell in enumerate(obj_closure):
try:
value = cell.cell_contents
except ValueError: # cell is empty
continue
obj.__closure__[i].cell_contents = value
for k, v in slotstate.items():
setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
# Note: setting attribute names on a class automatically triggers their
# interning in CPython:
# https://github.com/python/cpython/blob/v3.12.0/Objects/object.c#L957
#
# This means that to get deterministic pickling for a dynamic class that
# was initially defined in a different Python process, the pickler
# needs to ensure that dynamic class and function attribute names are
# systematically copied into a non-interned version to avoid
# unpredictable pickle payloads.
#
# Indeed the Pickler's memoizer relies on physical object identity to break
# cycles in the reference graph of the object being serialized.
setattr(obj, attrname, attr)
if sys.version_info >= (3, 13) and "__firstlineno__" in state:
# Set the Python 3.13+ only __firstlineno__ attribute one more time, as it
# will be automatically deleted by the `setattr(obj, attrname, attr)` call
# above when `attrname` is "__firstlineno__". We assume that preserving this
# information might be important for some users and that it not stale in the
# context of cloudpickle usage, hence legitimate to propagate. Furthermore it
# is necessary to do so to keep deterministic chained pickling as tested in
# test_deterministic_str_interning_for_chained_dynamic_class_pickling.
obj.__firstlineno__ = state["__firstlineno__"]
if registry is not None:
for subclass in registry:
obj.register(subclass)
# PEP-649/749: During pickling, we excluded the __annotate_func__ attribute but it
# will be created by Python. Subsequently, annotations will be recreated when
# __annotations__ is accessed.
return obj
# COLLECTION OF DATACLASS UTILITIES
# ---------------------------------
# There are some internal sentinel values whose identity must be preserved when
# unpickling dataclass fields. Each sentinel value has a unique name that we can
# use to retrieve its identity at unpickling time.
_DATACLASSE_FIELD_TYPE_SENTINELS = {
dataclasses._FIELD.name: dataclasses._FIELD,
dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
}
def _get_dataclass_field_type_sentinel(name):
return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
| _empty_cell_value |
python | django__django | tests/custom_managers/models.py | {
"start": 5146,
"end": 5381
} | class ____(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
| Car |
python | getsentry__sentry | src/sentry/integrations/api/serializers/rest_framework/data_forwarder.py | {
"start": 876,
"end": 1170
} | class ____(TypedDict, total=False):
instance_url: str
index: str
source: str
token: str
SQS_REQUIRED_KEYS = ["queue_url", "region", "access_key", "secret_key"]
SEGMENT_REQUIRED_KEYS = ["write_key"]
SPLUNK_REQUIRED_KEYS = ["instance_url", "index", "source", "token"]
| SplunkConfig |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 258981,
"end": 264378
} | class ____(
ValueChannelMixin, core.ValueDefWithConditionMarkPropFieldOrDatumDefnumber
):
"""
FillOpacityValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fillOpacity"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> FillOpacityValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> FillOpacityValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| FillOpacityValue |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-siliconflow/tests/test_embeddings_siliconflow.py | {
"start": 253,
"end": 3936
} | class ____:
def __init__(self, json_data) -> None:
self._json_data = json_data
def raise_for_status(self) -> None: ...
async def __aenter__(self) -> "MockAsyncResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[types.TracebackType],
) -> None:
pass
async def json(self) -> dict:
return self._json_data
def test_embedding_class():
emb = SiliconFlowEmbedding()
assert isinstance(emb, BaseEmbedding)
def test_float_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
def test_base64_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": "AAD2Qg==", "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...", encoding_format="base64")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "base64",
},
headers=embedding._headers,
)
@pytest.mark.asyncio
async def test_float_format_embedding_async():
input_text = "..."
mock_response = MockAsyncResponse(
json_data={
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
)
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch(
"aiohttp.ClientSession.post", return_value=mock_response
) as mock_post:
actual_result = await embedding.aget_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
| MockAsyncResponse |
python | pypa__setuptools | setuptools/wheel.py | {
"start": 2398,
"end": 9532
} | class ____:
def __init__(self, filename) -> None:
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError(f'invalid wheel name: {filename!r}')
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
"""List tags (py_version, abi, platform) supported by this wheel."""
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'),
)
def is_compatible(self):
"""Is the wheel compatible with the current platform?"""
return next((True for t in self.tags() if t in _get_supported_tags()), False)
def egg_name(self):
return (
_egg_basename(
self.project_name,
self.version,
platform=(None if self.platform == 'any' else get_platform()),
)
+ ".egg"
)
def get_dist_info(self, zf):
# find the correct name of the .dist-info dir in the wheel file
for member in zf.namelist():
dirname = posixpath.dirname(member)
if dirname.endswith('.dist-info') and canonicalize_name(dirname).startswith(
canonicalize_name(self.project_name)
):
return dirname
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir) -> None:
"""Install wheel as an egg directory."""
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
def _install_as_egg(self, destination_eggdir, zf):
dist_basename = f'{self.project_name}-{self.version}'
dist_info = self.get_dist_info(zf)
dist_data = f'{dist_basename}.data'
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
self._move_data_entries(destination_eggdir, dist_data)
self._fix_namespace_packages(egg_info, destination_eggdir)
@staticmethod
def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
def get_metadata(name):
with zf.open(posixpath.join(dist_info, name)) as fp:
value = fp.read().decode('utf-8')
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
wheel_v1 = parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
if not wheel_v1:
raise ValueError(f'unsupported wheel format version: {wheel_version}')
# Extract to target directory.
_unpack_zipfile_obj(zf, destination_eggdir)
dist_info = os.path.join(destination_eggdir, dist_info)
install_requires, extras_require = Wheel._convert_requires(
destination_eggdir, dist_info
)
os.rename(dist_info, egg_info)
os.rename(
os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'),
)
setup_dist = setuptools.Distribution(
attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
),
)
with disable_info_traces():
write_requirements(
setup_dist.get_command_obj('egg_info'),
None,
os.path.join(egg_info, 'requires.txt'),
)
@staticmethod
def _convert_requires(destination_eggdir, dist_info):
md = metadata.Distribution.at(dist_info).metadata
deps = md.get_all('Requires-Dist') or []
reqs = list(map(Requirement, deps))
extras = extras_from_deps(deps)
# Note: Evaluate and strip markers now,
# as it's difficult to convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req = Requirement(str(req))
req.marker = None
return str(req)
def eval(req, **env):
return not req.marker or req.marker.evaluate(env)
def for_extra(req):
try:
markers = req.marker._markers
except AttributeError:
markers = ()
return set(
marker[2].value
for marker in markers
if isinstance(marker, tuple) and marker[0].value == 'extra'
)
install_requires = list(
map(raw_req, filter(eval, itertools.filterfalse(for_extra, reqs)))
)
extras_require = {
extra: list(
map(
raw_req,
(req for req in reqs if for_extra(req) and eval(req, extra=extra)),
)
)
for extra in extras
}
return install_requires, extras_require
@staticmethod
def _move_data_entries(destination_eggdir, dist_data):
"""Move data entries to their correct location."""
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(destination_eggdir, 'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(
os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry),
)
os.rmdir(dist_data_scripts)
for subdir in filter(
os.path.exists,
(
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
),
):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
@staticmethod
def _fix_namespace_packages(egg_info, destination_eggdir):
namespace_packages = os.path.join(egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
namespace_packages = _read_utf8_with_fallback(namespace_packages).split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if not os.path.exists(mod_dir):
os.mkdir(mod_dir)
if not os.path.exists(mod_init):
with open(mod_init, 'w', encoding="utf-8") as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
| Wheel |
python | Netflix__metaflow | metaflow/_vendor/click/formatting.py | {
"start": 2817,
"end": 9281
} | class ____(object):
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(self, indent_increment=2, width=None, max_width=None):
self.indent_increment = indent_increment
if max_width is None:
max_width = 80
if width is None:
width = FORCED_WIDTH
if width is None:
width = max(min(get_terminal_size()[0], max_width) - 2, 50)
self.width = width
self.current_indent = 0
self.buffer = []
def write(self, string):
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self):
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self):
"""Decreases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(self, prog, args="", prefix="Usage: "):
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
"""
usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent)
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = " " * term_len(usage_prefix)
self.write(
wrap_text(
args,
text_width,
initial_indent=usage_prefix,
subsequent_indent=indent,
)
)
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write("\n")
indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
self.write(
wrap_text(
args, text_width, initial_indent=indent, subsequent_indent=indent
)
)
self.write("\n")
def write_heading(self, heading):
"""Writes a heading into the buffer."""
self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent))
def write_paragraph(self):
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write("\n")
def write_text(self, text):
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
text_width = max(self.width - self.current_indent, 11)
indent = " " * self.current_indent
self.write(
wrap_text(
text,
text_width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True,
)
)
self.write("\n")
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError("Expected two columns for definition list")
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write("{:>{w}}{}".format("", first, w=self.current_indent))
if not second:
self.write("\n")
continue
if term_len(first) <= first_col - col_spacing:
self.write(" " * (first_col - term_len(first)))
else:
self.write("\n")
self.write(" " * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
lines = wrapped_text.splitlines()
if lines:
self.write("{}\n".format(lines[0]))
for line in lines[1:]:
self.write(
"{:>{w}}{}\n".format(
"", line, w=first_col + self.current_indent
)
)
if len(lines) > 1:
# separate long help from next option
self.write("\n")
else:
self.write("\n")
@contextmanager
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self):
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getvalue(self):
"""Returns the buffer contents."""
return "".join(self.buffer)
def join_options(options):
"""Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
"""
rv = []
any_prefix_is_slash = False
for opt in options:
prefix = split_opt(opt)[0]
if prefix == "/":
any_prefix_is_slash = True
rv.append((len(prefix), opt))
rv.sort(key=lambda x: x[0])
rv = ", ".join(x[1] for x in rv)
return rv, any_prefix_is_slash
| HelpFormatter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.