language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | django/core/servers/basehttp.py | {
"start": 3729,
"end": 6327
} | class ____(simple_server.ServerHandler):
http_version = "1.1"
def __init__(self, stdin, stdout, stderr, environ, **kwargs):
"""
Use a LimitedStream so that unread request data will be ignored at
the end of the request. WSGIRequest uses a LimitedStream but it
shouldn't discard the data since the upstream servers usually do this.
This fix applies only for testserver/runserver.
"""
try:
content_length = int(environ.get("CONTENT_LENGTH"))
except (ValueError, TypeError):
content_length = 0
super().__init__(
LimitedStream(stdin, content_length), stdout, stderr, environ, **kwargs
)
def cleanup_headers(self):
super().cleanup_headers()
if (
self.environ["REQUEST_METHOD"] == "HEAD"
and "Content-Length" in self.headers
and str(self.headers["Content-Length"]) == "0"
):
del self.headers["Content-Length"]
# HTTP/1.1 requires support for persistent connections. Send 'close' if
# the content length is unknown to prevent clients from reusing the
# connection.
if (
self.environ["REQUEST_METHOD"] != "HEAD"
and "Content-Length" not in self.headers
):
self.headers["Connection"] = "close"
# Persistent connections require threading server.
elif not isinstance(self.request_handler.server, socketserver.ThreadingMixIn):
self.headers["Connection"] = "close"
# Mark the connection for closing if it's set as such above or if the
# application sent the header.
if self.headers.get("Connection") == "close":
self.request_handler.close_connection = True
def close(self):
self.get_stdin().read()
super().close()
def finish_response(self):
if self.environ["REQUEST_METHOD"] == "HEAD":
try:
deque(self.result, maxlen=0) # Consume iterator.
# Don't call self.finish_content() as, if the headers have not
# been sent and Content-Length isn't set, it'll default to "0"
# which will prevent omission of the Content-Length header with
# HEAD requests as permitted by RFC 9110 Section 9.3.2.
# Instead, send the headers, if not sent yet.
if not self.headers_sent:
self.send_headers()
finally:
self.close()
else:
super().finish_response()
| ServerHandler |
python | google__jax | jax/experimental/array_serialization/serialization_test.py | {
"start": 28901,
"end": 28984
} | class ____:
a: int
c: str
d: int
@jax.tree_util.register_static
| CustomDataclass |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/fields_on_correct_type.py | {
"start": 999,
"end": 1054
} | class ____(Counter, OrderedDict):
pass
| OrderedCounter |
python | google__pytype | pytype/tests/test_unpack.py | {
"start": 96,
"end": 9247
} | class ____(test_base.BaseTest):
"""Test unpacking of sequences via *xs."""
def test_build_with_unpack_indefinite(self):
ty = self.Infer("""
from typing import List
class A: pass
a: List[A] = []
b: List[str] = []
c = [*a, *b, 1]
d = {*a, *b, 1}
e = (*a, *b, 1)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, List, Set, Tuple, Union
class A: ...
a = ... # type: List[A]
b = ... # type: List[str]
c = ... # type: List[Union[A, str, int]]
d = ... # type: Set[Union[A, str, int]]
e = ... # type: Tuple[Union[A, str, int], ...]
""",
)
def test_empty(self):
ty, err = self.InferWithErrors("""
a, *b = [] # bad-unpacking[e]
c, *d = [1]
*e, f = [2]
g, *h, i = [1, 2]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, List
a: Any
b: List[nothing]
c: int
d: List[nothing]
e: List[nothing]
f: int
g: int
h: List[nothing]
i: int
""",
)
self.assertErrorSequences(err, {"e": ["0 values", "1 variable"]})
def test_unpack_indefinite_from_pytd(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Tuple
a: Tuple[int, ...]
b: Tuple[str, ...]
""",
)
ty = self.Infer(
"""
import foo
c = (*foo.a, *foo.b)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Tuple, Union
c: Tuple[Union[int, str], ...]
""",
)
def test_unpack_in_function_args(self):
# TODO(b/63407497): Enabling --strict-parameter-checks leads to a
# wrong-arg-types error on line 6.
self.options.tweak(strict_parameter_checks=False)
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Tuple
a: Tuple[int, ...]
b: Tuple[str, ...]
""",
)
errors = self.CheckWithErrors(
"""
import foo
class A: pass
def f(w: A, x: int, y: str, z: str):
pass
c = (*foo.a, *foo.b)
f(A(), *c, "hello")
f(A(), *c)
f(*c, "hello") # wrong-arg-types[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"w: A.*w: Union.int,.str."})
def test_unpack_concrete_in_function_args(self):
self.CheckWithErrors("""
def f(x: int, y: str):
pass
a = (1, 2)
f(*a) # wrong-arg-types
f(1, *("x", "y")) # wrong-arg-count
""")
def test_match_typed_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
def f(x:int, *args: str): ...
a: list
b: Any
""",
)
self.Check(
"""
import foo
foo.f(1, *foo.a)
foo.f(1, *foo.b)
foo.f(*foo.a)
""",
pythonpath=[d.path],
)
def test_path_join(self):
self.Check("""
import os
xs: list
os.path.join('x', *xs)
""")
def test_overloaded_function(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
@overload
def f(x:int, *args: str): ...
@overload
def f(x:str, *args: str): ...
a: list
b: Any
""",
)
self.Check(
"""
import foo
foo.f(1, *foo.a)
foo.f(1, *foo.b)
foo.f(*foo.a)
""",
pythonpath=[d.path],
)
def test_unpack_kwargs_without_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any, Dict, Optional
def f(x: int, y: str, z: bool = True, a: Optional[object] = None ): ...
a: Dict[str, Any]
b: dict
""",
)
self.Check(
"""
import foo
foo.f(1, 'a', **foo.a)
foo.f(1, 'a', **foo.b)
def g(x: int, y: str, **kwargs):
foo.f(x, y, **kwargs)
""",
pythonpath=[d.path],
)
def test_set_length_one_nondeterministic_unpacking(self):
self.Check("""
(x,) = {'a'}
""")
def test_frozenset_length_one_nondeterministic_unpacking(self):
self.Check("""
(x,) = frozenset(['a'])
""")
def test_set_nondeterministic_unpacking(self):
self.CheckWithErrors("""
(x, y) = {'a', 'b'} # bad-unpacking
""")
def test_frozenset_nondeterministic_unpacking(self):
self.CheckWithErrors("""
(x, y) = frozenset(['a', 'b']) # bad-unpacking
""")
def test_str(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Optional, Text
class A: ...
def f(
x: Text,
y: int,
k: bool = ...,
l: Optional[Text] = ...,
m: Optional[A] = ...,
) -> None: ...
""",
)
self.Check(
"""
import foo
from typing import Text
def g(self, x: str, **kwargs) -> None:
foo.f(x, 1, **kwargs)
""",
pythonpath=[d.path],
)
def test_unknown_length_tuple(self):
self.Check("""
from typing import Tuple
def f(*args: str):
pass
x: Tuple[str, ...]
f(*x, 'a', 'b', 'c')
""")
def test_dont_unpack_iterable(self):
# Check that we don't treat x as a splat in the call to f() just because
# it's an indefinite iterable.
self.Check("""
class Foo(list):
pass
def f(x: Foo, y: int, z: bool = True):
pass
def g(x: Foo, **kwargs):
f(x, 10, **kwargs)
""")
def test_erroneous_splat(self):
# Don't crash on an unnecessary splat.
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any, Sequence
def f(x: Sequence[Any], y: str): ...
def g(x: Sequence[Any], y: Sequence[str]): ...
""",
)
self.CheckWithErrors(
"""
import itertools
from typing import List
import foo
x: list
y: List[int]
foo.f(*x, "a")
foo.f(*x, *y) # wrong-arg-types
foo.g(*x, *y) # wrong-arg-types
a = itertools.product(*x, *y)
""",
pythonpath=[d.path],
)
def test_unpack_namedtuple(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(a, b, c, d, e, f): ...
""",
)
self.Check(
"""
import collections
import foo
X = collections.namedtuple('X', ('a', 'b', 'c'))
foo.f(*X(0, 1, 2), 3, 4, 5)
def g() -> X:
return X(0, 1, 2)
p = X(*g())
q = X(*g())
f = X(*(x - y for x, y in zip(p, q)))
""",
pythonpath=[d.path],
)
def test_posargs_and_namedargs(self):
self.Check("""
def f(x, y=1, z=2, a=3):
pass
def g(b=None):
f(*b, y=2, z=3)
""")
def test_dont_unpack_into_optional(self):
self.Check("""
def f(x: int, y: int, z: str = ...):
pass
def g(*args: int):
f(*args)
""")
def test_multiple_tuple_bindings(self):
ty = self.Infer("""
from typing import Tuple
class C:
def __init__(self, p, q):
self.p = p
self.q = q
x = [('a', 1), ('c', 3j), (2, 3)]
y = [C(*a).q for a in x]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, List, Tuple, Union
class C:
p: Any
q: Any
def __init__(self, p, q): ...
x: List[Tuple[Union[int, str], Union[complex, int]]]
y: List[Union[complex, int]]
""",
)
def test_type_parameter_instance(self):
ty = self.Infer("""
from typing import Dict, Tuple
class Key:
pass
class Value:
pass
def foo(x: Dict[Tuple[Key, Value], str]):
ret = []
for k, v in sorted(x.items()):
key, value = k
ret.append(key)
return ret
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, List, Tuple
class Key: ...
class Value: ...
def foo(x: Dict[Tuple[Key, Value], str]) -> List[Key]: ...
""",
)
def test_unpack_any_subclass_instance(self):
# Test for a corner case in b/261564270
with self.DepTree([(
"foo.pyi",
"""
from typing import Any
Base: Any
""",
)]):
self.Check("""
import foo
class A(foo.Base):
@classmethod
def make(cls, hello, world):
return cls(hello, world)
a = A.make(1, 2)
b = A.make(*a)
""")
if __name__ == "__main__":
test_base.main()
| TestUnpack |
python | bokeh__bokeh | examples/advanced/extensions/parallel_plot/parallel_reset.py | {
"start": 38,
"end": 185
} | class ____(ActionTool):
""" Tool to reset only plot axes and not selections
"""
__implementation__ = 'parallel_reset.ts'
| ParallelResetTool |
python | ray-project__ray | python/ray/train/torch/config.py | {
"start": 810,
"end": 1226
} | class ____:
def __enter__(self):
# Set default cuda device
if torch.cuda.is_available():
device = ray.train.torch.get_device()
if device.type == "cuda":
torch.cuda.set_device(device)
def __exit__(self, type, value, traceback):
# Propagate exceptions if any
return False
@PublicAPI(stability="stable")
@dataclass
| TorchConfigContextManager |
python | davidhalter__jedi | jedi/api/environment.py | {
"start": 1623,
"end": 4243
} | class ____(_BaseEnvironment):
"""
This class is supposed to be created by internal Jedi architecture. You
should not create it directly. Please use create_environment or the other
functions instead. It is then returned by that function.
"""
_subprocess = None
def __init__(self, executable, env_vars=None):
self._start_executable = executable
self._env_vars = env_vars
# Initialize the environment
self._get_subprocess()
def _get_subprocess(self):
if self._subprocess is not None and not self._subprocess.is_crashed:
return self._subprocess
try:
self._subprocess = CompiledSubprocess(self._start_executable,
env_vars=self._env_vars)
info = self._subprocess._send(None, _get_info)
except Exception as exc:
raise InvalidPythonEnvironment(
"Could not get version information for %r: %r" % (
self._start_executable,
exc))
# Since it could change and might not be the same(?) as the one given,
# set it here.
self.executable = info[0]
"""
The Python executable, matches ``sys.executable``.
"""
self.path = info[1]
"""
The path to an environment, matches ``sys.prefix``.
"""
self.version_info = _VersionInfo(*info[2])
"""
Like :data:`sys.version_info`: a tuple to show the current
Environment's Python version.
"""
return self._subprocess
def __repr__(self):
version = '.'.join(str(i) for i in self.version_info)
return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)
def get_inference_state_subprocess(
self,
inference_state: 'InferenceState',
) -> InferenceStateSubprocess:
return InferenceStateSubprocess(inference_state, self._get_subprocess())
@memoize_method
def get_sys_path(self):
"""
The sys path for this environment. Does not include potential
modifications from e.g. appending to :data:`sys.path`.
:returns: list of str
"""
# It's pretty much impossible to generate the sys path without actually
# executing Python. The sys path (when starting with -S) itself depends
# on how the Python version was compiled (ENV variables).
# If you omit -S when starting Python (normal case), additionally
# site.py gets executed.
return self._get_subprocess().get_sys_path()
| Environment |
python | ray-project__ray | python/ray/autoscaler/_private/event_system.py | {
"start": 1623,
"end": 3870
} | class ____:
"""Event system that handles storing and calling callbacks for events.
Attributes:
callback_map (Dict[str, List[Callable]]) : Stores list of callbacks
for events when registered.
"""
def __init__(self):
self.callback_map = {}
def add_callback_handler(
self,
event: str,
callback: Union[Callable[[Dict], None], List[Callable[[Dict], None]]],
):
"""Stores callback handler for event.
Args:
event: Event that callback should be called on. See
CreateClusterEvent for details on the events available to be
registered against.
callback (Callable[[Dict], None]): Callable object that is invoked
when specified event occurs.
"""
if event not in CreateClusterEvent.__members__.values():
cli_logger.warning(
f"{event} is not currently tracked, and this"
" callback will not be invoked."
)
self.callback_map.setdefault(event, []).extend(
[callback] if type(callback) is not list else callback
)
def execute_callback(
self, event: CreateClusterEvent, event_data: Optional[Dict[str, Any]] = None
):
"""Executes all callbacks for event.
Args:
event: Event that is invoked. See CreateClusterEvent
for details on the available events.
event_data (Dict[str, Any]): Argument that is passed to each
callable object stored for this particular event.
"""
if event_data is None:
event_data = {}
event_data["event_name"] = event
if event in self.callback_map:
for callback in self.callback_map[event]:
callback(event_data)
def clear_callbacks_for_event(self, event: str):
"""Clears stored callable objects for event.
Args:
event: Event that has callable objects stored in map.
See CreateClusterEvent for details on the available events.
"""
if event in self.callback_map:
del self.callback_map[event]
global_event_system = _EventSystem()
| _EventSystem |
python | huggingface__transformers | src/transformers/models/zamba/modeling_zamba.py | {
"start": 2614,
"end": 4011
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ZambaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
| ZambaRMSNorm |
python | ray-project__ray | python/ray/serve/_private/deployment_scheduler.py | {
"start": 1048,
"end": 3946
} | class ____(dict):
# Custom resource priority from environment variable
CUSTOM_PRIORITY: List[str] = RAY_SERVE_HIGH_PRIORITY_CUSTOM_RESOURCES
EPSILON = 1e-9
def get(self, key: str):
val = super().get(key)
if val is not None:
return val
# Implicit resources by default have 1 total
if key.startswith(ray._raylet.IMPLICIT_RESOURCE_PREFIX):
return 1
# Otherwise by default there is 0 of this resource
return 0
def can_fit(self, other):
keys = set(self.keys()) | set(other.keys())
# We add a small epsilon to avoid floating point precision issues.
return all(self.get(k) + self.EPSILON >= other.get(k) for k in keys)
def __eq__(self, other):
keys = set(self.keys()) | set(other.keys())
return all([self.get(k) == other.get(k) for k in keys])
def __add__(self, other):
keys = set(self.keys()) | set(other.keys())
kwargs = dict()
for key in keys:
if key.startswith(ray._raylet.IMPLICIT_RESOURCE_PREFIX):
kwargs[key] = min(1.0, self.get(key) + other.get(key))
else:
kwargs[key] = self.get(key) + other.get(key)
return Resources(kwargs)
def __sub__(self, other):
keys = set(self.keys()) | set(other.keys())
kwargs = {key: self.get(key) - other.get(key) for key in keys}
return Resources(kwargs)
def __lt__(self, other):
"""Determines priority when sorting a list of SoftResources.
1. Custom resources defined in RAY_SERVE_HIGH_PRIORITY_CUSTOM_RESOURCES (sorted by priority)
2. GPU
3. CPU
4. memory
5. Other custom resources
This means a resource with a larger number of high-priority resources is always
sorted higher than one with fewer, regardless of other types.
"""
keys = set(self.keys()) | set(other.keys())
custom_keys = keys - {"GPU", "CPU", "memory"}
for key in self.CUSTOM_PRIORITY:
if self.get(key) < other.get(key):
return True
elif self.get(key) > other.get(key):
return False
if self.get("GPU") < other.get("GPU"):
return True
elif self.get("GPU") > other.get("GPU"):
return False
if self.get("CPU") < other.get("CPU"):
return True
elif self.get("CPU") > other.get("CPU"):
return False
if self.get("memory") < other.get("memory"):
return True
elif self.get("memory") > other.get("memory"):
return False
for key in custom_keys - set(self.CUSTOM_PRIORITY):
if self.get(key) < other.get(key):
return True
elif self.get(key) > other.get(key):
return False
return False
| Resources |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 15398,
"end": 15887
} | class ____(RelativeLinkTestCase):
# Test a link to the same file with the virtual dataset created by
# File.build_virtual_dataset()
def make_vds(self, f):
with f.build_virtual_dataset('virtual', (2, 10), dtype='f4') as layout:
layout[0] = h5.VirtualSource(self.f1, 'data', shape=(10,))
layout[1] = h5.VirtualSource(self.f2, 'data', shape=(10,))
@ut.skipUnless(vds_support,
'VDS requires HDF5 >= 1.9.233')
| RelativeLinkBuildVDSTestCase |
python | tensorflow__tensorflow | third_party/xla/xla/backends/gpu/codegen/tools/ncu_rep_test.py | {
"start": 767,
"end": 4618
} | class ____(absltest.TestCase):
def test_get_metrics_by_kernel(self):
# That is a typical format of ncu-rep CSV output.
by_kernel = ncu_rep_lib.get_metrics_by_kernel([
["Kernel Name", "Metric 1", "Metric 2"],
["", "s", "Gb"],
["kernel1", "1", "2"],
["kernel2", "3", "4"],
])
self.assertEqual(
by_kernel,
[
{
"Kernel Name": ("kernel1", ""),
"Metric 1": ("1", "s"),
"Metric 2": ("2", "Gb"),
},
{
"Kernel Name": ("kernel2", ""),
"Metric 1": ("3", "s"),
"Metric 2": ("4", "Gb"),
},
],
)
def test_aggregate_kernel_metrics(self):
data = [
{
"ID": ("1", ""),
"Kernel Name": ("kernel1", ""),
"a.sum": ("12,345", "s"),
"b.max": ("2", "registers"),
"c.min": ("3", "b"),
"d": ("4", "b"),
"e": ("10", "b"),
},
{
"ID": ("2", ""),
"Kernel Name": ("kernel2", ""),
"a.sum": ("345,678.1", "s"),
"b.max": ("4", "registers"),
"c.min": ("5.0", "b"),
"d": ("6", "b"),
"e": ("11", "b"),
},
]
self.assertEqual(
ncu_rep_lib.aggregate_kernel_metrics(
["a.sum", "b.max", "c.min", "d"], data
),
[
["a.sum", "358023.1", "s"],
["b.max", "4", "registers"],
["c.min", "3", "b"],
["d", "4", "b"],
],
)
def test_filter_kernels(self):
data = [
{
"ID": ("1", ""),
"Kernel Name": ("kernel1", ""),
"a.sum": ("1,000.0", "s"),
"b.max": ("2", "registers"),
"c.min": ("3", "b"),
"d": ("4", "b"),
"e": ("10", "b"),
},
{
"ID": ("2", ""),
"Kernel Name": ("kernel2", ""),
"a.sum": ("3.0", "s"),
"b.max": ("4", "registers"),
"c.min": ("5.0", "b"),
"d": ("6", "b"),
"e": ("11", "b"),
},
]
self.assertEqual(ncu_rep_lib.filter_kernels(data, "id:1"), [data[0]])
self.assertEqual(ncu_rep_lib.filter_kernels(data, "name:^k.*2$"), [data[1]])
self.assertEqual(ncu_rep_lib.filter_kernels(data, "name:2"), [data[1]])
self.assertEqual(ncu_rep_lib.filter_kernels(data, "name:kernel"), data)
self.assertEqual(ncu_rep_lib.filter_kernels(data, "after:id:1"), [data[1]])
def test_write_metrics_markdown(self):
with io.StringIO() as f:
ncu_rep_lib.write_metrics_markdown(
f,
[
["Long Metric 1", "1.0000000000", "s"],
["Metric 2", "2", "Long Unit"],
],
)
self.assertEqual(
f.getvalue(),
"""Metric | Value | Unit
--------------|--------------|----------
Long Metric 1 | 1.0000000000 | s
Metric 2 | 2 | Long Unit
""",
)
def test_write_metrics_csv(self):
with io.StringIO() as f:
ncu_rep_lib.write_metrics_csv(
f,
[
["Long Metric 1", "1.0000000000", "s"],
["Metric 2", "2", "Long Unit"],
],
)
self.assertEqual(
f.getvalue(),
"""metric,value,unit
Long Metric 1,1.0000000000,s
Metric 2,2,Long Unit
""",
)
def test_write_metrics_raw(self):
with io.StringIO() as f:
ncu_rep_lib.write_metrics_raw(
f,
[
["Long Metric 1", "1.0000000000", "s"],
["Metric 2", "2", "Long Unit"],
],
)
self.assertEqual(
f.getvalue(),
"""1.0000000000 s
2 Long Unit
""",
)
if __name__ == "__main__":
absltest.main()
| NcuRepTest |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/waiters.py | {
"start": 4787,
"end": 9442
} | class ____(Waiter[T]):
# Implementation of `Waiter` for use in asynchronous contexts
def __init__(self, call: Call[T]) -> None:
super().__init__(call=call)
# Delay instantiating loop and queue as there may not be a loop present yet
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._queue: Optional[asyncio.Queue[Optional[Call[T]]]] = None
self._early_submissions: list[Call[T]] = []
self._done_callbacks: list[Call[Any]] = []
self._done_event = Event()
self._done_waiting = False
def submit(self, call: Call[T]) -> Call[T]:
"""
Submit a callback to execute while waiting.
"""
if self.call_is_done():
raise RuntimeError(f"The call {self._call} is already done.")
call.set_runner(self)
if not self._queue:
# If the loop is not yet available, just push the call to a stack
self._early_submissions.append(call)
return call
# We must put items in the queue from the event loop that owns it
if TYPE_CHECKING:
assert self._loop is not None
call_soon_in_loop(self._loop, self._queue.put_nowait, call)
return call
def _resubmit_early_submissions(self) -> None:
if TYPE_CHECKING:
assert self._queue is not None
assert self._loop is not None
for call in self._early_submissions:
# We must put items in the queue from the event loop that owns it
call_soon_in_loop(self._loop, self._queue.put_nowait, call)
self._early_submissions = []
async def _handle_waiting_callbacks(self) -> None:
logger.debug("Waiter %r watching for callbacks", self)
tasks: list[Awaitable[None]] = []
if TYPE_CHECKING:
assert self._queue is not None
try:
while True:
callback = await self._queue.get()
if callback is None:
break
# Ensure that callbacks are cancelled if the parent call is cancelled so
# waiting never runs longer than the call
self._call.future.add_cancel_callback(callback.future.cancel)
retval = callback.run()
if inspect.isawaitable(retval):
tasks.append(retval)
del callback
# Tasks are collected and awaited as a group; if each task was awaited in
# the above loop, async work would not be executed concurrently
await asyncio.gather(*tasks)
finally:
self._done_waiting = True
@contextlib.asynccontextmanager
async def _handle_done_callbacks(self) -> AsyncGenerator[None, Any]:
try:
yield
finally:
# Call done callbacks
while self._done_callbacks:
callback = self._done_callbacks.pop()
if callback:
# We shield against cancellation so we can run the callback
with anyio.CancelScope(shield=True):
await self._run_done_callback(callback)
async def _run_done_callback(self, callback: Call[Any]) -> None:
coro = callback.run()
if coro:
await coro
def add_done_callback(self, callback: Call[Any]) -> None:
if self._done_event.is_set():
raise RuntimeError("Cannot add done callbacks to done waiters.")
else:
self._done_callbacks.append(callback)
def _signal_stop_waiting(self) -> None:
# Only send a `None` to the queue if the waiter is still blocked reading from
# the queue. Otherwise, it's possible that the event loop is stopped.
if not self._done_waiting:
assert self._loop is not None
assert self._queue is not None
call_soon_in_loop(self._loop, self._queue.put_nowait, None)
async def wait(self) -> Call[T]:
# Assign the loop
self._loop = asyncio.get_running_loop()
self._queue = asyncio.Queue()
self._resubmit_early_submissions()
# Stop watching for work once the future is done
self._call.future.add_done_callback(lambda _: self._signal_stop_waiting())
self._call.future.add_done_callback(lambda _: self._done_event.set())
async with self._handle_done_callbacks():
await self._handle_waiting_callbacks()
# Wait for the future to be done
await self._done_event.wait()
_WAITERS_BY_THREAD[self._owner_thread].remove(self)
return self._call
| AsyncWaiter |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_us_state.py | {
"start": 765,
"end": 1787
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_us_state"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, dc_statehood=True, **kwargs):
return column.apply(lambda x: is_valid_state(x, dc_statehood))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidUSState |
python | Textualize__textual | src/textual/_compositor.py | {
"start": 1724,
"end": 2151
} | class ____:
"""An update generated by the compositor, which also doubles as console renderables."""
def render_segments(self, console: Console) -> str:
"""Render the update to raw data, suitable for writing to terminal.
Args:
console: Console instance.
Returns:
Raw data with escape sequences.
"""
return ""
@rich.repr.auto(angular=True)
| CompositorUpdate |
python | wireservice__csvkit | csvkit/utilities/csvsort.py | {
"start": 336,
"end": 2836
} | class ____(CSVKitUtility):
description = 'Sort CSV files. Like the Unix "sort" command, but for tabular data.'
def add_arguments(self):
self.argparser.add_argument(
'-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument(
'-c', '--columns', dest='columns',
help='A comma-separated list of column indices, names or ranges to sort by, e.g. "1,id,3-5". '
'Defaults to all columns.')
self.argparser.add_argument(
'-r', '--reverse', dest='reverse', action='store_true',
help='Sort in descending order.')
self.argparser.add_argument(
'-i', '--ignore-case', dest='ignore_case', action='store_true',
help='Perform case-independent sorting.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing the input.')
def main(self):
if self.args.names_only:
self.print_column_names()
return
if self.additional_input_expected():
self.argparser.error('You must provide an input file or piped data.')
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
table = agate.Table.from_csv(
self.input_file,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
column_types=self.get_column_types(),
**self.reader_kwargs,
)
key = parse_column_identifiers(
self.args.columns,
table.column_names,
self.get_column_offset(),
)
if self.args.ignore_case:
key = ignore_case_sort(key)
table = table.order_by(key, reverse=self.args.reverse)
table.to_csv(self.output_file, **self.writer_kwargs)
def launch_new_instance():
utility = CSVSort()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| CSVSort |
python | getsentry__sentry | tests/sentry/workflow_engine/test_base.py | {
"start": 3805,
"end": 5396
} | class ____:
patches: list = []
def setup_condition_mocks(
self,
evaluate_value: Callable[[int, Any], DataConditionResult],
module_paths: list[str],
):
"""
Sets up a mock handler for a DataCondition. This method mocks out the registry of the class, and will
always return the `MockDataConditionHandler` class.
:param evaluate_value: The method you want to invoke when `evaluate_value` is called on the mock handler.
:param module_paths: A list of the paths to override for the data_condition_handler registry.
"""
class MockDataConditionHandler(DataConditionHandler[int]):
@staticmethod
def evaluate_value(value: Any, comparison: Any) -> Any:
return evaluate_value(value, comparison)
for module_path in module_paths:
new_patch = mock.patch(
f"{module_path}.condition_handler_registry.get",
return_value=MockDataConditionHandler(),
)
self.patches.append(new_patch)
new_patch.start()
return Factories.create_data_condition(
type=Condition.LEVEL, # this will be overridden by the mock, but it cannot be a operator
comparison=1.0,
condition_result=DetectorPriorityLevel.HIGH,
)
def teardown_condition_mocks(self):
"""
Removes the mocks / patches for the DataConditionHandler.
"""
for patch in self.patches:
patch.stop()
self.patches = []
| DataConditionHandlerMixin |
python | pandas-dev__pandas | asv_bench/benchmarks/io/sql.py | {
"start": 180,
"end": 1533
} | class ____:
params = ["sqlalchemy", "sqlite"]
param_names = ["connection"]
def setup(self, connection):
N = 10000
con = {
"sqlalchemy": create_engine("sqlite:///:memory:"),
"sqlite": sqlite3.connect(":memory:"),
}
self.table_name = "test_type"
self.query_all = f"SELECT * FROM {self.table_name}"
self.con = con[connection]
self.df = DataFrame(
{
"float": np.random.randn(N),
"float_with_nan": np.random.randn(N),
"string": ["foo"] * N,
"bool": [True] * N,
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
self.df["time"] = self.df["datetime"].dt.time
self.df["datetime_string"] = self.df["datetime"].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists="replace")
def time_to_sql_dataframe(self, connection):
self.df.to_sql("test1", self.con, if_exists="replace")
def time_read_sql_query(self, connection):
read_sql_query(self.query_all, self.con)
| SQL |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 1881,
"end": 2943
} | class ____(str, Enum):
testname = "testvalue"
testname2 = "testvalue2"
@pytest.fixture
def logger(caplog):
logging.config.dictConfig(
{
"version": 1,
"handlers": {
__name__: {
# Reset later
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
}
},
"loggers": {
__name__: {
"handlers": [__name__],
"level": logging.INFO,
"propagate": False,
}
},
"disable_existing_loggers": False,
}
)
formatter = ShortExcFormatter("%(levelname)s %(message)s")
logger = logging.getLogger(__name__)
caplog.handler.setFormatter(formatter)
logger.handlers = [caplog.handler]
filt = SecretsMasker()
configure_secrets_masker_for_test(filt)
SecretsMasker.enable_log_masking()
logger.addFilter(filt)
filt.add_mask("password")
return logger
| MyEnum |
python | matplotlib__matplotlib | lib/matplotlib/cbook.py | {
"start": 25597,
"end": 29241
} | class ____:
"""
A disjoint-set data structure.
Objects can be joined using :meth:`join`, tested for connectedness
using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
Examples
--------
>>> from matplotlib.cbook import Grouper
>>> class Foo:
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> list(grp)
[[a, b, c], [d, e]]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
self._mapping = weakref.WeakKeyDictionary(
{x: weakref.WeakSet([x]) for x in init})
self._ordering = weakref.WeakKeyDictionary()
for x in init:
if x not in self._ordering:
self._ordering[x] = len(self._ordering)
self._next_order = len(self._ordering) # Plain int to simplify pickling.
def __getstate__(self):
return {
**vars(self),
# Convert weak refs to strong ones.
"_mapping": {k: set(v) for k, v in self._mapping.items()},
"_ordering": {**self._ordering},
}
def __setstate__(self, state):
vars(self).update(state)
# Convert strong refs to weak ones.
self._mapping = weakref.WeakKeyDictionary(
{k: weakref.WeakSet(v) for k, v in self._mapping.items()})
self._ordering = weakref.WeakKeyDictionary(self._ordering)
def __contains__(self, item):
return item in self._mapping
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more arguments.
"""
mapping = self._mapping
try:
set_a = mapping[a]
except KeyError:
set_a = mapping[a] = weakref.WeakSet([a])
self._ordering[a] = self._next_order
self._next_order += 1
for arg in args:
try:
set_b = mapping[arg]
except KeyError:
set_b = mapping[arg] = weakref.WeakSet([arg])
self._ordering[arg] = self._next_order
self._next_order += 1
if set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.update(set_b)
for elem in set_b:
mapping[elem] = set_a
def joined(self, a, b):
"""Return whether *a* and *b* are members of the same set."""
return (self._mapping.get(a, object()) is self._mapping.get(b))
def remove(self, a):
"""Remove *a* from the grouper, doing nothing if it is not there."""
self._mapping.pop(a, {a}).remove(a)
self._ordering.pop(a, None)
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
unique_groups = {id(group): group for group in self._mapping.values()}
for group in unique_groups.values():
yield sorted(group, key=self._ordering.__getitem__)
def get_siblings(self, a):
"""Return all of the items joined with *a*, including itself."""
siblings = self._mapping.get(a, [a])
return sorted(siblings, key=self._ordering.get)
| Grouper |
python | walkccc__LeetCode | solutions/2740. Find the Value of the Partition/2740.py | {
"start": 0,
"end": 141
} | class ____:
def findValueOfPartition(self, nums: list[int]) -> int:
return min(b - a for a, b in itertools.pairwise(sorted(nums)))
| Solution |
python | pypa__hatch | tests/cli/run/test_run.py | {
"start": 72003,
"end": 81221
} | class ____:
@pytest.mark.requires_internet
def test_not_file(self, hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(project_path)
helpers.update_project_environment(
project,
"default",
{"skip-install": True, "scripts": {"script.py": "python -c {args}"}, **project.config.envs["default"]},
)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py", "import pathlib,sys;pathlib.Path('test.txt').write_text(sys.executable)")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Creating environment: default
Checking dependencies
"""
)
output_file = project_path / "test.txt"
assert output_file.is_file()
env_data_path = data_path / "env" / "virtual"
assert env_data_path.is_dir()
project_data_path = env_data_path / project_path.name
assert project_data_path.is_dir()
storage_dirs = list(project_data_path.iterdir())
assert len(storage_dirs) == 1
storage_path = storage_dirs[0]
assert len(storage_path.name) == 8
env_dirs = list(storage_path.iterdir())
assert len(env_dirs) == 1
env_path = env_dirs[0]
assert env_path.name == project_path.name
assert str(env_path) in str(output_file.read_text())
@pytest.mark.requires_internet
def test_dependencies(self, hatch, helpers, temp_dir):
data_path = temp_dir / "data"
data_path.mkdir()
script = (temp_dir / "script.py").resolve()
script.write_text(
helpers.dedent(
"""
# /// script
# dependencies = [
# "binary",
# ]
# ///
import pathlib
import sys
import binary
pathlib.Path('test.txt').write_text(
f'{sys.executable}\\n{str(binary.convert_units(1024))}'
)
"""
)
)
with temp_dir.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
Creating environment: {script.id}
Checking dependencies
Syncing dependencies
"""
)
output_file = temp_dir / "test.txt"
assert output_file.is_file()
env_data_path = data_path / "env" / "virtual" / ".scripts"
assert env_data_path.is_dir()
env_path = env_data_path / script.id
assert env_path.is_dir()
assert env_path.name == script.id
executable_path, unit_conversion = output_file.read_text().splitlines()
executable = Path(executable_path)
assert executable.is_file()
assert data_path in executable.parents
assert unit_conversion == "(1.0, 'KiB')"
@pytest.mark.requires_internet
def test_dependencies_from_tool_config(self, hatch, helpers, temp_dir):
data_path = temp_dir / "data"
data_path.mkdir()
script = (temp_dir / "script.py").resolve()
script.write_text(
helpers.dedent(
"""
# /// script
# dependencies = []
#
# [tool.hatch]
# dependencies = [
# "binary",
# ]
# ///
import pathlib
import sys
import binary
pathlib.Path('test.txt').write_text(
f'{sys.executable}\\n{str(binary.convert_units(1024))}'
)
"""
)
)
with temp_dir.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
Creating environment: {script.id}
Checking dependencies
Syncing dependencies
"""
)
output_file = temp_dir / "test.txt"
assert output_file.is_file()
env_data_path = data_path / "env" / "virtual" / ".scripts"
assert env_data_path.is_dir()
env_path = env_data_path / script.id
assert env_path.is_dir()
assert env_path.name == script.id
executable_path, unit_conversion = output_file.read_text().splitlines()
executable = Path(executable_path)
assert executable.is_file()
assert data_path in executable.parents
assert unit_conversion == "(1.0, 'KiB')"
def test_unsupported_python_version(self, hatch, helpers, temp_dir):
data_path = temp_dir / "data"
data_path.mkdir()
script = (temp_dir / "script.py").resolve()
script.write_text(
helpers.dedent(
"""
# /// script
# requires-python = ">9000"
# ///
import pathlib
import sys
pathlib.Path('test.txt').write_text(sys.executable)
"""
)
)
with temp_dir.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
Unable to satisfy Python version constraint: >9000
"""
)
@pytest.mark.requires_internet
def test_python_version_constraint(self, hatch, helpers, temp_dir):
data_path = temp_dir / "data"
data_path.mkdir()
script = (temp_dir / "script.py").resolve()
# Cap the range at the current minor version so that the current Python
# will be used and distributions don't have to be downloaded
major, minor = sys.version_info[:2]
minor += 1
script.write_text(
helpers.dedent(
f"""
# /// script
# requires-python = "<{major}.{minor}"
# ///
import pathlib
import sys
pathlib.Path('test.txt').write_text(sys.executable)
"""
)
)
with temp_dir.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
Creating environment: {script.id}
Checking dependencies
"""
)
output_file = temp_dir / "test.txt"
assert output_file.is_file()
env_data_path = data_path / "env" / "virtual" / ".scripts"
assert env_data_path.is_dir()
env_path = env_data_path / script.id
assert env_path.is_dir()
assert env_path.name == script.id
executable = Path(output_file.read_text())
assert executable.is_file()
assert data_path in executable.parents
def test_python_version_constraint_from_tool_config(self, hatch, helpers, temp_dir):
data_path = temp_dir / "data"
data_path.mkdir()
script = (temp_dir / "script.py").resolve()
# Use the current minor version so that the current Python
# will be used and distributions don't have to be downloaded
major, minor = sys.version_info[:2]
script.write_text(
helpers.dedent(
f"""
# /// script
# requires-python = ">9000"
#
# [tool.hatch]
# python = "{major}.{minor}"
# ///
import pathlib
import sys
pathlib.Path('test.txt').write_text(sys.executable)
"""
)
)
with temp_dir.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("run", "script.py")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
Creating environment: {script.id}
Checking dependencies
"""
)
output_file = temp_dir / "test.txt"
assert output_file.is_file()
env_data_path = data_path / "env" / "virtual" / ".scripts"
assert env_data_path.is_dir()
env_path = env_data_path / script.id
assert env_path.is_dir()
assert env_path.name == script.id
executable = Path(output_file.read_text())
assert executable.is_file()
assert data_path in executable.parents
| TestScriptRunner |
python | pypa__pipenv | pipenv/vendor/click/_compat.py | {
"start": 1283,
"end": 2000
} | class ____(io.TextIOWrapper):
def __init__(
self,
stream: t.BinaryIO,
encoding: t.Optional[str],
errors: t.Optional[str],
force_readable: bool = False,
force_writable: bool = False,
**extra: t.Any,
) -> None:
self._stream = stream = t.cast(
t.BinaryIO, _FixupStream(stream, force_readable, force_writable)
)
super().__init__(stream, encoding, errors, **extra)
def __del__(self) -> None:
try:
self.detach()
except Exception:
pass
def isatty(self) -> bool:
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
| _NonClosingTextIOWrapper |
python | plotly__plotly.py | plotly/graph_objs/funnel/_legendgrouptitle.py | {
"start": 233,
"end": 2932
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel"
_path_str = "funnel.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.funnel.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnel.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | patrick-kidger__equinox | equinox/internal/_omega.py | {
"start": 5058,
"end": 5273
} | class ____ωUpdateHelper:
def __init__(self, value, is_leaf):
self.value = value
self.is_leaf = is_leaf
def __getitem__(self, item):
return _ωUpdateRef(self.value, item, self.is_leaf)
| _ |
python | scipy__scipy | benchmarks/benchmarks/special.py | {
"start": 1346,
"end": 1576
} | class ____(Benchmark):
def setup(self):
n, x = np.arange(50, 500), np.logspace(0, 20, 100)
n, x = np.meshgrid(n, x)
self.n, self.x = n, x
def time_expn_large_n(self):
expn(self.n, self.x)
| Expn |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_comm.py | {
"start": 11777,
"end": 23632
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_fully_shard_communication_count(self):
"""
Tests that FSDP issues the expected number of all-gathers and
reduce-scatters during forward and backward.
"""
self.run_subtests(
{"reshard_after_forward": [True, False, 2, None]},
self._test_communication_count,
)
def _test_communication_count(
self,
reshard_after_forward: Union[bool, int, None],
):
torch.manual_seed(42)
model_args = ModelArgs()
model = Transformer(model_args)
fully_shard_fn = functools.partial(
fully_shard, reshard_after_forward=reshard_after_forward
)
num_blocks = 0
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard_fn(module)
num_blocks += 1
fully_shard_fn(model)
# We construct `num_blocks` plus 1 FSDP states/communication groups
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
with CommDebugMode() as fwd_comm_mode:
loss = model(inp)
fwd_comm_counts = fwd_comm_mode.get_comm_counts()
self.assertEqual(len(fwd_comm_counts), 1)
self.assertEqual(fwd_comm_counts[c10d_ops._allgather_base_], num_blocks + 1)
with CommDebugMode() as bwd_comm_mode:
loss.sum().backward()
bwd_comm_counts = bwd_comm_mode.get_comm_counts()
if reshard_after_forward is None:
# 2 means two types of collectives (all-gather, reduce-scatter)
self.assertEqual(len(bwd_comm_counts), 2)
# do not reshard root model
self.assertEqual(bwd_comm_counts[c10d_ops._allgather_base_], num_blocks)
elif reshard_after_forward:
self.assertEqual(len(bwd_comm_counts), 2)
self.assertEqual(bwd_comm_counts[c10d_ops._allgather_base_], num_blocks + 1)
else:
self.assertEqual(len(bwd_comm_counts), 1)
self.assertEqual(
bwd_comm_counts[c10d_ops._reduce_scatter_base_], num_blocks + 1
)
@skip_if_lt_x_gpu(2)
def test_manual_reshard_with_reshard_after_forward_false(self):
"""
Tests that we can manually call ``reshard`` on FSDP modules that were
initialized with ``reshard_after_forward=False`` and still run unshard.
"""
torch.manual_seed(42)
model_args = ModelArgs()
model = Transformer(model_args)
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard(module, reshard_after_forward=False)
model = fully_shard(model, reshard_after_forward=False)
num_fsdp_modules = sum(
isinstance(module, FSDPModule) for module in model.modules()
)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
with CommDebugMode() as fwd_comm_mode:
loss = model(inp)
fwd_comm_counts = fwd_comm_mode.get_comm_counts()
self.assertEqual(len(fwd_comm_counts), 1)
self.assertEqual(fwd_comm_counts[c10d_ops._allgather_base_], num_fsdp_modules)
for module in model.modules():
if isinstance(module, FSDPModule):
module.reshard()
with CommDebugMode() as bwd_comm_mode:
loss.sum().backward()
bwd_comm_counts = bwd_comm_mode.get_comm_counts()
self.assertEqual(len(bwd_comm_counts), 2)
self.assertEqual(bwd_comm_counts[c10d_ops._allgather_base_], num_fsdp_modules)
self.assertEqual(
bwd_comm_counts[c10d_ops._reduce_scatter_base_], num_fsdp_modules
)
@skip_if_lt_x_gpu(2)
@xfailIf(TEST_XPU) # https://github.com/intel/torch-xpu-ops/issues/1571
def test_set_reduce_scatter_divide_factor(self):
self.run_subtests(
{
"divide_factor": [self.world_size * 2, self.world_size],
"mesh_shape": [
(self.world_size,),
(self.world_size // 2, 2),
(self.world_size, 1),
],
},
self._test_set_reduce_scatter_divide_factor,
)
self.run_subtests(
{"divide_factor": [self.world_size]},
self._test_set_reduce_scatter_divide_factor_mixed_prevision,
)
def _test_set_reduce_scatter_divide_factor(
self, divide_factor: float, mesh_shape: tuple[int] | tuple[int, int]
):
torch.manual_seed(42)
model_args = ModelArgs(dropout_p=0.0, weight_tying=False)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
mesh_dim_names = ("outer",) if len(mesh_shape) == 1 else ("outer", "inner")
mesh = init_device_mesh(
device_type.type, mesh_shape, mesh_dim_names=mesh_dim_names
)
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard(module, reshard_after_forward=False, mesh=mesh)
model = fully_shard(model, reshard_after_forward=False, mesh=mesh)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
model.set_gradient_divide_factor(divide_factor)
# Get ref_model params which should have the specific division factor applied
block_params = set()
for ref_mod in ref_model.modules():
if isinstance(ref_mod, TransformerBlock):
block_params.update(ref_mod.parameters())
non_block_params = set(ref_model.parameters()) - block_params
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
for _ in range(10):
ref_loss = ref_model(inp).sum()
ref_loss.backward()
for param in ref_model.parameters():
factor = divide_factor if param in non_block_params else self.world_size
param.grad.mul_(1.0 / factor)
dist.all_reduce(param.grad)
loss = model(inp).sum()
loss.backward()
ref_optim.step()
optim.step()
self.assertEqual(ref_loss, loss)
# Check parity before calling zero_grad so that grads are also checked
check_sharded_parity(self, ref_model, model)
ref_optim.zero_grad()
optim.zero_grad()
def _test_set_reduce_scatter_divide_factor_mixed_prevision(
self, divide_factor: float
):
torch.manual_seed(42)
param_dtype = torch.bfloat16
reduce_dtype = torch.float32
mp_policy = MixedPrecisionPolicy(
param_dtype=param_dtype, reduce_dtype=reduce_dtype
)
model = nn.Sequential(*[MLP(16) for _ in range(3)])
ref_model = copy.deepcopy(model).to(device_type)
ref_model_bf16 = copy.deepcopy(ref_model).to(param_dtype)
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
for mlp in model:
fully_shard(mlp, mp_policy=mp_policy)
model = fully_shard(model, mp_policy=mp_policy)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
model.set_gradient_divide_factor(divide_factor)
torch.manual_seed(42 + self.rank)
inp = torch.randn((4, 16), device=device_type.type, dtype=param_dtype)
for _ in range(10):
loss = model(inp).sum()
loss.backward()
optim.step()
optim.zero_grad()
ref_loss = ref_model_bf16(inp.to(param_dtype)).sum()
ref_loss.backward()
for param in ref_model_bf16.parameters():
param.grad.data = param.grad.to(torch.float32)
param.grad.mul_(1.0 / divide_factor)
dist.all_reduce(param.grad)
for param_fp32, param_bf16 in zip(
ref_model.parameters(), ref_model_bf16.parameters()
):
param_fp32.grad = param_bf16.grad
param_bf16.grad = None
ref_optim.step()
for param_fp32, param_bf16 in zip(
ref_model.parameters(), ref_model_bf16.parameters()
):
param_bf16.detach().copy_(param_fp32)
ref_optim.zero_grad()
self.assertEqual(ref_loss, loss)
check_sharded_parity(self, ref_model, model)
@skip_if_lt_x_gpu(2)
def test_set_reshard_after_forward(self):
"""
Tests that FSDP issues the expected number of all-gathers and
reduce-scatters during a train step when setting reshard_after_forward.
comm_count should perform same as test_fully_shard_communication_count.
"""
self.run_subtests(
{
"set_reshard_after_forward": [True, False, None],
"recurse": [True, False],
},
self._test_set_reshard_after_forward_by_communication_count,
)
def _test_set_reshard_after_forward_by_communication_count(
self,
set_reshard_after_forward: Union[bool, None],
recurse: bool,
):
torch.manual_seed(42)
model_args = ModelArgs()
model = Transformer(model_args).to(device_type)
if set_reshard_after_forward is None:
fully_shard_fn = fully_shard
else:
fully_shard_fn = functools.partial(
fully_shard, reshard_after_forward=not set_reshard_after_forward
)
num_blocks = 0
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard_fn(module)
num_blocks += 1
fully_shard_fn(model)
num_fsdp_modules = sum(
isinstance(module, FSDPModule) for module in model.modules()
)
if set_reshard_after_forward is not None:
model.set_reshard_after_forward(
reshard_after_forward=set_reshard_after_forward, recurse=recurse
)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 16), device=device_type.type)
with CommDebugMode() as fwd_comm_mode:
loss = model(inp)
fwd_comm_counts = fwd_comm_mode.get_comm_counts()
self.assertEqual(len(fwd_comm_counts), 1)
self.assertEqual(fwd_comm_counts[c10d_ops._allgather_base_], num_fsdp_modules)
with CommDebugMode() as bwd_comm_mode:
loss.sum().backward()
bwd_comm_counts = bwd_comm_mode.get_comm_counts()
# If recurse is False, set_reshard_after_forward only affects the root module
if set_reshard_after_forward is None:
self.assertEqual(len(bwd_comm_counts), 2)
self.assertEqual(bwd_comm_counts[c10d_ops._allgather_base_], num_blocks)
elif set_reshard_after_forward:
self.assertEqual(len(bwd_comm_counts), 2)
self.assertEqual(
bwd_comm_counts[c10d_ops._allgather_base_],
num_blocks + 1 if recurse else 1,
)
else:
if recurse:
self.assertEqual(len(bwd_comm_counts), 1)
else:
self.assertEqual(len(bwd_comm_counts), 2)
self.assertEqual(bwd_comm_counts[c10d_ops._allgather_base_], num_blocks)
self.assertEqual(
bwd_comm_counts[c10d_ops._reduce_scatter_base_], num_blocks + 1
)
| TestFullyShardCommunication |
python | jmcnamara__XlsxWriter | xlsxwriter/test/workbook/test_write_workbook_view.py | {
"start": 299,
"end": 4990
} | class ____(unittest.TestCase):
"""
Test the Workbook _write_workbook_view() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_workbook_view1(self):
"""Test the _write_workbook_view() method"""
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view2(self):
"""Test the _write_workbook_view() method"""
self.workbook.worksheet_meta.activesheet = 1
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" activeTab="1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view3(self):
"""Test the _write_workbook_view() method"""
self.workbook.worksheet_meta.firstsheet = 1
self.workbook.worksheet_meta.activesheet = 1
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" firstSheet="2" activeTab="1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view4(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(0, 0)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view5(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(None, None)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view6(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(1073, 644)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view7(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(123, 70)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="1845" windowHeight="1050"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view8(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_size(719, 490)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="10785" windowHeight="7350"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view9(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio()
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view10(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(34.6)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="346"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view11(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(0)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="0"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_workbook_view12(self):
"""Test the _write_workbook_view() method"""
self.workbook.set_tab_ratio(100)
self.workbook._write_workbook_view()
exp = """<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660" tabRatio="1000"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def tearDown(self):
self.workbook.fileclosed = 1
| TestWriteWorkbookView |
python | django__django | tests/i18n/tests.py | {
"start": 63840,
"end": 76147
} | class ____(SimpleTestCase):
rf = RequestFactory()
@override_settings(LANGUAGE_CODE="de")
def test_english_fallback(self):
"""
With a non-English LANGUAGE_CODE and if the active language is English
or one of its variants, the untranslated string should be returned
(instead of falling back to LANGUAGE_CODE) (See #24413).
"""
self.assertEqual(gettext("Image"), "Bild")
with translation.override("en"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-us"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-ca"):
self.assertEqual(gettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
tests = [
# Good headers
("de", [("de", 1.0)]),
("en-AU", [("en-au", 1.0)]),
("es-419", [("es-419", 1.0)]),
("*;q=1.00", [("*", 1.0)]),
("en-AU;q=0.123", [("en-au", 0.123)]),
("en-au;q=0.5", [("en-au", 0.5)]),
("en-au;q=1.0", [("en-au", 1.0)]),
("da, en-gb;q=0.25, en;q=0.5", [("da", 1.0), ("en", 0.5), ("en-gb", 0.25)]),
("en-au-xx", [("en-au-xx", 1.0)]),
(
"de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125",
[
("de", 1.0),
("en-au", 0.75),
("en-us", 0.5),
("en", 0.25),
("es", 0.125),
("fa", 0.125),
],
),
("*", [("*", 1.0)]),
("de;q=0.", [("de", 0.0)]),
("en; q=1,", [("en", 1.0)]),
("en; q=1.0, * ; q=0.5", [("en", 1.0), ("*", 0.5)]),
(
"en" + "-x" * 20,
[("en-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x", 1.0)],
),
(
", ".join(["en; q=1.0"] * 20),
[("en", 1.0)] * 20,
),
# Bad headers
("en-gb;q=1.0000", []),
("en;q=0.1234", []),
("en;q=.2", []),
("abcdefghi-au", []),
("**", []),
("en,,gb", []),
("en-au;q=0.1.0", []),
(("X" * 97) + "Z,en", []),
("da, en-gb;q=0.8, en;q=0.7,#", []),
("de;q=2.0", []),
("de;q=0.a", []),
("12-345", []),
("", []),
("en;q=1e0", []),
("en-au;q=1.0", []),
# Invalid as language-range value too long.
("xxxxxxxx" + "-xxxxxxxx" * 500, []),
# Header value too long, only parse up to limit.
(", ".join(["en; q=1.0"] * 500), [("en", 1.0)] * 45),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(
trans_real.parse_accept_lang_header(value), tuple(expected)
)
def test_parse_literal_http_header(self):
tests = [
("pt-br", "pt-br"),
("pt", "pt"),
("es,de", "es"),
("es-a,de", "es"),
# There isn't a Django translation to a US variation of the Spanish
# language, a safe assumption. When the user sets it as the
# preferred language, the main 'es' translation should be selected
# instead.
("es-us", "es"),
# There isn't a main language (zh) translation of Django but there
# is a translation to variation (zh-hans) the user sets zh-hans as
# the preferred language, it should be selected without falling
# back nor ignoring it.
("zh-hans,de", "zh-hans"),
("NL", "nl"),
("fy", "fy"),
("ia", "ia"),
("sr-latn", "sr-latn"),
("zh-hans", "zh-hans"),
("zh-hant", "zh-hant"),
]
for header, expected in tests:
with self.subTest(header=header):
request = self.rf.get("/", headers={"accept-language": header})
self.assertEqual(get_language_from_request(request), expected)
@override_settings(
LANGUAGES=[
("en", "English"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
Some browsers (Firefox, IE, etc.) use deprecated language codes. As
these language codes will be removed in Django 1.9, these will be
incorrectly matched. For example zh-tw (traditional) will be
interpreted as zh-hans (simplified), which is wrong. So we should also
accept these deprecated language codes.
refs #18419 -- this is explicitly for browser compatibility
"""
g = get_language_from_request
request = self.rf.get("/", headers={"accept-language": "zh-cn,en"})
self.assertEqual(g(request), "zh-hans")
request = self.rf.get("/", headers={"accept-language": "zh-tw,en"})
self.assertEqual(g(request), "zh-hant")
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
request = self.rf.get("/", headers={"accept-language": "zh-my,en"})
self.assertEqual(get_language_from_request(request), "zh-hans")
def test_subsequent_code_fallback_language(self):
"""
Subsequent language codes should be used when the language code is not
supported.
"""
tests = [
("zh-Hans-CN", "zh-hans"),
("zh-hans-mo", "zh-hans"),
("zh-hans-HK", "zh-hans"),
("zh-Hant-HK", "zh-hant"),
("zh-hant-tw", "zh-hant"),
("zh-hant-SG", "zh-hant"),
]
for value, expected in tests:
with self.subTest(value=value):
request = self.rf.get("/", headers={"accept-language": f"{value},en"})
self.assertEqual(get_language_from_request(request), expected)
def test_parse_language_cookie(self):
g = get_language_from_request
request = self.rf.get("/")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "pt-br"
self.assertEqual("pt-br", g(request))
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "pt"
self.assertEqual("pt", g(request))
request = self.rf.get("/", headers={"accept-language": "de"})
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "es"
self.assertEqual("es", g(request))
# There isn't a Django translation to a US variation of the Spanish
# language, a safe assumption. When the user sets it as the preferred
# language, the main 'es' translation should be selected instead.
request = self.rf.get("/")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "es-us"
self.assertEqual(g(request), "es")
# There isn't a main language (zh) translation of Django but there is a
# translation to variation (zh-hans) the user sets zh-hans as the
# preferred language, it should be selected without falling back nor
# ignoring it.
request = self.rf.get("/", headers={"accept-language": "de"})
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "zh-hans"
self.assertEqual(g(request), "zh-hans")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("ar-dz", "Algerian Arabic"),
("de", "German"),
("de-at", "Austrian German"),
("pt-BR", "Portuguese (Brazil)"),
],
)
def test_get_supported_language_variant_real(self):
g = trans_real.get_supported_language_variant
self.assertEqual(g("en"), "en")
self.assertEqual(g("en-gb"), "en")
self.assertEqual(g("de"), "de")
self.assertEqual(g("de-at"), "de-at")
self.assertEqual(g("de-ch"), "de")
self.assertEqual(g("pt-br"), "pt-br")
self.assertEqual(g("pt-BR"), "pt-BR")
self.assertEqual(g("pt"), "pt-br")
self.assertEqual(g("pt-pt"), "pt-br")
self.assertEqual(g("ar-dz"), "ar-dz")
self.assertEqual(g("ar-DZ"), "ar-DZ")
with self.assertRaises(LookupError):
g("pt", strict=True)
with self.assertRaises(LookupError):
g("pt-pt", strict=True)
with self.assertRaises(LookupError):
g("xyz")
with self.assertRaises(LookupError):
g("xy-zz")
with self.assertRaises(LookupError):
g("x" * LANGUAGE_CODE_MAX_LENGTH)
with self.assertRaises(LookupError):
g("x" * (LANGUAGE_CODE_MAX_LENGTH + 1))
# 167 * 3 = 501 which is LANGUAGE_CODE_MAX_LENGTH + 1.
self.assertEqual(g("en-" * 167), "en")
with self.assertRaises(LookupError):
g("en-" * 167, strict=True)
self.assertEqual(g("en-" * 30000), "en") # catastrophic test
def test_get_supported_language_variant_null(self):
g = trans_null.get_supported_language_variant
self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE)
with self.assertRaises(LookupError):
g("pt")
with self.assertRaises(LookupError):
g("de")
with self.assertRaises(LookupError):
g("de-at")
with self.assertRaises(LookupError):
g("de", strict=True)
with self.assertRaises(LookupError):
g("de-at", strict=True)
with self.assertRaises(LookupError):
g("xyz")
@override_settings(
LANGUAGES=[
("en", "English"),
("en-latn-us", "Latin English"),
("de", "German"),
("de-1996", "German, orthography of 1996"),
("de-at", "Austrian German"),
("de-ch-1901", "German, Swiss variant, traditional orthography"),
("i-mingo", "Mingo"),
("kl-tunumiit", "Tunumiisiut"),
("nan-hani-tw", "Hanji"),
("pl", "Polish"),
],
)
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
tests = [
("/pl/", "pl"),
("/pl", "pl"),
("/xyz/", None),
("/en/", "en"),
("/en-gb/", "en"),
("/en-latn-us/", "en-latn-us"),
("/en-Latn-US/", "en-Latn-US"),
("/de/", "de"),
("/de-1996/", "de-1996"),
("/de-at/", "de-at"),
("/de-AT/", "de-AT"),
("/de-ch/", "de"),
("/de-ch-1901/", "de-ch-1901"),
("/de-simple-page-test/", None),
("/i-mingo/", "i-mingo"),
("/kl-tunumiit/", "kl-tunumiit"),
("/nan-hani-tw/", "nan-hani-tw"),
(f"/{'a' * 501}/", None),
]
for path, language in tests:
with self.subTest(path=path):
self.assertEqual(g(path), language)
def test_get_language_from_path_null(self):
g = trans_null.get_language_from_path
self.assertIsNone(g("/pl/"))
self.assertIsNone(g("/pl"))
self.assertIsNone(g("/xyz/"))
def test_cache_resetting(self):
"""
After setting LANGUAGE, the cache should be cleared and languages
previously valid should not be used (#14170).
"""
g = get_language_from_request
request = self.rf.get("/", headers={"accept-language": "pt-br"})
self.assertEqual("pt-br", g(request))
with self.settings(LANGUAGES=[("en", "English")]):
self.assertNotEqual("pt-br", g(request))
def test_i18n_patterns_returns_list(self):
with override_settings(USE_I18N=False):
self.assertIsInstance(i18n_patterns([]), list)
with override_settings(USE_I18N=True):
self.assertIsInstance(i18n_patterns([]), list)
| MiscTests |
python | great-expectations__great_expectations | tests/core/test__docs_decorators.py | {
"start": 642,
"end": 4036
} | class ____:
@pytest.mark.unit
def test_public_api_decorator_full_docstring(self):
normalized_docstring = inspect.cleandoc(_func_full_docstring_public_api.__doc__ or "")
assert normalized_docstring == inspect.cleandoc(
"--Public API--My docstring.\n"
"\n"
" Longer description.\n"
"\n"
" Args:\n"
" some_arg: describe some_arg\n"
" other_arg: describe other_arg\n"
" "
)
assert _func_full_docstring_public_api.__name__ == "_func_full_docstring_public_api"
@pytest.mark.unit
def test_public_api_decorator_only_summary(self):
assert _func_only_summary_public_api.__doc__ == "--Public API--My docstring."
assert _func_only_summary_public_api.__name__ == "_func_only_summary_public_api"
@pytest.mark.unit
def test_public_api_decorator_no_docstring(self):
assert _func_no_docstring_public_api.__doc__ == "--Public API--"
assert _func_no_docstring_public_api.__name__ == "_func_no_docstring_public_api"
@pytest.mark.unit
def test_public_api_methods_have_decorated_parent_class(self):
"""
In order for the public_api decorator to work and result in rendered docs,
the parent class of a decorated method must also be decorated.
Example:
# Failure to decorate this class means that `my_method` won't be rendered in the docs.
class MyClass:
@public_api
def my_method(self):
pass
"""
class_registry = public_api_introspector.class_registry
classes_that_need_public_api_decorator: dict[str, list[str]] = {}
for class_, methods in class_registry.items():
if public_api_introspector.CLASS_DEFINITION not in methods:
classes_that_need_public_api_decorator[class_] = sorted(methods)
print(f"Classes missing @public_api ->\n{pf(classes_that_need_public_api_decorator)}")
assert sorted(classes_that_need_public_api_decorator.keys()) == []
@pytest.mark.unit
def test_public_api_objects_have_docstrings(self):
"""
All objects that are decorated with @public_api should have a docstring.
"""
violations = public_api_introspector.docstring_violations
assert len(violations) == 0, (
f"Public API decorated objects without docstrings: {pf(violations)}"
)
# @deprecated
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
def _func_full_docstring_deprecated(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
@deprecated_method_or_class(version="1.2.3")
def _func_full_docstring_deprecated_no_message(some_arg, other_arg):
"""My docstring.
Longer description.
Args:
some_arg: describe some_arg
other_arg: describe other_arg
"""
pass
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
def _func_only_summary_deprecated(some_arg, other_arg):
"""My docstring."""
pass
@deprecated_method_or_class(version="1.2.3", message="This is deprecated!!")
def _func_no_docstring_deprecated(some_arg, other_arg):
pass
| TestPublicAPI |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 5210,
"end": 6361
} | class ____(Token):
""" Represents a Do loop in in Fortran.
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.ast import aug_assign, Print
>>> from sympy.codegen.fnodes import Do
>>> i, n = symbols('i n', integer=True)
>>> r = symbols('r', real=True)
>>> body = [aug_assign(r, '+', 1/i), Print([i, r])]
>>> do1 = Do(body, i, 1, n)
>>> print(fcode(do1, source_format='free'))
do i = 1, n
r = r + 1d0/i
print *, i, r
end do
>>> do2 = Do(body, i, 1, n, 2)
>>> print(fcode(do2, source_format='free'))
do i = 1, n, 2
r = r + 1d0/i
print *, i, r
end do
"""
__slots__ = _fields = ('body', 'counter', 'first', 'last', 'step', 'concurrent')
defaults = {'step': Integer(1), 'concurrent': false}
_construct_body = staticmethod(lambda body: CodeBlock(*body))
_construct_counter = staticmethod(sympify)
_construct_first = staticmethod(sympify)
_construct_last = staticmethod(sympify)
_construct_step = staticmethod(sympify)
_construct_concurrent = staticmethod(lambda arg: true if arg else false)
| Do |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_asof.py | {
"start": 530,
"end": 6248
} | class ____:
def test_basic(self, date_range_frame):
# Explicitly cast to float to avoid implicit cast when setting np.nan
df = date_range_frame.astype({"A": "float"})
N = 50
df.loc[df.index[15:30], "A"] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = df.asof(dates)
assert result.notna().all(axis=1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notna().all(axis=1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(axis=1).all()
def test_subset(self, date_range_frame):
N = 10
# explicitly cast to float to avoid implicit upcast when setting to np.nan
df = date_range_frame.iloc[:N].copy().astype({"A": "float"})
df.loc[df.index[4:8], "A"] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
# with a subset of A should be the same
result = df.asof(dates, subset="A")
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=["A", "B"])
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# B gives df.asof
result = df.asof(dates, subset="B")
expected = df.resample("25s", closed="right").ffill().reindex(dates)
expected.iloc[20:] = 9
# no "missing", so "B" can retain int dtype (df["A"].dtype platform-dependent)
expected["B"] = expected["B"].astype(df["B"].dtype)
tm.assert_frame_equal(result, expected)
def test_missing(self, date_range_frame):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
# Cast to 'float64' to avoid upcast when introducing nan in df.asof
df = date_range_frame.iloc[:N].copy().astype("float64")
result = df.asof("1989-12-31")
expected = Series(
index=["A", "B"], name=Timestamp("1989-12-31"), dtype=np.float64
)
tm.assert_series_equal(result, expected)
result = df.asof(to_datetime(["1989-12-31"]))
expected = DataFrame(
index=to_datetime(["1989-12-31"]), columns=["A", "B"], dtype="float64"
)
tm.assert_frame_equal(result, expected)
# Check that we handle PeriodIndex correctly, dont end up with
# period.ordinal for series name
df = df.to_period("D")
result = df.asof("1989-12-31")
assert isinstance(result.name, Period)
def test_asof_all_nans(self, frame_or_series):
# GH 15713
# DataFrame/Series is all nans
result = frame_or_series([np.nan]).asof([0])
expected = frame_or_series([np.nan])
tm.assert_equal(result, expected)
def test_all_nans(self, date_range_frame):
# GH 15713
# DataFrame is all nans
# testing non-default indexes, multiple inputs
N = 150
rng = date_range_frame.index
dates = date_range("1/1/1990", periods=N, freq="25s")
result = DataFrame(np.nan, index=rng, columns=["A"]).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=["A"])
tm.assert_frame_equal(result, expected)
# testing multiple columns
dates = date_range("1/1/1990", periods=N, freq="25s")
result = DataFrame(np.nan, index=rng, columns=["A", "B", "C"]).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
# testing scalar input
result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof([3])
expected = DataFrame(np.nan, index=[3], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof(3)
expected = Series(np.nan, index=["A", "B"], name=3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"stamp,expected",
[
(
Timestamp("2018-01-01 23:22:43.325+00:00"),
Series(2, name=Timestamp("2018-01-01 23:22:43.325+00:00")),
),
(
Timestamp("2018-01-01 22:33:20.682+01:00"),
Series(1, name=Timestamp("2018-01-01 22:33:20.682+01:00")),
),
],
)
def test_time_zone_aware_index(self, stamp, expected):
# GH21194
# Testing awareness of DataFrame index considering different
# UTC and timezone
df = DataFrame(
data=[1, 2],
index=[
Timestamp("2018-01-01 21:00:05.001+00:00"),
Timestamp("2018-01-01 22:35:10.550+00:00"),
],
)
result = df.asof(stamp)
tm.assert_series_equal(result, expected)
def test_asof_periodindex_mismatched_freq(self):
N = 50
rng = period_range("1/1/1990", periods=N, freq="h")
df = DataFrame(np.random.default_rng(2).standard_normal(N), index=rng)
# Mismatched freq
msg = "Input has different freq"
with pytest.raises(IncompatibleFrequency, match=msg):
df.asof(rng.asfreq("D"))
def test_asof_preserves_bool_dtype(self):
# GH#16063 was casting bools to floats
dti = date_range("2017-01-01", freq="MS", periods=4)
ser = Series([True, False, True], index=dti[:-1])
ts = dti[-1]
res = ser.asof([ts])
expected = Series([True], index=[ts])
tm.assert_series_equal(res, expected)
| TestFrameAsof |
python | ray-project__ray | python/ray/serve/_private/test_utils.py | {
"start": 18139,
"end": 19248
} | class ____:
def __init__(self, name: str = None, tag_keys: Tuple[str] = None):
self.name = name
self.values = dict()
self.tags = tag_keys or ()
self.default_tags = dict()
def set_default_tags(self, tags: Dict[str, str]):
for key, tag in tags.items():
assert key in self.tags
self.default_tags[key] = tag
def set(self, value: Union[int, float], tags: Dict[str, str] = None):
merged_tags = self.default_tags.copy()
merged_tags.update(tags or {})
assert set(merged_tags.keys()) == set(self.tags)
d = self.values
for tag in self.tags[:-1]:
tag_value = merged_tags[tag]
if tag_value not in d:
d[tag_value] = dict()
d = d[tag_value]
d[merged_tags[self.tags[-1]]] = value
def get_value(self, tags: Dict[str, str]):
value = self.values
for tag in self.tags:
tag_value = tags[tag]
value = value.get(tag_value)
if value is None:
return
return value
| FakeGauge |
python | scrapy__scrapy | scrapy/utils/python.py | {
"start": 9309,
"end": 9828
} | class ____(AsyncIterator[_T]):
"""
Similar to MutableChain but for async iterables
"""
def __init__(self, *args: Iterable[_T] | AsyncIterator[_T]):
self.data: AsyncIterator[_T] = _async_chain(*args)
def extend(self, *iterables: Iterable[_T] | AsyncIterator[_T]) -> None:
self.data = _async_chain(self.data, _async_chain(*iterables))
def __aiter__(self) -> Self:
return self
async def __anext__(self) -> _T:
return await self.data.__anext__()
| MutableAsyncChain |
python | getsentry__sentry | src/sentry/rules/registry.py | {
"start": 152,
"end": 969
} | class ____:
def __init__(self) -> None:
self._rules: dict[str, list[type[RuleBase]]] = defaultdict(list)
self._map: dict[str, type[RuleBase]] = {}
def __contains__(self, rule_id: str) -> bool:
return rule_id in self._map
def __iter__(self) -> Generator[tuple[str, type[RuleBase]]]:
for rule_type, rule_list in self._rules.items():
for rule in rule_list:
yield rule_type, rule
def add(self, rule: type[RuleBase]) -> None:
self._map[rule.id] = rule
self._rules[rule.rule_type].append(rule)
def get(self, rule_id: str, type: str | None = None) -> type[RuleBase] | None:
cls = self._map.get(rule_id)
if type is not None and cls not in self._rules[type]:
return None
return cls
| RuleRegistry |
python | jazzband__tablib | src/tablib/formats/_yaml.py | {
"start": 61,
"end": 1512
} | class ____:
title = 'yaml'
extensions = ('yaml', 'yml')
@classmethod
def export_set(cls, dataset):
"""Returns YAML representation of Dataset."""
return yaml.safe_dump(
dataset._package(), default_flow_style=None, allow_unicode=True
)
@classmethod
def export_book(cls, databook):
"""Returns YAML representation of Databook."""
return yaml.safe_dump(
databook._package(), default_flow_style=None, allow_unicode=True
)
@classmethod
def import_set(cls, dset, in_stream):
"""Returns dataset from YAML stream."""
dset.wipe()
dset.dict = yaml.safe_load(in_stream)
@classmethod
def import_book(cls, dbook, in_stream):
"""Returns databook from YAML stream."""
dbook.wipe()
for sheet in yaml.safe_load(in_stream):
data = tablib.Dataset()
data.title = sheet['title']
data.dict = sheet['data']
dbook.add_sheet(data)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is valid YAML."""
try:
_yaml = yaml.safe_load(stream)
if isinstance(_yaml, (list, tuple, dict)):
return True
else:
return False
except (yaml.parser.ParserError, yaml.reader.ReaderError,
yaml.scanner.ScannerError):
return False
| YAMLFormat |
python | conda__conda | tests/plugins/test_auth_handlers.py | {
"start": 434,
"end": 606
} | class ____(HTTPBasicAuth):
def __init__(self):
username = "user_two"
password = "pass_two"
super().__init__(username, password)
| CustomAltCondaAuth |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_constructors.py | {
"start": 24850,
"end": 25525
} | class ____:
def test_shallow_copy_empty(self):
# GH#13067
idx = PeriodIndex([], freq="M")
result = idx._view()
expected = idx
tm.assert_index_equal(result, expected)
def test_shallow_copy_disallow_i8(self):
# GH#24391
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="ndarray"):
pi._shallow_copy(pi.asi8)
def test_shallow_copy_requires_disallow_period_index(self):
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="PeriodIndex"):
pi._shallow_copy(pi)
| TestShallowCopy |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_jobs_delete.py | {
"start": 2102,
"end": 4576
} | class ____(ProjectEndpoint):
owner = ApiOwner.REPLAY
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (ReplayDeletionJobPermission,)
def get(self, request: Request, project) -> Response:
"""
Retrieve a collection of replay delete jobs.
"""
queryset = ReplayDeletionJobModel.objects.filter(
organization_id=project.organization_id, project_id=project.id
)
return self.paginate(
request=request,
queryset=queryset,
order_by="-date_added",
on_results=lambda x: {
"data": serialize(x, request.user, ReplayDeletionJobSerializer())
},
paginator_cls=OffsetPaginator,
)
def post(self, request: Request, project) -> Response:
"""
Create a new replay deletion job.
"""
serializer = ReplayDeletionJobCreateSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
data = serializer.validated_data["data"]
# Create the deletion job
job = ReplayDeletionJobModel.objects.create(
range_start=data["rangeStart"],
range_end=data["rangeEnd"],
environments=data["environments"],
organization_id=project.organization_id,
project_id=project.id,
query=data["query"] or "",
status="pending",
)
# We don't check Seer features because an org may have previously had them on, then turned them off.
has_seer_data = features.has("organizations:replay-ai-summaries", project.organization)
# We always start with an offset of 0 (obviously) but future work doesn't need to obey
# this. You're free to start from wherever you want.
run_bulk_replay_delete_job.delay(job.id, offset=0, has_seer_data=has_seer_data)
self.create_audit_entry(
request,
organization=project.organization,
target_object=job.id,
event=audit_log.get_event_id("REPLAYDELETIONJOBMODEL_START"),
data={},
)
response_data = serialize(job, request.user, ReplayDeletionJobSerializer())
response = {"data": response_data}
return Response(response, status=201)
@region_silo_endpoint
| ProjectReplayDeletionJobsIndexEndpoint |
python | pytorch__pytorch | torch/_inductor/template_heuristics/params.py | {
"start": 633,
"end": 1257
} | class ____(KernelTemplateParams):
"""Simple implementation that wraps a kwargs dict"""
# NOTE: this is a compatibility layer, until every template
# has time to define their own params class, with meaningful
# defaults etc.
def __init__(self, kwargs: dict[str, Any]):
self.kwargs = kwargs
def to_kwargs(self) -> dict[str, Any]:
return self.kwargs.copy()
def to_serializeable_dict(self) -> dict[str, Any]:
return self.kwargs.copy()
@classmethod
def from_dict(cls, data: dict[str, Any]) -> DictKernelTemplateParams:
return cls(data)
| DictKernelTemplateParams |
python | doocs__leetcode | solution/0800-0899/0850.Rectangle Area II/Solution.py | {
"start": 1230,
"end": 1875
} | class ____:
def rectangleArea(self, rectangles: List[List[int]]) -> int:
segs = []
alls = set()
for x1, y1, x2, y2 in rectangles:
segs.append((x1, y1, y2, 1))
segs.append((x2, y1, y2, -1))
alls.update([y1, y2])
segs.sort()
alls = sorted(alls)
tree = SegmentTree(alls)
m = {v: i for i, v in enumerate(alls)}
ans = 0
for i, (x, y1, y2, k) in enumerate(segs):
if i:
ans += tree.length * (x - segs[i - 1][0])
tree.modify(1, m[y1], m[y2] - 1, k)
ans %= int(1e9 + 7)
return ans
| Solution |
python | pypa__pip | src/pip/_internal/resolution/resolvelib/requirements.py | {
"start": 1513,
"end": 4102
} | class ____(Requirement):
def __init__(self, ireq: InstallRequirement) -> None:
assert ireq.link is None, "This is a link, not a specifier"
self._ireq = ireq
self._equal_cache: str | None = None
self._hash: int | None = None
self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)
@property
def _equal(self) -> str:
if self._equal_cache is not None:
return self._equal_cache
self._equal_cache = str(self._ireq)
return self._equal_cache
def __str__(self) -> str:
return str(self._ireq.req)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({str(self._ireq.req)!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SpecifierRequirement):
return NotImplemented
return self._equal == other._equal
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
self._hash = hash(self._equal)
return self._hash
@property
def project_name(self) -> NormalizedName:
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
return canonicalize_name(self._ireq.req.name)
@property
def name(self) -> str:
return format_name(self.project_name, self._extras)
def format_for_error(self) -> str:
# Convert comma-separated specifiers into "A, B, ..., F and G"
# This makes the specifier a bit more "human readable", without
# risking a change in meaning. (Hopefully! Not all edge cases have
# been checked)
parts = [s.strip() for s in str(self).split(",")]
if len(parts) == 0:
return ""
elif len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def get_candidate_lookup(self) -> CandidateLookup:
return None, self._ireq
def is_satisfied_by(self, candidate: Candidate) -> bool:
assert candidate.name == self.name, (
f"Internal issue: Candidate is not for this requirement "
f"{candidate.name} vs {self.name}"
)
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
spec = self._ireq.req.specifier
return spec.contains(candidate.version, prereleases=True)
| SpecifierRequirement |
python | pytorch__pytorch | test/onnx/model_defs/op_test.py | {
"start": 906,
"end": 1187
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fake_quant = torch.ao.quantization.FakeQuantize()
self.fake_quant.disable_observer()
def forward(self, x):
output = self.fake_quant(x)
return output
| FakeQuantNet |
python | tornadoweb__tornado | tornado/test/util_test.py | {
"start": 1963,
"end": 6330
} | class ____(unittest.TestCase):
def setUp(self):
self.saved = TestConfigurable._save_configuration()
self.saved3 = TestConfig3._save_configuration()
def tearDown(self):
TestConfigurable._restore_configuration(self.saved)
TestConfig3._restore_configuration(self.saved3)
def checkSubclasses(self):
# no matter how the class is configured, it should always be
# possible to instantiate the subclasses directly
self.assertIsInstance(TestConfig1(), TestConfig1)
self.assertIsInstance(TestConfig2(), TestConfig2)
obj = TestConfig1(a=1)
self.assertEqual(obj.a, 1)
obj2 = TestConfig2(b=2)
self.assertEqual(obj2.b, 2)
def test_default(self):
# In these tests we combine a typing.cast to satisfy mypy with
# a runtime type-assertion. Without the cast, mypy would only
# let us access attributes of the base class.
obj = cast(TestConfig1, TestConfigurable())
self.assertIsInstance(obj, TestConfig1)
self.assertIsNone(obj.a)
obj = cast(TestConfig1, TestConfigurable(a=1))
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 1)
self.checkSubclasses()
def test_config_class(self):
TestConfigurable.configure(TestConfig2)
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertIsNone(obj.b)
obj = cast(TestConfig2, TestConfigurable(b=2))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 2)
self.checkSubclasses()
def test_config_str(self):
TestConfigurable.configure("tornado.test.util_test.TestConfig2")
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertIsNone(obj.b)
obj = cast(TestConfig2, TestConfigurable(b=2))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 2)
self.checkSubclasses()
def test_config_args(self):
TestConfigurable.configure(None, a=3)
obj = cast(TestConfig1, TestConfigurable())
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 3)
obj = cast(TestConfig1, TestConfigurable(42, a=4))
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 4)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig1()
self.assertIsNone(obj.a)
def test_config_class_args(self):
TestConfigurable.configure(TestConfig2, b=5)
obj = cast(TestConfig2, TestConfigurable())
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 5)
obj = cast(TestConfig2, TestConfigurable(42, b=6))
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 6)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig2()
self.assertIsNone(obj.b)
def test_config_multi_level(self):
TestConfigurable.configure(TestConfig3, a=1)
obj = cast(TestConfig3A, TestConfigurable())
self.assertIsInstance(obj, TestConfig3A)
self.assertEqual(obj.a, 1)
TestConfigurable.configure(TestConfig3)
TestConfig3.configure(TestConfig3B, b=2)
obj2 = cast(TestConfig3B, TestConfigurable())
self.assertIsInstance(obj2, TestConfig3B)
self.assertEqual(obj2.b, 2)
def test_config_inner_level(self):
# The inner level can be used even when the outer level
# doesn't point to it.
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3A)
TestConfig3.configure(TestConfig3B)
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3B)
# Configuring the base doesn't configure the inner.
obj2 = TestConfigurable()
self.assertIsInstance(obj2, TestConfig1)
TestConfigurable.configure(TestConfig2)
obj3 = TestConfigurable()
self.assertIsInstance(obj3, TestConfig2)
obj = TestConfig3()
self.assertIsInstance(obj, TestConfig3B)
| ConfigurableTest |
python | pytorch__pytorch | torch/_inductor/ops_handler.py | {
"start": 31692,
"end": 31833
} | class ____(NamedTuple):
num_ops: int
used_ops: OrderedSet[str]
read_buffers: list[str]
nontrivial_read_count: int
| OpCountResult |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 11210,
"end": 12298
} | class ____(StringIORewind):
params = ([",", ";"], [".", "_"], [None, "high", "round_trip"])
param_names = ["sep", "decimal", "float_precision"]
def setup(self, sep, decimal, float_precision):
floats = [
"".join([random.choice(string.digits) for _ in range(28)])
for _ in range(15)
]
rows = sep.join([f"0{decimal}{{}}"] * 3) + "\n"
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.StringIO_input = StringIO(data)
def time_read_csv(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
names=list("abc"),
float_precision=float_precision,
)
def time_read_csv_python_engine(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
engine="python",
float_precision=None,
names=list("abc"),
)
| ReadCSVFloatPrecision |
python | vyperlang__vyper | vyper/compiler/settings.py | {
"start": 1403,
"end": 6047
} | class ____:
compiler_version: Optional[str] = None
optimize: Optional[OptimizationLevel] = None
evm_version: Optional[str] = None
experimental_codegen: Optional[bool] = None
debug: Optional[bool] = None
enable_decimals: Optional[bool] = None
nonreentrancy_by_default: Optional[bool] = None
def __post_init__(self):
# sanity check inputs
if self.optimize is not None:
assert isinstance(self.optimize, OptimizationLevel)
if self.experimental_codegen is not None:
assert isinstance(self.experimental_codegen, bool)
if self.debug is not None:
assert isinstance(self.debug, bool)
if self.enable_decimals is not None:
assert isinstance(self.enable_decimals, bool)
if self.nonreentrancy_by_default is not None:
assert isinstance(self.nonreentrancy_by_default, bool)
# CMC 2024-04-10 consider hiding the `enable_decimals` member altogether
def get_enable_decimals(self) -> bool:
if self.enable_decimals is None:
return DEFAULT_ENABLE_DECIMALS
return self.enable_decimals
def as_cli(self):
ret = []
if self.optimize is not None:
ret.append(" --optimize " + str(self.optimize))
if self.experimental_codegen is True:
ret.append(" --venom-experimental")
if self.evm_version is not None:
ret.append(" --evm-version " + self.evm_version)
if self.debug is True:
ret.append(" --debug")
if self.enable_decimals is True:
ret.append(" --enable-decimals")
return "".join(ret)
def as_dict(self):
ret = dataclasses.asdict(self)
# compiler_version is not a compiler input, it can only come from
# source code pragma.
ret.pop("compiler_version", None)
ret = {k: v for (k, v) in ret.items() if v is not None}
if "optimize" in ret:
ret["optimize"] = str(ret["optimize"])
return ret
@classmethod
def from_dict(cls, data):
data = data.copy()
if "optimize" in data:
data["optimize"] = OptimizationLevel.from_string(data["optimize"])
return cls(**data)
def should_run_legacy_optimizer(settings: Settings):
if settings.optimize == OptimizationLevel.NONE:
return False
if settings.experimental_codegen and not VENOM_ENABLE_LEGACY_OPTIMIZER:
return False
return True
def merge_settings(
one: Settings, two: Settings, lhs_source="compiler settings", rhs_source="source pragma"
) -> Settings:
def _merge_one(lhs, rhs, helpstr):
if lhs is not None and rhs is not None and lhs != rhs:
# aesthetics, conjugate the verbs per english rules
s1 = "" if lhs_source.endswith("s") else "s"
s2 = "" if rhs_source.endswith("s") else "s"
raise ValueError(
f"settings conflict!\n\n {lhs_source}: {one}\n {rhs_source}: {two}\n\n"
f"({lhs_source} indicate{s1} {helpstr} {lhs}, but {rhs_source} indicate{s2} {rhs}.)"
)
return lhs if rhs is None else rhs
ret = Settings()
for field in dataclasses.fields(ret):
if field.name == "compiler_version":
continue
pretty_name = field.name.replace("_", "-") # e.g. evm_version -> evm-version
val = _merge_one(getattr(one, field.name), getattr(two, field.name), pretty_name)
setattr(ret, field.name, val)
return ret
# CMC 2024-04-10 do we need it to be Optional?
_settings = None
def get_global_settings() -> Optional[Settings]:
return _settings
def set_global_settings(new_settings: Optional[Settings]) -> None:
assert isinstance(new_settings, Settings) or new_settings is None
global _settings
_settings = new_settings
# could maybe refactor this, but it is easier for now than threading settings
# around everywhere.
@contextlib.contextmanager
def anchor_settings(new_settings: Settings) -> Generator:
"""
Set the globally available settings for the duration of this context manager
"""
assert new_settings is not None
global _settings
try:
tmp = get_global_settings()
set_global_settings(new_settings)
yield
finally:
set_global_settings(tmp)
def _opt_codesize():
return _settings.optimize == OptimizationLevel.CODESIZE
def _opt_gas():
return _settings.optimize == OptimizationLevel.GAS
def _opt_none():
return _settings.optimize == OptimizationLevel.NONE
def _is_debug_mode():
return get_global_settings().debug
| Settings |
python | scikit-learn__scikit-learn | sklearn/externals/_numpydoc/docscrape.py | {
"start": 19307,
"end": 23691
} | class ____(NumpyDocString):
extra_public_methods = ["__call__"]
def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
if not inspect.isclass(cls) and cls is not None:
raise ValueError(f"Expected a class or None, but got {cls!r}")
self._cls = cls
if "sphinx" in sys.modules:
from sphinx.ext.autodoc import ALL
else:
ALL = object()
if config is None:
config = {}
self.show_inherited_members = config.get("show_inherited_class_members", True)
if modulename and not modulename.endswith("."):
modulename += "."
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
_members = config.get("members", [])
if _members is ALL:
_members = None
_exclude = config.get("exclude-members", [])
if config.get("show_class_members", True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [
("Methods", self.methods),
("Attributes", self.properties),
]:
if not self[field]:
doc_list = []
for name in sorted(items):
if name in _exclude or (_members and name not in _members):
continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append(Parameter(name, "", splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
(not name.startswith("_") or name in self.extra_public_methods)
and isinstance(func, Callable)
and self._is_show_member(name)
)
]
@property
def properties(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
not name.startswith("_")
and not self._should_skip_member(name, self._cls)
and (
func is None
or isinstance(func, (property, cached_property))
or inspect.isdatadescriptor(func)
)
and self._is_show_member(name)
)
]
@staticmethod
def _should_skip_member(name, klass):
return (
# Namedtuples should skip everything in their ._fields as the
# docstrings for each of the members is: "Alias for field number X"
issubclass(klass, tuple)
and hasattr(klass, "_asdict")
and hasattr(klass, "_fields")
and name in klass._fields
)
def _is_show_member(self, name):
return (
# show all class members
self.show_inherited_members
# or class member is not inherited
or name in self._cls.__dict__
)
def get_doc_object(
obj,
what=None,
doc=None,
config=None,
class_doc=ClassDoc,
func_doc=FunctionDoc,
obj_doc=ObjDoc,
):
if what is None:
if inspect.isclass(obj):
what = "class"
elif inspect.ismodule(obj):
what = "module"
elif isinstance(obj, Callable):
what = "function"
else:
what = "object"
if config is None:
config = {}
if what == "class":
return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
elif what in ("function", "method"):
return func_doc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return obj_doc(obj, doc, config=config) | ClassDoc |
python | pytorch__pytorch | test/mobile/model_test/nn_ops.py | {
"start": 7374,
"end": 8222
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.transformers = nn.ModuleList(
[
nn.Transformer(
d_model=2, nhead=2, num_encoder_layers=1, num_decoder_layers=1
),
nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=2, nhead=2), num_layers=1
),
nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=2, nhead=2), num_layers=1
),
]
)
def forward(self):
input = torch.rand(1, 16, 2)
tgt = torch.rand((1, 16, 2))
r = self.transformers[0](input, tgt)
r = self.transformers[1](input)
r = self.transformers[2](input, tgt)
return len(r)
| NNTransformerModule |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 11897,
"end": 12501
} | class ____(ToolBase):
"""Tool to toggle the major grids of the figure."""
description = 'Toggle major grids'
default_keymap = property(lambda self: mpl.rcParams['keymap.grid'])
def trigger(self, sender, event, data=None):
sentinel = str(uuid.uuid4())
# Trigger grid switching by temporarily setting :rc:`keymap.grid`
# to a unique key and sending an appropriate event.
with (cbook._setattr_cm(event, key=sentinel),
mpl.rc_context({'keymap.grid': sentinel})):
mpl.backend_bases.key_press_handler(event, self.figure.canvas)
| ToolGrid |
python | sphinx-doc__sphinx | sphinx/roles.py | {
"start": 11420,
"end": 13537
} | class ____(ReferenceRole):
def run(self) -> tuple[list[Node], list[system_message]]:
target_id = 'index-%s' % self.env.new_serialno('index')
formatted_target = _format_rfc_target(self.target)
entries = [('single', f'RFC; {formatted_target}', target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference(
'', '', internal=False, refuri=refuri, classes=['rfc']
)
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
title = formatted_target
reference += nodes.strong(title, title)
except ValueError:
msg = self.inliner.reporter.error(
__('invalid RFC number %s') % self.target, line=self.lineno
)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self) -> str:
base_url = self.inliner.document.settings.rfc_base_url
ret = self.target.partition('#')
if ret[1]:
return base_url + self.inliner.rfc_url % int(ret[0]) + '#' + ret[2]
else:
return base_url + self.inliner.rfc_url % int(ret[0])
def _format_rfc_target(target: str, /) -> str:
"""Takes an RFC number with an optional anchor (like ``123#section-2.5.3``)
and attempts to produce a human-friendly title for it.
We have a set of known anchors that we format nicely,
everything else we leave alone.
"""
number, _, anchor = target.partition('#')
if anchor:
first, _, remaining = anchor.partition('-')
if first in {'appendix', 'page', 'section'}:
if remaining:
return f'RFC {number} {first.title()} {remaining}'
return f'RFC {number} {first.title()}'
return f'RFC {target}'
| RFC |
python | sqlalchemy__sqlalchemy | test/engine/test_reconnect.py | {
"start": 45674,
"end": 51068
} | class ____(fixtures.TestBase):
"""Test for the reconnect recipe given at doc/build/faq/connections.rst.
Make sure the above document is updated if changes are made here.
"""
# this recipe works on PostgreSQL also but only if the connection
# is cut off from the server side, otherwise the connection.cursor()
# method rightly fails because we explicitly closed the connection.
# since we don't have a fixture
# that can do this we currently rely on the MySQL drivers that allow
# us to call cursor() even when the connection were closed. In order
# to get a real "cut the server off" kind of fixture we'd need to do
# something in provisioning that seeks out the TCP connection at the
# OS level and kills it.
__only_on__ = ("+mysqldb", "+pymysql")
def make_engine(self, engine):
num_retries = 3
retry_interval = 0.5
def _run_with_retries(fn, context, cursor, statement, *arg, **kw):
for retry in range(num_retries + 1):
try:
fn(cursor, statement, context=context, *arg)
except engine.dialect.dbapi.Error as raw_dbapi_err:
connection = context.root_connection
if engine.dialect.is_disconnect(
raw_dbapi_err, connection, cursor
):
if retry > num_retries:
raise
engine.logger.error(
"disconnection error, retrying operation",
exc_info=True,
)
connection.invalidate()
connection.rollback()
time.sleep(retry_interval)
context.cursor = cursor = (
connection.connection.cursor()
)
else:
raise
else:
return True
e = engine.execution_options(isolation_level="AUTOCOMMIT")
@event.listens_for(e, "do_execute_no_params")
def do_execute_no_params(cursor, statement, context):
return _run_with_retries(
context.dialect.do_execute_no_params,
context,
cursor,
statement,
)
@event.listens_for(e, "do_execute")
def do_execute(cursor, statement, parameters, context):
return _run_with_retries(
context.dialect.do_execute,
context,
cursor,
statement,
parameters,
)
return e
__backend__ = True
def setup_test(self):
self.engine = engines.reconnecting_engine()
self.meta = MetaData()
self.table = Table(
"sometable",
self.meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
self.meta.create_all(self.engine)
def teardown_test(self):
self.meta.drop_all(self.engine)
self.engine.dispose()
def test_restart_on_execute_no_txn(self):
engine = self.make_engine(self.engine)
with engine.connect() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(conn.execute(select(1)).scalar(), 1)
def test_restart_on_execute_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
eq_(conn.execute(select(1)).scalar(), 1)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(conn.execute(select(1)).scalar(), 1)
def test_autocommits_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
conn.execute(
self.table.insert(),
[
{"id": 1, "name": "some name 1"},
{"id": 2, "name": "some name 2"},
{"id": 3, "name": "some name 3"},
],
)
self.engine.test_shutdown()
self.engine.test_restart()
eq_(
conn.execute(
select(self.table).order_by(self.table.c.id)
).fetchall(),
[(1, "some name 1"), (2, "some name 2"), (3, "some name 3")],
)
def test_fail_on_executemany_txn(self):
engine = self.make_engine(self.engine)
with engine.begin() as conn:
conn.execute(
self.table.insert(),
[
{"id": 1, "name": "some name 1"},
{"id": 2, "name": "some name 2"},
{"id": 3, "name": "some name 3"},
],
)
self.engine.test_shutdown()
self.engine.test_restart()
assert_raises(
exc.DBAPIError,
conn.execute,
self.table.insert(),
[
{"id": 4, "name": "some name 4"},
{"id": 5, "name": "some name 5"},
{"id": 6, "name": "some name 6"},
],
)
conn.rollback()
| ReconnectRecipeTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 547241,
"end": 548031
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for CreatedIssueContribution."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("CreatedIssueContributionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("CreatedIssueContribution"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| CreatedIssueContributionConnection |
python | sympy__sympy | sympy/utilities/matchpy_connector.py | {
"start": 6336,
"end": 11948
} | class ____:
"""
Replacer object to perform multiple pattern matching and subexpression
replacements in SymPy expressions.
Examples
========
Example to construct a simple first degree equation solver:
>>> from sympy.utilities.matchpy_connector import WildDot, Replacer
>>> from sympy import Equality, Symbol
>>> x = Symbol("x")
>>> a_ = WildDot("a_", optional=1)
>>> b_ = WildDot("b_", optional=0)
The lines above have defined two wildcards, ``a_`` and ``b_``, the
coefficients of the equation `a x + b = 0`. The optional values specified
indicate which expression to return in case no match is found, they are
necessary in equations like `a x = 0` and `x + b = 0`.
Create two constraints to make sure that ``a_`` and ``b_`` will not match
any expression containing ``x``:
>>> from matchpy import CustomConstraint
>>> free_x_a = CustomConstraint(lambda a_: not a_.has(x))
>>> free_x_b = CustomConstraint(lambda b_: not b_.has(x))
Now create the rule replacer with the constraints:
>>> replacer = Replacer(common_constraints=[free_x_a, free_x_b])
Add the matching rule:
>>> replacer.add(Equality(a_*x + b_, 0), -b_/a_)
Let's try it:
>>> replacer.replace(Equality(3*x + 4, 0))
-4/3
Notice that it will not match equations expressed with other patterns:
>>> eq = Equality(3*x, 4)
>>> replacer.replace(eq)
Eq(3*x, 4)
In order to extend the matching patterns, define another one (we also need
to clear the cache, because the previous result has already been memorized
and the pattern matcher will not iterate again if given the same expression)
>>> replacer.add(Equality(a_*x, b_), b_/a_)
>>> replacer._matcher.clear()
>>> replacer.replace(eq)
4/3
"""
def __init__(self, common_constraints: list = [], lambdify: bool = False, info: bool = False):
self._matcher = matchpy.ManyToOneMatcher()
self._common_constraint = common_constraints
self._lambdify = lambdify
self._info = info
self._wildcards: Dict[str, Wildcard] = {}
def _get_lambda(self, lambda_str: str) -> Callable[..., Expr]:
exec("from sympy import *")
return eval(lambda_str, locals())
def _get_custom_constraint(self, constraint_expr: Expr, condition_template: str) -> Callable[..., Expr]:
wilds = [x.name for x in constraint_expr.atoms(_WildAbstract)]
lambdaargs = ', '.join(wilds)
fullexpr = _get_srepr(constraint_expr)
condition = condition_template.format(fullexpr)
return matchpy.CustomConstraint(
self._get_lambda(f"lambda {lambdaargs}: ({condition})"))
def _get_custom_constraint_nonfalse(self, constraint_expr: Expr) -> Callable[..., Expr]:
return self._get_custom_constraint(constraint_expr, "({}) != False")
def _get_custom_constraint_true(self, constraint_expr: Expr) -> Callable[..., Expr]:
return self._get_custom_constraint(constraint_expr, "({}) == True")
def add(self, expr: Expr, replacement, conditions_true: List[Expr] = [],
conditions_nonfalse: List[Expr] = [], info: Any = None) -> None:
expr = _sympify(expr)
replacement = _sympify(replacement)
constraints = self._common_constraint[:]
constraint_conditions_true = [
self._get_custom_constraint_true(cond) for cond in conditions_true]
constraint_conditions_nonfalse = [
self._get_custom_constraint_nonfalse(cond) for cond in conditions_nonfalse]
constraints.extend(constraint_conditions_true)
constraints.extend(constraint_conditions_nonfalse)
pattern = matchpy.Pattern(expr, *constraints)
if self._lambdify:
lambda_str = f"lambda {', '.join((x.name for x in expr.atoms(_WildAbstract)))}: {_get_srepr(replacement)}"
lambda_expr = self._get_lambda(lambda_str)
replacement = lambda_expr
else:
self._wildcards.update({str(i): i for i in expr.atoms(Wildcard)})
if self._info:
replacement = ReplacementInfo(replacement, info)
self._matcher.add(pattern, replacement)
def replace(self, expression, max_count: int = -1):
# This method partly rewrites the .replace method of ManyToOneReplacer
# in MatchPy.
# License: https://github.com/HPAC/matchpy/blob/master/LICENSE
infos = []
replaced = True
replace_count = 0
while replaced and (max_count < 0 or replace_count < max_count):
replaced = False
for subexpr, pos in matchpy.preorder_iter_with_position(expression):
try:
replacement_data, subst = next(iter(self._matcher.match(subexpr)))
if self._info:
replacement = replacement_data.replacement
infos.append(replacement_data.info)
else:
replacement = replacement_data
if self._lambdify:
result = replacement(**subst)
else:
result = replacement.xreplace({self._wildcards[k]: v for k, v in subst.items()})
expression = matchpy.functions.replace(expression, pos, result)
replaced = True
break
except StopIteration:
pass
replace_count += 1
if self._info:
return expression, infos
else:
return expression
| Replacer |
python | facebookresearch__faiss | tests/test_build_blocks.py | {
"start": 3515,
"end": 4256
} | class ____(unittest.TestCase):
def test_1(self):
# try with dimensions that are multiples of 16 or not
rs = np.random.RandomState(123)
swig_ptr = faiss.swig_ptr
for dim in 16, 32, 20, 25:
for _repeat in 1, 2, 3, 4, 5:
a = rs.rand(dim).astype('float32')
b = rs.rand(dim).astype('float32')
c = np.zeros(dim, dtype='float32')
bf = rs.uniform(5.0) - 2.5
idx = faiss.fvec_madd_and_argmin(
dim, swig_ptr(a), bf, swig_ptr(b),
swig_ptr(c))
ref_c = a + b * bf
assert np.abs(c - ref_c).max() < 1e-5
assert idx == ref_c.argmin()
| TestMAdd |
python | facebook__pyre-check | client/tests/coverage_data_tests.py | {
"start": 41757,
"end": 44744
} | class ____(testslide.TestCase):
def test_find_module_paths__basic(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
setup.ensure_files_exist(
root_path,
["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
)
setup.ensure_directories_exists(root_path, ["b/d"])
self.assertCountEqual(
find_module_paths(
[
root_path / "a/s1.py",
root_path / "b/s2.py",
root_path / "b/s4.txt",
],
excludes=[],
),
[
root_path / "a/s1.py",
root_path / "b/s2.py",
],
)
self.assertCountEqual(
find_module_paths([root_path], excludes=[]),
[
root_path / "s0.py",
root_path / "a/s1.py",
root_path / "b/s2.py",
root_path / "b/c/s3.py",
],
)
def test_find_module_paths__with_exclude(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
setup.ensure_files_exist(
root_path,
["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
)
setup.ensure_directories_exists(root_path, ["b/d"])
self.assertCountEqual(
find_module_paths(
[
root_path / "a/s1.py",
root_path / "b/s2.py",
root_path / "b/s4.txt",
],
excludes=[r".*2\.py"],
),
[
root_path / "a/s1.py",
],
)
self.assertCountEqual(
find_module_paths(
[root_path],
excludes=[r".*2\.py"],
),
[
root_path / "s0.py",
root_path / "a/s1.py",
root_path / "b/c/s3.py",
],
)
def test_find_module_paths__with_duplicates(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
setup.ensure_files_exist(
root_path,
["a/s1.py", "a/s2.py"],
)
self.assertCountEqual(
find_module_paths(
[
root_path / "a/s1.py",
root_path / "a",
],
excludes=[],
),
[
root_path / "a/s1.py",
root_path / "a/s2.py",
],
)
| ModuleFindingHelpersTest |
python | run-llama__llama_index | llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/base.py | {
"start": 1197,
"end": 5725
} | class ____(BaseModel, Generic[T]):
model_config = ConfigDict(arbitrary_types_allowed=True)
open_spans: Dict[str, T] = Field(
default_factory=dict, description="Dictionary of open spans."
)
completed_spans: List[T] = Field(
default_factory=list, description="List of completed spans."
)
dropped_spans: List[T] = Field(
default_factory=list, description="List of completed spans."
)
current_span_ids: Dict[Any, Optional[str]] = Field(
default={}, description="Id of current spans in a given thread."
)
_lock: Optional[threading.Lock] = PrivateAttr()
def __init__(
self,
open_spans: Dict[str, T] = {},
completed_spans: List[T] = [],
dropped_spans: List[T] = [],
current_span_ids: Dict[Any, str] = {},
):
super().__init__(
open_spans=open_spans,
completed_spans=completed_spans,
dropped_spans=dropped_spans,
current_span_ids=current_span_ids,
)
self._lock = None
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "BaseSpanHandler"
@property
def lock(self) -> threading.Lock:
if self._lock is None:
self._lock = threading.Lock()
return self._lock
def span_enter(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Logic for entering a span."""
if id_ in self.open_spans:
pass # should probably raise an error here
else:
span = self.new_span(
id_=id_,
bound_args=bound_args,
instance=instance,
parent_span_id=parent_id,
tags=tags,
)
if span:
with self.lock:
self.open_spans[id_] = span
def span_exit(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
span = self.prepare_to_exit_span(
id_=id_, bound_args=bound_args, instance=instance, result=result
)
if span:
with self.lock:
del self.open_spans[id_]
def span_drop(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> None:
"""Logic for dropping a span i.e. early exit."""
span = self.prepare_to_drop_span(
id_=id_, bound_args=bound_args, instance=instance, err=err
)
if span:
with self.lock:
del self.open_spans[id_]
@abstractmethod
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Create a span.
Subclasses of BaseSpanHandler should create the respective span type T
and return it. Only NullSpanHandler should return a None here.
"""
...
@abstractmethod
def prepare_to_exit_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Logic for preparing to exit a span.
Subclasses of BaseSpanHandler should return back the specific span T
that is to be exited. If None is returned, then the span won't actually
be exited.
"""
...
@abstractmethod
def prepare_to_drop_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Logic for preparing to drop a span.
Subclasses of BaseSpanHandler should return back the specific span T
that is to be dropped. If None is returned, then the span won't actually
be dropped.
"""
...
| BaseSpanHandler |
python | getsentry__sentry | src/sentry/users/services/user/model.py | {
"start": 3886,
"end": 3958
} | class ____(RpcModel):
user: RpcUser
created: bool
| UserCreateResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/formats/jsonl_spec.py | {
"start": 128,
"end": 238
} | class ____(str, Enum):
ignore = "ignore"
infer = "infer"
error = "error"
| UnexpectedFieldBehaviorEnum |
python | getsentry__sentry | src/sentry/preprod/api/bases/preprod_artifact_endpoint.py | {
"start": 847,
"end": 1249
} | class ____(ProjectPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
# Some simple actions, like triggering comparisons, should be allowed
"POST": ["project:read", "project:write", "project:admin"],
"PUT": ["project:read", "project:write", "project:admin"],
"DELETE": ["project:admin"],
}
| ProjectPreprodArtifactPermission |
python | readthedocs__readthedocs.org | readthedocs/projects/views/base.py | {
"start": 2915,
"end": 3948
} | class ____:
"""
Protects views for spammy projects.
It shows a ``Project marked as spam`` page and return 410 GONE if the
project's dashboard is denied.
"""
def is_show_dashboard_denied_wrapper(self):
"""
Determine if the project has reached dashboard denied treshold.
This function is wrapped just for testing purposes,
so we are able to mock it from outside.
"""
if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
from readthedocsext.spamfighting.utils import ( # noqa
is_show_dashboard_denied,
)
if is_show_dashboard_denied(self.get_project()):
return True
return False
def get(self, request, *args, **kwargs):
if self.is_show_dashboard_denied_wrapper():
template_name = "errors/dashboard/spam.html"
return render(request, template_name=template_name, status=410)
return super().get(request, *args, **kwargs)
| ProjectSpamMixin |
python | kubernetes-client__python | kubernetes/client/models/v1_limit_range.py | {
"start": 383,
"end": 6534
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1LimitRangeSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1LimitRange - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1LimitRange. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1LimitRange. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1LimitRange.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1LimitRange. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1LimitRange. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1LimitRange. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1LimitRange.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1LimitRange. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1LimitRange. # noqa: E501
:return: The metadata of this V1LimitRange. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1LimitRange.
:param metadata: The metadata of this V1LimitRange. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1LimitRange. # noqa: E501
:return: The spec of this V1LimitRange. # noqa: E501
:rtype: V1LimitRangeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1LimitRange.
:param spec: The spec of this V1LimitRange. # noqa: E501
:type: V1LimitRangeSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LimitRange):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LimitRange):
return True
return self.to_dict() != other.to_dict()
| V1LimitRange |
python | kamyu104__LeetCode-Solutions | Python/flip-game-ii.py | {
"start": 235,
"end": 1183
} | class ____(object):
def canWin(self, s):
g, g_final = [0], 0
for p in itertools.imap(len, re.split('-+', s)):
while len(g) <= p:
# Theorem 2: g[game] = g[subgame1]^g[subgame2]^g[subgame3]...
# and find first missing number.
g += min(set(xrange(p)) - {x^y for x, y in itertools.izip(g[:len(g)/2], g[-2:-len(g)/2-2:-1])}),
g_final ^= g[p]
return g_final > 0 # Theorem 1: First player must win iff g(current_state) != 0
# Time: O(n + c^3 * 2^c * logc), n is length of string, c is count of "++"
# Space: O(c * 2^c)
# hash solution.
# We have total O(2^c) game strings,
# and each hash key in hash table would cost O(c),
# each one has O(c) choices to the next one,
# and each one would cost O(clogc) to sort,
# so we get O((c * 2^c) * (c * clogc)) = O(c^3 * 2^c * logc) time.
# To cache the results of all combinations, thus O(c * 2^c) space.
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 31950,
"end": 32486
} | class ____(typing.Generic[_ValueType]):
value: _ValueType # the same name we have in `__init__`
def __init__(self, value: int) -> None:
"""By this example we show, that ``int`` is more important than ``_ValueType``."""
assert isinstance(value, int)
@given(st.data())
def test_constructor_is_more_important(data):
"""Constructor types should take precedence over all other annotations."""
data.draw(st.builds(AnnotatedConstructor))
def use_signature(self, value: str) -> None: ...
| AnnotatedConstructor |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 21651,
"end": 27621
class ____(TestEdgeView):
    """Exercise MultiEdgeView on a MultiGraph path graph (0..8) with one
    extra parallel edge (1, 2, key=3) carrying data {"foo": "bar"}."""
    @classmethod
    def setup_class(cls):
        cls.G = nx.path_graph(9, nx.MultiGraph())
        cls.G.add_edge(1, 2, key=3, foo="bar")
        cls.eview = nx.reportviews.MultiEdgeView
    def modify_edge(self, G, e, **kwds):
        # Edges given without an explicit key default to key 0.
        if len(e) == 2:
            e = e + (0,)
        G._adj[e[0]][e[1]][e[2]].update(kwds)
    def test_str(self):
        ev = self.eview(self.G)
        replist = [(n, n + 1, 0) for n in range(8)]
        replist.insert(2, (1, 2, 3))
        rep = str(replist)
        assert str(ev) == rep
    def test_getitem(self):
        G = self.G.copy()
        ev = G.edges
        G.edges[0, 1, 0]["foo"] = "bar"
        assert ev[0, 1, 0] == {"foo": "bar"}
        # slicing
        with pytest.raises(nx.NetworkXError):
            G.edges[0:5]
    def test_repr(self):
        ev = self.eview(self.G)
        rep = (
            "MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), "
            + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])"
        )
        assert repr(ev) == rep
    def test_call(self):
        # Calls that match the view's current settings return the same object.
        ev = self.eview(self.G)
        assert id(ev) == id(ev(keys=True))
        assert id(ev) == id(ev(data=False, keys=True))
        assert id(ev) != id(ev(keys=False))
        assert id(ev) != id(ev(data=True))
        assert id(ev) != id(ev(nbunch=1))
    def test_data(self):
        ev = self.eview(self.G)
        assert id(ev) != id(ev.data())
        assert id(ev) == id(ev.data(data=False, keys=True))
        assert id(ev) != id(ev.data(keys=False))
        assert id(ev) != id(ev.data(data=True))
        assert id(ev) != id(ev.data(nbunch=1))
    def test_iter(self):
        ev = self.eview(self.G)
        for u, v, k in ev:
            pass
        iev = iter(ev)
        assert next(iev) == (0, 1, 0)
        assert iter(ev) != ev
        assert iter(iev) == iev
    def test_iterkeys(self):
        G = self.G
        evr = self.eview(G)
        ev = evr(keys=True)
        for u, v, k in ev:
            pass
        assert k == 0
        ev = evr(keys=True, data="foo", default=1)
        for u, v, k, wt in ev:
            pass
        assert wt == 1
        self.modify_edge(G, (2, 3, 0), foo="bar")
        ev = evr(keys=True, data=True)
        # `checked`/`checked_multi` are only bound when the expected edges are
        # seen; the final asserts raise NameError otherwise.
        for e in ev:
            assert len(e) == 4
            if set(e[:2]) == {2, 3}:
                assert e[2] == 0
                assert e[3] == {"foo": "bar"}
                checked = True
            elif set(e[:3]) == {1, 2, 3}:
                assert e[2] == 3
                assert e[3] == {"foo": "bar"}
                checked_multi = True
            else:
                assert e[2] == 0
                assert e[3] == {}
        assert checked
        assert checked_multi
        ev = evr(keys=True, data="foo", default=1)
        for e in ev:
            if set(e[:2]) == {1, 2} and e[2] == 3:
                assert e[3] == "bar"
            if set(e[:2]) == {1, 2} and e[2] == 0:
                assert e[3] == 1
            if set(e[:2]) == {2, 3}:
                assert e[2] == 0
                assert e[3] == "bar"
                assert len(e) == 4
                checked_wt = True
        assert checked_wt
        ev = evr(keys=True)
        for e in ev:
            assert len(e) == 3
        elist = sorted([(i, i + 1, 0) for i in range(8)] + [(1, 2, 3)])
        assert sorted(ev) == elist
        # test that the keyword arguments are passed correctly
        ev = evr((1, 2), "foo", keys=True, default=1)
        with pytest.raises(TypeError):
            evr((1, 2), "foo", True, 1)
        with pytest.raises(TypeError):
            evr((1, 2), "foo", True, default=1)
        for e in ev:
            if set(e[:2]) == {1, 2}:
                assert e[2] in {0, 3}
                if e[2] == 3:
                    assert e[3] == "bar"
                else: # e[2] == 0
                    assert e[3] == 1
        if G.is_directed():
            assert len(list(ev)) == 3
        else:
            assert len(list(ev)) == 4
    def test_or(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
        result = {(n, n + 1, 0) for n in range(8)}
        result.update(some_edges)
        result.update({(1, 2, 3)})
        assert ev | some_edges == result
        assert some_edges | ev == result
    def test_sub(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
        result = {(n, n + 1, 0) for n in range(8)}
        result.remove((0, 1, 0))
        result.update({(1, 2, 3)})
        # NOTE(review): the comma makes each assert a non-empty 2-tuple, so
        # these always pass regardless of the difference's value; `==` was
        # probably intended. Confirm before changing -- the `some_edges - ev`
        # direction does NOT equal `result` under set-difference semantics.
        assert ev - some_edges, result
        assert some_edges - ev, result
    def test_xor(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
        if self.G.is_directed():
            result = {(n, n + 1, 0) for n in range(1, 8)}
            result.update({(1, 0, 0), (0, 2, 0), (1, 2, 3)})
            assert ev ^ some_edges == result
            assert some_edges ^ ev == result
        else:
            result = {(n, n + 1, 0) for n in range(1, 8)}
            result.update({(0, 2, 0), (1, 2, 3)})
            assert ev ^ some_edges == result
            assert some_edges ^ ev == result
    def test_and(self):
        ev = self.eview(self.G)
        some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
        if self.G.is_directed():
            assert ev & some_edges == {(0, 1, 0)}
            assert some_edges & ev == {(0, 1, 0)}
        else:
            assert ev & some_edges == {(0, 1, 0), (1, 0, 0)}
            assert some_edges & ev == {(0, 1, 0), (1, 0, 0)}
    def test_contains_with_nbunch(self):
        ev = self.eview(self.G)
        evn = ev(nbunch=[0, 2])
        assert (0, 1) in evn
        assert (1, 2) in evn
        assert (2, 3) in evn
        assert (3, 4) not in evn
        assert (4, 5) not in evn
        assert (5, 6) not in evn
        assert (7, 8) not in evn
        assert (8, 9) not in evn
| TestMultiEdgeView |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/dataclass_taint.py | {
"start": 1953,
"end": 2634
class ____:
    # Per the test below, taint flowing into `bad` is expected to be reported,
    # while `benign` is not -- presumably modeled in the accompanying taint config.
    bad: int
    benign: str
def test_class_attr_model_tainted_directly() -> None:
    """Exercise taint reporting for the dataclass attributes, both through the
    constructor and through direct attribute assignment after construction."""
    # not an issue
    DataClassWithClassAttributeTaintedDirectly(bad=1, benign=_test_source())
    # should be an issue, properly raised.
    DataClassWithClassAttributeTaintedDirectly(bad=_test_source(), benign="1")
    # not an issue
    data_object_no_issue = DataClassWithClassAttributeTaintedDirectly(bad=1, benign="1")
    data_object_no_issue.benign = _test_source()
    # is an issue and raised
    data_object_issue = DataClassWithClassAttributeTaintedDirectly(bad=1, benign="1")
    data_object_issue.bad = _test_source()
@dataclass
| DataClassWithClassAttributeTaintedDirectly |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 15587,
"end": 17669
class ____(GradientCheckpointingLayer):
    """One Qwen3-VL-MoE text decoder layer: pre-norm self-attention followed by
    a pre-norm MLP, each wrapped in a residual connection. The MLP is a sparse
    MoE block on selected layers and a dense MLP otherwise."""
    def __init__(self, config: Qwen3VLMoeTextConfig, layer_idx: int):
        super().__init__()
        self.self_attn = Qwen3VLMoeTextAttention(config, layer_idx)
        # Sparse MoE only on layers not forced dense via mlp_only_layers and
        # falling on the decoder_sparse_step stride (1-based layer count).
        if (layer_idx not in config.mlp_only_layers) and (
            config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
        ):
            self.mlp = Qwen3VLMoeTextSparseMoeBlock(config)
        else:
            self.mlp = Qwen3VLMoeTextMLP(config, intermediate_size=config.intermediate_size)
        self.input_layernorm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.hidden_size = config.hidden_size
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
| Qwen3VLMoeTextDecoderLayer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_gen_ai.py | {
"start": 7819,
"end": 8821
class ____:
    """Verify GenAIGenerateContentOperator builds the hook with the task's
    connection settings and forwards the task parameters to generate_content."""
    @mock.patch(GEN_AI_PATH.format("GenAIGenerativeModelHook"))
    def test_execute(self, mock_hook):
        op = GenAIGenerateContentOperator(
            task_id=TASK_ID,
            project_id=GCP_PROJECT,
            location=GCP_LOCATION,
            model=GEMINI_MODEL,
            contents=CONTENTS,
            generation_config=GENERATE_FROM_CACHED_MODEL_CONFIG,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute(context={"ti": mock.MagicMock()})
        # Hook constructed once with the operator's connection settings.
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        # Model call receives the operator's parameters unchanged.
        mock_hook.return_value.generate_content.assert_called_once_with(
            project_id=GCP_PROJECT,
            location=GCP_LOCATION,
            model=GEMINI_MODEL,
            contents=CONTENTS,
            generation_config=GENERATE_FROM_CACHED_MODEL_CONFIG,
        )
| TestGenAIGenerateFromCachedContentOperator |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_function_base.py | {
"start": 137993,
"end": 138973
class ____(TestCase):
    """Tests for np.sort_complex: type promotion of real inputs and ordering
    of complex inputs."""
    @parametrize(
        "type_in, type_out",
        [
            ("l", "D"),
            ("h", "F"),
            ("H", "F"),
            ("b", "F"),
            ("B", "F"),
            ("g", "G"),
        ],
    )
    def test_sort_real(self, type_in, type_out):
        # sort_complex() type casting for real input types
        a = np.array([5, 3, 6, 2, 1], dtype=type_in)
        actual = np.sort_complex(a)
        expected = np.sort(a).astype(type_out)
        assert_equal(actual, expected)
        assert_equal(actual.dtype, expected.dtype)
    def test_sort_complex(self):
        # sort_complex() handling of complex input
        a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype="D")
        expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype="D")
        actual = np.sort_complex(a)
        assert_equal(actual, expected)
        assert_equal(actual.dtype, expected.dtype)
# Allow running this test module directly.
if __name__ == "__main__":
    run_tests()
| TestSortComplex |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 71846,
"end": 72142
class ____(BaseModel):
    """
    TaskInstanceHistory Collection serializer for responses.
    """
    # Page of task-instance history records.
    task_instances: Annotated[list[TaskInstanceHistoryResponse], Field(title="Task Instances")]
    # Total number of records across all pages.
    total_entries: Annotated[int, Field(title="Total Entries")]
| TaskInstanceHistoryCollectionResponse |
python | celery__celery | t/unit/app/test_beat.py | {
"start": 31923,
"end": 32959
class ____:
    """Tests for beat.EmbeddedService in process and thread modes."""
    # NOTE(review): the `xxx_` prefix keeps this from being collected as a
    # test -- presumably disabled deliberately; confirm before re-enabling.
    def xxx_start_stop_process(self):
        pytest.importorskip('_multiprocessing')
        from billiard.process import Process
        s = beat.EmbeddedService(self.app)
        assert isinstance(s, Process)
        assert isinstance(s.service, beat.Service)
        s.service = MockService()
        class _Popen:
            terminated = False
            def terminate(self):
                self.terminated = True
        with patch('celery.platforms.close_open_fds'):
            s.run()
        assert s.service.started
        s._popen = _Popen()
        s.stop()
        assert s.service.stopped
        assert s._popen.terminated
    def test_start_stop_threaded(self):
        # thread=True must yield a Thread-based embedded service.
        s = beat.EmbeddedService(self.app, thread=True)
        from threading import Thread
        assert isinstance(s, Thread)
        assert isinstance(s.service, beat.Service)
        s.service = MockService()
        s.run()
        assert s.service.started
        s.stop()
        assert s.service.stopped
| test_EmbeddedService |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_category.py | {
"start": 1836,
"end": 1912
class ____:
    """Minimal axis stand-in that only carries a ``units`` attribute."""
    def __init__(self, units):
        self.units = units
| FakeAxis |
python | kamyu104__LeetCode-Solutions | Python/harshad-number.py | {
"start": 39,
"end": 330
class ____(object):
    def sumOfTheDigitsOfHarshadNumber(self, x):
        """
        :type x: int
        :rtype: int
        """
        # Accumulate the decimal digit sum of x.
        digit_sum = 0
        remaining = x
        while remaining:
            remaining, digit = divmod(remaining, 10)
            digit_sum += digit
        # x is a Harshad number iff it is divisible by its digit sum;
        # return the digit sum in that case, -1 otherwise.
        if x % digit_sum == 0:
            return digit_sum
        return -1
| Solution |
python | gevent__gevent | src/gevent/tests/test__socket.py | {
"start": 2056,
"end": 17842
class ____(greentest.TestCase):
    """TCP socket tests against a local listener: sendall variants, full-duplex
    traffic, timeouts, makefile, and connect_ex edge cases."""
    __timeout__ = None
    TIMEOUT_ERROR = socket.timeout
    long_data = ", ".join([str(x) for x in range(20000)])
    if not isinstance(long_data, bytes):
        long_data = long_data.encode('ascii')
    def setUp(self):
        super(TestTCP, self).setUp()
        # Verbose mode wires timestamped logging through setup and teardown.
        if '-v' in sys.argv:
            printed = []
            try:
                from time import perf_counter as now
            except ImportError:
                from time import time as now
            def log(*args):
                if not printed:
                    print()
                    printed.append(1)
                print("\t -> %0.6f" % now(), *args)
            orig_cot = self._close_on_teardown
            def cot(o):
                log("Registering for teardown", o)
                def c(o=o):
                    log("Closing on teardown", o)
                    o.close()
                    o = None
                orig_cot(c)
                return o
            self._close_on_teardown = cot
        else:
            def log(*_args):
                "Does nothing"
        self.log = log
        self.listener = self._close_on_teardown(self._setup_listener())
        # It is important to watch the lifetimes of socket objects and
        # ensure that:
        # (1) they are closed; and
        # (2) *before* the next test begins.
        #
        # For example, it's a bad bad thing to leave a greenlet running past the
        # scope of the individual test method if that greenlet will close
        # a socket object --- especially if that socket object might also have been
        # closed explicitly.
        #
        # On Windows, we've seen issue with filenos getting reused while something
        # still thinks they have the original fileno around. When they later
        # close that fileno, a completely unrelated object is closed.
        self.port = self.listener.getsockname()[1]
    def _setup_listener(self):
        return tcp_listener()
    def create_connection(self, host=None, port=None, timeout=None,
                          blocking=None):
        sock = self._close_on_teardown(socket.socket())
        sock.connect((host or params.DEFAULT_CONNECT, port or self.port))
        if timeout is not None:
            sock.settimeout(timeout)
        if blocking is not None:
            sock.setblocking(blocking)
        return sock
    def _test_sendall(self, data, match_data=None, client_method='sendall',
                      **client_args):
        # Shared driver: server accepts and reads everything; client sends
        # `data` via `client_method`; the received bytes must match.
        # pylint:disable=too-many-locals,too-many-branches,too-many-statements
        log = self.log
        log("test_sendall using method", client_method)
        read_data = []
        accepted_event = Event()
        def accept_and_read():
            log("\taccepting", self.listener)
            conn, _ = self.listener.accept()
            try:
                with conn.makefile(mode='rb') as r:
                    log("\taccepted on server; client conn is", conn, "file is", r)
                    accepted_event.set()
                    log("\treading")
                    read_data.append(r.read())
                    log("\tdone reading", r, "got bytes", len(read_data[0]))
                    del r
            finally:
                conn.close()
                del conn
        server = Thread(target=accept_and_read)
        try:
            log("creating client connection")
            client = self.create_connection(**client_args)
            # It's important to wait for the server to fully accept before
            # we shutdown and close the socket. In SSL mode, the number
            # and timing of data exchanges to complete the handshake and
            # thus exactly when greenlet switches occur, varies by TLS version.
            #
            # It turns out that on < TLS1.3, we were getting lucky and the
            # server was the greenlet that raced ahead and blocked in r.read()
            # before the client returned from create_connection().
            #
            # But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
            # one that raced ahead while the server had yet to return from
            # self.listener.accept(). So the client sent the data to the socket,
            # and closed, before the server could do anything, and the server,
            # when it got switched to by server.join(), found its new socket
            # dead.
            accepted_event.wait()
            log("Client got accepted event from server", client, "; sending data", len(data))
            try:
                x = getattr(client, client_method)(data)
                log("Client sent data: result from method", x)
            finally:
                log("Client will unwrap and shutdown")
                if hasattr(client, 'unwrap'):
                    # Are we dealing with an SSLSocket? If so, unwrap it
                    # before attempting to shut down the socket. This does the
                    # SSL shutdown handshake and (hopefully) stops ``accept_and_read``
                    # from generating ``ConnectionResetError`` on AppVeyor.
                    try:
                        client = client.unwrap()
                    except (ValueError, OSError):
                        # PyPy raises _cffi_ssl._stdssl.error.SSLSyscallError,
                        # which is an IOError in 2.7 and OSError in 3.7
                        pass
                try:
                    # The implicit reference-based nastiness of Python 2
                    # sockets interferes, especially when using SSL sockets.
                    # The best way to get a decent FIN to the server is to shutdown
                    # the output. Doing that on Python 3, OTOH, is contraindicated
                    # except on PyPy, so this used to read ``PY2 or PYPY``. But
                    # it seems that a shutdown is generally good practice, and I didn't
                    # document what errors we saw without it. Per issue #1637
                    # lets do a shutdown everywhere, but only after removing any
                    # SSL wrapping.
                    client.shutdown(socket.SHUT_RDWR)
                except OSError:
                    pass
                log("Client will close")
                client.close()
        finally:
            server.join(10)
            assert not server.is_alive()
        if server.terminal_exc:
            reraise(*server.terminal_exc)
        if match_data is None:
            match_data = self.long_data
        # Compare piecewise so a mismatch pinpoints where the streams diverge.
        read_data = read_data[0].split(b',')
        match_data = match_data.split(b',')
        self.assertEqual(read_data[0], match_data[0])
        self.assertEqual(len(read_data), len(match_data))
        self.assertEqual(read_data, match_data)
    def test_sendall_str(self):
        self._test_sendall(self.long_data)
    if six.PY2:
        def test_sendall_unicode(self):
            self._test_sendall(six.text_type(self.long_data))
    @skipOnMacOnCI("Sometimes fails for no apparent reason (buffering?)")
    def test_sendall_array(self):
        data = array.array("B", self.long_data)
        self._test_sendall(data)
    def test_sendall_empty(self):
        data = b''
        self._test_sendall(data, data)
    def test_sendall_empty_with_timeout(self):
        # Issue 719
        data = b''
        self._test_sendall(data, data, timeout=10)
    def test_sendall_nonblocking(self):
        # https://github.com/benoitc/gunicorn/issues/1282
        # Even if the socket is non-blocking, we make at least
        # one attempt to send data. Under Py2 before this fix, we
        # would incorrectly immediately raise a timeout error
        data = b'hi\n'
        self._test_sendall(data, data, blocking=False)
    def test_empty_send(self):
        # Issue 719
        data = b''
        self._test_sendall(data, data, client_method='send')
    def test_fullduplex(self):
        N = 100000
        def server():
            remote_client, _ = self.listener.accept()
            self._close_on_teardown(remote_client)
            # start reading, then, while reading, start writing. the reader should not hang forever
            sender = Thread(target=remote_client.sendall,
                            args=((b't' * N),))
            try:
                result = remote_client.recv(1000)
                self.assertEqual(result, b'hello world')
            finally:
                sender.join()
        server_thread = Thread(target=server)
        client = self.create_connection()
        client_file = self._close_on_teardown(client.makefile())
        client_reader = Thread(target=client_file.read, args=(N, ))
        time.sleep(0.1)
        client.sendall(b'hello world')
        time.sleep(0.1)
        # close() used to hang
        client_file.close()
        client.close()
        # this tests "full duplex" bug;
        server_thread.join()
        client_reader.join()
    def test_recv_timeout(self):
        def accept():
            # make sure the conn object stays alive until the end;
            # premature closing triggers a ResourceWarning and
            # EOF on the client.
            conn, _ = self.listener.accept()
            self._close_on_teardown(conn)
        acceptor = Thread(target=accept)
        client = self.create_connection()
        try:
            client.settimeout(1)
            start = time.time()
            with self.assertRaises(self.TIMEOUT_ERROR):
                client.recv(1024)
            took = time.time() - start
            self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
        finally:
            acceptor.join()
    # Subclasses can disable this
    _test_sendall_timeout_check_time = True
    # Travis-CI container infrastructure is configured with
    # large socket buffers, at least 2MB, as-of Jun 3, 2015,
    # so we must be sure to send more data than that.
    # In 2018, this needs to be increased *again* as a smaller value was
    # still often being sent.
    _test_sendall_data = b'hello' * 100000000
    # This doesn't make much sense...why are we really skipping this?
    @greentest.skipOnWindows("On Windows send() accepts whatever is thrown at it")
    def test_sendall_timeout(self):
        client_sock = []
        acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
        client = self.create_connection()
        time.sleep(0.1)
        assert client_sock
        client.settimeout(0.1)
        start = time.time()
        try:
            with self.assertRaises(self.TIMEOUT_ERROR):
                client.sendall(self._test_sendall_data)
            if self._test_sendall_timeout_check_time:
                took = time.time() - start
                self.assertTimeWithinRange(took, 0.09, 0.21)
        finally:
            acceptor.join()
            client.close()
            client_sock[0][0].close()
    def test_makefile(self):
        def accept_once():
            conn, _ = self.listener.accept()
            fd = conn.makefile(mode='wb')
            fd.write(b'hello\n')
            fd.flush()
            fd.close()
            conn.close() # for pypy
        acceptor = Thread(target=accept_once)
        try:
            client = self.create_connection()
            # Closing the socket doesn't close the file
            client_file = client.makefile(mode='rb')
            client.close()
            line = client_file.readline()
            self.assertEqual(line, b'hello\n')
            self.assertEqual(client_file.read(), b'')
            client_file.close()
        finally:
            acceptor.join()
    def test_makefile_timeout(self):
        def accept_once():
            conn, _ = self.listener.accept()
            try:
                time.sleep(0.3)
            finally:
                conn.close() # for pypy
        acceptor = Thread(target=accept_once)
        try:
            client = self.create_connection()
            client.settimeout(0.1)
            fd = client.makefile(mode='rb')
            self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
            client.close()
            fd.close()
        finally:
            acceptor.join()
    def test_attributes(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        self.assertIs(s.family, socket.AF_INET)
        self.assertEqual(s.type, socket.SOCK_DGRAM)
        self.assertEqual(0, s.proto)
        if hasattr(socket, 'SOCK_NONBLOCK'):
            s.settimeout(1)
            self.assertIs(s.family, socket.AF_INET)
            s.setblocking(0)
            std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
            try:
                std_socket.setblocking(0)
                self.assertEqual(std_socket.type, s.type)
            finally:
                std_socket.close()
        s.close()
    def test_connect_ex_nonblocking_bad_connection(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.setblocking(False)
            ret = s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, support.find_unused_port()))
            self.assertIsInstance(ret, errno_types)
        finally:
            s.close()
    @skipWithoutExternalNetwork("Tries to resolve hostname")
    def test_connect_ex_gaierror(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            with self.assertRaises(socket.gaierror):
                s.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
        finally:
            s.close()
    @skipWithoutExternalNetwork("Tries to resolve hostname")
    def test_connect_ex_not_call_connect(self):
        # Issue 1931
        def do_it(sock):
            try:
                with self.assertRaises(socket.gaierror):
                    sock.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
            finally:
                sock.close()
        # An instance attribute doesn't matter because we can't set it
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with self.assertRaises(AttributeError):
            s.connect = None
        s.close()
        # A subclass
        class S(socket.socket):
            def connect(self, *args):
                raise AssertionError('Should not be called')
        s = S(socket.AF_INET, socket.SOCK_STREAM)
        do_it(s)
    def test_connect_ex_nonblocking_overflow(self):
        # Issue 841
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.setblocking(False)
            with self.assertRaises(OverflowError):
                s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, 65539))
        finally:
            s.close()
    @unittest.skipUnless(hasattr(socket, 'SOCK_CLOEXEC'),
                         "Requires SOCK_CLOEXEC")
    def test_connect_with_type_flags_ignored(self):
        # Issue 944
        # If we have SOCK_CLOEXEC or similar, we shouldn't be passing
        # them through to the getaddrinfo call that connect() makes
        SOCK_CLOEXEC = socket.SOCK_CLOEXEC # pylint:disable=no-member
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM | SOCK_CLOEXEC)
        def accept_once():
            conn, _ = self.listener.accept()
            fd = conn.makefile(mode='wb')
            fd.write(b'hello\n')
            fd.close()
            conn.close()
        acceptor = Thread(target=accept_once)
        try:
            s.connect((params.DEFAULT_CONNECT, self.port))
            fd = s.makefile(mode='rb')
            self.assertEqual(fd.readline(), b'hello\n')
            fd.close()
            s.close()
        finally:
            acceptor.join()
| TestTCP |
python | kamyu104__LeetCode-Solutions | Python/partition-array-to-minimize-xor.py | {
"start": 52,
"end": 800
# Python 2 source (uses xrange).
class ____(object):
    def minXor(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        INF = float("inf")
        # prefix[i] holds XOR of nums[:i]; XOR of nums[j:i] is prefix[i]^prefix[j].
        prefix = [0]*(len(nums)+1)
        for i in xrange(len(nums)):
            prefix[i+1] = prefix[i]^nums[i]
        # dp[i]: minimal achievable maximum segment-XOR when nums[:i] is split
        # into the current number of parts; initialized for a single segment.
        dp = prefix[:]
        dp[0] = INF
        for l in xrange(2, k+1):
            # Iterate i in reverse so dp[j] still holds (l-1)-part values.
            for i in reversed(xrange(l-1, len(dp))):
                mn = INF
                for j in xrange(l-1, i):
                    # Last segment is nums[j:i]; dp[j] is the best for the rest.
                    v = prefix[i]^prefix[j]
                    mx = dp[j] if dp[j] > v else v
                    if mx < mn:
                        mn = mx
                dp[i] = mn
        return dp[-1]
# Time: O(n^2 * k)
# Space: O(n)
# dp, prefix sum
| Solution |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 36430,
"end": 41212
class ____(fixtures.TablesTest):
    """executemany + return_defaults(): per-row values are exposed via
    returned_defaults_rows / inserted_primary_key_rows, while the scalar
    accessors must raise for executemany calls."""
    __requires__ = ("insert_executemany_returning",)
    run_define_tables = "each"
    __sparse_driver_backend__ = True
    define_tables = InsertReturnDefaultsTest.define_tables
    def test_insert_executemany_no_defaults_passed(self, connection):
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults(),
            [
                {"data": "d1"},
                {"data": "d2"},
                {"data": "d3"},
                {"data": "d4"},
                {"data": "d5"},
                {"data": "d6"},
            ],
        )
        eq_(
            [row._mapping for row in result.returned_defaults_rows],
            [
                {"id": 1, "insdef": 0, "upddef": None},
                {"id": 2, "insdef": 0, "upddef": None},
                {"id": 3, "insdef": 0, "upddef": None},
                {"id": 4, "insdef": 0, "upddef": None},
                {"id": 5, "insdef": 0, "upddef": None},
                {"id": 6, "insdef": 0, "upddef": None},
            ],
        )
        eq_(
            result.inserted_primary_key_rows,
            [(1,), (2,), (3,), (4,), (5,), (6,)],
        )
        # Scalar accessors are single-row APIs; they must raise here.
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "This statement was an executemany call; "
            "if return defaults is supported",
            lambda: result.returned_defaults,
        )
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "This statement was an executemany call; "
            "if primary key returning is supported",
            lambda: result.inserted_primary_key,
        )
    def test_insert_executemany_insdefault_passed(self, connection):
        # When insdef is supplied it is no longer reported as a default.
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults(),
            [
                {"data": "d1", "insdef": 11},
                {"data": "d2", "insdef": 12},
                {"data": "d3", "insdef": 13},
                {"data": "d4", "insdef": 14},
                {"data": "d5", "insdef": 15},
                {"data": "d6", "insdef": 16},
            ],
        )
        eq_(
            [row._mapping for row in result.returned_defaults_rows],
            [
                {"id": 1, "upddef": None},
                {"id": 2, "upddef": None},
                {"id": 3, "upddef": None},
                {"id": 4, "upddef": None},
                {"id": 5, "upddef": None},
                {"id": 6, "upddef": None},
            ],
        )
        eq_(
            result.inserted_primary_key_rows,
            [(1,), (2,), (3,), (4,), (5,), (6,)],
        )
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "This statement was an executemany call; "
            "if return defaults is supported",
            lambda: result.returned_defaults,
        )
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "This statement was an executemany call; "
            "if primary key returning is supported",
            lambda: result.inserted_primary_key,
        )
    def test_insert_executemany_only_pk_passed(self, connection):
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults(),
            [
                {"id": 10, "data": "d1"},
                {"id": 11, "data": "d2"},
                {"id": 12, "data": "d3"},
                {"id": 13, "data": "d4"},
                {"id": 14, "data": "d5"},
                {"id": 15, "data": "d6"},
            ],
        )
        # Dialect-dependent: some backends still report the pk column even
        # when it was supplied by the caller.
        if connection.dialect.insert_null_pk_still_autoincrements:
            eq_(
                [row._mapping for row in result.returned_defaults_rows],
                [
                    {"id": 10, "insdef": 0, "upddef": None},
                    {"id": 11, "insdef": 0, "upddef": None},
                    {"id": 12, "insdef": 0, "upddef": None},
                    {"id": 13, "insdef": 0, "upddef": None},
                    {"id": 14, "insdef": 0, "upddef": None},
                    {"id": 15, "insdef": 0, "upddef": None},
                ],
            )
        else:
            eq_(
                [row._mapping for row in result.returned_defaults_rows],
                [
                    {"insdef": 0, "upddef": None},
                    {"insdef": 0, "upddef": None},
                    {"insdef": 0, "upddef": None},
                    {"insdef": 0, "upddef": None},
                    {"insdef": 0, "upddef": None},
                    {"insdef": 0, "upddef": None},
                ],
            )
        eq_(
            result.inserted_primary_key_rows,
            [(10,), (11,), (12,), (13,), (14,), (15,)],
        )
| InsertManyReturnDefaultsTest |
python | pytest-dev__pytest | src/_pytest/warning_types.py | {
"start": 496,
"end": 642
class ____(PytestWarning):
    """Warning emitted by the cache plugin in various situations."""
    # Present the public module path instead of the internal _pytest one.
    __module__ = "pytest"
@final
| PytestCacheWarning |
python | pypa__pip | src/pip/_vendor/packaging/_elffile.py | {
"start": 460,
"end": 515
class ____(enum.IntEnum):
    """ELF header ``EI_CLASS`` values: 1 = 32-bit, 2 = 64-bit object files."""
    C32 = 1
    C64 = 2
| EIClass |
python | OmkarPathak__pygorithm | tests/test_sorting.py | {
"start": 3020,
"end": 3226
class ____(unittest.TestCase, TestSortingAlgorithm):
    """Run the shared sorting-algorithm suite against iterative merge sort."""
    # sorti returns a new list rather than mutating its argument.
    inplace = False
    # Flag consumed by TestSortingAlgorithm; presumably enables the
    # alphabetical-input tests -- confirm in the shared harness.
    alph_support = True
    @staticmethod
    def sort(arr):
        # verbose=False keeps the harness output quiet.
        return merge_sort.sorti(arr, verbose=False)
| TestMergeSortIterative |
python | huggingface__transformers | src/transformers/models/roberta/modeling_roberta.py | {
"start": 38117,
"end": 38991
class ____(nn.Module):
    """Masked-language-modeling head for RoBERTa: dense -> GELU -> LayerNorm,
    then a decoder projection back to vocabulary size."""
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
    def forward(self, features, **kwargs):
        # Transform hidden states, then project back to vocabulary size.
        transformed = self.layer_norm(gelu(self.dense(features)))
        return self.decoder(transformed)
@auto_docstring(
custom_intro="""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| RobertaLMHead |
python | sqlalchemy__sqlalchemy | test/orm/test_versioning.py | {
"start": 59654,
"end": 61035
class ____(fixtures.MappedTest):
    """test for #8056"""
    __sparse_driver_backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "version_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            # will need parameter quoting for Oracle and PostgreSQL
            # dont use 'key' to make sure no the awkward name is definitely
            # in the params
            Column("_version%id", Integer, nullable=False),
            Column("value", String(40), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        Foo = cls.classes.Foo
        vt = cls.tables.version_table
        # The awkward "_version%id" column is the version counter.
        cls.mapper_registry.map_imperatively(
            Foo,
            vt,
            version_id_col=vt.c["_version%id"],
            properties={"version": vt.c["_version%id"]},
        )
    def test_round_trip(self, fixture_session):
        Foo = self.classes.Foo
        f1 = Foo(value="v1")
        fixture_session.add(f1)
        fixture_session.commit()
        f1.value = "v2"
        with conditional_sane_rowcount_warnings(update=True):
            fixture_session.commit()
        # Version counter must have been bumped by the UPDATE.
        eq_(f1.version, 2)
| QuotedBindVersioningTest |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 54628,
"end": 56690
class ____(unittest.TestCase):
    """Tests person in the ru_RU locale"""
    def setUp(self):
        self.fake = Faker("ru_RU")
        Faker.seed(0)
    def test_translit(self):
        # Cyrillic-to-Latin transliteration, including digraphs, hyphenated
        # names, initials and honorific abbreviations.
        assert translit("Александр Сергеевич Пушкин") == "Aleksandr Sergeevich Pushkin"
        assert translit("Анна Андреевна Ахматова") == "Anna Andreevna Akhmatova"
        assert translit("Михаил") == "Mikhail"
        assert translit("Фёдор") == "Fedor"
        assert translit("Екатерина") == "Yekaterina"
        assert translit("Анастасия") == "Anastasiya"
        assert translit("Юрьевич") == "Yurevich"
        assert translit("Никитична") == "Nikitichna"
        assert translit("Щербакова") == "Shcherbakova"
        assert translit("Маяковский") == "Mayakovskiy"
        assert translit("Петров-Водкин") == "Petrov-Vodkin"
        assert translit("Воронцова-Дашкова") == "Vorontsova-Dashkova"
        assert translit("А.С.Пушкин") == "A.S.Pushkin"
        assert translit("А. С. Пушкин") == "A. S. Pushkin"
        assert translit("тов. И.И.Сидоров") == "tov. I.I.Sidorov"
        assert translit("г-н А.Б.Петров") == "g-n A.B.Petrov"
        assert translit("г-жа Ю.М.Петрова") == "g-zha Yu.M.Petrova"
    def test_name_female(self):
        first_name = self.fake.first_name_female()
        assert first_name in RuProvider.first_names_female
        middle_name = self.fake.middle_name_female()
        assert middle_name in RuProvider.middle_names_female
        last_name = self.fake.last_name_female()
        assert last_name in RuProvider.last_names_female
    def test_name_male(self):
        first_name = self.fake.first_name_male()
        assert first_name in RuProvider.first_names_male
        middle_name = self.fake.middle_name_male()
        assert middle_name in RuProvider.middle_names_male
        last_name = self.fake.last_name_male()
        assert last_name in RuProvider.last_names_male
    def test_language_name(self):
        language_name = self.fake.language_name()
        assert language_name in RuProvider.language_names
| TestRuRU |
python | pytorch__pytorch | torch/_inductor/codegen/simd_kernel_features.py | {
"start": 14460,
"end": 15312
class ____:
    """Tracks the memory usage of a single loop in the generated kernel"""
    # Memory dependencies read, keyed by buffer name.
    reads: dict[str, OrderedSet[MemoryDep]] = dataclasses.field(
        default_factory=functools.partial(collections.defaultdict, OrderedSet)
    )
    # Memory dependencies written, keyed by buffer name.
    writes: dict[str, OrderedSet[MemoryDep]] = dataclasses.field(
        default_factory=functools.partial(collections.defaultdict, OrderedSet)
    )
    def remove(self, name: str) -> None:
        # Drop all recorded accesses for `name`; missing keys are ignored.
        self.reads.pop(name, None)
        self.writes.pop(name, None)
    def __bool__(self) -> bool:
        # Truthy iff any access has been recorded.
        return bool(self.reads or self.writes)
    def __repr__(self) -> str:
        return f"""MemoryEstimate(
            reads={[*itertools.chain.from_iterable(self.reads.values())]!r},
            writes={[*itertools.chain.from_iterable(self.writes.values())]!r}
        )"""
@dataclasses.dataclass
| MemoryEstimate |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 87148,
"end": 88620
class ____(UserDefinedObjectVariable):
    """
    Represents user defined objects that are subclasses of lists.
    Internally, it uses a ListVariable to represent the list part of the
    variable tracker. For everything else, it falls back to
    UserDefinedObjectVariable.
    """
    def __init__(self, value, list_vt=None, **kwargs):
        super().__init__(value, **kwargs)
        self._list_vt = list_vt
        if self._list_vt is None:
            assert self.source is None, (
                "list_vt must be constructed by builder.py when source is present"
            )
            # Sourceless object: start from an empty, newly-mutated list VT.
            self._list_vt = variables.ListVariable([], mutation_type=ValueMutationNew())
    def call_method(
        self,
        tx,
        name,
        args: "list[VariableTracker]",
        kwargs: "dict[str, VariableTracker]",
    ) -> "VariableTracker":
        assert self._list_vt is not None
        method = self._maybe_get_baseclass_method(name)
        # Delegate genuine list methods to the inner ListVariable; anything
        # overridden by the subclass goes through the generic object path.
        if method in list_methods:
            return self._list_vt.call_method(tx, name, args, kwargs)
        return super().call_method(tx, name, args, kwargs)
    def unpack_var_sequence(self, tx):
        assert self._list_vt is not None
        # Only safe when iteration was not overridden by the subclass.
        if type(self.value).__iter__ is list.__iter__:
            return self._list_vt.unpack_var_sequence(tx)
        raise NotImplementedError
    def is_underlying_vt_modified(self, side_effects):
        return side_effects.is_modified(self._list_vt)
| UserDefinedListVariable |
python | huggingface__transformers | tests/quantization/bnb/test_mixed_int8.py | {
"start": 26256,
"end": 28448
class ____(BaseMixedInt8Test):
    """Multi-GPU smoke tests for 8-bit (bitsandbytes) quantized loading."""
    def setUp(self):
        super().setUp()
    def test_multi_gpu_loading(self):
        r"""
        This tests that the model has been loaded and can be used correctly on a multi-GPU setup.
        Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice
        """
        # Explicit per-module device map splitting the transformer layers
        # across devices 0 and 1.
        device_map = {
            "transformer.word_embeddings": 0,
            "transformer.word_embeddings_layernorm": 0,
            "lm_head": 0,
            "transformer.h.0": 0,
            "transformer.h.1": 0,
            "transformer.h.2": 0,
            "transformer.h.3": 0,
            "transformer.h.4": 0,
            "transformer.h.5": 0,
            "transformer.h.6": 0,
            "transformer.h.7": 0,
            "transformer.h.8": 0,
            "transformer.h.9": 0,
            "transformer.h.10": 1,
            "transformer.h.11": 1,
            "transformer.h.12": 1,
            "transformer.h.13": 1,
            "transformer.h.14": 1,
            "transformer.h.15": 1,
            "transformer.h.16": 1,
            "transformer.h.17": 0,
            "transformer.h.18": 0,
            "transformer.h.19": 0,
            "transformer.h.20": 0,
            "transformer.h.21": 0,
            "transformer.h.22": 0,
            "transformer.h.23": 1,
            "transformer.ln_f": 0,
        }
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map=device_map
        )
        # Check correct device map: both devices must actually be used.
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(
            input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
        )
        # Decoded text must match one of the known-good outputs.
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
@require_torch_multi_accelerator
@apply_skip_if_not_implemented
| MixedInt8TestMultiGpu |
python | streamlit__streamlit | lib/streamlit/elements/exception.py | {
"start": 1890,
"end": 12330
class ____:
    """Mixin exposing ``st.exception`` on a DeltaGenerator."""
    @gather_metrics("exception")
    def exception(
        self, exception: BaseException, width: WidthWithoutContent = "stretch"
    ) -> DeltaGenerator:
        """Display an exception.
        When accessing the app through ``localhost``, in the lower-right corner
        of the exception, Streamlit displays links to Google and ChatGPT that
        are prefilled with the contents of the exception message.
        Parameters
        ----------
        exception : Exception
            The exception to display.
        width : "stretch" or int
            The width of the exception element. This can be one of the following:
            - ``"stretch"`` (default): The width of the element matches the
              width of the parent container.
            - An integer specifying the width in pixels: The element has a
              fixed width. If the specified width is greater than the width of
              the parent container, the width of the element matches the width
              of the parent container.
        Example
        -------
        >>> import streamlit as st
        >>>
        >>> e = RuntimeError("This is an exception of type RuntimeError")
        >>> st.exception(e)
        .. output ::
            https://doc-status-exception.streamlit.app/
            height: 220px
        """
        # Delegate to the module-level helper, which is also used for
        # uncaught app exceptions.
        return _exception(self.dg, exception, width=width)
    @property
    def dg(self) -> DeltaGenerator:
        """Get our DeltaGenerator."""
        return cast("DeltaGenerator", self)
# TODO(lawilby): confirm whether we want to track metrics here with lukasmasuch.
@gather_metrics("exception")
def _exception(
    dg: DeltaGenerator,
    exception: BaseException,
    width: WidthWithoutContent = "stretch",
    is_uncaught_app_exception: bool = False,
) -> DeltaGenerator:
    """Marshall *exception* into an Exception proto and enqueue it on *dg*.

    ``is_uncaught_app_exception`` marks exceptions that escaped the user's
    script; it changes how much detail ``marshall`` includes in the proto.
    """
    exception_proto = ExceptionProto()
    marshall(exception_proto, exception, width, is_uncaught_app_exception)
    return dg._enqueue("exception", exception_proto)
def marshall(
    exception_proto: ExceptionProto,
    exception: BaseException,
    width: WidthWithoutContent = "stretch",
    is_uncaught_app_exception: bool = False,
) -> None:
    """Marshalls an Exception.proto message.
    Parameters
    ----------
    exception_proto : Exception.proto
        The Exception protobuf to fill out.
    exception : BaseException
        The exception whose data we're extracting.
    width : int or "stretch"
        The width of the exception display. Can be either an integer (pixels) or "stretch".
        Defaults to "stretch".
    is_uncaught_app_exception: bool
        The exception originates from an uncaught error during script execution.
    """
    validate_width(width)
    is_markdown_exception = isinstance(exception, MarkdownFormattedException)
    # Some exceptions (like UserHashError) have an alternate_name attribute so
    # we can pretend to the user that the exception is called something else.
    if getattr(exception, "alternate_name", None) is not None:
        exception_proto.type = exception.alternate_name  # type: ignore[attr-defined]
    else:
        exception_proto.type = type(exception).__name__
    stack_trace = _get_stack_trace_str_list(exception)
    exception_proto.stack_trace.extend(stack_trace)
    exception_proto.is_warning = isinstance(exception, Warning)
    # Encode the width: integer -> fixed pixel width, otherwise stretch.
    width_config = WidthConfig()
    if isinstance(width, int):
        width_config.pixel_width = width
    else:
        width_config.use_stretch = True
    exception_proto.width_config.CopyFrom(width_config)
    try:
        if isinstance(exception, SyntaxError):
            # SyntaxErrors have additional fields (filename, text, lineno,
            # offset) that we can use for a nicely-formatted message telling
            # the user what to fix.
            exception_proto.message = _format_syntax_error_message(exception)
        else:
            exception_proto.message = str(exception).strip()
            exception_proto.message_is_markdown = is_markdown_exception
    except Exception as str_exception:
        # Sometimes the exception's __str__/__unicode__ method itself
        # raises an error.
        exception_proto.message = ""
        _LOGGER.warning(
            """
Streamlit was unable to parse the data from an exception in the user's script.
This is usually due to a bug in the Exception object itself. Here is some info
about that Exception object, so you can report a bug to the original author:
Exception type:
  %s
Problem:
  %s
Traceback:
  %s
        """,
            type(exception).__name__,
            str_exception,
            "\n".join(_get_stack_trace_str_list(str_exception)),
        )
    if is_uncaught_app_exception:
        # The client.showErrorDetails option gates, in decreasing order of
        # detail: message -> stack trace -> exception type. Each level
        # implies the ones below it.
        show_error_details = config.get_option("client.showErrorDetails")
        show_message = (
            show_error_details == config.ShowErrorDetailsConfigOptions.FULL
            or config.ShowErrorDetailsConfigOptions.is_true_variation(
                show_error_details
            )
        )
        # False is a legacy config option still in-use in community cloud. It is equivalent
        # to "stacktrace".
        show_trace = (
            show_message
            or show_error_details == config.ShowErrorDetailsConfigOptions.STACKTRACE
            or config.ShowErrorDetailsConfigOptions.is_false_variation(
                show_error_details
            )
        )
        show_type = (
            show_trace
            or show_error_details == config.ShowErrorDetailsConfigOptions.TYPE
        )
        if not show_message:
            exception_proto.message = _GENERIC_UNCAUGHT_EXCEPTION_TEXT
        if not show_type:
            exception_proto.ClearField("type")
        else:
            # Use the fully-stripped class string (e.g. "module.Error")
            # rather than just the class __name__ set above.
            type_str = str(type(exception))
            exception_proto.type = type_str.replace("<class '", "").replace("'>", "")
        if not show_trace:
            exception_proto.ClearField("stack_trace")
def _format_syntax_error_message(exception: SyntaxError) -> str:
"""Returns a nicely formatted SyntaxError message that emulates
what the Python interpreter outputs.
For example:
> File "raven.py", line 3
> st.write('Hello world!!'))
> ^
> SyntaxError: invalid syntax
"""
if exception.text:
caret_indent = (
" " * max(exception.offset - 1, 0) if exception.offset is not None else ""
)
return (
f'File "{exception.filename}", line {exception.lineno}\n'
f" {exception.text.rstrip()}\n"
f" {caret_indent}^\n"
f"{type(exception).__name__}: {exception.msg}"
)
# If a few edge cases, SyntaxErrors don't have all these nice fields. So we
# have a fall back here.
# Example edge case error message: encoding declaration in Unicode string
return str(exception)
def _get_stack_trace_str_list(exception: BaseException) -> list[str]:
    """Get the stack trace for the given exception.
    Parameters
    ----------
    exception : BaseException
        The exception to extract the traceback from
    Returns
    -------
    list of str
        The formatted stack trace, one stripped string per frame. When any
        frames follow the internal Streamlit portion of the trace, only
        those external frames are returned (they are usually what the user
        wants to see); otherwise the internal frames are returned instead.
    """
    extracted_traceback: traceback.StackSummary | None = None
    # StreamlitAPIWarning carries a pre-captured stack; otherwise extract
    # from the exception's own __traceback__.
    if isinstance(exception, StreamlitAPIWarning):
        extracted_traceback = exception.tacked_on_stack
    elif hasattr(exception, "__traceback__"):
        extracted_traceback = traceback.extract_tb(exception.__traceback__)
    # Format the extracted traceback and add it to the protobuf element.
    if extracted_traceback is None:
        trace_str_list = [
            "Cannot extract the stack trace for this exception. "
            "Try calling exception() within the `catch` block."
        ]
    else:
        internal_frames, external_frames = _split_internal_streamlit_frames(
            extracted_traceback
        )
        # Prefer the user-facing part of the trace; fall back to the
        # internal part when nothing follows the Streamlit frames.
        if external_frames:
            trace_str_list = traceback.format_list(external_frames)
        else:
            trace_str_list = traceback.format_list(internal_frames)
        trace_str_list = [item.strip() for item in trace_str_list]
    return trace_str_list
def _is_in_package(file: str, package_path: str) -> bool:
"""True if the given file is part of package_path."""
try:
common_prefix = os.path.commonprefix([os.path.realpath(file), package_path])
except ValueError:
# Raised if paths are on different drives.
return False
return common_prefix == package_path
def _split_internal_streamlit_frames(
    extracted_tb: traceback.StackSummary,
) -> tuple[list[traceback.FrameSummary], list[traceback.FrameSummary]]:
    """Split the traceback into a Streamlit-internal part and an external part.
    The internal part is everything up to (but excluding) the first frame belonging to
    the user's code. The external part is everything else.
    So if the stack looks like this:
    1. Streamlit frame
    2. Pandas frame
    3. Altair frame
    4. Streamlit frame
    5. User frame
    6. User frame
    7. Streamlit frame
    8. Matplotlib frame
    ...then this should return 1-4 as the internal traceback and 5-8 as the external.
    (Note that something like the example above is extremely unlikely to happen since
    it's not like Altair is calling Streamlit code, but you get the idea.)
    """
    ctx = get_script_run_ctx()
    # Without a script-run context we cannot locate the user's script, so
    # treat the whole trace as external.
    if not ctx:
        return [], list(extracted_tb)
    # Join with "" so the path ends with a separator, preventing sibling
    # directories with a common name prefix from matching.
    package_path = os.path.join(os.path.realpath(str(ctx.main_script_parent)), "")
    return _split_list(
        extracted_tb,
        split_point=lambda tb: _is_in_package(tb.filename, package_path),
    )
T = TypeVar("T")
def _split_list(
orig_list: list[T], split_point: Callable[[T], bool]
) -> tuple[list[T], list[T]]:
before: list[T] = []
after: list[T] = []
saw_split_point = False
for item in orig_list:
if not saw_split_point and split_point(item):
saw_split_point = True
if saw_split_point:
after.append(item)
else:
before.append(item)
return before, after
| ExceptionMixin |
python | doocs__leetcode | solution/0000-0099/0057.Insert Interval/Solution.py | {
"start": 0,
"end": 532
} | class ____:
def insert(
self, intervals: List[List[int]], newInterval: List[int]
) -> List[List[int]]:
def merge(intervals: List[List[int]]) -> List[List[int]]:
intervals.sort()
ans = [intervals[0]]
for s, e in intervals[1:]:
if ans[-1][1] < s:
ans.append([s, e])
else:
ans[-1][1] = max(ans[-1][1], e)
return ans
intervals.append(newInterval)
return merge(intervals)
| Solution |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 27117,
"end": 27221
class ____(BaseModel):
    """Comms message requesting a connection by id.

    NOTE(review): presumably routed by the ``type`` discriminator on the
    receiving side — verify against the message-union definition.
    """
    # Identifier of the connection to fetch.
    conn_id: str
    # Literal discriminator identifying this message variant.
    type: Literal["GetConnection"] = "GetConnection"
| GetConnection |
python | bokeh__bokeh | src/bokeh/models/mappers.py | {
"start": 10755,
"end": 11149
class ____(ColorMapper):
    ''' Abstract base class for color mappers that operate on ``ImageStack``
    glyphs.
    These map 3D data arrays of shape ``(ny, nx, nstack)`` to 2D RGBA images
    of shape ``(ny, nx)``.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
| StackColorMapper |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_dms.py | {
"start": 4148,
"end": 5462
class ____(TestBaseDmsTrigger):
    """Tests for the DMS replication-config-deleted trigger."""
    # Waiter the trigger is expected to poll with.
    EXPECTED_WAITER_NAME = "replication_config_deleted"
    REPLICATION_CONFIG_ARN = "arn:aws:dms:region:account:config"
    def test_serialization(self):
        # serialize() must round-trip the classpath and constructor kwargs.
        trigger = DmsReplicationConfigDeletedTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN)
        classpath, kwargs = trigger.serialize()
        assert classpath == BASE_TRIGGER_CLASSPATH + "DmsReplicationConfigDeletedTrigger"
        assert kwargs.get("replication_config_arn") == self.REPLICATION_CONFIG_ARN
    @pytest.mark.asyncio
    @mock.patch.object(DmsHook, "get_waiter")
    @mock.patch.object(DmsHook, "get_async_conn")
    async def test_complete(self, mock_async_conn, mock_get_waiter):
        # Mock out the async AWS connection and the waiter, then drive the
        # trigger's async generator one step to get its completion event.
        mock_async_conn.__aenter__.return_value = mock.MagicMock()
        mock_get_waiter().wait = AsyncMock()
        trigger = DmsReplicationConfigDeletedTrigger(replication_config_arn=self.REPLICATION_CONFIG_ARN)
        generator = trigger.run()
        response = await generator.asend(None)
        assert response == TriggerEvent(
            {"status": "success", "replication_config_arn": self.REPLICATION_CONFIG_ARN}
        )
        # The correct waiter must have been requested and awaited exactly once.
        assert_expected_waiter_type(mock_get_waiter, self.EXPECTED_WAITER_NAME)
        mock_get_waiter().wait.assert_called_once()
| TestDmsReplicationConfigDeletedTrigger |
python | PyCQA__pylint | tests/functional/t/too/too_few_public_methods_37.py | {
"start": 481,
"end": 556
class ____:
    """Minimal model stub with a single class attribute.

    NOTE(review): appears to exist only to exercise lint behavior for
    classes with few public methods — confirm against the test harness.
    """
    # Placeholder value; intended runtime type not evident here.
    date = None
@dataclass(frozen=True)
| ScheduledTxSearchModelOne |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 77841,
"end": 79787
class ____(TestCase):
    # Test ones, zeros, empty and full.
    def setUp(self):
        super().setUp()
        # Cartesian-product dimensions shared by all creation-function tests.
        dtypes = {np.dtype(tp) for tp in "efdFDBbhil?"}
        self.dtypes = dtypes
        self.orders = {
            "C": "c_contiguous"
        }  # XXX: reeenable when implemented, 'F': 'f_contiguous'}
        self.ndims = 10
    def check_function(self, func, fill_value=None):
        # Exercise `func` over sizes x ndims x orders x dtypes, checking
        # dtype, memory order, and (for `full`) the fill value.
        par = ((0, 1, 2), range(self.ndims), self.orders, self.dtypes)
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {"fill_value": fill_value}
        for size, ndims, order, dtype in itertools.product(*par):
            shape = ndims * [size]
            arr = func(shape, order=order, dtype=dtype, **fill_kwarg)
            assert_equal(arr.dtype, dtype)
            assert_(getattr(arr.flags, self.orders[order]))
            if fill_value is not None:
                val = fill_value
                assert_equal(arr, dtype.type(val))
    def test_zeros(self):
        self.check_function(np.zeros)
    def test_ones(self):
        self.check_function(np.ones)
    def test_empty(self):
        self.check_function(np.empty)
    def test_full(self):
        self.check_function(np.full, 0)
        self.check_function(np.full, 1)
    @skipif(TEST_WITH_TORCHDYNAMO, reason="fails with dynamo")
    @skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_for_reference_leak(self):
        # Make sure we have an object for reference
        dim = 1
        # Each creation call must leave `dim`'s refcount unchanged.
        beg = sys.getrefcount(dim)
        np.zeros([dim] * 10)
        assert_(sys.getrefcount(dim) == beg)
        np.ones([dim] * 10)
        assert_(sys.getrefcount(dim) == beg)
        np.empty([dim] * 10)
        assert_(sys.getrefcount(dim) == beg)
        np.full([dim] * 10, 0)
        assert_(sys.getrefcount(dim) == beg)
@skip(reason="implement order etc") # FIXME: make xfail
@instantiate_parametrized_tests
| TestCreationFuncs |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 122927,
"end": 125167
class ____(Response):
    """
    Response of projects.get_task_parents endpoint.
    :param parents: The list of unique task parents sorted by their names
    :type parents: Sequence[dict]
    """
    _service = "projects"
    _action = "get_task_parents"
    _version = "2.20"
    # JSON schema describing the response payload; used for validation.
    _schema = {
        "definitions": {},
        "properties": {
            "parents": {
                "description": "The list of unique task parents sorted by their names",
                "items": {
                    "properties": {
                        "id": {
                            "description": "The ID of the parent task",
                            "type": "string",
                        },
                        "name": {
                            "description": "The name of the parent task",
                            "type": "string",
                        },
                        "project": {
                            "id": {
                                "description": "The ID of the parent task project",
                                "type": "string",
                            },
                            "name": {
                                "description": "The name of the parent task project",
                                "type": "string",
                            },
                            "type": "object",
                        },
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }
    def __init__(self, parents: Optional[List[dict]] = None, **kwargs: Any) -> None:
        super(GetTaskParentsResponse, self).__init__(**kwargs)
        self.parents = parents
    @schema_property("parents")
    def parents(self) -> Optional[List[dict]]:
        return self._property_parents
    @parents.setter
    def parents(self, value: Optional[List[dict]]) -> None:
        # None clears the property; otherwise enforce a sequence of dicts.
        if value is None:
            self._property_parents = None
            return
        self.assert_isinstance(value, "parents", (list, tuple))
        self.assert_isinstance(value, "parents", (dict,), is_array=True)
        self._property_parents = value
| GetTaskParentsResponse |
python | pytorch__pytorch | test/dynamo/test_guard_manager.py | {
"start": 1564,
"end": 30365
} | class ____(torch._dynamo.test_case.TestCase):
def test_global_state_guard(self):
root = RootGuardManager()
guard = guards.GLOBAL_STATE(root, ["global_state_check"])
self.assertTrue(guard(None))
with set_default_dtype(torch.double):
self.assertFalse(guard(None))
self.assertExpectedInline(
str(guard.check_verbose(None)),
"""\
GuardDebugInfo(
result=0,
verbose_code_parts=['GLOBAL_STATE changed: default_dtype '],
num_guards_executed=0)
""",
)
self.assertTrue(guard(None))
self.assertTrue(guard.check_verbose(None).result)
_orig = torch.are_deterministic_algorithms_enabled()
try:
torch.use_deterministic_algorithms(not _orig)
self.assertFalse(guard(None))
self.assertExpectedInline(
str(guard.check_verbose(None)),
"""\
GuardDebugInfo(
result=0,
verbose_code_parts=['GLOBAL_STATE changed: deterministic_algorithms '],
num_guards_executed=0)
""",
)
finally:
torch.use_deterministic_algorithms(_orig)
self.assertTrue(guard(None))
self.assertTrue(guard.check_verbose(None).result)
def test_global_state_reason(self):
with torch.enable_grad():
guards = GlobalStateGuard()
with torch.no_grad():
self.assertIs(guards.check(), False)
self.assertEqual(guards.reason(), "grad_mode ")
def test_python_lambda_leaf_guard(self):
root = RootGuardManager()
const_guard = guards.LAMBDA_GUARD(
root,
functools.partial(equals_match, expected=5),
equals_match_verbose_code_parts(5),
)
self.assertTrue(const_guard(5))
self.assertFalse(const_guard(4))
self.assertFalse(const_guard("foo"))
def test_type_guard(self):
root = RootGuardManager()
foo = 4
guard = guards.TYPE_MATCH(root, id_type(foo), ["type(x) == int"])
self.assertTrue(guard(5))
self.assertTrue(guard(4))
self.assertFalse(guard("foo"))
foo = {"a": 1}
guard = guards.TYPE_MATCH(root, id_type(foo), ["type(x) == dict"])
self.assertTrue(guard(foo))
self.assertTrue(guard({}))
self.assertFalse(guard(5))
self.assertFalse(guard("foo"))
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
foo = Foo(1, 2)
guard = guards.TYPE_MATCH(root, id_type(foo), ["type(x) == Foo"])
self.assertTrue(guard(foo))
self.assertFalse(guard({}))
self.assertFalse(guard(5))
self.assertFalse(guard("foo"))
def test_id_guard(self):
root = RootGuardManager()
foo = 4
guard = guards.ID_MATCH(root, id(foo), ["id(x) == id(foo)"])
self.assertTrue(guard(foo))
self.assertFalse(guard(5))
self.assertFalse(guard("foo"))
foo = {"a": 1}
guard = guards.ID_MATCH(root, id(foo), ["id(x) == id(foo)"])
self.assertTrue(guard(foo))
self.assertFalse(guard({"a": 1}))
self.assertFalse(guard({}))
self.assertFalse(guard(5))
def test_equals_guard(self):
root = RootGuardManager()
foo = 4
guard = guards.EQUALS_MATCH(root, foo, ["x == 4"])
self.assertTrue(guard(4))
self.assertFalse(guard(5))
self.assertFalse(guard("foo"))
# tuple
foo = (1, 2, 3)
guard = guards.EQUALS_MATCH(root, foo, ["x == foo"])
self.assertTrue(guard(foo))
self.assertTrue(guard((1, 2, 3)))
self.assertFalse(guard((1, 2, 3, 4)))
self.assertFalse(guard({}))
# list
foo = [1, 2, 3]
guard = guards.EQUALS_MATCH(root, foo, ["x == foo"])
self.assertTrue(guard(foo))
self.assertTrue(guard([1, 2, 3]))
self.assertFalse(guard([1, 2, 3, 4]))
# type
foo = int
guard = guards.EQUALS_MATCH(root, foo, ["x == foo"])
self.assertTrue(guard(foo))
self.assertTrue(guard(int))
self.assertFalse(guard(float))
def test_default_device_guard(self):
root = RootGuardManager()
foo = 1
guard = guards.DEFAULT_DEVICE(root, ["cpu device"])
self.assertTrue(guard(foo))
try:
torch.set_default_device("cuda")
self.assertFalse(guard(foo))
finally:
torch.set_default_device(None)
def test_length_check_guard(self):
root = RootGuardManager()
foo = [1, 2, 3]
guard = guards.LENGTH_CHECK(root, len(foo), ["len(x) == len(foo)"])
self.assertTrue(guard(foo))
self.assertFalse(guard([]))
def test_no_hasattr_guard(self):
root = RootGuardManager()
class Bar:
def __init__(self) -> None:
self.bar = 2
bar = Bar()
class Foo:
def __init__(self) -> None:
self.foo = 2
foo = Foo()
guard = guards.NO_HASATTR(root, "foo", ["hasattr(x, 'foo') == False"])
self.assertTrue(guard(bar))
self.assertFalse(guard(foo))
def test_tensor_aliasing_guard(self):
guard_manager = RootGuardManager()
a = torch.randn(3, 4)
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
f_locals = Foo(a, a)
x_guard_mgr = guard_manager.getattr_manager("x", "", a, default_mgr_enum)
y_guard_mgr = guard_manager.getattr_manager("y", "", a, default_mgr_enum)
install_object_aliasing_guard(x_guard_mgr, y_guard_mgr, ["x is y"])
# Check structure
x_guards = x_guard_mgr.get_leaf_guards()
y_guards = y_guard_mgr.get_leaf_guards()
self.assertEqual(len(x_guards), 1)
self.assertEqual(len(y_guards), 1)
self.assertTrue(isinstance(x_guards[0], OBJECT_ALIASING))
self.assertTrue(isinstance(y_guards[0], OBJECT_ALIASING))
# Check that the two guards are the same object
self.assertTrue(x_guards[0] is y_guards[0])
f_locals_unaliased = Foo(torch.randn(3, 4), torch.randn(3, 4))
self.assertEqual(len(x_guard_mgr.get_leaf_guards()), 1)
self.assertEqual(len(y_guard_mgr.get_leaf_guards()), 1)
self.assertTrue(guard_manager.check(f_locals))
self.assertFalse(guard_manager.check(f_locals_unaliased))
def test_dict_version_guard(self):
root = RootGuardManager()
foo = {"a": 1, "b": 2}
guard = guards.DICT_VERSION(root, foo, ["x.version == foo.version"])
self.assertTrue(guard(foo))
self.assertFalse(guard(dict(foo)))
foo["a"] = 2
self.assertFalse(guard(foo))
self.assertFalse(guard({"a": 1, "b": 2}))
self.assertFalse(guard({}))
def test_dynamic_indices_guard(self):
root = RootGuardManager()
guard1 = guards.DYNAMIC_INDICES(root, set(), ["x.size(0) == y.size(0)"])
guard2 = guards.DYNAMIC_INDICES(root, set({0, 1}), ["x.size(0) == y.size(0)"])
x = torch.randn(4)
self.assertTrue(guard1(x))
self.assertTrue(guard2(x))
x._dynamo_dynamic_indices = set({0})
self.assertFalse(guard1(x))
self.assertTrue(guard2(x))
x._dynamo_dynamic_indices = set({2})
self.assertFalse(guard1(x))
self.assertFalse(guard2(x))
def test_tensor_match_guard(self):
guard_manager = RootGuardManager()
x = torch.randn(4, 4)
size = list(x.size())
stride = list(x.stride())
guard_manager.add_tensor_match_guard(
x,
size,
stride,
"x",
["check_tensor(x)"],
type(x),
torch._C._dispatch_keys(x),
)
self.assertTrue(guard_manager.check(x))
self.assertTrue(guard_manager.check_verbose(x).result)
self.assertTrue(guard_manager.check(torch.randn(4, 4)))
self.assertTrue(guard_manager.check_verbose(torch.randn(4, 4)).result)
self.assertFalse(guard_manager.check(x.t_()))
x = torch.randn(4, 4)
x.t_()
debug_info = guard_manager.check_verbose(x)
print(debug_info.verbose_code_parts[0])
self.assertTrue(
"tensor 'x' stride mismatch" in debug_info.verbose_code_parts[0]
)
def test_no_tensor_aliasing_guard(self):
guard_manager = RootGuardManager()
a = torch.randn(3, 4)
class Foo:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
f_locals = Foo(a, a, a)
x_guard_mgr = guard_manager.getattr_manager("x", "", a, default_mgr_enum)
y_guard_mgr = guard_manager.getattr_manager("y", "", a, default_mgr_enum)
z_guard_mgr = guard_manager.getattr_manager("z", "", a, default_mgr_enum)
install_no_tensor_aliasing_guard(
[x_guard_mgr, y_guard_mgr, z_guard_mgr],
["x", "y", "z"],
["no_aliasing(x, y, z)"],
)
# Check structure
x_guards = x_guard_mgr.get_leaf_guards()
y_guards = y_guard_mgr.get_leaf_guards()
z_guards = z_guard_mgr.get_leaf_guards()
self.assertEqual(len(x_guards), 1)
self.assertEqual(len(y_guards), 1)
self.assertEqual(len(z_guards), 1)
self.assertTrue(isinstance(x_guards[0], NO_TENSOR_ALIASING))
self.assertTrue(isinstance(y_guards[0], NO_TENSOR_ALIASING))
self.assertTrue(isinstance(z_guards[0], NO_TENSOR_ALIASING))
# Check that the two guards are the same object
self.assertTrue(x_guards[0] is y_guards[0] is z_guards[0])
self.assertFalse(guard_manager.check(f_locals))
self.assertFalse(guard_manager.check_verbose(f_locals).result)
f_locals_unaliased = Foo(
torch.randn(3, 4),
torch.randn(3, 4),
torch.randn(3, 4),
)
self.assertTrue(guard_manager.check(f_locals_unaliased))
self.assertTrue(guard_manager.check_verbose(f_locals_unaliased).result)
# Check that hash map is cleared.
self.assertTrue(guard_manager.check(f_locals_unaliased))
f_locals_unaliased = Foo(
a,
torch.randn(3, 4),
a,
)
self.assertFalse(guard_manager.check(f_locals_unaliased))
self.assertFalse(guard_manager.check_verbose(f_locals_unaliased).result)
def test_weakref_alive_guard(self):
root = RootGuardManager()
x = torch.rand(3, 4)
weakref_x = weakref.ref(x)
guard = guards.NOT_NONE(root, ["weakref_x is not None"])
self.assertTrue(guard(weakref_x()))
del x
self.assertFalse(guard(weakref_x()))
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_call_function_no_args_guard(self):
root = RootGuardManager()
x = torch.cuda.current_device()
guard = guards.EQUALS_MATCH(root, x, [0])
self.assertTrue(guard(0))
self.assertFalse(guard(1))
self.assertFalse(guard(2))
def test_guard_manager_leaf_guard(self):
guard_manager = RootGuardManager()
guard_manager.add_type_match_guard(id_type(5), ["type(x) == int"])
guard_manager.add_lambda_guard(
functools.partial(ge_match, expected=5),
ge_match_verbose_code_parts(expected=5),
)
guard_manager.add_lambda_guard(
functools.partial(less_match, expected=10),
less_match_verbose_code_parts(expected=10),
)
self.assertEqual(len(guard_manager.get_leaf_guards()), 3)
self.assertEqual(len(guard_manager.get_accessors()), 0)
self.assertTrue(guard_manager.check(6))
self.assertFalse(guard_manager.check(4))
self.assertFalse(guard_manager.check("foo"))
def test_attr_guard_manager(self):
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
foo = Foo(1, 2)
guard_manager = RootGuardManager()
guard_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
guard_manager.getattr_manager("x", "x", 1, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo.x),
equals_match_verbose_code_parts(foo.x),
)
guard_manager.getattr_manager("y", "y", 2, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo.y),
equals_match_verbose_code_parts(foo.y),
)
self.assertEqual(len(guard_manager.get_leaf_guards()), 1)
# 2 child managers, one for x and one for y
self.assertEqual(len(guard_manager.get_accessors()), 2)
self.assertTrue(
isinstance(guard_manager.get_accessors()[0], GetAttrGuardAccessor)
)
self.assertTrue(
isinstance(guard_manager.get_accessors()[1], GetAttrGuardAccessor)
)
# Check leaf guards on child managers
self.assertEqual(
len(
guard_manager.getattr_manager(
attr="x",
source="x",
example_value=None,
guard_manager_enum=default_mgr_enum,
).get_leaf_guards()
),
1,
)
self.assertEqual(
len(
guard_manager.getattr_manager(
"y", "y", None, default_mgr_enum
).get_leaf_guards()
),
1,
)
self.assertTrue(guard_manager.check(foo))
self.assertFalse(guard_manager.check(Foo(3, 4)))
self.assertFalse(guard_manager.check("foo"))
def test_item_guard_manager(self):
foo = [1, 2]
guard_manager = RootGuardManager()
guard_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
guard_manager.getitem_manager(0, "", 1, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo[0]),
equals_match_verbose_code_parts(foo[0]),
)
guard_manager.getitem_manager(1, "", 2, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo[1]),
equals_match_verbose_code_parts(foo[1]),
)
self.assertEqual(len(guard_manager.get_leaf_guards()), 1)
# 2 child managers, one for x and one for y
self.assertEqual(len(guard_manager.get_accessors()), 2)
self.assertTrue(
isinstance(guard_manager.get_accessors()[0], GetItemGuardAccessor)
)
self.assertTrue(
isinstance(guard_manager.get_accessors()[1], GetItemGuardAccessor)
)
# Check leaf guards on child managers
self.assertEqual(
len(
guard_manager.getitem_manager(
0, "", None, default_mgr_enum
).get_leaf_guards()
),
1,
)
self.assertEqual(
len(
guard_manager.getitem_manager(
1, "", None, default_mgr_enum
).get_leaf_guards()
),
1,
)
self.assertTrue(guard_manager.check(foo))
self.assertFalse(guard_manager.check([3, 4]))
self.assertFalse(guard_manager.check("foo"))
def test_framelocals_accessor(self):
foo = {
"a": 1,
"b": 2,
}
guards_manager = RootGuardManager()
guards_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
guards_manager.framelocals_manager(
("a", 0), "", 1, default_mgr_enum
).add_equals_match_guard(1, ["a == 1"])
guards_manager.framelocals_manager(
("b", 1), "", 2, default_mgr_enum
).add_equals_match_guard(2, ["b == 2"])
self.assertTrue(guards_manager.check(foo))
self.assertFalse(guards_manager.check({"a": 1, "b": 3}))
def test_framelocals_guard_e2e(self):
def fn(x, y, z):
return x + y + z[0]
opt_fn = torch.compile(fn, backend="eager")
ref = opt_fn(torch.ones(3), 2, {0: 1, 2: 3})
with torch._dynamo.set_stance("fail_on_recompile"):
res = opt_fn(torch.ones(3), 2, {0: 1, 2: 3})
self.assertEqual(ref, res)
c1 = _debug_get_cache_entry_list(fn.__code__)
self.assertEqual(len(c1), 1)
guard_str = str(c1[0].guard_manager)
self.assertIn(
"source=L['x'], accessed_by=FrameLocalsGuardAccessor(key='x', framelocals_idx=0)",
guard_str,
)
self.assertIn(
"source=L['y'], accessed_by=FrameLocalsGuardAccessor(key='y', framelocals_idx=1)",
guard_str,
)
self.assertIn(
"source=L['z'], accessed_by=FrameLocalsGuardAccessor(key='z', framelocals_idx=2)",
guard_str,
)
def test_dict_getitem_accessor(self):
foo = {
"a": 1,
"b": 2,
}
guards_manager = RootGuardManager()
guards_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
guards_manager.dict_getitem_manager(
"a", "", 1, default_mgr_enum
).add_equals_match_guard(1, ["a == 1"])
guards_manager.dict_getitem_manager(
"b", "", 2, default_mgr_enum
).add_equals_match_guard(2, ["b == 2"])
self.assertTrue(guards_manager.check(foo))
self.assertFalse(guards_manager.check({"a": 1, "b": 3}))
def test_globals(self):
global global_pair, Pair
guard_manager = RootGuardManager()
gpair_mgr = guard_manager.globals_dict_manager(
globals(), "", None, default_mgr_enum
).getitem_manager("global_pair", "", global_pair, default_mgr_enum)
gpair_mgr.add_lambda_guard(
lambda x: isinstance(x, Pair)
and isinstance(x.x, torch.Tensor)
and isinstance(x.y, int),
"global guard fail",
)
self.assertTrue(guard_manager.check(global_pair))
global_pair.y = "foo"
self.assertFalse(guard_manager.check(global_pair))
def test_type_manager(self):
guard_manager = RootGuardManager()
class A:
a = 4
class B(A):
def mul(self, x):
super().mul(x)
foo = B()
f_locals = {"foo": foo}
# len(type(foo).__mro__) == 2
foo_mgr = guard_manager.getitem_manager("foo", "", foo, default_mgr_enum)
type_manager = foo_mgr.type_manager("", type(foo), default_mgr_enum)
self.assertTrue(isinstance(foo_mgr.get_accessors()[0], TypeGuardAccessor))
mro_manager = type_manager.getattr_manager(
"__mro__", "", type(foo).__mro__, default_mgr_enum
)
self.assertTrue(
isinstance(type_manager.get_accessors()[0], GetAttrGuardAccessor)
)
mro_manager.add_length_check_guard(
3,
"Expected len(type(foo).__mro__) == 3",
)
# type(foo).__mro__[0].a = 4
item_manager = mro_manager.getitem_manager(
1, "", type(foo).__mro__[1], default_mgr_enum
)
self.assertTrue(
isinstance(mro_manager.get_accessors()[0], GetItemGuardAccessor)
)
attr_manager = item_manager.getattr_manager(
"a", "", type(foo).__mro__[0].a, default_mgr_enum
)
self.assertTrue(
isinstance(item_manager.get_accessors()[0], GetAttrGuardAccessor)
)
attr_manager.add_lambda_guard(
lambda x: x == 4,
"Expected value 4",
)
self.assertTrue(guard_manager.check(f_locals))
def test_tuple_iterator_getitem(self):
a = (1, 2, 3, 4, 5, 6)
foo = iter(a)
next(foo) # foo points at index=1
guard_manager = RootGuardManager()
# Check a[3] which is tuple_iterator_getitem(foo, 2)
guard_manager.add_tuple_iterator_length_guard(
5, id_type(iter(())), ["len == 5"]
)
guard_manager.tuple_iterator_getitem_manager(
2, "", foo, default_mgr_enum
).add_equals_match_guard(a[3], ["x==4"])
# Check that type match works
self.assertFalse(guard_manager.check(False))
self.assertTrue(guard_manager.check(foo))
# Check that index error fails gracefully
b = (1, 2)
b_foo = iter(b)
self.assertFalse(guard_manager.check(b_foo))
def test_global_weakref(self):
guard_manager = RootGuardManager()
globals_manager = guard_manager.globals_dict_manager(
globals(), "", None, default_mgr_enum
)
weakref_manager = globals_manager.global_weakref_manager(
"weakref_x", "", None, default_mgr_enum
)
weakref_manager.add_lambda_guard(
lambda x: isinstance(x, torch.Tensor),
"global weakref fail",
)
self.assertTrue(guard_manager.check(None))
global x
del x
self.assertFalse(guard_manager.check(None))
def test_lambda_manager(self):
a = (1, 1, 3, 4, 5, 6)
guard_manager = RootGuardManager()
# Check that we can use the same accessor
foo_mgr = guard_manager.lambda_manager(
lambda x: x[2], "", None, default_mgr_enum
)
foo_mgr.add_lambda_guard(
lambda x: x == 3,
"Expected value 3",
)
self.assertTrue(guard_manager.check(a))
# test that exception works
guard_manager = RootGuardManager()
def fn(x):
raise AssertionError("Test")
return x
foo_mgr = guard_manager.lambda_manager(fn, "", None, default_mgr_enum)
self.assertFalse(guard_manager.check(None))
debug_info = guard_manager.check_verbose(None)
self.assertFalse(debug_info.result)
self.assertTrue("Test" in debug_info.verbose_code_parts[0])
def test_dict_contains_guard(self):
root = RootGuardManager()
foo = {"a": 1, "b": 2}
guard = guards.DICT_CONTAINS(root, True, "a", ["has a"])
self.assertTrue(guard(foo))
self.assertTrue(guard({"a": 1, "b": 2}))
self.assertFalse(guard({"b": 2, "c": 3}))
self.assertFalse(guard({}))
guard = guards.DICT_CONTAINS(root, False, "c", ["not has c"])
self.assertTrue(guard(foo))
self.assertTrue(guard({"a": 1, "b": 2}))
self.assertFalse(guard({"b": 2, "c": 3}))
self.assertTrue(guard({}))
def test_dict_guard_manager(self):
root = RootGuardManager()
def nothing():
pass
f_locals = {
"d": {"a": 1, nothing: {"z": 3}, 100: torch.randn(4)},
}
# its a getitem_manager just for f_locals. But the child guard manager
# should be a DictGuardManager.
dict_mgr = root.getitem_manager(
"d",
"",
f_locals["d"],
torch._dynamo.guards.GuardManagerType.DICT_GUARD_MANAGER,
)
self.assertTrue(isinstance(dict_mgr, DictGuardManager))
self.assertTrue(root.check(f_locals))
# Check that no one can add a leaf guard
with self.assertRaises(RuntimeError):
dict_mgr.add_id_match_guard(id_type(f_locals), "id match")
# Check that no one can add an arbitrary accessor
with self.assertRaises(RuntimeError):
dict_mgr.getitem_manager("a", "", f_locals["d"]["a"])
# Check that it fails with different length dict
f_locals_prime = {
"d": {"a": 1, "b": 2},
}
self.assertFalse(root.check(f_locals_prime))
# Add key-value manager ("a" : 1)
self.assertTrue(root.check(f_locals))
dict_mgr.get_key_manager(0, "", "a", default_mgr_enum).add_equals_match_guard(
"a",
["dict.keys()[0] == a"],
)
self.assertTrue(root.check(f_locals))
dict_mgr.get_value_manager(0, "", 1, default_mgr_enum).add_equals_match_guard(
1, ["d[0] == 1"]
)
self.assertTrue(root.check(f_locals))
# Add key-value manager (nothing : {"z" : 3})
self.assertTrue(root.check(f_locals))
dict_mgr.get_key_manager(1, "", nothing, default_mgr_enum).add_lambda_guard(
lambda x: x is nothing, ["x is nothing"]
)
self.assertTrue(root.check(f_locals))
value_mgr = dict_mgr.get_value_manager(
1,
"",
f_locals["d"][nothing],
torch._dynamo.guards.GuardManagerType.DICT_GUARD_MANAGER,
)
self.assertTrue(isinstance(value_mgr, DictGuardManager))
self.assertTrue(root.check(f_locals))
# Check structure
# Check that we are only guarding on two keys. This is common in
# LazyVariableTracker.
self.assertEqual(len(dict_mgr.get_key_value_managers()), 2)
f_locals["d"]["a"] = 2
self.assertFalse(root.check(f_locals))
self.assertFalse(root.check_verbose(f_locals).result)
f_locals["d"]["a"] = 1
self.assertTrue(root.check(f_locals))
f_locals["d"].pop(100)
# fails because of len check
self.assertFalse(root.check(f_locals))
def test_clone(self):
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
def hook(guard_wrapper, f_locals, builder):
root = guard_wrapper.root
# Check full cloning works as expected
cloned_root = root.clone_manager(lambda x: True)
self.assertTrue(cloned_root.check(f_locals))
f_locals["foo"] = [3, 4]
self.assertFalse(cloned_root.check(f_locals))
f_locals["foo"] = [2, 3]
# Skip guarding on foo
cloned_root = root.clone_manager(lambda x: "foo" not in x.get_source())
f_locals["foo"] = [3, 4]
# Original root should fail, but new root should pass because of
# absence of guards on foo.
self.assertFalse(root.check(f_locals))
self.assertTrue(cloned_root.check(f_locals))
class Bar:
x = 4
y = torch.randn(4)
foo = [2, 3]
bar = Bar()
def fn(x, foo, bar):
return x + foo[0] + bar.x * bar.y
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
opt_fn(x, foo, bar)
def test_diff_guard_manager(self):
try:
from .utils import install_guard_manager_testing_hook
except ImportError:
from utils import install_guard_manager_testing_hook
counter = 0
def hook(guard_wrapper, f_locals, builder):
nonlocal counter
root = guard_wrapper.root
diff_guard_root = guard_wrapper.diff_guard_root
# Check full cloning works as expected
self.assertTrue(root.check(f_locals))
self.assertTrue(diff_guard_root.check(f_locals))
# Check that tensor guards run well
old_tensor = f_locals["bar"].y
f_locals["bar"].y = torch.randn(5)
self.assertFalse(root.check(f_locals))
self.assertFalse(diff_guard_root.check(f_locals))
f_locals["bar"].y = old_tensor
# Original root should fail on foo changes, but diff_guard_root
# should pass because it does not have foo guards on counter = 0. On
# counter = 1, it should pass because we have caused a recompile
# because of foo, causing it to recompile on foo.
f_locals["foo"] = [3, 3]
self.assertFalse(root.check(f_locals))
if counter == 0:
self.assertTrue(diff_guard_root.check(f_locals))
else:
self.assertFalse(diff_guard_root.check(f_locals))
counter += 1
class Bar:
def __init__(self):
self.x = 4
self.y = torch.randn(4)
bar = Bar()
def fn(x, foo, bar):
return x + foo[0] + bar.x * bar.y
x = torch.randn(4)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
with install_guard_manager_testing_hook(hook):
foo = (12.0, 13)
opt_fn(x, foo, bar)
foo = (10.0, 11)
opt_fn(x, foo, bar)
| GuardManagerTests |
python | google__flatbuffers | tests/py_test.py | {
"start": 88979,
"end": 92513
} | class ____(unittest.TestCase):
def test_nested_union_tables(self):
nestUnion = MyGame.Example.NestedUnion.NestedUnionTest.NestedUnionTestT()
nestUnion.name = 'testUnion1'
nestUnion.id = 1
nestUnion.data = MyGame.Example.NestedUnion.Vec3.Vec3T()
nestUnion.dataType = MyGame.Example.NestedUnion.Any.Any.Vec3
nestUnion.data.x = 4.278975356
nestUnion.data.y = 5.32
nestUnion.data.z = -6.464
nestUnion.data.test1 = 0.9
nestUnion.data.test2 = MyGame.Example.NestedUnion.Color.Color.Red
nestUnion.data.test3 = MyGame.Example.NestedUnion.Test.TestT()
nestUnion.data.test3.a = 5
nestUnion.data.test3.b = 2
b = flatbuffers.Builder(0)
b.Finish(nestUnion.Pack(b))
nestUnionDecode = (
MyGame.Example.NestedUnion.NestedUnionTest.NestedUnionTest.GetRootAs(
b.Bytes, b.Head()
)
)
nestUnionDecodeT = (
MyGame.Example.NestedUnion.NestedUnionTest.NestedUnionTestT.InitFromObj(
nestUnionDecode
)
)
self.assertEqual(nestUnionDecodeT.name, nestUnion.name)
self.assertEqual(nestUnionDecodeT.id, nestUnion.id)
self.assertEqual(nestUnionDecodeT.dataType, nestUnion.dataType)
self.assertEqual(nestUnionDecodeT.data.x, nestUnion.data.x)
self.assertEqual(nestUnionDecodeT.data.y, nestUnion.data.y)
self.assertEqual(nestUnionDecodeT.data.z, nestUnion.data.z)
self.assertEqual(nestUnionDecodeT.data.test1, nestUnion.data.test1)
self.assertEqual(nestUnionDecodeT.data.test2, nestUnion.data.test2)
self.assertEqual(nestUnionDecodeT.data.test3.a, nestUnion.data.test3.a)
self.assertEqual(nestUnionDecodeT.data.test3.b, nestUnion.data.test3.b)
nestUnionDecodeTFromBuf = MyGame.Example.NestedUnion.NestedUnionTest.NestedUnionTestT.InitFromPackedBuf(
b.Bytes, b.Head()
)
self.assertEqual(nestUnionDecodeTFromBuf.name, nestUnion.name)
self.assertEqual(nestUnionDecodeTFromBuf.id, nestUnion.id)
self.assertEqual(nestUnionDecodeTFromBuf.dataType, nestUnion.dataType)
self.assertEqual(nestUnionDecodeTFromBuf.data.x, nestUnion.data.x)
self.assertEqual(nestUnionDecodeTFromBuf.data.y, nestUnion.data.y)
self.assertEqual(nestUnionDecodeTFromBuf.data.z, nestUnion.data.z)
self.assertEqual(nestUnionDecodeTFromBuf.data.test1, nestUnion.data.test1)
self.assertEqual(nestUnionDecodeTFromBuf.data.test2, nestUnion.data.test2)
self.assertEqual(
nestUnionDecodeTFromBuf.data.test3.a, nestUnion.data.test3.a
)
self.assertEqual(
nestUnionDecodeTFromBuf.data.test3.b, nestUnion.data.test3.b
)
nestUnionDecodeTFromBuf2 = MyGame.Example.NestedUnion.NestedUnionTest.NestedUnionTestT.InitFromPackedBuf(
b.Output()
)
self.assertEqual(nestUnionDecodeTFromBuf2.name, nestUnion.name)
self.assertEqual(nestUnionDecodeTFromBuf2.id, nestUnion.id)
self.assertEqual(nestUnionDecodeTFromBuf2.dataType, nestUnion.dataType)
self.assertEqual(nestUnionDecodeTFromBuf2.data.x, nestUnion.data.x)
self.assertEqual(nestUnionDecodeTFromBuf2.data.y, nestUnion.data.y)
self.assertEqual(nestUnionDecodeTFromBuf2.data.z, nestUnion.data.z)
self.assertEqual(nestUnionDecodeTFromBuf2.data.test1, nestUnion.data.test1)
self.assertEqual(nestUnionDecodeTFromBuf2.data.test2, nestUnion.data.test2)
self.assertEqual(
nestUnionDecodeTFromBuf2.data.test3.a, nestUnion.data.test3.a
)
self.assertEqual(
nestUnionDecodeTFromBuf2.data.test3.b, nestUnion.data.test3.b
)
| TestNestedUnionTables |
python | scrapy__scrapy | tests/test_pipeline_media.py | {
"start": 17686,
"end": 17880
} | class ____(MockedMediaPipeline):
def media_failed(self, failure, request, info):
self._mockcalled.append("media_failed")
return failure # deprecated
| MediaFailedFailurePipeline |
python | pandas-dev__pandas | pandas/tests/extension/test_datetime.py | {
"start": 4622,
"end": 4678
} | class ____(base.NDArrayBacked2DTests):
pass
| Test2DCompat |
python | openai__openai-python | src/openai/types/responses/apply_patch_tool.py | {
"start": 191,
"end": 311
} | class ____(BaseModel):
type: Literal["apply_patch"]
"""The type of the tool. Always `apply_patch`."""
| ApplyPatchTool |
python | joke2k__faker | tests/providers/test_lorem.py | {
"start": 9167,
"end": 11988
} | class ____:
"""Test cs_CZ lorem provider"""
word_list = [word.lower() for word in CsCzLoremProvider.word_list]
def test_paragraph(self, faker, num_samples):
num_sentences = 10
for _ in range(num_samples):
paragraph = faker.paragraph(nb_sentences=num_sentences)
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_paragraphs(self, faker, num_samples):
num_paragraphs = 5
for _ in range(num_samples):
paragraphs = faker.paragraphs(nb=num_paragraphs)
for paragraph in paragraphs:
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentence(self, faker, num_samples):
num_words = 10
for _ in range(num_samples):
sentence = faker.sentence(nb_words=num_words)
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentences(self, faker, num_samples):
num_sentences = 5
for _ in range(num_samples):
sentences = faker.sentences(nb=num_sentences)
for sentence in sentences:
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_text(self, faker, num_samples):
num_chars = 25
for _ in range(num_samples):
text = faker.text(max_nb_chars=num_chars)
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_texts(self, faker, num_samples):
num_texts = 5
num_chars = 25
for _ in range(num_samples):
texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
for text in texts:
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_word(self, faker, num_samples):
for _ in range(num_samples):
word = faker.word()
assert isinstance(word, str) and word in CsCzLoremProvider.word_list
def test_words(self, faker, num_samples):
num_words = 5
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in CsCzLoremProvider.word_list for word in words)
| TestCsCz |
python | django__django | django/views/generic/dates.py | {
"start": 3768,
"end": 5143
} | class ____:
"""Mixin for views manipulating day-based data."""
day_format = "%d"
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""Return the day for which this view should display data."""
day = self.day
if day is None:
try:
day = self.kwargs["day"]
except KeyError:
try:
day = self.request.GET["day"]
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""Get the next valid day."""
return _get_next_prev(self, date, is_previous=False, period="day")
def get_previous_day(self, date):
"""Get the previous valid day."""
return _get_next_prev(self, date, is_previous=True, period="day")
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""Return the start date of the current interval."""
return date
| DayMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.