language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 2187,
"end": 2272
} | class ____(BaseModel):
refusal: str
type: str = "refusal"
| ResponseOutputRefusal |
python | coleifer__peewee | peewee.py | {
"start": 154771,
"end": 155220
} | class ____(object):
def __init__(self, field):
self.field = field
self.model = field.rel_model
self.rel_model = field.model
def __get__(self, instance, instance_type=None):
if instance is not None:
dest = self.field.rel_field.name
return (self.rel_model
.select()
.where(self.field == getattr(instance, dest)))
return self
| BackrefAccessor |
python | gevent__gevent | src/greentest/3.10/test_ftplib.py | {
"start": 32163,
"end": 33791
} | class ____(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0),
af=socket.AF_INET6,
encoding=DEFAULT_ENCODING)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=DEFAULT_ENCODING)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode(self.client.encoding))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
| TestIPv6Environment |
python | keras-team__keras | keras/src/layers/layer.py | {
"start": 71405,
"end": 77857
} | class ____:
def __init__(self, signature, call_context_args, args, kwargs):
# Strip out user-supplied call-context args that this layer’s `call()`
# does not accept (otherwise `signature.bind` would raise).
# This includes built-in args like `training`, and user-defined args.
call_args = {
context_arg: kwargs.pop(context_arg)
for context_arg in call_context_args
if context_arg in kwargs and context_arg not in signature.parameters
}
bound_args = signature.bind(*args, **kwargs)
# Combine the two dicts.
self.user_arguments_dict = {**call_args, **bound_args.arguments}
bound_args.apply_defaults()
arg_dict = {}
arg_names = []
tensor_arg_dict = {}
tensor_args = []
tensor_arg_names = []
nested_tensor_arg_names = []
for name, value in bound_args.arguments.items():
arg_dict[name] = value
arg_names.append(name)
if is_backend_tensor_or_symbolic(value):
tensor_args.append(value)
tensor_arg_names.append(name)
tensor_arg_dict[name] = value
elif tree.is_nested(value) and len(value) > 0:
flat_values = tree.flatten(value)
if all(
is_backend_tensor_or_symbolic(x, allow_none=True)
for x in flat_values
):
tensor_args.append(value)
tensor_arg_names.append(name)
tensor_arg_dict[name] = value
nested_tensor_arg_names.append(name)
elif any(is_backend_tensor_or_symbolic(x) for x in flat_values):
raise ValueError(
"In a nested call() argument, "
"you cannot mix tensors and non-tensors. "
"Received invalid mixed argument: "
f"{name}={value}"
)
self.arguments_dict = arg_dict
self.argument_names = arg_names
self.tensor_arguments_dict = tensor_arg_dict
self.tensor_arguments_names = tensor_arg_names
self.nested_tensor_argument_names = nested_tensor_arg_names
self.first_arg = arg_dict[arg_names[0]]
if all(
backend.is_tensor(x) for x in self.tensor_arguments_dict.values()
):
self.eager = True
else:
self.eager = False
def get_arguments_dict(fn, args, kwargs):
"""Return a dict mapping argument names to their values."""
sig = inspect.signature(fn)
bound_args = sig.bind(*args, **kwargs)
arg_dict = {}
for name, value in bound_args.arguments.items():
arg_dict[name] = value
return arg_dict
def get_shapes_dict(call_spec):
"""Convert the call() arguments dict into a dict of input shape arguments.
Example:
```
>>> get_shapes_dict(call_spec)
{"input_a_shape": (2, 3)}
```
"""
def standardize_shape_or_none(x):
return None if x is None else backend.standardize_shape(x.shape)
shapes_dict = {}
for k, v in call_spec.tensor_arguments_dict.items():
if k == "mask" or k.endswith("_mask"):
# Do not include mask tensors in shapes dict
continue
if k == "kwargs" or k == "args":
# Do not include catch-alls in shapes dict
continue
if k in call_spec.nested_tensor_argument_names:
shapes_dict[f"{k}_shape"] = tree.map_structure(
standardize_shape_or_none, v
)
else:
shapes_dict[f"{k}_shape"] = standardize_shape_or_none(v)
return shapes_dict
def update_shapes_dict_for_target_fn(
target_fn,
shapes_dict,
call_spec,
class_name,
):
"""Updates a `shapes_dict` for `build()` or `compute_output_shape()`.
This function will align a dictionary of the shapes of all tensor
passed to `call`, with the signatures of `build()` or
`compute_output_shape()`.
The alignment is a follows:
- If `build()` or `compute_output_shape()` accept only one argument,
forward the shape of the first positional argument from call without
checking any argument names.
- If `build()` or `compute_output_shape()` accept multiple arguments,
enforce that all argument names match a call argument name, e.g.
`foo_shape` would match call argument `foo`.
Returns:
An updated `shapes_dict` that can be used to invoke
`target_fn(**shapes_dict)`.
"""
if utils.is_default(target_fn):
return None
sig = inspect.signature(target_fn)
expected_names = []
for name, param in sig.parameters.items():
if param.kind in (
param.POSITIONAL_OR_KEYWORD,
param.POSITIONAL_ONLY,
param.KEYWORD_ONLY,
):
expected_names.append(name)
# Single arg: don't check names, pass first shape.
if len(expected_names) == 1:
key = expected_names[0]
values = tuple(shapes_dict.values())
if values:
input_shape = values[0]
else:
input_shape = None
return {key: input_shape}
# Multiple args: check that all names line up.
kwargs = {}
for name in expected_names:
method_name = target_fn.__name__
error_preamble = (
f"For a `{method_name}()` method with more than one argument, all "
"arguments should have a `_shape` suffix and match an argument "
f"from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` "
)
if not name.endswith("_shape"):
raise ValueError(
f"{error_preamble} For layer '{class_name}', "
f"Received `{method_name}()` argument "
f"`{name}`, which does not end in `_shape`."
)
expected_call_arg = utils.removesuffix(name, "_shape")
if expected_call_arg not in call_spec.arguments_dict:
raise ValueError(
f"{error_preamble} For layer '{class_name}', "
f"received `{method_name}()` argument "
f"`{name}`, but `call()` does not have argument "
f"`{expected_call_arg}`."
)
if name in shapes_dict:
kwargs[name] = shapes_dict[name]
return kwargs
| CallSpec |
python | scipy__scipy | scipy/linalg/tests/test_lapack.py | {
"start": 6125,
"end": 16438
} | class ____:
def test_gels(self):
rng = np.random.default_rng(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rng.random((m, n)).astype(dtype)
b1 = rng.random(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
| TestLeastSquaresSolvers |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column10.py | {
"start": 315,
"end": 1220
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column10.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45686144, 45722240]
data = [
["A", "B", "C", "D", "E"],
[1, 2, 3, 2, 1],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | allegroai__clearml | clearml/debugging/timer.py | {
"start": 115,
"end": 1915
} | class ____(object):
"""A class implementing a simple timer, with a reset option"""
def __init__(self) -> None:
self._start_time = 0.0
self._diff = 0.0
self._total_time = 0.0
self._average_time = 0.0
self._calls = 0
self.tic()
def reset(self) -> None:
self._start_time = 0.0
self._diff = 0.0
self.reset_average()
def reset_average(self) -> None:
"""Reset average counters (does not change current timer)"""
self._total_time = 0
self._average_time = 0
self._calls = 0
def tic(self) -> None:
try:
# using time.time instead of time.clock because time time.clock
# does not normalize for multi threading
self._start_time = time.time()
except Exception:
pass
def toc(self, average: bool = True) -> float:
self._diff = time.time() - self._start_time
self._total_time += self._diff
self._calls += 1
self._average_time = self._total_time / self._calls
if average:
return self._average_time
else:
return self._diff
@property
def average_time(self) -> float:
return self._average_time
@property
def total_time(self) -> float:
return self._total_time
def toc_with_reset(self, average: bool = True, reset_if_calls: int = 1000) -> float:
"""Enable toc with reset (slightly inaccurate if reset event occurs)"""
if self._calls > reset_if_calls:
last_diff = time.time() - self._start_time
self._start_time = time.time()
self._total_time = last_diff
self._average_time = 0
self._calls = 0
return self.toc(average=average)
| Timer |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 40143,
"end": 41698
} | class ____:
"""Test that secrets masker doesn't import kubernetes unnecessarily."""
def test_no_k8s_import_when_not_needed(self):
"""Ensure kubernetes is not imported when masking non-k8s secrets."""
# Ensure kubernetes is not already imported
k8s_modules = [m for m in sys.modules if m.startswith("kubernetes")]
if k8s_modules:
pytest.skip("Kubernetes already imported, cannot test import avoidance")
masker = SecretsMasker()
configure_secrets_masker_for_test(masker)
masker.add_mask("test_secret", "password")
redacted = masker.redact({"password": "test_secret", "user": "admin"})
assert redacted["password"] == "***"
assert redacted["user"] == "admin"
assert "kubernetes.client" not in sys.modules
def test_k8s_objects_still_detected_when_imported(self):
"""Ensure V1EnvVar objects are still properly detected when k8s is imported."""
pytest.importorskip("kubernetes")
from kubernetes.client import V1EnvVar
# Create a V1EnvVar object with a sensitive name
env_var = V1EnvVar(name="password", value="secret123")
masker = SecretsMasker()
configure_secrets_masker_for_test(masker)
# Redact the V1EnvVar object - the name field is sensitive
redacted = masker.redact(env_var)
# Should be redacted since "password" is a sensitive field name
assert redacted["value"] == "***"
assert redacted["name"] == "password"
| TestKubernetesImportAvoidance |
python | PyCQA__pycodestyle | tests/test_blank_lines.py | {
"start": 462,
"end": 1660
} | class ____(BlankLinesTestCase):
"""
Tests for default blank with 2 blank lines for top level and 1
blank line for methods.
"""
def test_initial_no_blank(self):
"""
It will accept no blank lines at the start of the file.
"""
result = errors_from_src("""def some_function():
pass
""")
self.assertNoErrors(result)
def test_initial_lines_one_blank(self):
"""
It will accept 1 blank lines before the first line of actual
code, even if in other places it asks for 2
"""
result = errors_from_src("""
def some_function():
pass
""")
self.assertNoErrors(result)
def test_initial_lines_two_blanks(self):
"""
It will accept 2 blank lines before the first line of actual
code, as normal.
"""
result = errors_from_src("""
def some_function():
pass
""")
self.assertNoErrors(result)
def test_method_less_blank_lines(self):
"""
It will trigger an error when less than 1 blank lin is found
before method definitions.
"""
result = errors_from_src("""# First comment line.
| TestBlankLinesDefault |
python | getsentry__sentry | src/sentry/api/base.py | {
"start": 24566,
"end": 25150
} | class ____:
def track_set_commits_local(self, request: Request, organization_id=None, project_ids=None):
try:
analytics.record(
ReleaseSetCommitsLocalEvent(
user_id=request.user.id if request.user and request.user.id else None,
organization_id=organization_id,
project_ids=project_ids,
user_agent=request.META.get("HTTP_USER_AGENT", ""),
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
| ReleaseAnalyticsMixin |
python | gevent__gevent | src/gevent/tests/test__os.py | {
"start": 2179,
"end": 5087
} | class ____(TestOS_tp):
def read(self, fd, count):
return os.nb_read(fd, count)
def write(self, fd, count):
return os.nb_write(fd, count)
def pipe(self):
r, w = super(TestOS_nb, self).pipe()
os.make_nonblocking(r)
os.make_nonblocking(w)
return r, w
def _make_ignored_oserror(self):
import errno
ignored_oserror = OSError()
ignored_oserror.errno = errno.EINTR
return ignored_oserror
def _check_hub_event_closed(self, mock_get_hub, fd, event):
mock_get_hub.assert_called_once_with()
hub = mock_get_hub.return_value
io = hub.loop.io
io.assert_called_once_with(fd, event)
event = io.return_value
event.close.assert_called_once_with()
def _test_event_closed_on_normal_io(self, nb_func, nb_arg,
mock_io, mock_get_hub, event):
mock_io.side_effect = [self._make_ignored_oserror(), 42]
fd = 100
result = nb_func(fd, nb_arg)
self.assertEqual(result, 42)
self._check_hub_event_closed(mock_get_hub, fd, event)
def _test_event_closed_on_io_error(self, nb_func, nb_arg,
mock_io, mock_get_hub, event):
mock_io.side_effect = [self._make_ignored_oserror(), ValueError()]
fd = 100
with self.assertRaises(ValueError):
nb_func(fd, nb_arg)
self._check_hub_event_closed(mock_get_hub, fd, event)
@mock.patch('gevent.os.get_hub')
@mock.patch('gevent.os._write')
def test_event_closed_on_write(self, mock_write, mock_get_hub):
self._test_event_closed_on_normal_io(os.nb_write, b'buf',
mock_write, mock_get_hub,
2)
@mock.patch('gevent.os.get_hub')
@mock.patch('gevent.os._write')
def test_event_closed_on_write_error(self, mock_write, mock_get_hub):
self._test_event_closed_on_io_error(os.nb_write, b'buf',
mock_write, mock_get_hub,
2)
@mock.patch('gevent.os.get_hub')
@mock.patch('gevent.os._read')
def test_event_closed_on_read(self, mock_read, mock_get_hub):
self._test_event_closed_on_normal_io(os.nb_read, b'buf',
mock_read, mock_get_hub,
1)
@mock.patch('gevent.os.get_hub')
@mock.patch('gevent.os._read')
def test_event_closed_on_read_error(self, mock_read, mock_get_hub):
self._test_event_closed_on_io_error(os.nb_read, b'buf',
mock_read, mock_get_hub,
1)
@greentest.skipUnless(hasattr(os, 'fork_and_watch'),
"Only on POSIX")
| TestOS_nb |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 15938,
"end": 16107
} | class ____(_Test_two_random_trees_periodic):
def setup_method(self):
super().setup_method()
self.d = 2
@KDTreeTest
| _Test_two_random_trees_far_periodic |
python | scrapy__scrapy | scrapy/selector/unified.py | {
"start": 1018,
"end": 3170
} | class ____(_ParselSelector, object_ref):
"""
An instance of :class:`Selector` is a wrapper over response to select
certain parts of its content.
``response`` is an :class:`~scrapy.http.HtmlResponse` or an
:class:`~scrapy.http.XmlResponse` object that will be used for selecting
and extracting data.
``text`` is a unicode string or utf-8 encoded text for cases when a
``response`` isn't available. Using ``text`` and ``response`` together is
undefined behavior.
``type`` defines the selector type, it can be ``"html"``, ``"xml"``, ``"json"``
or ``None`` (default).
If ``type`` is ``None``, the selector automatically chooses the best type
based on ``response`` type (see below), or defaults to ``"html"`` in case it
is used together with ``text``.
If ``type`` is ``None`` and a ``response`` is passed, the selector type is
inferred from the response type as follows:
* ``"html"`` for :class:`~scrapy.http.HtmlResponse` type
* ``"xml"`` for :class:`~scrapy.http.XmlResponse` type
* ``"json"`` for :class:`~scrapy.http.TextResponse` type
* ``"html"`` for anything else
Otherwise, if ``type`` is set, the selector type will be forced and no
detection will occur.
"""
__slots__ = ["response"]
selectorlist_cls = SelectorList
def __init__(
self,
response: TextResponse | None = None,
text: str | None = None,
type: str | None = None, # noqa: A002
root: Any | None = _NOT_SET,
**kwargs: Any,
):
if response is not None and text is not None:
raise ValueError(
f"{self.__class__.__name__}.__init__() received both response and text"
)
st = _st(response, type)
if text is not None:
response = _response_from_text(text, st)
if response is not None:
text = response.text
kwargs.setdefault("base_url", get_base_url(response))
self.response = response
if root is not _NOT_SET:
kwargs["root"] = root
super().__init__(text=text, type=st, **kwargs)
| Selector |
python | Netflix__metaflow | metaflow/_vendor/yaml/resolver.py | {
"start": 137,
"end": 6844
} | class ____:
DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
implicit_resolvers = {}
for key in cls.yaml_implicit_resolvers:
implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
cls.yaml_implicit_resolvers = implicit_resolvers
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, str) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (str, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, str):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, str):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == '':
resolvers = self.yaml_implicit_resolvers.get('', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
| BaseResolver |
python | langchain-ai__langchain | libs/standard-tests/tests/unit_tests/test_basic_tool.py | {
"start": 234,
"end": 513
} | class ____(BaseTool):
name: str = "ParrotMultiplyTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
@override
def _run(self, a: int, b: int) -> int:
return a * b + 80
| ParrotMultiplyTool |
python | kamyu104__LeetCode-Solutions | Python/block-placement-queries.py | {
"start": 108,
"end": 1862
} | class ____(object):
def getResults(self, queries):
"""
:type queries: List[List[int]]
:rtype: List[bool]
"""
class BIT(object): # 0-indexed.
def __init__(self, n, default=0, fn=lambda x, y: x+y):
self.__bit = [default]*(n+1) # Extra one for dummy node.
self.__default = default
self.__fn = fn
def update(self, i, val):
i += 1 # Extra one for dummy node.
while i < len(self.__bit):
self.__bit[i] = self.__fn(self.__bit[i], val)
i += (i & -i)
def query(self, i):
i += 1 # Extra one for dummy node.
ret = self.__default
while i > 0:
ret = self.__fn(ret, self.__bit[i])
i -= (i & -i)
return ret
sl = SortedList(q[1] for q in queries if q[0] == 1)
val_to_idx = {x:i for i, x in enumerate(sl)}
bit = BIT(len(val_to_idx), fn=max)
for i in xrange(len(sl)):
bit.update(val_to_idx[sl[i]], sl[i]-(sl[i-1] if i-1 >= 0 else 0))
result = []
for q in reversed(queries):
i = sl.bisect_left(q[1])
if q[0] == 1:
if i+1 < len(sl):
bit.update(val_to_idx[sl[i+1]], sl[i+1]-(sl[i-1] if i-1 >= 0 else 0))
del sl[i]
else:
result.append(q[1]-(sl[i-1] if i-1 >= 0 else 0) >= q[2] or (i-1 >= 0 and bit.query(val_to_idx[sl[i-1]]) >= q[2]))
result.reverse()
return result
# Time: O(qlogq)
# Space: O(q)
from sortedcontainers import SortedList
# sorted list, segment tree
| Solution |
python | yaml__pyyaml | lib/yaml/serializer.py | {
"start": 163,
"end": 4165
} | class ____:
ANCHOR_TEMPLATE = 'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
| Serializer |
python | gevent__gevent | src/gevent/tests/test__local.py | {
"start": 2008,
"end": 10727
} | class ____(greentest.TestCase):
# pylint:disable=attribute-defined-outside-init,blacklisted-name
def setUp(self):
del deleted_sentinels[:]
del created_sentinels[:]
tearDown = setUp
def test_create_local_subclass_init_args(self):
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local("foo")
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local(kw="foo")
def test_local_opts_not_subclassed(self):
l = local()
l.attr = 1
self.assertEqual(l.attr, 1)
def test_cannot_set_delete_dict(self):
l = local()
with self.assertRaises(AttributeError):
l.__dict__ = 1
with self.assertRaises(AttributeError):
del l.__dict__
def test_delete_with_no_dict(self):
l = local()
with self.assertRaises(AttributeError):
delattr(l, 'thing')
def del_local():
with self.assertRaises(AttributeError):
delattr(l, 'thing')
t = Thread(target=del_local)
t.start()
t.join()
def test_slot_and_type_attributes(self):
a = A(Obj())
a.initialized = 1
self.assertEqual(a.initialized, 1)
# The slot is shared
def demonstrate_slots_shared():
self.assertEqual(a.initialized, 1)
a.initialized = 2
greenlet = Thread(target=demonstrate_slots_shared)
greenlet.start()
greenlet.join()
self.assertEqual(a.initialized, 2)
# The slot overrides dict values
a.__dict__['initialized'] = 42 # pylint:disable=unsupported-assignment-operation
self.assertEqual(a.initialized, 2)
# Deleting the slot deletes the slot, but not the dict
del a.initialized
self.assertFalse(hasattr(a, 'initialized'))
self.assertIn('initialized', a.__dict__)
# We can delete the 'path' ivar
# and fall back to the type
del a.path
self.assertEqual(a.path, '')
with self.assertRaises(AttributeError):
del a.path
# A read property calls get
self.assertEqual(a.read_property, 42)
a.read_property = 1
self.assertEqual(a.read_property, 1)
self.assertIsInstance(A.read_property, ReadProperty)
# Type attributes can be read
self.assertEqual(a.type_path, 'MyPath')
self.assertNotIn('type_path', a.__dict__)
# and replaced in the dict
a.type_path = 'Local'
self.assertEqual(a.type_path, 'Local')
self.assertIn('type_path', a.__dict__)
def test_attribute_error(self):
# pylint:disable=attribute-defined-outside-init
a = A(Obj())
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def set_fizz_buzz():
a.fizz_buzz = 1
greenlet = Thread(target=set_fizz_buzz)
greenlet.start()
greenlet.join()
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def test_getattr_called(self):
getter = WithGetattr()
self.assertEqual(42, getter.foo)
getter.foo = 'baz'
self.assertEqual('baz', getter.foo)
def test_copy(self):
a = A(Obj())
a.path = '123'
a.obj.echo = 'test'
b = copy(a)
# Copy makes a shallow copy. Meaning that the attribute path
# has to be independent in the original and the copied object because the
# value is a string, but the attribute obj should be just reference to
# the instance of the class Obj
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
self.assertEqual(a.obj, b.obj, 'The values must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
a.obj.echo = "works"
self.assertEqual(a.obj, b.obj, 'The values must be equal')
def test_copy_no_subclass(self):
a = local()
setattr(a, 'thing', 42)
b = copy(a)
self.assertEqual(b.thing, 42)
self.assertIsNot(a.__dict__, b.__dict__)
def test_objects(self):
# Test which failed in the eventlet?!
a = A({})
a.path = '123'
b = A({'one': 2})
b.path = '123'
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
def test_class_attr(self, kind=MyLocal):
mylocal = kind()
self.assertEqual(42, mylocal.CLASS_PROP)
mylocal.CLASS_PROP = 1
self.assertEqual(1, mylocal.CLASS_PROP)
self.assertEqual(mylocal.__dict__['CLASS_PROP'], 1) # pylint:disable=unsubscriptable-object
del mylocal.CLASS_PROP
self.assertEqual(42, mylocal.CLASS_PROP)
self.assertIs(mylocal, mylocal.desc)
def test_class_attr_subclass(self):
self.test_class_attr(kind=MyLocalSubclass)
def test_locals_collected_when_greenlet_dead_but_still_referenced(self):
# https://github.com/gevent/gevent/issues/387
import gevent
my_local = MyLocal()
my_local.sentinel = None
greentest.gc_collect_if_needed()
del created_sentinels[:]
del deleted_sentinels[:]
def demonstrate_my_local():
# Get the important parts
getattr(my_local, 'sentinel')
# Create and reference greenlets
greenlets = [Thread(target=demonstrate_my_local) for _ in range(5)]
for t in greenlets:
t.start()
gevent.sleep()
self.assertEqual(len(created_sentinels), len(greenlets))
for g in greenlets:
assert not g.is_alive()
gevent.sleep() # let the callbacks run
greentest.gc_collect_if_needed()
# The sentinels should be gone too
self.assertEqual(len(deleted_sentinels), len(greenlets))
@greentest.skipOnLibuvOnPyPyOnWin("GC makes this non-deterministic, especially on Windows")
def test_locals_collected_when_unreferenced_even_in_running_greenlet(self):
# In fact only on Windows do we see GC being an issue;
# pypy2 5.0 on macos and travis don't have a problem.
# https://github.com/gevent/gevent/issues/981
import gevent
import gc
gc.collect()
count = 1000
running_greenlet = None
def demonstrate_my_local():
for _ in range(1000):
x = MyLocal()
self.assertIsNotNone(x.sentinel)
x = None
gc.collect()
gc.collect()
self.assertEqual(count, len(created_sentinels))
# They're all dead, even though this greenlet is
# still running
self.assertEqual(count, len(deleted_sentinels))
# The links were removed as well.
self.assertFalse(running_greenlet.has_links())
running_greenlet = gevent.spawn(demonstrate_my_local)
gevent.sleep()
running_greenlet.join()
self.assertEqual(count, len(deleted_sentinels))
@greentest.ignores_leakcheck
def test_local_dicts_for_greenlet(self):
import gevent
from gevent.local import all_local_dicts_for_greenlet
class MyGreenlet(gevent.Greenlet):
results = None
id_x = None
def _run(self): # pylint:disable=method-hidden
x = local()
x.foo = 42
self.id_x = id(x)
self.results = all_local_dicts_for_greenlet(self)
g = MyGreenlet()
g.start()
g.join()
self.assertTrue(g.successful, g)
self.assertEqual(g.results,
[((local, g.id_x), {'foo': 42})])
def test_local_with_abc(self):
# an ABC (or generally any non-exact-type) in the MRO doesn't
# break things. See https://github.com/gevent/gevent/issues/1201
x = LocalWithABC()
x.d = {'a': 1}
self.assertEqual({'a': 1}, x.d)
# The ABC part works
self.assertIn('a', x.d)
self.assertEqual(['a'], list(x.keys()))
def test_local_with_staticmethod(self):
x = LocalWithStaticMethod()
self.assertEqual(42, x.a_staticmethod())
def test_local_with_classmethod(self):
x = LocalWithClassMethod()
self.assertIs(LocalWithClassMethod, x.a_classmethod())
| TestGeventLocal |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/tryExcept1.py | {
"start": 841,
"end": 1447
} | class ____(*base_exceptions): ...
def func4():
try:
pass
except Exception1:
pass
except Exception2:
pass
def func5():
try:
return 1
# This should generate an error.
except int:
pass
# This should generate an error.
except (NotImplementedError, str):
pass
# This should generate an error.
except [Exception, ValueError]:
pass
except BaseException:
pass
T = TypeVar("T", bound=BaseException)
def func6(*errors: type[T]):
try:
return 1
except errors as e:
return e
| Exception2 |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/continue_statements_test.py | {
"start": 958,
"end": 5508
} | class ____(converter_testing.TestCase):
def assertTransformedEquivalent(self, f, *inputs):
tr = self.transform(f, continue_statements)
self.assertEqual(f(*inputs), tr(*inputs))
def test_basic(self):
def f(x):
v = []
while x > 0:
x -= 1
if x % 2 == 0:
continue
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_multiple_continues(self):
def f(x):
v = []
while x > 0:
x -= 1
if x > 1:
continue
if x > 2:
continue
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_multiple_continues_in_nested_scope(self):
def f(a):
v = []
for x in a:
x -= 1
if x > 100:
continue
try:
raise ValueError('intentional')
except ValueError:
continue
v.append(x)
return v
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [1])
self.assertTransformedEquivalent(f, [2])
self.assertTransformedEquivalent(f, [1, 2, 3])
def test_for_loop(self):
def f(a):
v = []
for x in a:
x -= 1
if x % 2 == 0:
continue
v.append(x)
return v
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [1])
self.assertTransformedEquivalent(f, [2])
self.assertTransformedEquivalent(f, [1, 2, 3])
def test_nested_with(self):
def f(x):
v = []
while x > 0:
x -= 1
with ops.name_scope(''):
if x % 2 == 0:
continue
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_nested_multiple_withs(self):
def f(x):
v = []
while x > 0:
x -= 1
with ops.name_scope(''):
if x % 2 == 0:
continue
with ops.name_scope(''):
v.append(x)
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_nested_multiple_withs_and_statements(self):
def f(x):
v = []
while x > 0:
x -= 1
with ops.name_scope(''):
if x % 2 == 0:
continue
v.append(x)
v.append(x)
with ops.name_scope(''):
v.append(x)
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_nested_multiple_withs_and_nested_withs(self):
def f(x):
v = []
while x > 0:
x -= 1
with ops.name_scope(''):
if x % 2 == 0:
continue
with ops.name_scope(''):
v.append(x)
v.append(x)
with ops.name_scope(''):
v.append(x)
v.append(x)
return v
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_nested(self):
def f(x):
v = []
u = []
w = []
while x > 0:
x -= 1
if x % 2 == 0:
if x % 3 != 0:
u.append(x)
else:
w.append(x)
continue
v.append(x)
return v, u, w
self.assertTransformedEquivalent(f, 0)
self.assertTransformedEquivalent(f, 1)
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 4)
def test_multiple_guarded_continues_with_side_effects(self):
def f(x):
def track(u, x):
u.append(x)
return x
u = []
v = []
while x > 0:
x -= 1
if track(u, x) > 1:
continue
if track(u, x) > 2:
continue
v.append(x)
return u, v
self.assertTransformedEquivalent(f, 3)
self.assertTransformedEquivalent(f, 2)
if __name__ == '__main__':
test.main()
| ContinueCanonicalizationTest |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 19956,
"end": 21797
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"requires-python": 9000, "dynamic": ["requires-python"]}}
)
with pytest.raises(
ValueError,
match=(
"Metadata field `requires-python` cannot be both statically defined and "
"listed in field `project.dynamic`"
),
):
_ = metadata.core.requires_python
@pytest.mark.parametrize("attribute", ["requires_python", "python_constraint"])
def test_not_string(self, isolation, attribute):
metadata = ProjectMetadata(str(isolation), None, {"project": {"requires-python": 9000}})
with pytest.raises(TypeError, match="Field `project.requires-python` must be a string"):
_ = getattr(metadata.core, attribute)
def test_invalid(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"requires-python": "^1"}})
with pytest.raises(ValueError, match="Field `project.requires-python` is invalid: .+"):
_ = metadata.core.requires_python
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.requires_python == metadata.core.requires_python == ""
for major_version in map(str, range(10)):
assert metadata.core.python_constraint.contains(major_version)
def test_custom(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"requires-python": ">2"}})
assert metadata.core.requires_python == metadata.core.requires_python == ">2"
assert not metadata.core.python_constraint.contains("2")
assert metadata.core.python_constraint.contains("3")
| TestRequiresPython |
python | optuna__optuna | optuna/pruners/_wilcoxon.py | {
"start": 528,
"end": 9623
} | class ____(BasePruner):
"""Pruner based on the `Wilcoxon signed-rank test <https://en.wikipedia.org/w/index.php?title=Wilcoxon_signed-rank_test&oldid=1195011212>`__.
This pruner performs the Wilcoxon signed-rank test between the current trial and the current best trial,
and stops whenever the pruner is sure up to a given p-value that the current trial is worse than the best one.
This pruner is effective for optimizing the mean/median of some (costly-to-evaluate) performance scores over a set of problem instances.
Example applications include the optimization of:
* the mean performance of a heuristic method (simulated annealing, genetic algorithm, SAT solver, etc.) on a set of problem instances,
* the k-fold cross-validation score of a machine learning model, and
* the accuracy of outputs of a large language model (LLM) on a set of questions.
There can be "easy" or "hard" instances (the pruner handles correspondence of the instances between different trials).
In each trial, it is recommended to shuffle the evaluation order, so that the optimization doesn't overfit to the instances in the beginning.
When you use this pruner, you must call ``Trial.report(value, step)`` method for each step (instance id) with
the evaluated value. The instance id may not be in ascending order.
This is different from other pruners in that the reported value need not converge
to the real value. To use pruners such as :class:`~optuna.pruners.SuccessiveHalvingPruner`
in the same setting, you must provide e.g., the historical average of the evaluated values.
.. seealso::
Please refer to :meth:`~optuna.trial.Trial.report`.
Example:
.. testcode::
import optuna
import numpy as np
# We minimize the mean evaluation loss over all the problem instances.
def evaluate(param, instance):
# A toy loss function for demonstrative purpose.
return (param - instance) ** 2
problem_instances = np.linspace(-1, 1, 100)
def objective(trial):
# Sample a parameter.
param = trial.suggest_float("param", -1, 1)
# Evaluate performance of the parameter.
results = []
# For best results, shuffle the evaluation order in each trial.
instance_ids = np.random.permutation(len(problem_instances))
for instance_id in instance_ids:
loss = evaluate(param, problem_instances[instance_id])
results.append(loss)
# Report loss together with the instance id.
# CAVEAT: You need to pass the same id for the same instance,
# otherwise WilcoxonPruner cannot correctly pair the losses across trials and
# the pruning performance will degrade.
trial.report(loss, instance_id)
if trial.should_prune():
# Return the current predicted value instead of raising `TrialPruned`.
# This is a workaround to tell the Optuna about the evaluation
# results in pruned trials. (See the note below.)
return sum(results) / len(results)
return sum(results) / len(results)
study = optuna.create_study(pruner=optuna.pruners.WilcoxonPruner(p_threshold=0.1))
study.optimize(objective, n_trials=100)
.. note::
This pruner cannot handle ``infinity`` or ``nan`` values.
Trials containing those values are never pruned.
.. note::
If :func:`~optuna.trial.FrozenTrial.should_prune` returns :obj:`True`, you can return an
estimation of the final value (e.g., the average of all evaluated
values) instead of ``raise optuna.TrialPruned()``.
This is a workaround for the problem that currently there is no way
to tell Optuna the predicted objective value for trials raising
:class:`optuna.TrialPruned`.
Args:
p_threshold:
The p-value threshold for pruning. This value should be between 0 and 1.
A trial will be pruned whenever the pruner is sure up to the given p-value
that the current trial is worse than the best trial.
The larger this value is, the more aggressive pruning will be performed.
Defaults to 0.1.
.. note::
This pruner repeatedly performs statistical tests between the
current trial and the current best trial with increasing samples.
The false-positive rate of such a sequential test is different from
performing the test only once. To get the nominal false-positive rate,
please specify the Pocock-corrected p-value.
n_startup_steps:
The number of steps before which no trials are pruned.
Pruning starts only after you have ``n_startup_steps`` steps of
available observations for comparison between the current trial
and the best trial.
Defaults to 2. Note that the trial is not pruned at the first and second steps even if
the `n_startup_steps` is set to 0 or 1 due to the lack of enough data for comparison.
""" # NOQA: E501
def __init__(
self,
*,
p_threshold: float = 0.1,
n_startup_steps: int = 2,
) -> None:
if n_startup_steps < 0: # TODO: Consider changing the RHS to 2.
raise ValueError(f"n_startup_steps must be nonnegative but got {n_startup_steps}.")
if not 0.0 <= p_threshold <= 1.0:
raise ValueError(f"p_threshold must be between 0 and 1 but got {p_threshold}.")
self._n_startup_steps = n_startup_steps
self._p_threshold = p_threshold
def prune(self, study: "optuna.study.Study", trial: FrozenTrial) -> bool:
if len(trial.intermediate_values) == 0:
return False
steps, step_values = np.array(list(trial.intermediate_values.items())).T
if np.any(~np.isfinite(step_values)):
optuna_warn(
f"The intermediate values of the current trial (trial {trial.number}) "
f"contain infinity/NaNs. WilcoxonPruner will not prune this trial."
)
return False
try:
best_trial = study.best_trial
except ValueError:
return False
if len(best_trial.intermediate_values) == 0:
optuna_warn(
"The best trial has no intermediate values so WilcoxonPruner cannot prune trials. "
"If you have added the best trial with Study.add_trial, please consider setting "
"intermediate_values argument."
)
return False
best_steps, best_step_values = np.array(list(best_trial.intermediate_values.items())).T
if np.any(~np.isfinite(best_step_values)):
optuna_warn(
f"The intermediate values of the best trial (trial {best_trial.number}) "
f"contain infinity/NaNs. WilcoxonPruner will not prune the current trial."
)
return False
_, idx1, idx2 = np.intersect1d(steps, best_steps, return_indices=True)
if len(idx1) < len(step_values):
# This if-statement is never satisfied if following "average_is_best" safety works,
# because the safety ensures that the best trial always has the all steps.
optuna_warn(
"WilcoxonPruner finds steps existing in the current trial "
"but does not exist in the best trial. "
"Those values are ignored."
)
diff_values = step_values[idx1] - best_step_values[idx2]
if len(diff_values) < max(2, self._n_startup_steps):
return False
alt: Literal["less", "greater"]
if study.direction == StudyDirection.MAXIMIZE:
alt = "less"
average_is_best = sum(best_step_values) / len(best_step_values) <= sum(
step_values
) / len(step_values)
else:
alt = "greater"
average_is_best = sum(best_step_values) / len(best_step_values) >= sum(
step_values
) / len(step_values)
# We use zsplit to avoid the problem when all values are zero.
p = ss.wilcoxon(diff_values, alternative=alt, zero_method="zsplit").pvalue
if p < self._p_threshold and average_is_best:
# ss.wilcoxon found the current trial is probably worse than the best trial,
# but the value of the best trial was not better than
# the average of the current trial's intermediate values.
# For safety, WilcoxonPruner concludes not to prune it for now.
return False
# convert the `np.bool_` to a `builtins.bool`
return bool(p < self._p_threshold)
| WilcoxonPruner |
python | getsentry__sentry | tests/sentry/issues/test_search_issues_dataset.py | {
"start": 192,
"end": 1171
} | class ____(SnubaTestCase, TestCase):
def test_query_dataset_returns_empty(self) -> None:
# make a random query just to verify the table exists
now = datetime.now()
json_body = {
"selected_columns": ["project_id"],
"offset": 0,
"limit": 100,
"project": [1],
"dataset": "search_issues",
"groupby": ["project_id"],
"conditions": [
["project_id", "IN", [2]],
["timestamp", ">=", now - timedelta(minutes=1)],
["timestamp", "<", now + timedelta(minutes=1)],
],
"aggregations": [["count()", "", "count"]],
"consistent": False,
"tenant_ids": {"referrer": "search_issues", "organization_id": 1},
}
request = json_to_snql(json_body, "search_issues")
request.validate()
result = raw_snql_query(request)
assert len(result["data"]) == 0
| DatasetTest |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 211161,
"end": 211840
} | class ____(TestCase):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8 - 1)
wanted = np.dtype("uint8")
assert_equal(wanted, dt)
# three tests below are added based on what numpy does
def test_complex(self):
dt = np.min_scalar_type(0 + 0j)
assert dt == np.dtype("complex64")
def test_float(self):
dt = np.min_scalar_type(0.1)
assert dt == np.dtype("float16")
def test_nonscalar(self):
dt = np.min_scalar_type([0, 1, 2])
assert dt == np.dtype("int64")
from numpy.core._internal import _dtype_from_pep3118
@skip(reason="dont worry about buffer protocol")
| TestMinScalarType |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/scoped_css.py | {
"start": 454,
"end": 669
} | class ____(App):
def compose(self) -> ComposeResult:
yield MyWidget()
yield MyWidget()
yield Label("I should not be styled")
if __name__ == "__main__":
app = MyApp()
app.run()
| MyApp |
python | ray-project__ray | rllib/utils/replay_buffers/prioritized_episode_buffer.py | {
"start": 633,
"end": 33219
} | class ____(EpisodeReplayBuffer):
"""Prioritized Replay Buffer that stores episodes by their ID.
This replay buffer stores episode data (more specifically `SingleAgentEpisode`
objects) and implements prioritized experience replay first proposed
in the paper by Schaul et al. (2016, https://arxiv.org/abs/1511.05952).
Implementation is based on segment trees as suggested by the authors of
the cited paper, i.e. we use proportional prioritization with an order
of O(log N) in updating and sampling.
Each "row" (a slot in a deque) in the buffer is occupied by one episode. If an
incomplete episode is added to the buffer and then another chunk of that episode is
added at a later time, the buffer will automatically concatenate the new fragment to
the original episode. This way, episodes can be completed via subsequent `add`
calls.
Sampling returns a size `B` episode list (number of 'rows'), where each episode
holds a tuple tuple of the form
`(o_t, a_t, sum(r_t+1:t+n), o_t+n)`
where `o_t` is the observation in `t`, `a_t` the action chosen at observation `o_t`,
`o_t+n` is the observation `n` timesteps later and `sum(r_t+1:t+n)` is the sum of
all rewards collected over the time steps between `t+1` and `t+n`. The `n`-step can
be chosen freely when sampling and defaults to `1`. If `n_step` is a tuple it is
sampled uniformly across the interval defined by the tuple (for each row in the
batch).
Each episode contains - in addition to the data tuples presented above - two further
elements in its ` extra_model_outputs`, namely `n_steps` and `weights`. The former
holds the `n_step` used for the sampled timesteps in the episode and the latter the
corresponding (importance sampling) weight for the transition.
After sampling priorities can be updated (for the last sampled episode list) with
`self.update_priorities`. This method assigns the new priorities automatically to
the last sampled timesteps. Note, this implies that sampling timesteps and updating
their corresponding priorities needs to alternate (e.g. sampling several times and
then updating the priorities would not work because the buffer caches the last
sampled timestep indices).
.. testcode::
import gymnasium as gym
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.replay_buffers import (
PrioritizedEpisodeReplayBuffer
)
# Create the environment.
env = gym.make("CartPole-v1")
# Set up the loop variables
terminated = False
truncated = False
num_timesteps = 10000
episodes = []
# Initialize the first episode entries.
eps = SingleAgentEpisode()
obs, info = env.reset()
eps.add_env_reset(obs, info)
# Sample 10,000 timesteps.
for i in range(num_timesteps):
# If terminated we create a new episode.
if terminated:
episodes.append(eps.to_numpy())
eps = SingleAgentEpisode()
obs, info = env.reset()
eps.add_env_reset(obs, info)
action = env.action_space.sample()
obs, reward, terminated, truncated, info = env.step(action)
eps.add_env_step(
obs,
action,
reward,
info,
terminated=terminated,
truncated=truncated
)
# Add the last (truncated) episode to the list of episodes.
if not terminated or truncated:
episodes.append(eps)
# Create the buffer.
buffer = PrioritizedEpisodeReplayBuffer()
# Add the list of episodes sampled.
buffer.add(episodes)
# Pull a sample from the buffer using an `n-step` of 3.
sample = buffer.sample(num_items=256, gamma=0.95, n_step=3)
"""
def __init__(
self,
capacity: int = 10000,
*,
batch_size_B: int = 16,
batch_length_T: int = 1,
alpha: float = 1.0,
metrics_num_episodes_for_smoothing: int = 100,
**kwargs,
):
"""Initializes a `PrioritizedEpisodeReplayBuffer` object
Args:
capacity: The total number of timesteps to be storable in this buffer.
Will start ejecting old episodes once this limit is reached.
batch_size_B: The number of episodes returned from `sample()`.
batch_length_T: The length of each episode in the episode list returned from
`sample()`.
alpha: The amount of prioritization to be used: `alpha=1.0` means full
prioritization, `alpha=0.0` means no prioritization.
"""
super().__init__(
capacity=capacity,
batch_size_B=batch_size_B,
batch_length_T=batch_length_T,
metrics_num_episodes_for_smoothing=metrics_num_episodes_for_smoothing,
)
# `alpha` should be non-negative.
assert alpha >= 0
self._alpha = alpha
# Initialize segment trees for the priority weights. Note, b/c the trees
# are binary we need for them a capacity that is an exponential of 2.
# Double it to enable temporary buffer overflow (we need then free nodes
# in the trees).
tree_capacity = int(2 ** np.ceil(np.log2(self.capacity)))
self._max_priority = 1.0
self._sum_segment = SumSegmentTree(2 * tree_capacity)
self._min_segment = MinSegmentTree(2 * tree_capacity)
# At initialization all nodes are free.
self._free_nodes = deque(
list(range(2 * tree_capacity)), maxlen=2 * tree_capacity
)
# Keep track of the maximum index used from the trees. This helps
# to not traverse the complete trees.
self._max_idx = 0
# Map from tree indices to sample indices (i.e. `self._indices`).
self._tree_idx_to_sample_idx = {}
# Keep track of the indices that were sampled last for updating the
# weights later.
self._last_sampled_indices = []
@override(EpisodeReplayBuffer)
def add(
self,
episodes: Union[List["SingleAgentEpisode"], "SingleAgentEpisode"],
weight: Optional[float] = None,
) -> None:
"""Adds incoming episodes to the replay buffer.
Note, if the incoming episodes' time steps cause the buffer to overflow,
older episodes are evicted. Because episodes usually come in chunks and
not complete, this could lead to edge cases (e.g. with very small capacity
or very long episode length) where the first part of an episode is evicted
while the next part just comes in.
To defend against such case, the complete episode is evicted, including
the new chunk, unless the episode is the only one in the buffer. In the
latter case the buffer will be allowed to overflow in a temporary fashion,
i.e. during the next addition of samples to the buffer an attempt is made
to fall below capacity again.
The user is advised to select a large enough buffer with regard to the maximum
expected episode length.
Args:
episodes: A list of `SingleAgentEpisode`s that contain the episode data.
weight: A starting priority for the time steps in `episodes`. If `None`
the maximum priority is used, i.e. 1.0 (as suggested in the original
paper we scale weights to the interval [0.0, 1.0])..
"""
# TODO (sven, simon): Eventually allow here an array?
if weight is None:
weight = self._max_priority
episodes = force_list(episodes)
# Set up some counters for metrics.
num_env_steps_added = 0
agent_to_num_steps_added = {DEFAULT_AGENT_ID: 0}
module_to_num_steps_added = {DEFAULT_MODULE_ID: 0}
num_episodes_added = 0
agent_to_num_episodes_added = {DEFAULT_AGENT_ID: 0}
module_to_num_episodes_added = {DEFAULT_MODULE_ID: 0}
num_episodes_evicted = 0
agent_to_num_episodes_evicted = {DEFAULT_AGENT_ID: 0}
module_to_num_episodes_evicted = {DEFAULT_MODULE_ID: 0}
num_env_steps_evicted = 0
agent_to_num_steps_evicted = {DEFAULT_AGENT_ID: 0}
module_to_num_steps_evicted = {DEFAULT_MODULE_ID: 0}
# Add first the timesteps of new episodes to have info about how many
# episodes should be evicted to stay below capacity.
new_episode_ids = []
for eps in episodes:
new_episode_ids.append(eps.id_)
self._num_timesteps += len(eps)
self._num_timesteps_added += len(eps)
eps_evicted = []
eps_evicted_ids = []
eps_evicted_idxs = []
while (
self._num_timesteps > self.capacity
and self._num_remaining_episodes(new_episode_ids, eps_evicted_ids) != 1
):
# Evict episode
eps_evicted.append(self.episodes.popleft())
eps_evicted_ids.append(eps_evicted[-1].id_)
eps_evicted_idxs.append(self.episode_id_to_index.pop(eps_evicted_ids[-1]))
num_episodes_evicted += 1
num_env_steps_evicted += len(eps_evicted[-1])
agent_to_num_episodes_evicted[DEFAULT_AGENT_ID] += 1
agent_to_num_steps_evicted[DEFAULT_AGENT_ID] += eps_evicted[
-1
].agent_steps()
module_to_num_episodes_evicted[DEFAULT_MODULE_ID] += 1
module_to_num_steps_evicted[DEFAULT_MODULE_ID] += eps_evicted[
-1
].agent_steps()
# If this episode has a new chunk in the new episodes added,
# we subtract it again.
# TODO (sven, simon): Should we just treat such an episode chunk
# as a new episode?
if eps_evicted_ids[-1] in new_episode_ids:
# TODO (simon): Apply the same logic as in the MA-case.
len_to_subtract = len(
episodes[new_episode_ids.index(eps_evicted_idxs[-1])]
)
self._num_timesteps -= len_to_subtract
self._num_timesteps_added -= len_to_subtract
# Remove the timesteps of the evicted episode from the counter.
self._num_timesteps -= len(eps_evicted[-1])
self._num_episodes_evicted += 1
# Remove corresponding indices, if episodes were evicted.
# TODO (simon): Refactor into method such that MultiAgent
# version can inherit.
if eps_evicted_idxs:
new_indices = []
i = 0
for idx_triple in self._indices:
# If the index comes from an evicted episode free the nodes.
if idx_triple[0] in eps_evicted_idxs:
# Here we need the index of a sample in the segment tree.
self._free_nodes.appendleft(idx_triple[2])
# Also remove the potentially maximum index.
self._max_idx -= 1 if self._max_idx == idx_triple[2] else 0
self._sum_segment[idx_triple[2]] = 0.0
self._min_segment[idx_triple[2]] = float("inf")
self._tree_idx_to_sample_idx.pop(idx_triple[2])
# Otherwise update the index in the index mapping.
else:
new_indices.append(idx_triple)
self._tree_idx_to_sample_idx[idx_triple[2]] = i
i += 1
# Assign the new list of indices.
self._indices = new_indices
# Now append the indices for the new episodes.
j = len(self._indices)
for eps in episodes:
# If the episode chunk is part of an evicted episode continue.
if eps.id_ in eps_evicted_ids:
continue
# Otherwise, add the episode data to the buffer.
else:
eps = copy.deepcopy(eps)
# If the episode is part of an already existing episode, concatenate.
if eps.id_ in self.episode_id_to_index:
eps_idx = self.episode_id_to_index[eps.id_]
existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]
old_len = len(existing_eps)
self._indices.extend(
[
(
eps_idx,
old_len + i,
# Get the index in the segment trees.
self._get_free_node_and_assign(j + i, weight),
)
for i in range(len(eps))
]
)
existing_eps.concat_episode(eps)
# Otherwise, create a new entry.
else:
num_episodes_added += 1
agent_to_num_episodes_added[DEFAULT_AGENT_ID] += 1
module_to_num_episodes_added[DEFAULT_MODULE_ID] += 1
self.episodes.append(eps)
eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted
self.episode_id_to_index[eps.id_] = eps_idx
self._indices.extend(
[
(
eps_idx,
i,
self._get_free_node_and_assign(j + i, weight),
)
for i in range(len(eps))
]
)
num_env_steps_added += len(eps)
agent_to_num_steps_added[DEFAULT_AGENT_ID] += eps.agent_steps()
module_to_num_steps_added[DEFAULT_MODULE_ID] += eps.agent_steps()
# Increase index to the new length of `self._indices`.
j = len(self._indices)
# Increase metrics.
self._update_add_metrics(
num_episodes_added=num_episodes_added,
num_env_steps_added=num_env_steps_added,
num_episodes_evicted=num_episodes_evicted,
num_env_steps_evicted=num_env_steps_evicted,
agent_to_num_episodes_added=agent_to_num_episodes_added,
agent_to_num_steps_added=agent_to_num_steps_added,
agent_to_num_episodes_evicted=agent_to_num_episodes_evicted,
agent_to_num_steps_evicted=agent_to_num_steps_evicted,
module_to_num_episodes_added=module_to_num_steps_added,
module_to_num_steps_added=module_to_num_episodes_added,
module_to_num_episodes_evicted=module_to_num_episodes_evicted,
module_to_num_steps_evicted=module_to_num_steps_evicted,
)
    @override(EpisodeReplayBuffer)
    def sample(
        self,
        num_items: Optional[int] = None,
        *,
        batch_size_B: Optional[int] = None,
        batch_length_T: Optional[int] = None,
        n_step: Optional[Union[int, Tuple]] = None,
        beta: float = 0.0,
        gamma: float = 0.99,
        include_infos: bool = False,
        include_extra_model_outputs: bool = False,
        to_numpy: bool = False,
        **kwargs,
    ) -> SampleBatchType:
        """Samples from a buffer in a prioritized way.

        This sampling method also adds (importance sampling) weights to
        the returned batch. See for prioritized sampling Schaul et al.
        (2016).

        Each sampled item defines a transition of the form:

        `(o_t, a_t, sum(r_(t+1:t+n+1)), o_(t+n), terminated_(t+n), truncated_(t+n))`

        where `o_(t+n)` is drawn by prioritized sampling, i.e. the priority
        of `o_(t+n)` led to the sample and defines the importance weight that
        is returned in the sample batch. `n` is defined by the `n_step` applied.

        If requested, `info`s of a transitions last timestep `t+n` are added to
        the batch.

        Args:
            num_items: Number of items (transitions) to sample from this
                buffer.
            batch_size_B: The number of rows (transitions) to return in the
                batch
            batch_length_T: The sequence length to sample. At this point in time
                only sequences of length 1 are possible.
            n_step: The n-step to apply. For the default the batch contains in
                `"new_obs"` the observation and in `"obs"` the observation `n`
                time steps before. The reward will be the sum of rewards
                collected in between these two observations and the action will
                be the one executed n steps before such that we always have the
                state-action pair that triggered the rewards.
                If `n_step` is a tuple, it is considered as a range to sample
                from. If `None`, we use `n_step=1`.
            beta: The exponent of the importance sampling weight (see Schaul et
                al. (2016)). A `beta=0.0` does not correct for the bias introduced
                by prioritized replay and `beta=1.0` fully corrects for it.
            gamma: The discount factor to be used when applying n-step calculations.
                The default of `0.99` should be replaced by the `Algorithm`s
                discount factor.
            include_infos: A boolean indicating, if `info`s should be included in
                the batch. This could be of advantage, if the `info` contains
                values from the environment important for loss computation. If
                `True`, the info at the `"new_obs"` in the batch is included.
            include_extra_model_outputs: A boolean indicating, if
                `extra_model_outputs` should be included in the batch. This could be
                of advantage, if the `extra_model_outputs` contain outputs from the
                model important for loss computation and only able to compute with the
                actual state of model e.g. action log-probabilities, etc.). If `True`,
                the extra model outputs at the `"obs"` in the batch is included (the
                timestep at which the action is computed).

        Returns:
            A list of 1-step long episodes containing all basic episode data and if
            requested infos and extra model outputs.
        """
        # `beta >= 0` is required; `beta` annealing is the caller's concern.
        assert beta >= 0.0
        if num_items is not None:
            assert batch_size_B is None, (
                "Cannot call `sample()` with both `num_items` and `batch_size_B` "
                "provided! Use either one."
            )
            batch_size_B = num_items
        # Use our default values if no sizes/lengths provided.
        batch_size_B = batch_size_B or self.batch_size_B
        # TODO (simon): Implement trajectory sampling for RNNs.
        batch_length_T = batch_length_T or self.batch_length_T
        # Sample the n-step if necessary.
        actual_n_step = n_step or 1
        # If `n_step` is a tuple, a fresh n-step is drawn per sampled item below.
        random_n_step = isinstance(n_step, tuple)
        # Keep track of the indices that were sampled last for updating the
        # weights later (see `ray.rllib.utils.replay_buffer.utils.
        # update_priorities_in_episode_replay_buffer`).
        self._last_sampled_indices = []
        sampled_episodes = []
        # Record all the env step buffer indices that are contained in the sample.
        sampled_env_step_idxs = set()
        # Record all the episode buffer indices that are contained in the sample.
        sampled_episode_idxs = set()
        # Record all n-steps that have been used.
        sampled_n_steps = []
        # Record the number of times it needs to be resampled.
        num_resamples = 0
        # Sample proportionally from replay buffer's segments using the weights.
        total_segment_sum = self._sum_segment.sum()
        # Smallest possible sampling probability -> largest possible importance
        # weight; used below to normalize all weights into [0, 1].
        p_min = self._min_segment.min() / total_segment_sum
        max_weight = (p_min * self.get_num_timesteps()) ** (-beta)
        B = 0
        while B < batch_size_B:
            # First, draw a random sample from Uniform(0, sum over all weights).
            # Note, transitions with higher weight get sampled more often (as
            # more random draws fall into larger intervals).
            random_sum = self.rng.random() * self._sum_segment.sum()
            # Get the highest index in the sum-tree for which the sum is
            # smaller or equal the random sum sample.
            # Note, in contrast to Schaul et al. (2018) (who sample `o_(t + n_step)`,
            # Algorithm 1) we sample `o_t`.
            idx = self._sum_segment.find_prefixsum_idx(random_sum)
            # Get the theoretical probability mass for drawing this sample.
            p_sample = self._sum_segment[idx] / total_segment_sum
            # Compute the importance sampling weight.
            weight = (p_sample * self.get_num_timesteps()) ** (-beta)
            # Now, get the transition stored at this index.
            index_triple = self._indices[self._tree_idx_to_sample_idx[idx]]
            # Compute the actual episode index (offset by the number of
            # already evicted episodes)
            episode_idx, episode_ts = (
                index_triple[0] - self._num_episodes_evicted,
                index_triple[1],
            )
            episode = self.episodes[episode_idx]
            # If we use random n-step sampling, draw the n-step for this item.
            if random_n_step:
                actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
            # Skip, if we are too far to the end and `episode_ts` + n_step would go
            # beyond the episode's end.
            if episode_ts + actual_n_step > len(episode):
                num_resamples += 1
                continue
            # Note, this will be the reward after executing action
            # `a_(episode_ts-n_step+1)`. For `n_step>1` this will be the discounted
            # sum of all discounted rewards that were collected over the last n steps.
            raw_rewards = episode.get_rewards(
                slice(episode_ts, episode_ts + actual_n_step)
            )
            # An IIR filter over the reversed rewards computes the discounted
            # return; the last output equals sum_i gamma^i * r_(t+i).
            rewards = scipy.signal.lfilter([1], [1, -gamma], raw_rewards[::-1], axis=0)[
                -1
            ]
            # Generate the episode to be returned.
            sampled_episode = SingleAgentEpisode(
                # Ensure that each episode contains a tuple of the form:
                #   (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
                # Two observations (t and t+n).
                observations=[
                    episode.get_observations(episode_ts),
                    episode.get_observations(episode_ts + actual_n_step),
                ],
                observation_space=episode.observation_space,
                infos=(
                    [
                        episode.get_infos(episode_ts),
                        episode.get_infos(episode_ts + actual_n_step),
                    ]
                    if include_infos
                    else None
                ),
                actions=[episode.get_actions(episode_ts)],
                action_space=episode.action_space,
                rewards=[rewards],
                # If the sampled time step is the episode's last time step check, if
                # the episode is terminated or truncated.
                terminated=(
                    False
                    if episode_ts + actual_n_step < len(episode)
                    else episode.is_terminated
                ),
                truncated=(
                    False
                    if episode_ts + actual_n_step < len(episode)
                    else episode.is_truncated
                ),
                extra_model_outputs={
                    # TODO (simon): Check, if we have to correct here for sequences
                    # later.
                    "weights": [weight / max_weight * 1],  # actual_size=1
                    "n_step": [actual_n_step],
                    **(
                        {
                            k: [episode.get_extra_model_outputs(k, episode_ts)]
                            for k in episode.extra_model_outputs.keys()
                        }
                        if include_extra_model_outputs
                        else {}
                    ),
                },
                # TODO (sven): Support lookback buffers.
                len_lookback_buffer=0,
                t_started=episode_ts,
            )
            # Record here the episode time step via a hash code.
            sampled_env_step_idxs.add(
                hashlib.sha256(f"{episode.id_}-{episode_ts}".encode()).hexdigest()
            )
            # Convert to numpy arrays, if required.
            if to_numpy:
                sampled_episode.to_numpy()
            sampled_episodes.append(sampled_episode)
            # Add the episode buffer index to the sampled indices.
            sampled_episode_idxs.add(episode_idx)
            # Record the actual n-step for this sample.
            sampled_n_steps.append(actual_n_step)
            # Increment counter.
            B += 1
            # Keep track of sampled indices for updating priorities later.
            self._last_sampled_indices.append(idx)
        # Add to the sampled timesteps counter of the buffer.
        self.sampled_timesteps += batch_size_B
        # Update the sample metrics. This is a single-agent buffer, so agent/
        # module breakdowns simply mirror the totals under the default IDs.
        num_env_steps_sampled = batch_size_B
        num_episodes_per_sample = len(sampled_episode_idxs)
        num_env_steps_per_sample = len(sampled_env_step_idxs)
        # Mean n-step actually used across the batch.
        sampled_n_step = sum(sampled_n_steps) / batch_size_B
        agent_to_num_steps_sampled = {DEFAULT_AGENT_ID: num_env_steps_sampled}
        agent_to_num_episodes_per_sample = {DEFAULT_AGENT_ID: num_episodes_per_sample}
        agent_to_num_steps_per_sample = {DEFAULT_AGENT_ID: num_env_steps_per_sample}
        agent_to_sampled_n_step = {DEFAULT_AGENT_ID: sampled_n_step}
        agent_to_num_resamples = {DEFAULT_AGENT_ID: num_resamples}
        module_to_num_steps_sampled = {DEFAULT_MODULE_ID: num_env_steps_sampled}
        module_to_num_episodes_per_sample = {DEFAULT_MODULE_ID: num_episodes_per_sample}
        module_to_num_steps_per_sample = {DEFAULT_MODULE_ID: num_env_steps_per_sample}
        module_to_sampled_n_step = {DEFAULT_MODULE_ID: sampled_n_step}
        module_to_num_resamples = {DEFAULT_MODULE_ID: num_resamples}
        self._update_sample_metrics(
            num_env_steps_sampled=num_env_steps_sampled,
            num_episodes_per_sample=num_episodes_per_sample,
            num_env_steps_per_sample=num_env_steps_per_sample,
            sampled_n_step=sampled_n_step,
            num_resamples=num_resamples,
            agent_to_num_steps_sampled=agent_to_num_steps_sampled,
            agent_to_num_episodes_per_sample=agent_to_num_episodes_per_sample,
            agent_to_num_steps_per_sample=agent_to_num_steps_per_sample,
            agent_to_sampled_n_step=agent_to_sampled_n_step,
            agent_to_num_resamples=agent_to_num_resamples,
            module_to_num_steps_sampled=module_to_num_steps_sampled,
            module_to_num_episodes_per_sample=module_to_num_episodes_per_sample,
            module_to_num_steps_per_sample=module_to_num_steps_per_sample,
            module_to_sampled_n_step=module_to_sampled_n_step,
            module_to_num_resamples=module_to_num_resamples,
        )
        return sampled_episodes
@override(EpisodeReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Gets the state of a `PrioritizedEpisodeReplayBuffer`.
Returns:
A state dict that can be stored in a checkpoint.
"""
# Get super's state.
state = super().get_state()
# Add additional attributes.
state.update(
{
"_sum_segment": self._sum_segment.get_state(),
"_min_segment": self._min_segment.get_state(),
"_free_nodes": list(self._free_nodes),
"_max_priority": self._max_priority,
"_max_idx": self._max_idx,
"_tree_idx_to_sample_idx": list(self._tree_idx_to_sample_idx.items()),
# TODO (sven, simon): Do we need these?
"_last_sampled_indices": self._last_sampled_indices,
}
)
return state
@override(EpisodeReplayBuffer)
def set_state(self, state) -> None:
"""Sets the state of a `PrioritizedEpisodeReplayBuffer`.
Args:
state: A buffer state stored (usually stored in a checkpoint).
"""
# Set super's state.
super().set_state(state)
# Set additional attributes.
self._sum_segment.set_state(state["_sum_segment"])
self._min_segment.set_state(state["_min_segment"])
self._free_nodes = deque(state["_free_nodes"])
self._max_priority = state["_max_priority"]
self._max_idx = state["_max_idx"]
self._tree_idx_to_sample_idx = dict(state["_tree_idx_to_sample_idx"])
# TODO (sven, simon): Do we need these?
self._last_sampled_indices = state["_last_sampled_indices"]
def update_priorities(
self, priorities: NDArray, module_id: Optional[ModuleID] = None
) -> None:
"""Update the priorities of items at corresponding indices.
Usually, incoming priorities are TD-errors.
Args:
priorities: Numpy array containing the new priorities to be used
in sampling for the items in the last sampled batch.
"""
assert len(priorities) == len(self._last_sampled_indices)
for idx, priority in zip(self._last_sampled_indices, priorities):
# Note, TD-errors come in as absolute values or results from
# cross-entropy loss calculations.
# assert priority > 0, f"priority was {priority}"
priority = max(priority, 1e-12)
assert 0 <= idx < self._sum_segment.capacity
# TODO (simon): Create metrics.
# delta = priority**self._alpha - self._sum_segment[idx]
# Update the priorities in the segment trees.
self._sum_segment[idx] = priority**self._alpha
self._min_segment[idx] = priority**self._alpha
# Update the maximal priority.
self._max_priority = max(self._max_priority, priority)
self._last_sampled_indices.clear()
def _get_free_node_and_assign(self, sample_index, weight: float = 1.0) -> int:
"""Gets the next free node in the segment trees.
In addition the initial priorities for a new transition are added
to the segment trees and the index of the nodes is added to the
index mapping.
Args:
sample_index: The index of the sample in the `self._indices` list.
weight: The initial priority weight to be used in sampling for
the item at index `sample_index`.
Returns:
The index in the segment trees `self._sum_segment` and
`self._min_segment` for the item at index `sample_index` in
``self._indices`.
"""
# Get an index from the free nodes in the segment trees.
idx = self._free_nodes.popleft()
self._max_idx = idx if idx > self._max_idx else self._max_idx
# Add the weight to the segments.
self._sum_segment[idx] = weight**self._alpha
self._min_segment[idx] = weight**self._alpha
# Map the index in the trees to the index in `self._indices`.
self._tree_idx_to_sample_idx[idx] = sample_index
# Return the index.
return idx
def _num_remaining_episodes(self, new_eps, evicted_eps):
"""Calculates the number of remaining episodes.
When adding episodes and evicting them in the `add()` method
this function calculates iteratively the number of remaining
episodes.
Args:
new_eps: List of new episode IDs.
evicted_eps: List of evicted episode IDs.
Returns:
Number of episodes remaining after evicting the episodes in
`evicted_eps` and adding the episode in `new_eps`.
"""
return len(
set(self.episode_id_to_index.keys()).union(set(new_eps)) - set(evicted_eps)
)
| PrioritizedEpisodeReplayBuffer |
python | django__django | tests/admin_inlines/admin.py | {
"start": 5427,
"end": 5579
} | class ____(forms.ModelForm):
extra_field = forms.CharField()
class Meta:
model = ShoppingWeakness
fields = "__all__"
| WeaknessForm |
python | bokeh__bokeh | src/bokeh/models/dom.py | {
"start": 2063,
"end": 2288
} | class ____(Model, Qualified):
""" Base class for DOM nodes. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| DOMNode |
python | jina-ai__jina | jina/serve/networking/instrumentation.py | {
"start": 214,
"end": 494
} | class ____:
"""
dataclass that contain the metrics used in the networking part
"""
sending_requests_time_metrics: Optional['Summary']
received_response_bytes: Optional['Summary']
send_requests_bytes_metrics: Optional['Summary']
@dataclass
| _NetworkingMetrics |
python | wandb__wandb | wandb/automations/events.py | {
"start": 8106,
"end": 8793
} | class ____(_BaseMutationEventInput):
"""A new artifact is linked to a collection.
Examples:
Define an event that triggers when an artifact is linked to the
collection "my-collection" with the alias "prod":
```python
from wandb import Api
from wandb.automations import OnLinkArtifact, ArtifactEvent
api = Api()
collection = api.artifact_collection(name="my-collection", type_name="model")
event = OnLinkArtifact(
scope=collection,
filter=ArtifactEvent.alias.eq("prod"),
)
```
"""
event_type: Literal[EventType.LINK_ARTIFACT] = EventType.LINK_ARTIFACT
| OnLinkArtifact |
python | django__django | tests/csrf_tests/tests.py | {
"start": 7325,
"end": 7653
} | class ____(HttpRequest):
"""
A version of HttpRequest that lets one track and change some things more
easily.
"""
def __init__(self):
super().__init__()
self.session = TestingSessionStore()
def is_secure(self):
return getattr(self, "_is_secure_override", False)
| TestingHttpRequest |
python | getsentry__sentry | tests/sentry/middleware/test_ratelimit_middleware.py | {
"start": 13190,
"end": 13560
} | class ____(Endpoint):
permission_classes = (AllowAny,)
enforce_rate_limit = False
rate_limits = RateLimitConfig(
limit_overrides={"GET": {RateLimitCategory.IP: RateLimit(limit=40, window=100)}}
)
def get(self, request):
return Response({"ok": True})
CONCURRENT_RATE_LIMIT = 3
CONCURRENT_ENDPOINT_DURATION = 0.2
| RaceConditionEndpoint |
python | pytorch__pytorch | torch/distributed/_composable/replicate.py | {
"start": 343,
"end": 5307
} | class ____(_State):
_ddp_weakref: weakref.ref
def __init__(self) -> None:
super().__init__()
self.module: nn.Module = nn.ParameterList()
self.has_initialized: bool = False
self._param_list: nn.ParameterList = nn.ParameterList()
# TODO(@fegin): this variable is originally create for testing, we
# should remove this if possible.
self._orig_module = self.module
self._param_names: list[str] = []
self._no_sync: bool = False
self._init_args: Optional[tuple[Any, ...]] = None
self._init_kwargs: dict[str, Any] = {}
self._comm_hook_args: list[Any] = []
def _collect_params(
self,
module: nn.Module,
ignored_modules: set[nn.Module],
ignored_params: set[nn.Parameter],
prefix: str = _ROOT_MODULE_PREFIX,
) -> None:
# skip if managed by fully_sharded API
if _is_fully_sharded(module):
return
# if a module is ignored, all descendants of the module are ignored.
if module in ignored_modules:
return
recurse_prefix = (
f"{prefix}." if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX
)
for n, p in module.named_parameters(recurse=False):
if p not in ignored_params:
self._param_list.append(p)
self._param_names.append(f"{recurse_prefix}{n}")
for name, child_module in module.named_children():
self._collect_params(
child_module,
ignored_modules,
ignored_params,
prefix=f"{recurse_prefix}{name}",
)
def lazy_init(self) -> None:
@torch._disable_dynamo(recursive=True)
def _lazy_init():
assert self._init_args is not None
self.init(*self._init_args, **self._init_kwargs)
self.register_comm_hook()
self._init_args = ()
self._init_kwargs = {}
_lazy_init()
def init(
self,
module: nn.Module,
ignored_modules: set[nn.Module],
**kwargs,
) -> None:
if self.has_initialized:
return
self.has_initialized = True
self.module = module
ignored_params = {p for m in ignored_modules for p in m.parameters()}
for submodule in module.modules():
if _is_fully_sharded(submodule):
ignored_params.update(submodule.parameters())
from torch.distributed.tensor.parallel.ddp import _localize_dtensor
_localize_dtensor(module, ignored_params=ignored_params)
self._collect_params(module, ignored_modules, ignored_params)
if "device_id" in kwargs:
# replicate() supports a small usability enhancement where
# user can pass in device_id as a Union[int, torch.device] even for
# CPU devices so users don't have to change code for CPU/GPU runs.
# We derive the right device_ids to feed into DDP to support this.
if kwargs["device_id"] is not None:
device_id = kwargs["device_id"]
# Convert to device_ids that DDP expects.
if isinstance(device_id, torch.device) and device_id.type == "cpu":
# CPU modules receive device_ids None
kwargs["device_ids"] = None
else:
# GPU modules expect device_ids=[cuda_device]
kwargs["device_ids"] = [device_id]
else:
kwargs["device_ids"] = None
kwargs.pop("device_id")
self._ddp = DistributedDataParallel(self._param_list, **kwargs)
# Weakref to the DDP instance is currently only used for testing.
replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)
def register_comm_hook(self) -> None:
for comm_args, comm_kwargs in self._comm_hook_args:
self._ddp.register_comm_hook(*comm_args, **comm_kwargs)
self._comm_hook_args.clear()
def record_init_args(self, *args, **kwargs) -> None:
self._init_args = args
self._init_kwargs = kwargs
def forward_pre_hook(
self, module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> Any:
if self._init_args or self._init_kwargs:
self.lazy_init()
self._ddp.require_backward_grad_sync = not self._no_sync
return self._ddp._pre_forward(*args, **kwargs)
def forward_post_hook(
self,
module: nn.Module,
input: tuple[torch.Tensor],
output: torch.Tensor,
) -> torch.Tensor:
return self._ddp._post_forward(output)
def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> NoReturn:
raise AssertionError(
"DDP does not support deepcopy. Please use state dict for serialization."
)
# Follow the same pattern as FSDP/fully_shard
| _ReplicateState |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 310593,
"end": 311044
} | class ____(sgqlc.types.Input):
"""Ordering options for team member connections"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(TeamMemberOrderField), graphql_name="field")
"""The field to order team members by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| TeamMemberOrder |
python | scikit-learn__scikit-learn | sklearn/multioutput.py | {
"start": 40251,
"end": 45544
} | class ____(MetaEstimatorMixin, RegressorMixin, _BaseChain):
"""A multi-label model that arranges regressions into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
Read more in the :ref:`User Guide <regressorchain>`.
.. versionadded:: 0.20
Parameters
----------
estimator : estimator
The base estimator from which the regressor chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If `None`, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is 'random' a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : bool, default=False
If True, chain progress is output as each model is completed.
.. versionadded:: 1.2
base_estimator : estimator, default="deprecated"
Use `estimator` instead.
.. deprecated:: 1.7
`base_estimator` is deprecated and will be removed in 1.9.
Use `estimator` instead.
Attributes
----------
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying `base_estimator` exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
ClassifierChain : Equivalent for classification.
MultiOutputRegressor : Learns each output independently rather than
chaining.
Examples
--------
>>> from sklearn.multioutput import RegressorChain
>>> from sklearn.linear_model import LogisticRegression
>>> logreg = LogisticRegression(solver='lbfgs')
>>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]]
>>> chain = RegressorChain(logreg, order=[0, 1]).fit(X, Y)
>>> chain.predict(X)
array([[0., 2.],
[1., 1.],
[2., 0.]])
"""
@_fit_context(
# RegressorChain.base_estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method at each step
of the regressor chain.
.. versionadded:: 0.23
Returns
-------
self : object
Returns a fitted instance.
"""
super().fit(X, Y, **fit_params)
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.3
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
estimator=self._get_estimator(),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
return router
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.target_tags.single_output = False
tags.target_tags.multi_output = True
return tags
| RegressorChain |
python | pypa__pip | src/pip/_vendor/rich/live.py | {
"start": 676,
"end": 1249
} | class ____(Thread):
"""A thread that calls refresh() at regular intervals."""
def __init__(self, live: "Live", refresh_per_second: float) -> None:
self.live = live
self.refresh_per_second = refresh_per_second
self.done = Event()
super().__init__(daemon=True)
def stop(self) -> None:
self.done.set()
def run(self) -> None:
while not self.done.wait(1 / self.refresh_per_second):
with self.live._lock:
if not self.done.is_set():
self.live.refresh()
| _RefreshThread |
python | huggingface__transformers | src/transformers/models/xmod/modeling_xmod.py | {
"start": 35592,
"end": 40513
} | class ____(XmodPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.__init__ with Roberta->Xmod
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `XmodLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = XmodModel(config, add_pooling_layer=False)
self.lm_head = XmodLMHead(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.get_output_embeddings
def get_output_embeddings(self):
return self.lm_head.decoder
# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.set_output_embeddings
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
lang_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
lang_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of the language adapters that should be activated for each sample, respectively. Default: the index
that corresponds to `self.config.default_language`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, XmodForCausalLM, AutoConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
>>> config = AutoConfig.from_pretrained("facebook/xmod-base")
>>> config.is_decoder = True
>>> model = XmodForCausalLM.from_pretrained("facebook/xmod-base", config=config)
>>> model.set_default_language("en_XX")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta(
input_ids,
lang_ids=lang_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring
| XmodForCausalLM |
python | numpy__numpy | numpy/lib/tests/test_arraypad.py | {
"start": 36257,
"end": 37287
} | class ____:
"""Check how padding behaves on arrays with an empty dimension."""
@pytest.mark.parametrize(
# Keep parametrization ordered, otherwise pytest-xdist might believe
# that different tests were collected during parallelization
"mode", sorted(_all_modes.keys() - {"constant", "empty"})
)
def test_pad_empty_dimension(self, mode):
match = ("can't extend empty axis 0 using modes other than 'constant' "
"or 'empty'")
with pytest.raises(ValueError, match=match):
np.pad([], 4, mode=mode)
with pytest.raises(ValueError, match=match):
np.pad(np.ndarray(0), 4, mode=mode)
with pytest.raises(ValueError, match=match):
np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_pad_non_empty_dimension(self, mode):
result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
assert result.shape == (8, 0, 4)
| TestEmptyArray |
python | gevent__gevent | src/greentest/3.12/test_threading.py | {
"start": 3561,
"end": 44318
} | class ____(BaseTestCase):
maxDiff = 9999
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
def test_args_argument(self):
# bpo-45735: Using list or tuple as *args* in constructor could
# achieve the same effect.
num_list = [1]
num_tuple = (1,)
str_list = ["str"]
str_tuple = ("str",)
list_in_tuple = ([1],)
tuple_in_list = [(1,)]
test_cases = (
(num_list, lambda arg: self.assertEqual(arg, 1)),
(num_tuple, lambda arg: self.assertEqual(arg, 1)),
(str_list, lambda arg: self.assertEqual(arg, "str")),
(str_tuple, lambda arg: self.assertEqual(arg, "str")),
(list_in_tuple, lambda arg: self.assertEqual(arg, [1])),
(tuple_in_list, lambda arg: self.assertEqual(arg, (1,)))
)
for args, target in test_cases:
with self.subTest(target=target, args=args):
t = threading.Thread(target=target, args=args)
t.start()
t.join()
@cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
lock = threading.Lock()
test.support.check_disallow_instantiation(self, type(lock))
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.current_thread().ident)
def f():
ident.append(threading.current_thread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
if support.check_sanitizer(thread=True):
# the thread running `time.sleep(100)` below will still be alive
# at process exit
self.skipTest("TSAN would report thread leak")
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
if support.check_sanitizer(thread=True):
# the thread running `time.sleep(2)` below will still be alive
# at process exit
self.skipTest("TSAN would report thread leak")
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
restore_default_excepthook(self)
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
with self.assertWarnsRegex(DeprecationWarning,
r'get the daemon attribute'):
t.isDaemon()
with self.assertWarnsRegex(DeprecationWarning,
r'set the daemon attribute'):
t.setDaemon(True)
with self.assertWarnsRegex(DeprecationWarning,
r'get the name attribute'):
t.getName()
with self.assertWarnsRegex(DeprecationWarning,
r'set the name attribute'):
t.setName("name")
e = threading.Event()
with self.assertWarnsRegex(DeprecationWarning, 'use is_set()'):
e.isSet()
cond = threading.Condition()
cond.acquire()
with self.assertWarnsRegex(DeprecationWarning, 'use notify_all()'):
cond.notifyAll()
with self.assertWarnsRegex(DeprecationWarning, 'use active_count()'):
threading.activeCount()
with self.assertWarnsRegex(DeprecationWarning, 'use current_thread()'):
threading.currentThread()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@skip_unless_reliable_fork
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time, warnings
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
with warnings.catch_warnings(record=True) as ws:
warnings.filterwarnings(
"always", category=DeprecationWarning)
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
assert ws[0].category == DeprecationWarning, ws[0]
assert 'fork' in str(ws[0].message), ws[0]
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@skip_unless_reliable_fork
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
# Ignore the warning about fork with threads.
with warnings.catch_warnings(category=DeprecationWarning,
action="ignore"):
if (pid := os.fork()) == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@skip_unless_reliable_fork
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
ident = threading.get_ident()
pid = os.fork()
if pid == 0:
print("current ident", threading.get_ident() == ident)
main = threading.main_thread()
print("main", main.name)
print("main ident", main.ident == ident)
print("current is main", threading.current_thread() is main)
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data,
"current ident True\n"
"main MainThread\n"
"main ident True\n"
"current is main True\n")
@skip_unless_reliable_fork
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys, warnings
from test import support
def func():
ident = threading.get_ident()
with warnings.catch_warnings(record=True) as ws:
warnings.filterwarnings(
"always", category=DeprecationWarning)
pid = os.fork()
if pid == 0:
print("current ident", threading.get_ident() == ident)
main = threading.main_thread()
print("main", main.name, type(main).__name__)
print("main ident", main.ident == ident)
print("current is main", threading.current_thread() is main)
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
assert ws[0].category == DeprecationWarning, ws[0]
assert 'fork' in str(ws[0].message), ws[0]
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=func)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err.decode('utf-8'), "")
self.assertEqual(data,
"current ident True\n"
"main Thread-1 (func) Thread\n"
"main ident True\n"
"current is main True\n"
)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@support.requires_fork()
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_foreign_thread(self, create_dummy=False):
code = """if 1:
import os, threading, sys, traceback, _thread
from test import support
def func(lock):
ident = threading.get_ident()
if %s:
# call current_thread() before fork to allocate DummyThread
current = threading.current_thread()
print("current", current.name, type(current).__name__)
print("ident in _active", ident in threading._active)
# flush before fork, so child won't flush it again
sys.stdout.flush()
pid = os.fork()
if pid == 0:
print("current ident", threading.get_ident() == ident)
main = threading.main_thread()
print("main", main.name, type(main).__name__)
print("main ident", main.ident == ident)
print("current is main", threading.current_thread() is main)
print("_dangling", [t.name for t in list(threading._dangling)])
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
try:
threading._shutdown()
os._exit(0)
except:
traceback.print_exc()
sys.stderr.flush()
os._exit(1)
else:
try:
support.wait_process(pid, exitcode=0)
except Exception:
# avoid 'could not acquire lock for
# <_io.BufferedWriter name='<stderr>'> at interpreter shutdown,'
traceback.print_exc()
sys.stderr.flush()
finally:
lock.release()
join_lock = _thread.allocate_lock()
join_lock.acquire()
th = _thread.start_new_thread(func, (join_lock,))
join_lock.acquire()
""" % create_dummy
# "DeprecationWarning: This process is multi-threaded, use of fork()
# may lead to deadlocks in the child"
_, out, err = assert_python_ok("-W", "ignore::DeprecationWarning", "-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err.decode(), "")
self.assertEqual(data,
("current Dummy-1 _DummyThread\n" if create_dummy else "") +
f"ident in _active {create_dummy!s}\n" +
"current ident True\n"
"main MainThread _MainThread\n"
"main ident True\n"
"current is main True\n"
"_dangling ['MainThread']\n")
def test_main_thread_after_fork_from_dummy_thread(self, create_dummy=False):
self.test_main_thread_after_fork_from_foreign_thread(create_dummy=True)
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
threading.settrace(old_trace)
def test_gettrace(self):
def noop_trace(frame, event, arg):
# no operation
return noop_trace
old_trace = threading.gettrace()
try:
threading.settrace(noop_trace)
trace_func = threading.gettrace()
self.assertEqual(noop_trace,trace_func)
finally:
threading.settrace(old_trace)
def test_gettrace_all_threads(self):
def fn(*args): pass
old_trace = threading.gettrace()
first_check = threading.Event()
second_check = threading.Event()
trace_funcs = []
def checker():
trace_funcs.append(sys.gettrace())
first_check.set()
second_check.wait()
trace_funcs.append(sys.gettrace())
try:
t = threading.Thread(target=checker)
t.start()
first_check.wait()
threading.settrace_all_threads(fn)
second_check.set()
t.join()
self.assertEqual(trace_funcs, [None, fn])
self.assertEqual(threading.gettrace(), fn)
self.assertEqual(sys.gettrace(), fn)
finally:
threading.settrace_all_threads(old_trace)
self.assertEqual(threading.gettrace(), old_trace)
self.assertEqual(sys.gettrace(), old_trace)
def test_getprofile(self):
def fn(*args): pass
old_profile = threading.getprofile()
try:
threading.setprofile(fn)
self.assertEqual(fn, threading.getprofile())
finally:
threading.setprofile(old_profile)
def test_getprofile_all_threads(self):
def fn(*args): pass
old_profile = threading.getprofile()
first_check = threading.Event()
second_check = threading.Event()
profile_funcs = []
def checker():
profile_funcs.append(sys.getprofile())
first_check.set()
second_check.wait()
profile_funcs.append(sys.getprofile())
try:
t = threading.Thread(target=checker)
t.start()
first_check.wait()
threading.setprofile_all_threads(fn)
second_check.set()
t.join()
self.assertEqual(profile_funcs, [None, fn])
self.assertEqual(threading.getprofile(), fn)
self.assertEqual(sys.getprofile(), fn)
finally:
threading.setprofile_all_threads(old_profile)
self.assertEqual(threading.getprofile(), old_profile)
self.assertEqual(sys.getprofile(), old_profile)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_boolean_target(self):
# bpo-41149: A thread that had a boolean value of False would not
# run, regardless of whether it was callable. The correct behaviour
# is for a thread to do nothing if its target is None, and to call
# the target otherwise.
class BooleanTarget(object):
def __init__(self):
self.ran = False
def __bool__(self):
return False
def __call__(self):
self.ran = True
target = BooleanTarget()
thread = threading.Thread(target=target)
thread.start()
thread.join()
self.assertTrue(target.ran)
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with threading_helper.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
def test_import_from_another_thread(self):
# bpo-1596321: If the threading module is first import from a thread
# different than the main thread, threading._shutdown() must handle
# this case without logging an error at Python exit.
code = textwrap.dedent('''
import _thread
import sys
event = _thread.allocate_lock()
event.acquire()
def import_threading():
import threading
event.release()
if 'threading' in sys.modules:
raise Exception('threading is already imported')
_thread.start_new_thread(import_threading, ())
# wait until the threading module is imported
event.acquire()
event.release()
if 'threading' not in sys.modules:
raise Exception('threading is not imported')
# don't wait until the thread completes
''')
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def test_start_new_thread_at_finalization(self):
code = """if 1:
import _thread
def f():
print("shouldn't be printed")
class AtFinalization:
def __del__(self):
print("OK")
_thread.start_new_thread(f, ())
at_finalization = AtFinalization()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out.strip(), b"OK")
self.assertIn(b"can't create new thread at interpreter shutdown", err)
def test_start_new_thread_failed(self):
# gh-109746: if Python fails to start newly created thread
# due to failure of underlying PyThread_start_new_thread() call,
# its state should be removed from interpreter' thread states list
# to avoid its double cleanup
try:
from resource import setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
code = """if 1:
import resource
import _thread
def f():
print("shouldn't be printed")
limits = resource.getrlimit(resource.RLIMIT_NPROC)
[_, hard] = limits
resource.setrlimit(resource.RLIMIT_NPROC, (0, hard))
try:
_thread.start_new_thread(f, ())
except RuntimeError:
print('ok')
else:
print('!skip!')
"""
_, out, err = assert_python_ok("-u", "-c", code)
out = out.strip()
if b'!skip!' in out:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
self.assertEqual(out, b'ok')
self.assertEqual(err, b'')
| ThreadTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 265537,
"end": 265827
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("viewer_can_delete",)
viewer_can_delete = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanDelete"
)
| Deletable |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 17474,
"end": 17586
} | class ____(Psycopg2ConnectionPool, TpoolConnectionPool, TestPsycopg2Base):
__test__ = True
| Test01Psycopg2Tpool |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/unit_tests/integration/test_availability_strategy.py | {
"start": 422,
"end": 3389
} | class ____(TestCase):
def setUp(self) -> None:
self._strategy = SalesforceAvailabilityStrategy()
def test_handle_http_error_with_json_decode_error_then_raise_exception(self) -> None:
mock_response = Mock()
mock_response.json.side_effect = exceptions.JSONDecodeError("Expecting value", "<html>Error</html>", 0)
http_error = exceptions.HTTPError(response=mock_response)
http_error.response.status_code = 403
with pytest.raises(exceptions.HTTPError) as exception:
self._strategy.handle_http_error(Mock(), Mock(), Mock(), http_error)
assert type(exception.value.__cause__) == exceptions.JSONDecodeError
assert type(exception.value) == exceptions.HTTPError
assert exception.value == http_error
def test_handle_http_error_with_forbidden_and_request_limit_exceeded_error_code_then_return_tuple(self) -> None:
mock_response = Mock()
mock_response.json.return_value = [{"errorCode": "REQUEST_LIMIT_EXCEEDED"}]
http_error = exceptions.HTTPError(response=mock_response)
http_error.response.status_code = 403
stream = Mock()
stream.name = _STREAM_NAME
output = self._strategy.handle_http_error(stream, Mock(), Mock(), http_error)
assert output == (True, None)
def test_handle_http_error_with_bad_request_and_request_limit_exceeded_error_code_then_return_tuple(self) -> None:
mock_response = Mock()
mock_response.json.return_value = [{"errorCode": "REQUEST_LIMIT_EXCEEDED"}]
http_error = exceptions.HTTPError(response=mock_response)
http_error.response.status_code = 400
stream = Mock()
stream.name = _STREAM_NAME
output = self._strategy.handle_http_error(stream, Mock(), Mock(), http_error)
assert output == (True, None)
def test_handle_http_error_with_other_error_code_then_return_tuple(self) -> None:
mock_response = Mock()
mock_response.json.return_value = [{"errorCode": "OTHER_ERROR_CODE", "message": "OTHER_ERROR_MESSAGE"}]
http_error = exceptions.HTTPError(response=mock_response)
http_error.response.status_code = 403
stream = Mock()
stream.name = _STREAM_NAME
output = self._strategy.handle_http_error(stream, Mock(), Mock(), http_error)
assert output == (False, f"Cannot receive data for stream '{_STREAM_NAME}', error message: 'OTHER_ERROR_MESSAGE'")
def test_handle_http_error_with_server_error_code_then_raise_exception(self) -> None:
mock_response = Mock()
http_error = exceptions.HTTPError(response=mock_response)
http_error.response.status_code = 500
with pytest.raises(exceptions.HTTPError) as exception:
self._strategy.handle_http_error(Mock(), Mock(), Mock(), http_error)
assert type(exception.value) == exceptions.HTTPError
assert exception.value == http_error
| AvailabilityStrategyTest |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 4798,
"end": 5135
} | class ____(TypedDict):
type: Literal["datetime"]
format: NotRequired[
str | Literal["localized", "distance", "calendar", "iso8601"] | None
]
min_value: NotRequired[str | None]
max_value: NotRequired[str | None]
step: NotRequired[int | float | None]
timezone: NotRequired[str | None]
| DatetimeColumnConfig |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 2281,
"end": 2360
} | class ____(AutoEnum):
FLOW_RUN = "flow_run"
TASK_RUN = "task_run"
| RunType |
python | streamlit__streamlit | lib/tests/streamlit/web/server/app_static_file_handler_test.py | {
"start": 1178,
"end": 9786
} | class ____(tornado.testing.AsyncHTTPTestCase):
def setUp(self) -> None:
self._tmpdir = tempfile.TemporaryDirectory(dir=os.getcwd())
self._tmpfile = tempfile.NamedTemporaryFile(dir=self._tmpdir.name, delete=False)
self._tmp_js_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="script.js", delete=False
)
self._tmp_webp_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.webp", delete=False
)
self._tmp_png_image_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="image.png", delete=False
)
self._tmp_jpeg_image_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="image.jpeg", delete=False
)
self._tmp_jpg_image_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="image.jpg", delete=False
)
self._tmp_pdf_document_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="document.pdf", delete=False
)
self._tmp_webp_image_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="image.webp", delete=False
)
self._tmp_woff2_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.woff2", delete=False
)
self._tmp_woff_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.woff", delete=False
)
self._tmp_ttf_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.ttf", delete=False
)
self._tmp_otf_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.otf", delete=False
)
self._tmp_xml_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.xml", delete=False
)
self._tmp_json_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.json", delete=False
)
self._tmp_dir_inside_static_folder = tempfile.TemporaryDirectory(
dir=self._tmpdir.name
)
self._symlink_outside_directory = "symlink_outside"
self._symlink_inside_directory = "symlink_inside"
os.symlink(
"/", os.path.join(self._tmpdir.name, self._symlink_outside_directory)
)
os.symlink(
self._tmpfile.name,
os.path.join(self._tmpdir.name, self._symlink_inside_directory),
)
self._temp_filenames = {
"js": os.path.basename(self._tmp_js_file.name),
"png": os.path.basename(self._tmp_png_image_file.name),
"jpeg": os.path.basename(self._tmp_jpeg_image_file.name),
"jpg": os.path.basename(self._tmp_jpg_image_file.name),
"pdf": os.path.basename(self._tmp_pdf_document_file.name),
"webp": os.path.basename(self._tmp_webp_image_file.name),
"xml": os.path.basename(self._tmp_xml_file.name),
"json": os.path.basename(self._tmp_json_file.name),
"woff2": os.path.basename(self._tmp_woff2_file.name),
"woff": os.path.basename(self._tmp_woff_file.name),
"ttf": os.path.basename(self._tmp_ttf_file.name),
"otf": os.path.basename(self._tmp_otf_file.name),
}
self._filename = os.path.basename(self._tmpfile.name)
super().setUp()
def tearDown(self) -> None:
super().tearDown()
self._tmpdir.cleanup()
def get_app(self):
return tornado.web.Application(
[
(
r"/app/static/(.*)",
AppStaticFileHandler,
{"path": self._tmpdir.name},
)
]
)
def test_static_files_200(self):
"""Files with extensions NOT listed in app_static_file_handler.py
`SAFE_APP_STATIC_FILE_EXTENSIONS` should have the `Content-Type` header value
equals to `text-plain`.
"""
responses = [
# self._filename is file without extension
self.fetch(f"/app/static/{self._filename}"),
# self._js_filename is file with '.js' extension
self.fetch(f"/app/static/{self._temp_filenames['js']}"),
# self._symlink_inside_directory is symlink to
# self._tmpfile (inside static directory)
self.fetch(f"/app/static/{self._symlink_inside_directory}"),
]
for r in responses:
assert r.headers["Content-Type"] == "text/plain"
assert r.headers["X-Content-Type-Options"] == "nosniff"
assert r.code == 200
@parameterized.expand(
[
("png", "image/png"),
("webp", "image/webp"),
("jpg", "image/jpeg"),
("jpeg", "image/jpeg"),
("pdf", "application/pdf"),
("xml", "application/xml"),
("woff2", "font/woff2"),
("woff", "font/woff"),
("ttf", "font/ttf"),
("otf", "font/otf"),
("json", "application/json"),
],
)
def test_static_files_with_safe_extensions_200(
self, filename: str, expected_content_type: str
):
"""Files with extensions listed in SAFE_APP_STATIC_FILE_EXTENSIONS should have
the correct Content-Type header based on their extension.
"""
response = self.fetch(f"/app/static/{self._temp_filenames[filename]}")
assert response.code == 200
assert response.headers["Content-Type"] == expected_content_type
assert response.headers["X-Content-Type-Options"] == "nosniff"
@patch("os.path.getsize", MagicMock(return_value=MAX_APP_STATIC_FILE_SIZE + 1))
def test_big_file_404(self):
"""Files with size greater than MAX_APP_STATIC_FILE_SIZE should return 404."""
response = self.fetch(f"/app/static/{self._temp_filenames['png']}")
assert response.code == 404
assert (
response.body
== b"<html><title>404: File is too large</title><body>404: File is too large</body></html>"
)
def test_staticfiles_404(self):
"""Non-existent files, files outside static directory and symlinks pointing to
files outside static directory and directories should return 404.
"""
responses = [
# Access to directory without trailing slash
self.fetch("/app/static"),
# Access to non-existent file
self.fetch("/app/static/nonexistent.jpg"),
]
for r in responses:
assert r.code == 404
assert (
r.body == b"<html><title>404: Not Found</title>"
b"<body>404: Not Found</body></html>"
)
def test_staticfiles_403(self):
"""files outside static directory and symlinks pointing to
files outside static directory and directories should return 403.
"""
responses = [
# Access to directory with trailing slash
self.fetch("/app/static/"),
# Access to directory inside static folder without trailing slash
self.fetch(f"/app/static/{self._tmp_dir_inside_static_folder.name}"),
# Access to directory inside static folder with trailing slash
self.fetch(f"/app/static/{self._tmp_dir_inside_static_folder.name}/"),
# Access to file outside static directory
self.fetch("/app/static/../test_file_outside_directory.py"),
# Access to file outside static directory with same prefix
self.fetch(
f"/app/static/{self._tmpdir.name}_foo/test_file_outside_directory.py"
),
# Access to symlink outside static directory
self.fetch(f"/app/static/{self._symlink_outside_directory}"),
]
for r in responses:
assert r.code == 403
assert (
r.body == b"<html><title>403: Forbidden</title>"
b"<body>403: Forbidden</body></html>"
)
def test_mimetype_is_overridden_by_server(self):
"""Test content type of webps are set correctly"""
mimetypes.add_type("custom/webp", ".webp")
r = self.fetch(f"/app/static/{self._temp_filenames['webp']}")
assert r.headers["Content-Type"] == "custom/webp"
Server.initialize_mimetypes()
r = self.fetch(f"/app/static/{self._temp_filenames['webp']}")
assert r.headers["Content-Type"] == "image/webp"
| AppStaticFileHandlerTest |
python | tensorflow__tensorflow | tensorflow/lite/python/convert_saved_model_test.py | {
"start": 1494,
"end": 11093
} | class ____(test_util.TensorFlowTestCase):
def _createSimpleSavedModel(self, shape):
"""Create a simple SavedModel on the fly."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor = array_ops.placeholder(shape=shape, dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
inputs = {"x": in_tensor}
outputs = {"y": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _createSavedModelTwoInputArrays(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), "simple_savedmodel")
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputB")
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name="inputA")
out_tensor = in_tensor_1 + in_tensor_2
inputs = {"x": in_tensor_1, "y": in_tensor_2}
outputs = {"z": out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _getArrayNames(self, tensors):
return [tensor.name for tensor in tensors]
def _getArrayShapes(self, tensors):
dims = []
for tensor in tensors:
dim_tensor = []
for dim in tensor.shape:
if isinstance(dim, tensor_shape.Dimension):
dim_tensor.append(dim.value)
else:
dim_tensor.append(dim)
dims.append(dim_tensor)
return dims
def _convertSavedModel(self,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
if tag_set is None:
tag_set = set([tag_constants.SERVING])
if signature_key is None:
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, in_tensors, out_tensors, _ = (
convert_saved_model.freeze_saved_model(
saved_model_dir=saved_model_dir,
input_arrays=input_arrays,
input_shapes=input_shapes,
output_arrays=output_arrays,
tag_set=tag_set,
signature_key=signature_key))
return graph_def, in_tensors, out_tensors
def testSimpleSavedModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testSimpleSavedModelWithNoneBatchSizeInShape(self):
"""Test a SavedModel with None in input tensor's shape."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(saved_model_dir)
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithInvalidSignatureKey(self):
"""Test a SavedModel that fails due to an invalid signature_key."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, signature_key="invalid-key")
self.assertEqual(
"No 'invalid-key' in the SavedModel's SignatureDefs. "
"Possible values are 'serving_default'.", str(error.exception))
def testSimpleSavedModelWithInvalidOutputArray(self):
"""Test a SavedModel that fails due to invalid output arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, output_arrays=["invalid-output"])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
def testSimpleSavedModelWithWrongInputArrays(self):
"""Test a SavedModel that fails due to invalid input arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
# Check invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(saved_model_dir, input_arrays=["invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Check valid and invalid input_arrays.
with self.assertRaises(ValueError) as error:
self._convertSavedModel(
saved_model_dir, input_arrays=["Placeholder", "invalid-input"])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
def testSimpleSavedModelWithCorrectArrays(self):
"""Test a SavedModel with correct input_arrays and output_arrays."""
saved_model_dir = self._createSimpleSavedModel(shape=[None, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
output_arrays=["add"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[None, 16, 16, 3]])
def testSimpleSavedModelWithCorrectInputArrays(self):
"""Test a SavedModel with correct input_arrays and input_shapes."""
saved_model_dir = self._createSimpleSavedModel(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["Placeholder"],
input_shapes={"Placeholder": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testTwoInputArrays(self):
"""Test a simple SavedModel."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputB", "inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0", "inputB:0"])
self.assertEqual(
self._getArrayShapes(in_tensors), [[1, 16, 16, 3], [1, 16, 16, 3]])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModelTwoInputArrays(shape=[1, 16, 16, 3])
# Check case where input shape is given.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
input_arrays=["inputA"],
input_shapes={"inputA": [1, 16, 16, 3]})
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
# Check case where input shape is None.
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir, input_arrays=["inputA"])
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["inputA:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 16, 16, 3]])
def testMultipleMetaGraphDef(self):
"""Test saved model with multiple MetaGraphDefs."""
saved_model_dir = os.path.join(self.get_temp_dir(), "savedmodel_two_mgd")
builder = saved_model.builder.SavedModelBuilder(saved_model_dir)
with session.Session(graph=ops.Graph()) as sess:
# MetaGraphDef 1
in_tensor = array_ops.placeholder(shape=[1, 28, 28], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sig_input_tensor = saved_model.utils.build_tensor_info(in_tensor)
sig_input_tensor_signature = {"x": sig_input_tensor}
sig_output_tensor = saved_model.utils.build_tensor_info(out_tensor)
sig_output_tensor_signature = {"y": sig_output_tensor}
predict_signature_def = (
saved_model.signature_def_utils.build_signature_def(
sig_input_tensor_signature, sig_output_tensor_signature,
saved_model.signature_constants.PREDICT_METHOD_NAME))
signature_def_map = {
saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
}
builder.add_meta_graph_and_variables(
sess,
tags=[saved_model.tag_constants.SERVING, "additional_test_tag"],
signature_def_map=signature_def_map)
# MetaGraphDef 2
builder.add_meta_graph(tags=["tflite"])
builder.save(True)
# Convert to tflite
_, in_tensors, out_tensors = self._convertSavedModel(
saved_model_dir=saved_model_dir,
tag_set=set([saved_model.tag_constants.SERVING, "additional_test_tag"]))
self.assertEqual(self._getArrayNames(out_tensors), ["add:0"])
self.assertEqual(self._getArrayNames(in_tensors), ["Placeholder:0"])
self.assertEqual(self._getArrayShapes(in_tensors), [[1, 28, 28]])
if __name__ == "__main__":
test.main()
| FreezeSavedModelTest |
python | google__pytype | pytype/annotation_utils.py | {
"start": 588,
"end": 27572
} | class ____(utils.ContextWeakrefMixin):
"""Utility class for inline type annotations."""
def __init__(self, ctx):
super().__init__(ctx)
# calling sub_one_annotation is costly, due to calling multiple of chained
# constructors (via annot.replace) and generating complex data structure.
# And in some corner cases which includes recursive generic types with
# overloads, it causes massive call to construction of bad match which calls
# sub_one_annotations. We only store caches in case when all types are
# ground i.e. subst is empty.
# A better solution might be not to make those seemingly redundant request
# from the type checker, but for now this is a comprimise to gain
# performance in those weird corner cases.
self.annotation_sub_cache: dict[
tuple[cfg.CFGNode, abstract.BaseValue], abstract.BaseValue
] = dict()
def sub_annotations(self, node, annotations, substs, instantiate_unbound):
"""Apply type parameter substitutions to a dictionary of annotations."""
if substs and all(substs):
return {
name: self.sub_one_annotation(
node, annot, substs, instantiate_unbound
)
for name, annot in annotations.items()
}
return annotations
def _get_type_parameter_subst(
self,
node: cfg.CFGNode,
annot: abstract.TypeParameter,
substs: Sequence[Mapping[str, cfg.Variable]],
instantiate_unbound: bool,
) -> abstract.BaseValue:
"""Helper for sub_one_annotation."""
# We use the given substitutions to bind the annotation if
# (1) every subst provides at least one binding, and
# (2) none of the bindings are ambiguous, and
# (3) at least one binding is non-empty.
if all(
annot.full_name in subst and subst[annot.full_name].bindings
for subst in substs
):
vals = sum((subst[annot.full_name].data for subst in substs), [])
else:
vals = None
if (
vals is None
or any(isinstance(v, abstract.AMBIGUOUS) for v in vals)
or all(isinstance(v, abstract.Empty) for v in vals)
):
if instantiate_unbound:
vals = annot.instantiate(node).data
else:
vals = [annot]
return self.ctx.convert.merge_classes(vals)
def sub_one_annotation(
self,
node: cfg.CFGNode,
annot: abstract.BaseValue,
substs: Sequence[Mapping[str, cfg.Variable]],
instantiate_unbound: bool = True,
):
def get_type_parameter_subst(annotation):
return self._get_type_parameter_subst(
node, annotation, substs, instantiate_unbound
)
do_caching = not substs or (len(substs) == 1 and not substs[0])
if do_caching:
res = self.annotation_sub_cache.get((node, annot), None)
if res:
return res
res = self._do_sub_one_annotation(node, annot, get_type_parameter_subst)
if do_caching:
self.annotation_sub_cache[(node, annot)] = res
return res
def _do_sub_one_annotation(
self,
node: cfg.CFGNode,
annot: abstract.BaseValue,
get_type_parameter_subst_fn: Callable[
[abstract.BaseValue], abstract.BaseValue
],
):
"""Apply type parameter substitutions to an annotation."""
# We push annotations onto 'stack' and move them to the 'done' stack as they
# are processed. For each annotation, we also track an 'inner_type_keys'
# value, which is meaningful only for a NestedAnnotation. For a
# NestedAnnotation, inner_type_keys=None indicates the annotation has not
# yet been seen, so we push its inner types onto the stack, followed by the
# annotation itself with its real 'inner_type_keys' value. When we see the
# annotation again, we pull the processed inner types off the 'done' stack
# and construct the final annotation.
stack = [(annot, None)]
late_annotations = {}
done = []
while stack:
cur, inner_type_keys = stack.pop()
if not cur.formal: # pytype: disable=attribute-error
done.append(cur)
elif isinstance(cur, mixin.NestedAnnotation):
if cur.is_late_annotation() and any(t[0] == cur for t in stack):
# We've found a recursive type. We generate a LateAnnotation as a
# placeholder for the substituted type.
if cur not in late_annotations:
param_strings = []
for t in utils.unique_list(self.get_type_parameters(cur)):
s = pytd_utils.Print(
get_type_parameter_subst_fn(t).to_pytd_type_of_instance(node)
)
param_strings.append(s)
expr = f"{cur.expr}[{', '.join(param_strings)}]"
late_annot = abstract.LateAnnotation(expr, cur.stack, cur.ctx)
late_annotations[cur] = late_annot
done.append(late_annotations[cur])
elif inner_type_keys is None:
keys, vals = zip(*cur.get_inner_types())
stack.append((cur, keys))
stack.extend((val, None) for val in vals)
else:
inner_types = []
for k in inner_type_keys:
inner_types.append((k, done.pop()))
done_annot = cur.replace(inner_types)
if cur in late_annotations:
# If we've generated a LateAnnotation placeholder for cur's
# substituted type, replace it now with the real type.
late_annot = late_annotations.pop(cur)
late_annot.set_type(done_annot)
if "[" in late_annot.expr:
if self.ctx.vm.late_annotations is None:
self.ctx.vm.flatten_late_annotation(
node, late_annot, self.ctx.vm.frame.f_globals
)
else:
self.ctx.vm.late_annotations[
late_annot.expr.split("[", 1)[0]
].append(late_annot)
done.append(done_annot)
else:
done.append(get_type_parameter_subst_fn(cur))
assert len(done) == 1
return done[0]
def sub_annotations_for_parameterized_class(
self,
cls: abstract.ParameterizedClass,
annotations: dict[str, abstract.BaseValue],
) -> dict[str, abstract.BaseValue]:
"""Apply type parameter substitutions to a dictionary of annotations.
Args:
cls: ParameterizedClass that defines type parameter substitutions.
annotations: A dictionary of annotations to which type parameter
substition should be applied.
Returns:
Annotations with type parameters substituted.
"""
formal_type_parameters = cls.get_formal_type_parameters()
def get_type_parameter_subst(
annotation: abstract.BaseValue,
) -> abstract.BaseValue | None:
# Normally the type parameter module is set correctly at this point.
# Except for the case when a method that references this type parameter
# is inherited in a subclass that does not specialize this parameter:
# class A(Generic[T]):
# def f(self, t: T): ...
#
# class B(Generic[T], A[T]):
# pass
#
# class C(B[int]): ...
# In this case t in A[T].f will be annotated with T with no module set,
# since we don't know the correct module until T is specialized in
# B[int].
for name in (
f"{cls.full_name}.{annotation.name}",
f"{cls.name}.{annotation.name}",
):
if name in formal_type_parameters:
return formal_type_parameters[name]
# Method parameter can be annotated with a typevar that doesn't
# belong to the class template:
# class A(Generic[T]):
# def f(self, t: U): ...
# In this case we return it as is.
return annotation
return {
name: self._do_sub_one_annotation(
self.ctx.root_node, annot, get_type_parameter_subst
)
for name, annot in annotations.items()
}
def add_scope(self, annot, types, cls, seen=None):
"""Add scope for type parameters.
In original type class, all type parameters that should be added a scope
will be replaced with a new copy.
Args:
annot: The type class.
types: A type name list that should be added a scope.
cls: The class that type parameters should be scoped to.
seen: Already seen types.
Returns:
The type with fresh type parameters that have been added the scope.
"""
if seen is None:
seen = {annot}
elif annot in seen or not annot.formal:
return annot
else:
seen.add(annot)
if isinstance(annot, abstract.TypeParameter):
if annot.name in types:
return annot.with_scope(cls.full_name)
elif annot.full_name == "typing.Self":
bound_annot = annot.copy()
bound_annot.bound = cls
return bound_annot
else:
return annot
elif isinstance(annot, mixin.NestedAnnotation):
inner_types = [
(key, self.add_scope(typ, types, cls, seen))
for key, typ in annot.get_inner_types()
]
return annot.replace(inner_types)
return annot
def get_type_parameters(self, annot, seen=None):
"""Returns all the TypeParameter instances that appear in the annotation.
Note that if you just need to know whether or not the annotation contains
type parameters, you can check its `.formal` attribute.
Args:
annot: An annotation.
seen: A seen set.
"""
seen = seen or set()
if annot in seen or not annot.formal:
return []
if isinstance(annot, mixin.NestedAnnotation):
# We track parameterized classes to avoid recursion errors when a class
# contains itself.
seen = seen | {annot}
if isinstance(annot, abstract.TypeParameter):
return [annot]
elif isinstance(annot, abstract.TupleClass):
annots = []
for idx in range(annot.tuple_length):
annots.extend(
self.get_type_parameters(annot.formal_type_parameters[idx], seen)
)
return annots
elif isinstance(annot, mixin.NestedAnnotation):
return sum(
(
self.get_type_parameters(t, seen)
for _, t in annot.get_inner_types()
),
[],
)
return []
def get_callable_type_parameter_names(self, val: abstract.BaseValue):
"""Gets all TypeParameter names that appear in a Callable in 'val'."""
type_params = set()
seen = set()
stack = [val]
while stack:
annot = stack.pop()
if annot in seen or not annot.formal:
continue
seen.add(annot)
if annot.full_name == "typing.Callable":
params = collections.Counter(self.get_type_parameters(annot))
if isinstance(annot, abstract.CallableClass):
# pytype represents Callable[[T1, T2], None] as
# CallableClass({0: T1, 1: T2, ARGS: Union[T1, T2], RET: None}),
# so we have to fix double-counting of argument type parameters.
params -= collections.Counter(
self.get_type_parameters(
annot.formal_type_parameters[abstract_utils.ARGS]
)
)
# Type parameters that appear only once in a function signature are
# invalid, so ignore them.
type_params.update(p.name for p, n in params.items() if n > 1)
elif isinstance(annot, mixin.NestedAnnotation):
stack.extend(v for _, v in annot.get_inner_types())
return type_params
def convert_function_type_annotation(self, name, typ):
visible = typ.data
if len(visible) > 1:
self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, visible, name)
return None
else:
return visible[0]
def convert_function_annotations(self, node, raw_annotations):
"""Convert raw annotations to a {name: annotation} dict."""
if raw_annotations:
# {"i": int, "return": str} is stored as (int, str, ("i", "return"))
names = abstract_utils.get_atomic_python_constant(raw_annotations[-1])
type_list = raw_annotations[:-1]
annotations_list = []
for name, t in zip(names, type_list):
name = abstract_utils.get_atomic_python_constant(name)
t = self.convert_function_type_annotation(name, t)
annotations_list.append((name, t))
return self.convert_annotations_list(node, annotations_list)
else:
return {}
def convert_annotations_list(self, node, annotations_list):
"""Convert a (name, raw_annot) list to a {name: annotation} dict."""
annotations = {}
for name, t in annotations_list:
if t is None or abstract_utils.is_ellipsis(t):
# '...' is an experimental "inferred type"; see b/213607272.
continue
annot = self._process_one_annotation(
node, t, name, self.ctx.vm.simple_stack()
)
if annot is not None:
annotations[name] = annot
return annotations
def convert_class_annotations(self, node, raw_annotations):
"""Convert a name -> raw_annot dict to annotations."""
annotations = {}
raw_items = raw_annotations.items()
for name, t in raw_items:
# Don't use the parameter name, since it's often something unhelpful
# like `0`.
annot = self._process_one_annotation(
node, t, None, self.ctx.vm.simple_stack()
)
annotations[name] = annot or self.ctx.convert.unsolvable
return annotations
def init_annotation(self, node, name, annot, container=None, extra_key=None):
value = self.ctx.vm.init_class(
node, annot, container=container, extra_key=extra_key
)
for d in value.data:
d.from_annotation = name
return node, value
def _in_class_frame(self):
frame = self.ctx.vm.frame
if not frame.func:
return False
return (
isinstance(frame.func.data, abstract.BoundFunction)
or frame.func.data.is_attribute_of_class
)
def extract_and_init_annotation(self, node, name, var):
"""Extracts an annotation from var and instantiates it."""
frame = self.ctx.vm.frame
substs = frame.substs
if self._in_class_frame():
self_var = frame.first_arg
if self_var:
# self_var is an instance of (a subclass of) the class on which
# frame.func is defined. We walk self_var's class's MRO to find the
# defining class and grab its type parameter substitutions.
defining_cls_name, _, _ = frame.func.data.name.rpartition(".")
type_params = []
defining_classes = []
for v in self_var.data:
v_cls = v if isinstance(v, abstract.Class) else v.cls
for cls in v_cls.mro:
if cls.name == defining_cls_name:
# Normalize type parameter names by dropping the scope.
type_params.extend(p.with_scope(None) for p in cls.template)
defining_classes.append(cls)
break
self_substs = tuple(
abstract_utils.get_type_parameter_substitutions(cls, type_params)
for cls in defining_classes
)
substs = abstract_utils.combine_substs(substs, self_substs)
typ = self.extract_annotation(
node,
var,
name,
self.ctx.vm.simple_stack(),
allowed_type_params=set(itertools.chain(*substs)),
)
if isinstance(typ, typing_overlay.Final):
return typ, self.ctx.new_unsolvable(node)
return self._sub_and_instantiate(node, name, typ, substs)
def _sub_and_instantiate(self, node, name, typ, substs):
if isinstance(typ, abstract.FinalAnnotation):
t, value = self._sub_and_instantiate(node, name, typ.annotation, substs)
return abstract.FinalAnnotation(t, self.ctx), value
if typ.formal:
substituted_type = self.sub_one_annotation(
node, typ, substs, instantiate_unbound=False
)
else:
substituted_type = typ
if typ.formal and self._in_class_frame():
class_substs = abstract_utils.combine_substs(
substs, [{"typing.Self": self.ctx.vm.frame.first_arg}]
)
type_for_value = self.sub_one_annotation( # pytype: disable=wrong-arg-types
node, typ, class_substs, instantiate_unbound=False
)
else:
type_for_value = substituted_type
_, value = self.init_annotation(node, name, type_for_value)
return substituted_type, value
def apply_annotation(self, node, op, name, value):
"""If there is an annotation for the op, return its value."""
assert op is self.ctx.vm.frame.current_opcode
if op.code.filename != self.ctx.vm.filename:
return AnnotatedValue(None, value)
if not op.annotation:
return AnnotatedValue(None, value)
annot = op.annotation
if annot == "...":
# Experimental "inferred type": see b/213607272.
return AnnotatedValue(None, value)
frame = self.ctx.vm.frame
stack = self.ctx.vm.simple_stack()
with self.ctx.vm.generate_late_annotations(stack):
var, errorlog = abstract_utils.eval_expr(
self.ctx, node, frame.f_globals, frame.f_locals, annot
)
if errorlog:
self.ctx.errorlog.invalid_annotation(
self.ctx.vm.frames, annot, details=errorlog.details
)
typ, annot_val = self.extract_and_init_annotation(node, name, var)
if isinstance(typ, typing_overlay.Final):
# return the original value, we want type inference here.
return AnnotatedValue(None, value, final=True)
elif isinstance(typ, abstract.FinalAnnotation):
return AnnotatedValue(typ.annotation, annot_val, final=True)
elif typ.full_name == "typing.TypeAlias":
# Validate that 'value' is a legal type alias and use it.
annot = self.extract_annotation(node, value, name, stack)
return AnnotatedValue(None, annot.to_variable(node))
else:
return AnnotatedValue(typ, annot_val)
def extract_annotation(
self,
node,
var,
name,
stack,
allowed_type_params: set[str] | None = None,
):
"""Returns an annotation extracted from 'var'.
Args:
node: The current node.
var: The variable to extract from.
name: The annotated name.
stack: The frame stack.
allowed_type_params: Type parameters that are allowed to appear in the
annotation. 'None' means all are allowed. If non-None, the result of
calling get_callable_type_parameter_names on the extracted annotation is
also added to the allowed set.
"""
try:
typ = abstract_utils.get_atomic_value(var)
except abstract_utils.ConversionError:
self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, None, name)
return self.ctx.convert.unsolvable
typ = self._process_one_annotation(node, typ, name, stack)
if not typ:
return self.ctx.convert.unsolvable
if typ.formal and allowed_type_params is not None:
allowed_type_params = (
allowed_type_params | self.get_callable_type_parameter_names(typ)
)
if self.ctx.vm.frame.func and (
isinstance(self.ctx.vm.frame.func.data, abstract.BoundFunction)
or self.ctx.vm.frame.func.data.is_class_builder
):
allowed_type_params.add("typing.Self")
illegal_params = []
for x in self.get_type_parameters(typ):
if not allowed_type_params.intersection([x.name, x.full_name]):
illegal_params.append(x.name)
if illegal_params:
self._log_illegal_params(illegal_params, stack, typ, name)
return self.ctx.convert.unsolvable
return typ
def _log_illegal_params(self, illegal_params, stack, typ, name):
out_of_scope_params = utils.unique_list(illegal_params)
details = "TypeVar(s) %s not in scope" % ", ".join(
repr(p) for p in out_of_scope_params
)
if self.ctx.vm.frame.func:
method = self.ctx.vm.frame.func.data
if isinstance(method, abstract.BoundFunction):
desc = "class"
frame_name = method.name.rsplit(".", 1)[0]
else:
desc = "class" if method.is_class_builder else "method"
frame_name = method.name
details += f" for {desc} {frame_name!r}"
if "AnyStr" in out_of_scope_params:
str_type = "Union[str, bytes]"
details += f"\nNote: For all string types, use {str_type}."
self.ctx.errorlog.invalid_annotation(stack, typ, details, name)
def eval_multi_arg_annotation(self, node, func, annot, stack):
"""Evaluate annotation for multiple arguments (from a type comment)."""
args, errorlog = self._eval_expr_as_tuple(node, annot, stack)
if errorlog:
self.ctx.errorlog.invalid_function_type_comment(
stack, annot, details=errorlog.details
)
code = func.code
expected = code.get_arg_count()
names = code.varnames
# This is a hack. Specifying the type of the first arg is optional in
# class and instance methods. There is no way to tell at this time
# how the function will be used, so if the first arg is self or cls we
# make it optional. The logic is somewhat convoluted because we don't
# want to count the skipped argument in an error message.
if len(args) != expected:
if expected and names[0] in ["self", "cls"]:
expected -= 1
names = names[1:]
if len(args) != expected:
self.ctx.errorlog.invalid_function_type_comment(
stack,
annot,
details="Expected %d args, %d given" % (expected, len(args)),
)
return
for name, arg in zip(names, args):
resolved = self._process_one_annotation(node, arg, name, stack)
if resolved is not None:
func.signature.set_annotation(name, resolved)
def _process_one_annotation(
self,
node: cfg.CFGNode,
annotation: abstract.BaseValue,
# We require `stack` to be a tuple to make sure we pass in a frozen
# snapshot of the frame stack, rather than the actual stack, since late
# annotations need to snapshot the stack at time of creation in order to
# get the right line information for error messages.
name: str | None,
stack: tuple[state.FrameType, ...],
) -> abstract.BaseValue | None:
"""Change annotation / record errors where required."""
if isinstance(annotation, abstract.AnnotationContainer):
annotation = annotation.base_cls
if isinstance(annotation, typing_overlay.Union):
self.ctx.errorlog.invalid_annotation(
stack, annotation, "Needs options", name
)
return None
elif (
name is not None
and name != "return"
and annotation.full_name in abstract_utils.TYPE_GUARDS
):
self.ctx.errorlog.invalid_annotation(
stack,
annotation,
f"{annotation.name} is only allowed as a return annotation",
name,
)
return None
elif (
isinstance(annotation, abstract.Instance)
and annotation.cls == self.ctx.convert.str_type
):
# String annotations : Late evaluation
if isinstance(annotation, abstract.PythonConstant):
expr = annotation.pyval
if not expr:
self.ctx.errorlog.invalid_annotation(
stack, annotation, "Cannot be an empty string", name
)
return None
frame = self.ctx.vm.frame
# Immediately try to evaluate the reference, generating LateAnnotation
# objects as needed. We don't store the entire string as a
# LateAnnotation because:
# - With __future__.annotations, all annotations look like forward
# references - most of them don't need to be late evaluated.
# - Given an expression like "Union[str, NotYetDefined]", we want to
# evaluate the union immediately so we don't end up with a complex
# LateAnnotation, which can lead to bugs when instantiated.
with self.ctx.vm.generate_late_annotations(stack):
v, errorlog = abstract_utils.eval_expr(
self.ctx, node, frame.f_globals, frame.f_locals, expr
)
if errorlog:
self.ctx.errorlog.copy_from(errorlog.errors, stack)
if len(v.data) == 1:
return self._process_one_annotation(node, v.data[0], name, stack)
self.ctx.errorlog.ambiguous_annotation(stack, [annotation], name)
return None
elif annotation.cls == self.ctx.convert.none_type:
# PEP 484 allows to write "NoneType" as "None"
return self.ctx.convert.none_type
elif isinstance(annotation, mixin.NestedAnnotation):
if annotation.processed:
return annotation
annotation.processed = True
for key, typ in annotation.get_inner_types():
if (
annotation.full_name == "typing.Callable"
and key == abstract_utils.RET
):
inner_name = "return"
else:
inner_name = name
processed = self._process_one_annotation(node, typ, inner_name, stack)
if processed is None:
return None
elif (
name == inner_name
and processed.full_name in abstract_utils.TYPE_GUARDS
):
self.ctx.errorlog.invalid_annotation(
stack, typ, f"{processed.name} is not allowed as inner type", name
)
return None
annotation.update_inner_type(key, processed)
return annotation
elif isinstance(
annotation,
(
abstract.Class,
abstract.AMBIGUOUS_OR_EMPTY,
abstract.TypeParameter,
abstract.ParamSpec,
abstract.ParamSpecArgs,
abstract.ParamSpecKwargs,
abstract.Concatenate,
abstract.FinalAnnotation,
function.ParamSpecMatch,
typing_overlay.Final,
typing_overlay.Never,
),
):
return annotation
else:
self.ctx.errorlog.invalid_annotation(
stack, annotation, "Not a type", name
)
return None
def _eval_expr_as_tuple(self, node, expr, stack):
"""Evaluate an expression as a tuple."""
if not expr:
return (), None
f_globals = self.ctx.vm.frame.f_globals
f_locals = self.ctx.vm.frame.f_locals
with self.ctx.vm.generate_late_annotations(stack):
result_var, errorlog = abstract_utils.eval_expr(
self.ctx, node, f_globals, f_locals, expr
)
result = abstract_utils.get_atomic_value(result_var)
# If the result is a tuple, expand it.
if isinstance(result, abstract.PythonConstant) and isinstance(
result.pyval, tuple
):
return (
tuple(abstract_utils.get_atomic_value(x) for x in result.pyval),
errorlog,
)
else:
return (result,), errorlog
def deformalize(self, value):
# TODO(rechen): Instead of doing this, call sub_one_annotation() to replace
# type parameters with their bound/constraints.
while value.formal:
if isinstance(value, abstract.ParameterizedClass):
value = value.base_cls
else:
value = self.ctx.convert.unsolvable
return value
| AnnotationUtils |
python | sqlalchemy__sqlalchemy | test/engine/test_reconnect.py | {
"start": 29223,
"end": 33901
} | class ____(fixtures.TestBase):
"""real test for issue #5648, which had to be revisited for 2.0 as the
initial version was not adequately tested and non-implementation for
mysql, postgresql was not caught
"""
__backend__ = True
__requires__ = ("graceful_disconnects",)
@testing.fixture
def ping_fixture(self, testing_engine):
engine = testing_engine(
options={"pool_pre_ping": True, "_initialize": False}
)
existing_connect = engine.dialect.dbapi.connect
fail = False
fail_count = itertools.count()
DBAPIError = engine.dialect.dbapi.Error
class ExplodeConnection(DBAPIProxyConnection):
def ping(self, *arg, **kw):
if fail and next(fail_count) < 1:
raise DBAPIError("unhandled disconnect situation")
else:
return True
class ExplodeCursor(DBAPIProxyCursor):
def execute(self, stmt, parameters=None, **kw):
if fail and next(fail_count) < 1:
raise DBAPIError("unhandled disconnect situation")
else:
return super().execute(stmt, parameters=parameters, **kw)
def mock_connect(*arg, **kw):
real_connection = existing_connect(*arg, **kw)
return ExplodeConnection(engine, real_connection, ExplodeCursor)
with mock.patch.object(
engine.dialect.loaded_dbapi, "connect", mock_connect
):
# set up initial connection. pre_ping works on subsequent connects
engine.connect().close()
# ping / exec will fail
fail = True
yield engine
@testing.fixture
def ping_fixture_all_errs_disconnect(self, ping_fixture):
engine = ping_fixture
with mock.patch.object(
engine.dialect, "is_disconnect", lambda *arg, **kw: True
):
yield engine
def test_control(self, ping_fixture):
"""test the fixture raises on connect"""
engine = ping_fixture
with expect_raises_message(
exc.DBAPIError, "unhandled disconnect situation"
):
engine.connect()
def test_downgrade_control(self, ping_fixture_all_errs_disconnect):
"""test the disconnect fixture doesn't raise, since it considers
all errors to be disconnect errors.
"""
engine = ping_fixture_all_errs_disconnect
conn = engine.connect()
conn.close()
def test_event_handler_didnt_upgrade_disconnect(self, ping_fixture):
"""test that having an event handler that doesn't do anything
keeps the behavior in place for a fatal error.
"""
engine = ping_fixture
@event.listens_for(engine, "handle_error")
def setup_disconnect(ctx):
assert not ctx.is_disconnect
with expect_raises_message(
exc.DBAPIError, "unhandled disconnect situation"
):
engine.connect()
def test_event_handler_didnt_downgrade_disconnect(
self, ping_fixture_all_errs_disconnect
):
"""test that having an event handler that doesn't do anything
keeps the behavior in place for a disconnect error.
"""
engine = ping_fixture_all_errs_disconnect
@event.listens_for(engine, "handle_error")
def setup_disconnect(ctx):
assert ctx.is_pre_ping
assert ctx.is_disconnect
conn = engine.connect()
conn.close()
def test_event_handler_can_upgrade_disconnect(self, ping_fixture):
"""test that an event hook can receive a fatal error and convert
it to be a disconnect error during pre-ping"""
engine = ping_fixture
@event.listens_for(engine, "handle_error")
def setup_disconnect(ctx):
assert ctx.is_pre_ping
ctx.is_disconnect = True
conn = engine.connect()
# no error
conn.close()
def test_event_handler_can_downgrade_disconnect(
self, ping_fixture_all_errs_disconnect
):
"""test that an event hook can receive a disconnect error and convert
it to be a fatal error during pre-ping"""
engine = ping_fixture_all_errs_disconnect
@event.listens_for(engine, "handle_error")
def setup_disconnect(ctx):
assert ctx.is_disconnect
if ctx.is_pre_ping:
ctx.is_disconnect = False
with expect_raises_message(
exc.DBAPIError, "unhandled disconnect situation"
):
engine.connect()
| RealPrePingEventHandlerTest |
python | redis__redis-py | tests/test_pipeline.py | {
"start": 194,
"end": 14717
} | class ____:
def test_pipeline_is_true(self, r):
"Ensure pipeline instances are not false-y"
with r.pipeline() as pipe:
assert pipe
def test_pipeline(self, r):
with r.pipeline() as pipe:
(
pipe.set("a", "a1")
.get("a")
.zadd("z", {"z1": 1})
.zadd("z", {"z2": 4})
.zincrby("z", 1, "z1")
)
assert pipe.execute() == [
True,
b"a1",
True,
True,
2.0,
]
def test_pipeline_memoryview(self, r):
with r.pipeline() as pipe:
(pipe.set("a", memoryview(b"a1")).get("a"))
assert pipe.execute() == [True, b"a1"]
def test_pipeline_length(self, r):
with r.pipeline() as pipe:
# Initially empty.
assert len(pipe) == 0
# Fill 'er up!
pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert len(pipe) == 3
# Execute calls reset(), so empty once again.
pipe.execute()
assert len(pipe) == 0
def test_pipeline_no_transaction(self, r):
with r.pipeline(transaction=False) as pipe:
pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert pipe.execute() == [True, True, True]
assert r["a"] == b"a1"
assert r["b"] == b"b1"
assert r["c"] == b"c1"
@pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch(self, r):
r["a"] = 0
with r.pipeline(transaction=False) as pipe:
pipe.watch("a")
a = pipe.get("a")
pipe.multi()
pipe.set("a", int(a) + 1)
assert pipe.execute() == [True]
@pytest.mark.onlynoncluster
def test_pipeline_no_transaction_watch_failure(self, r):
r["a"] = 0
with r.pipeline(transaction=False) as pipe:
pipe.watch("a")
a = pipe.get("a")
r["a"] = "bad"
pipe.multi()
pipe.set("a", int(a) + 1)
with pytest.raises(redis.WatchError):
pipe.execute()
assert r["a"] == b"bad"
def test_exec_error_in_response(self, r):
"""
an invalid pipeline command at exec time adds the exception instance
to the list of returned values
"""
r["c"] = "a"
with r.pipeline() as pipe:
pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
result = pipe.execute(raise_on_error=False)
assert result[0]
assert r["a"] == b"1"
assert result[1]
assert r["b"] == b"2"
# we can't lpush to a key that's a string value, so this should
# be a ResponseError exception
assert isinstance(result[2], redis.ResponseError)
assert r["c"] == b"a"
# since this isn't a transaction, the other commands after the
# error are still executed
assert result[3]
assert r["d"] == b"4"
# make sure the pipe was restored to a working state
assert pipe.set("z", "zzz").execute() == [True]
assert r["z"] == b"zzz"
def test_exec_error_raised(self, r):
r["c"] = "a"
with r.pipeline() as pipe:
pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
assert str(ex.value).startswith(
"Command # 3 (LPUSH c 3) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert pipe.set("z", "zzz").execute() == [True]
assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_transaction_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
values in the pipeline no matter the raise_on_error preference
"""
for error_switch in (True, False):
with r.pipeline() as pipe:
pipe.set("a", 1).mget([]).set("c", 3)
result = pipe.execute(raise_on_error=error_switch)
assert result[0]
assert result[1] == []
assert result[2]
@pytest.mark.onlynoncluster
def test_pipeline_with_empty_error_command(self, r):
"""
Commands with custom EMPTY_ERROR functionality return their default
values in the pipeline no matter the raise_on_error preference
"""
for error_switch in (True, False):
with r.pipeline(transaction=False) as pipe:
pipe.set("a", 1).mget([]).set("c", 3)
result = pipe.execute(raise_on_error=error_switch)
assert result[0]
assert result[1] == []
assert result[2]
def test_parse_error_raised(self, r):
with r.pipeline() as pipe:
# the zrem is invalid because we don't pass any keys to it
pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
assert str(ex.value).startswith(
"Command # 2 (ZREM b) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert pipe.set("z", "zzz").execute() == [True]
assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_parse_error_raised_transaction(self, r):
with r.pipeline() as pipe:
pipe.multi()
# the zrem is invalid because we don't pass any keys to it
pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
assert str(ex.value).startswith(
"Command # 2 (ZREM b) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert pipe.set("z", "zzz").execute() == [True]
assert r["z"] == b"zzz"
@pytest.mark.onlynoncluster
def test_watch_succeed(self, r):
r["a"] = 1
r["b"] = 2
with r.pipeline() as pipe:
pipe.watch("a", "b")
assert pipe.watching
a_value = pipe.get("a")
b_value = pipe.get("b")
assert a_value == b"1"
assert b_value == b"2"
pipe.multi()
pipe.set("c", 3)
assert pipe.execute() == [True]
assert not pipe.watching
@pytest.mark.onlynoncluster
def test_watch_failure(self, r):
r["a"] = 1
r["b"] = 2
with r.pipeline() as pipe:
pipe.watch("a", "b")
r["b"] = 3
pipe.multi()
pipe.get("a")
with pytest.raises(redis.WatchError):
pipe.execute()
assert not pipe.watching
@pytest.mark.onlynoncluster
def test_watch_failure_in_empty_transaction(self, r):
r["a"] = 1
r["b"] = 2
with r.pipeline() as pipe:
pipe.watch("a", "b")
r["b"] = 3
pipe.multi()
with pytest.raises(redis.WatchError):
pipe.execute()
assert not pipe.watching
@pytest.mark.onlynoncluster
def test_unwatch(self, r):
r["a"] = 1
r["b"] = 2
with r.pipeline() as pipe:
pipe.watch("a", "b")
r["b"] = 3
pipe.unwatch()
assert not pipe.watching
pipe.get("a")
assert pipe.execute() == [b"1"]
@pytest.mark.onlynoncluster
def test_watch_exec_no_unwatch(self, r):
r["a"] = 1
r["b"] = 2
with r.monitor() as m:
with r.pipeline() as pipe:
pipe.watch("a", "b")
assert pipe.watching
a_value = pipe.get("a")
b_value = pipe.get("b")
assert a_value == b"1"
assert b_value == b"2"
pipe.multi()
pipe.set("c", 3)
assert pipe.execute() == [True]
assert not pipe.watching
unwatch_command = wait_for_command(r, m, "UNWATCH")
assert unwatch_command is None, "should not send UNWATCH"
@pytest.mark.onlynoncluster
def test_watch_reset_unwatch(self, r):
r["a"] = 1
with r.monitor() as m:
with r.pipeline() as pipe:
pipe.watch("a")
assert pipe.watching
pipe.reset()
assert not pipe.watching
unwatch_command = wait_for_command(r, m, "UNWATCH")
assert unwatch_command is not None
assert unwatch_command["command"] == "UNWATCH"
@pytest.mark.onlynoncluster
def test_close_is_reset(self, r):
with r.pipeline() as pipe:
called = 0
def mock_reset():
nonlocal called
called += 1
with mock.patch.object(pipe, "reset", mock_reset):
pipe.close()
assert called == 1
@pytest.mark.onlynoncluster
def test_closing(self, r):
with closing(r.pipeline()):
pass
@pytest.mark.onlynoncluster
def test_transaction_callable(self, r):
r["a"] = 1
r["b"] = 2
has_run = []
def my_transaction(pipe):
a_value = pipe.get("a")
assert a_value in (b"1", b"2")
b_value = pipe.get("b")
assert b_value == b"2"
# silly run-once code... incr's "a" so WatchError should be raised
# forcing this all to run again. this should incr "a" once to "2"
if not has_run:
r.incr("a")
has_run.append("it has")
pipe.multi()
pipe.set("c", int(a_value) + int(b_value))
result = r.transaction(my_transaction, "a", "b")
assert result == [True]
assert r["c"] == b"4"
@pytest.mark.onlynoncluster
def test_transaction_callable_returns_value_from_callable(self, r):
def callback(pipe):
# No need to do anything here since we only want the return value
return "a"
res = r.transaction(callback, "my-key", value_from_callable=True)
assert res == "a"
def test_exec_error_in_no_transaction_pipeline(self, r):
r["a"] = 1
with r.pipeline(transaction=False) as pipe:
pipe.llen("a")
pipe.expire("a", 100)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
assert str(ex.value).startswith(
"Command # 1 (LLEN a) of pipeline caused error: "
)
assert r["a"] == b"1"
def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r):
key = chr(3456) + "abcd" + chr(3421)
r[key] = 1
with r.pipeline(transaction=False) as pipe:
pipe.llen(key)
pipe.expire(key, 100)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
expected = f"Command # 1 (LLEN {key}) of pipeline caused error: "
assert str(ex.value).startswith(expected)
assert r[key] == b"1"
def test_exec_error_in_pipeline_truncated(self, r):
key = "a" * 50
a_value = "a" * 20
b_value = "b" * 20
r[key] = 1
with r.pipeline(transaction=False) as pipe:
pipe.hset(key, mapping={"field_a": a_value, "field_b": b_value})
pipe.expire(key, 100)
with pytest.raises(redis.ResponseError) as ex:
pipe.execute()
expected = f"Command # 1 (HSET {key} field_a {a_value} field_b...) of pipeline caused error: "
assert str(ex.value).startswith(expected)
def test_pipeline_with_bitfield(self, r):
with r.pipeline() as pipe:
pipe.set("a", "1")
bf = pipe.bitfield("b")
pipe2 = (
bf.set("u8", 8, 255)
.get("u8", 0)
.get("u4", 8) # 1111
.get("u4", 12) # 1111
.get("u4", 13) # 1110
.execute()
)
pipe.get("a")
response = pipe.execute()
assert pipe == pipe2
assert response == [True, [0, 0, 15, 15, 14], b"1"]
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.0.0")
def test_pipeline_discard(self, r):
# empty pipeline should raise an error
with r.pipeline() as pipe:
pipe.set("key", "someval")
pipe.discard()
with pytest.raises(redis.exceptions.ResponseError):
pipe.execute()
# setting a pipeline and discarding should do the same
with r.pipeline() as pipe:
pipe.set("key", "someval")
pipe.set("someotherkey", "val")
response = pipe.execute()
pipe.set("key", "another value!")
pipe.discard()
pipe.set("key", "another vae!")
with pytest.raises(redis.exceptions.ResponseError):
pipe.execute()
pipe.set("foo", "bar")
response = pipe.execute()
assert response[0]
assert r.get("foo") == b"bar"
@pytest.mark.onlynoncluster
def test_send_set_commands_over_pipeline(self, r: redis.Redis):
pipe = r.pipeline()
pipe.hset("hash:1", "foo", "bar")
pipe.hset("hash:1", "bar", "foo")
pipe.hset("hash:1", "baz", "bar")
pipe.hgetall("hash:1")
resp = pipe.execute()
assert resp == [1, 1, 1, {b"bar": b"foo", b"baz": b"bar", b"foo": b"bar"}]
@pytest.mark.onlycluster
@skip_if_server_version_lt("8.3.224")
def test_pipeline_with_msetex(self, r):
r.delete("key1", "key2", "key1_transaction", "key2_transaction")
p = r.pipeline()
with pytest.raises(RedisClusterException):
p.msetex({"key1": "value1", "key2": "value2"}, ex=1000)
p_transaction = r.pipeline(transaction=True)
with pytest.raises(RedisClusterException):
p_transaction.msetex(
{"key1_transaction": "value1", "key2_transaction": "value2"}, ex=10
)
| TestPipeline |
python | openai__openai-python | src/openai/types/completion_usage.py | {
"start": 1213,
"end": 1735
} | class ____(BaseModel):
completion_tokens: int
"""Number of tokens in the generated completion."""
prompt_tokens: int
"""Number of tokens in the prompt."""
total_tokens: int
"""Total number of tokens used in the request (prompt + completion)."""
completion_tokens_details: Optional[CompletionTokensDetails] = None
"""Breakdown of tokens used in a completion."""
prompt_tokens_details: Optional[PromptTokensDetails] = None
"""Breakdown of tokens used in the prompt."""
| CompletionUsage |
python | django__django | tests/async/tests.py | {
"start": 1035,
"end": 2064
} | class ____(SimpleTestCase):
"""
async_unsafe decorator should work correctly and returns the correct
message.
"""
@async_unsafe
def dangerous_method(self):
return True
async def test_async_unsafe(self):
# async_unsafe decorator catches bad access and returns the right
# message.
msg = (
"You cannot call this from an async context - use a thread or "
"sync_to_async."
)
with self.assertRaisesMessage(SynchronousOnlyOperation, msg):
self.dangerous_method()
@mock.patch.dict(os.environ, {"DJANGO_ALLOW_ASYNC_UNSAFE": "true"})
@async_to_sync # mock.patch() is not async-aware.
async def test_async_unsafe_suppressed(self):
# Decorator doesn't trigger check when the environment variable to
# suppress it is set.
try:
self.dangerous_method()
except SynchronousOnlyOperation:
self.fail("SynchronousOnlyOperation should not be raised.")
| AsyncUnsafeTest |
python | django__django | tests/queries/models.py | {
"start": 1536,
"end": 1932
} | class ____(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE, null=True)
value = models.IntegerField(null=True)
date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True)
filterable = models.BooleanField(default=True)
class Meta:
ordering = ["info"]
def __str__(self):
return self.info
| ExtraInfo |
python | apache__airflow | helm-tests/tests/helm_tests/webserver/test_ingress_web.py | {
"start": 914,
"end": 9669
} | class ____:
"""Tests ingress web."""
def test_should_pass_validation_with_just_ingress_enabled_v1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}, "airflowVersion": "2.10.5"},
show_only=["templates/webserver/webserver-ingress.yaml"],
) # checks that no validation exception is raised
def test_should_pass_validation_with_just_ingress_enabled_v1beta1(self):
render_chart(
values={"ingress": {"web": {"enabled": True}}, "airflowVersion": "2.10.5"},
show_only=["templates/webserver/webserver-ingress.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_allow_more_than_one_annotation(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {"web": {"enabled": True, "annotations": {"aa": "bb", "cc": "dd"}}},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert jmespath.search("metadata.annotations", docs[0]) == {"aa": "bb", "cc": "dd"}
def test_should_set_ingress_class_name(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {"web": {"enabled": True, "ingressClassName": "foo"}},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert jmespath.search("spec.ingressClassName", docs[0]) == "foo"
def test_should_ingress_hosts_objs_have_priority_over_host(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "oldsecret"},
"hosts": [
{"name": "*.a-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "b-host", "tls": {"enabled": True, "secretName": "newsecret2"}},
{"name": "c-host", "tls": {"enabled": True, "secretName": "newsecret1"}},
{"name": "d-host", "tls": {"enabled": False, "secretName": ""}},
{"name": "e-host"},
],
"host": "old-host",
},
},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert jmespath.search("spec.rules[*].host", docs[0]) == [
"*.a-host",
"b-host",
"c-host",
"d-host",
"e-host",
]
assert jmespath.search("spec.tls[*]", docs[0]) == [
{"hosts": ["*.a-host"], "secretName": "newsecret1"},
{"hosts": ["b-host"], "secretName": "newsecret2"},
{"hosts": ["c-host"], "secretName": "newsecret1"},
]
    def test_should_ingress_hosts_strs_have_priority_over_host(self):
        """Plain-string `hosts` entries win over the deprecated `host` key."""
        docs = render_chart(
            values={
                "airflowVersion": "2.10.5",
                "ingress": {
                    "web": {
                        "enabled": True,
                        "tls": {"enabled": True, "secretName": "secret"},
                        "hosts": ["*.a-host", "b-host", "c-host", "d-host"],
                        # Deprecated single host: must not appear in the rendered rules.
                        "host": "old-host",
                    },
                },
            },
            show_only=["templates/webserver/webserver-ingress.yaml"],
        )
        assert jmespath.search("spec.rules[*].host", docs[0]) == ["*.a-host", "b-host", "c-host", "d-host"]
        # String hosts all share the single top-level TLS secret.
        assert jmespath.search("spec.tls[*]", docs[0]) == [
            {"hosts": ["*.a-host", "b-host", "c-host", "d-host"], "secretName": "secret"}
        ]
def test_should_ingress_deprecated_host_and_top_level_tls_still_work(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {
"web": {
"enabled": True,
"tls": {"enabled": True, "secretName": "supersecret"},
"host": "old-host",
},
},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert (
["old-host"]
== jmespath.search("spec.rules[*].host", docs[0])
== jmespath.search("spec.tls[0].hosts", docs[0])
)
def test_should_ingress_host_entry_not_exist(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {
"web": {
"enabled": True,
}
},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert not jmespath.search("spec.rules[*].host", docs[0])
@pytest.mark.parametrize(
("global_value", "web_value", "expected"),
[
(None, None, False),
(None, False, False),
(None, True, True),
(False, None, False),
(True, None, True),
(False, True, True), # We will deploy it if _either_ are true
(True, False, True),
],
)
def test_ingress_created(self, global_value, web_value, expected):
values = {"airflowVersion": "2.10.5", "ingress": {}}
if global_value is not None:
values["ingress"]["enabled"] = global_value
if web_value is not None:
values["ingress"]["web"] = {"enabled": web_value}
if values["ingress"] == {}:
del values["ingress"]
docs = render_chart(values=values, show_only=["templates/webserver/webserver-ingress.yaml"])
assert expected == (len(docs) == 1)
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"ingress": {"enabled": True},
"webserver": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
    def test_can_ingress_hosts_be_templated(self):
        """Host names may contain Helm template expressions; they must render against
        the release namespace and arbitrary values (scalar, list index, dict key)."""
        docs = render_chart(
            values={
                "airflowVersion": "2.10.5",
                "testValues": {
                    "scalar": "aa",
                    "list": ["bb", "cc"],
                    "dict": {
                        "key": "dd",
                    },
                },
                "ingress": {
                    "web": {
                        "enabled": True,
                        "hosts": [
                            {"name": "*.{{ .Release.Namespace }}.example.com"},
                            {"name": "{{ .Values.testValues.scalar }}.example.com"},
                            {"name": "{{ index .Values.testValues.list 1 }}.example.com"},
                            {"name": "{{ .Values.testValues.dict.key }}.example.com"},
                        ],
                    },
                },
            },
            show_only=["templates/webserver/webserver-ingress.yaml"],
            namespace="airflow",
        )
        # Each template above must resolve: namespace, scalar, list[1], dict["key"].
        assert jmespath.search("spec.rules[*].host", docs[0]) == [
            "*.airflow.example.com",
            "aa.example.com",
            "cc.example.com",
            "dd.example.com",
        ]
def test_backend_service_name(self):
docs = render_chart(
values={"airflowVersion": "2.10.5", "ingress": {"web": {"enabled": True}}},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "release-name-webserver"
)
def test_backend_service_name_with_fullname_override(self):
docs = render_chart(
values={
"airflowVersion": "2.10.5",
"fullnameOverride": "test-basic",
"useStandardNaming": True,
"ingress": {"web": {"enabled": True}},
},
show_only=["templates/webserver/webserver-ingress.yaml"],
)
assert (
jmespath.search("spec.rules[0].http.paths[0].backend.service.name", docs[0])
== "test-basic-webserver"
)
| TestIngressWeb |
python | mlflow__mlflow | mlflow/server/auth/entities.py | {
"start": 2883,
"end": 3856
} | class ____:
    def __init__(
        self,
        experiment_id,
        user_id,
        permission,
    ):
        """Store the (experiment, user) pair and the permission granted on it."""
        self._experiment_id = experiment_id
        self._user_id = user_id
        self._permission = permission
    @property
    def experiment_id(self):
        # Read-only: the experiment this permission applies to.
        return self._experiment_id
    @property
    def user_id(self):
        # Read-only: the user this permission is granted to.
        return self._user_id
    @property
    def permission(self):
        # The permission level; unlike the ids, it is mutable via the setter below.
        return self._permission
    @permission.setter
    def permission(self, permission):
        self._permission = permission
def to_json(self):
return {
"experiment_id": self.experiment_id,
"user_id": self.user_id,
"permission": self.permission,
}
@classmethod
def from_json(cls, dictionary):
return cls(
experiment_id=dictionary["experiment_id"],
user_id=dictionary["user_id"],
permission=dictionary["permission"],
)
| ExperimentPermission |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 11111,
"end": 11758
} | class ____(BaseSafeMigrationTest):
app = "good_flow_delete_pending_with_fk_constraints_app"
migrate_from = "0001"
migrate_to = "0003"
def test(self) -> None:
self._run_migration(self.app, "0001_initial")
assert f"{self.app}_testtable" in connection.introspection.table_names()
self._run_migration(self.app, "0002_remove_constraints_and_pending")
assert f"{self.app}_testtable" in connection.introspection.table_names()
self._run_migration(self.app, "0003_delete")
assert f"{self.app}_testtable" not in connection.introspection.table_names()
| DeletionModelGoodDeleteRemoveFKConstraints |
python | gevent__gevent | src/gevent/_config.py | {
"start": 12590,
"end": 13112
} | class ____(IntSettingMixin, Setting):
name = 'trace_malloc'
environment_key = 'PYTHONTRACEMALLOC'
default = False
desc = """\
Should FFI objects track their allocation?
This is only useful for low-level debugging.
On Python 3, this environment variable is built in to the
interpreter, and it may also be set with the ``-X
tracemalloc`` command line argument.
On Python 2, gevent interprets this argument and adds extra
tracking information for FFI objects.
"""
| TraceMalloc |
python | google__pytype | pytype/tests/test_variable_annotations.py | {
"start": 119,
"end": 1175
} | class ____(test_base.BaseTest):
  """Tests for PEP526 variable annotations."""
  def test_pyi_annotations(self):
    """Annotations from a .pyi stub (module- and class-level) are enforced at call sites."""
    with test_utils.Tempdir() as d:
      d.create_file(
          "foo.pyi",
          """
        from typing import List
        x: int
        y: List[int]
        class A:
          a: int
          b: str
      """,
      )
      # f(foo.y) and f(obj.b) mismatch the declared stub types -> wrong-arg-types.
      errors = self.CheckWithErrors(
          """
        import foo
        def f(x: int) -> None:
          pass
        obj = foo.A()
        f(foo.x)
        f(foo.y)  # wrong-arg-types[e1]
        f(obj.a)
        f(obj.b)  # wrong-arg-types[e2]
      """,
          pythonpath=[d.path],
      )
      self.assertErrorRegexes(errors, {"e1": r"int.*list", "e2": r"int.*str"})
  def test_typevar_annot_with_subclass(self):
    """A TypeVar annotation in a generic base resolves to the subclass's parameter (str)."""
    self.Check("""
      from typing import Generic, TypeVar
      T = TypeVar('T')
      class Foo(Generic[T]):
        def f(self):
          x: T = None
          return x
      class Bar(Foo[str]):
        pass
      assert_type(Bar().f(), str)
    """)
| VariableAnnotationsBasicTest |
python | django__django | tests/forms_tests/field_tests/test_typedchoicefield.py | {
"start": 150,
"end": 3602
} | class ____(SimpleTestCase):
    def test_typedchoicefield_1(self):
        """A listed choice is coerced to int; an unlisted value raises ValidationError."""
        f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
        self.assertEqual(1, f.clean("1"))
        msg = "'Select a valid choice. 2 is not one of the available choices.'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("2")
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean("1"))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True,
# remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertTrue(f.clean("-1"))
    def test_typedchoicefield_4(self):
        """A valid choice whose coercion fails still surfaces as a choice error."""
        # Even more weirdness: if you have a valid choice but your coercion
        # function can't coerce, you'll still get a validation error. Don't do
        # this!
        f = TypedChoiceField(choices=[("A", "A"), ("B", "B")], coerce=int)
        msg = "'Select a valid choice. B is not one of the available choices.'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("B")
        # Required fields require values
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
    def test_typedchoicefield_5(self):
        """A non-required field accepts '' and returns it unchanged."""
        # Non-required fields aren't required
        f = TypedChoiceField(
            choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False
        )
        self.assertEqual("", f.clean(""))
    # If you want cleaning an empty value to return a different type, tell
    # the field
    def test_typedchoicefield_6(self):
        """With `empty_value=None`, cleaning '' returns None instead of ''."""
        f = TypedChoiceField(
            choices=[(1, "+1"), (-1, "-1")],
            coerce=int,
            required=False,
            empty_value=None,
        )
        self.assertIsNone(f.clean(""))
    def test_typedchoicefield_has_changed(self):
        """has_changed compares raw values and must not trigger required validation."""
        # has_changed should not trigger required validation
        f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
        self.assertFalse(f.has_changed(None, ""))
        self.assertFalse(f.has_changed(1, "1"))
        self.assertFalse(f.has_changed("1", "1"))
        # Empty-value handling: None initial vs "" data counts as unchanged.
        f = TypedChoiceField(
            choices=[("", "---------"), ("a", "a"), ("b", "b")],
            coerce=str,
            required=False,
            initial=None,
            empty_value=None,
        )
        self.assertFalse(f.has_changed(None, ""))
        self.assertTrue(f.has_changed("", "a"))
        self.assertFalse(f.has_changed("a", "a"))
    def test_typedchoicefield_special_coerce(self):
        """
        A coerce function which results in a value not present in choices
        should raise an appropriate error (#21397).
        """
        # Maps "2" -> Decimal("1.2"): the coerced value is never in `choices`.
        def coerce_func(val):
            return decimal.Decimal("1.%s" % val)
        f = TypedChoiceField(
            choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True
        )
        self.assertEqual(decimal.Decimal("1.2"), f.clean("2"))
        with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
            f.clean("")
        msg = "'Select a valid choice. 3 is not one of the available choices.'"
        with self.assertRaisesMessage(ValidationError, msg):
            f.clean("3")
| TypedChoiceFieldTest |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 63944,
"end": 64410
} | class ____:
    def __init__(self, keys, instance):
        """Remember the lookup kwargs (`keys`) that should match the canned `instance`."""
        self.keys = keys
        self.instance = instance
def get_or_create(self, **kwargs):
defaults = kwargs.pop('defaults', {})
if kwargs == self.keys:
return self.instance, False
kwargs.update(defaults)
instance = FakeModel.create(**kwargs)
instance.id = 2
return instance, True
    def using(self, db):
        # Database routing is irrelevant for the fake manager; chainable no-op.
        return self
| BetterFakeModelManager |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_required_Sphinx.py | {
"start": 1507,
"end": 1838
} | class ____:
    """Example of a class function trying to use `type` as return
    documentation in a Sphinx style docstring
    """
    def test_ignores_non_property_return_type_sphinx(  # [missing-return-doc, missing-return-type-doc]
        self,
    ):
        """docstring ...
        :type: int
        """
        return 10  # NOTE: fixture — ':type:' is not valid return doc, hence the expected messages above
| Foo |
python | wandb__wandb | wandb/vendor/pygments/lexers/graph.py | {
"start": 457,
"end": 2370
} | class ____(RegexLexer):
"""
For `Cypher Query Language
<http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
For the Cypher version in Neo4J 2.0
.. versionadded:: 2.0
"""
name = 'Cypher'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*\n', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by)\b', Keyword),
],
'clauses': [
# TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
(r'(all|any|as|asc|create|create\s+unique|delete|'
r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
r'order\s+by|return|set|skip|single|start|union|where|with)\b',
Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
| CypherLexer |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 2728,
"end": 3128
} | class ____(BaseIO):
fname = "__test__.csv"
def setup(self):
rng = date_range("2000", periods=100_000, freq="s")
self.data = DataFrame({"a": 1}, index=rng)
    def time_frame_date_formatting_index(self):
        # Benchmark: CSV write where the DatetimeIndex is rendered via an explicit date_format.
        self.data.to_csv(self.fname, date_format="%Y-%m-%d %H:%M:%S")
    def time_frame_date_no_format_index(self):
        # Benchmark baseline: same write using the default index formatting.
        self.data.to_csv(self.fname)
| ToCSVDatetimeIndex |
python | pytorch__pytorch | torch/_inductor/runtime/triton_heuristics.py | {
"start": 139070,
"end": 139280
} | class ____(GridExpr):
    def generate(self, meta: dict[str, int]) -> None:
        # One ceil-divided launch-grid dimension per axis; XBLOCK/YBLOCK are the
        # block sizes from the kernel meta (assumed present — meta.get returns None otherwise).
        self.x_grid = self.ceildiv("xnumel", meta.get("XBLOCK"))
        self.y_grid = self.ceildiv("ynumel", meta.get("YBLOCK"))
| Grid2D |
python | ansible__ansible | lib/ansible/galaxy/dependency_resolution/dataclasses.py | {
"start": 23712,
"end": 24738
} | class ____(
_ComputedReqKindsMixin,
CandidateNamedTuple,
):
    """A concrete collection candidate with its version resolved."""
    def __new__(cls, *args: object, **kwargs: object) -> t.Self:
        # NamedTuple subclasses build their state in __new__; delegate to the tuple base.
        self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
        return self
    def __init__(self, *args: object, **kwargs: object) -> None:
        # Field data was already consumed by __new__; only run the mixin initializer here.
        super(Candidate, self).__init__()
    def with_signatures_repopulated(self) -> Candidate:
        """Populate a new Candidate instance with Galaxy signatures.
        :raises AnsibleAssertionError: If the supplied candidate is not sourced from a Galaxy-like index.
        """
        if self.type != 'galaxy':
            raise AnsibleAssertionError(f"Invalid collection type for {self!r}: unable to get signatures from a galaxy server.")
        signatures = self.src.get_collection_signatures(self.namespace, self.name, self.ver)
        # Candidates are immutable (NamedTuple), so return a fresh one with the merged signature set.
        return self.__class__(self.fqcn, self.ver, self.src, self.type, frozenset([*self.signatures, *signatures]))
| Candidate |
python | pydata__xarray | xarray/computation/ops.py | {
"start": 8912,
"end": 9137
} | class ____:
__slots__ = ()
    def __init_subclass__(cls, **kwargs):
        # Hook subclass creation: when the subclass defines `_reduce_method`,
        # inject the generated reduce methods onto it.
        super().__init_subclass__(**kwargs)
        if getattr(cls, "_reduce_method", None):
            inject_reduce_methods(cls)
| IncludeReduceMethods |
python | kubernetes-client__python | kubernetes/client/models/v1_container_state_waiting.py | {
"start": 383,
"end": 4375
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'reason': 'str'
}
attribute_map = {
'message': 'message',
'reason': 'reason'
}
    def __init__(self, message=None, reason=None, local_vars_configuration=None):  # noqa: E501
        """V1ContainerStateWaiting - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._message = None
        self._reason = None
        self.discriminator = None
        # Assign via the property setters so any generated validation applies.
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
    # Property pair generated from the OpenAPI spec; backing field is _message.
    @property
    def message(self):
        """Gets the message of this V1ContainerStateWaiting.  # noqa: E501
        Message regarding why the container is not yet running.  # noqa: E501
        :return: The message of this V1ContainerStateWaiting.  # noqa: E501
        :rtype: str
        """
        return self._message
    @message.setter
    def message(self, message):
        """Sets the message of this V1ContainerStateWaiting.
        Message regarding why the container is not yet running.  # noqa: E501
        :param message: The message of this V1ContainerStateWaiting.  # noqa: E501
        :type: str
        """
        self._message = message
    # Property pair generated from the OpenAPI spec; backing field is _reason.
    @property
    def reason(self):
        """Gets the reason of this V1ContainerStateWaiting.  # noqa: E501
        (brief) reason the container is not yet running.  # noqa: E501
        :return: The reason of this V1ContainerStateWaiting.  # noqa: E501
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """Sets the reason of this V1ContainerStateWaiting.
        (brief) reason the container is not yet running.  # noqa: E501
        :param reason: The reason of this V1ContainerStateWaiting.  # noqa: E501
        :type: str
        """
        self._reason = reason
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared fields; recursively convert nested models/containers.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert model elements; leave plain values untouched.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert model values inside dicts; keys pass through unchanged.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # pprint gives stable, readable multi-line formatting of the dict form.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Generated code compares by dict form; non-model operands are simply unequal.
        if not isinstance(other, V1ContainerStateWaiting):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ContainerStateWaiting):
            return True
        return self.to_dict() != other.to_dict()
| V1ContainerStateWaiting |
python | huggingface__transformers | src/transformers/models/dia/generation_dia.py | {
"start": 1440,
"end": 22024
} | class ____(GenerationMixin):
# Indicates CFG which needs preparation to be properly handled by repeats
_uses_cfg = None
    def _get_logits_processor(
        self,
        generation_config: GenerationConfig,
        input_ids_seq_length: Optional[int] = None,
        encoder_input_ids: Optional[torch.LongTensor] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        device: Optional[str] = None,
        model_kwargs: Optional[dict[str, Any]] = None,
        negative_prompt_ids: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
    ) -> LogitsProcessorList:
        """Build the processor list with Dia-specific CFG/temperature/EOS handling.

        Temperature and guidance are removed from `generation_config` before calling
        the base implementation so the Dia variants can be inserted at the required
        positions (CFG first, EOS delay-pattern last); the config is restored afterwards.
        """
        # Need either custom order or custom processor instead
        # (Temporarily disabling those for the super function)
        original_guidance_scale = generation_config.guidance_scale
        original_temperature = generation_config.temperature
        generation_config.guidance_scale = None
        generation_config.temperature = None
        # Get base processors and those we can integrate easily
        custom_processors = LogitsProcessorList()
        if original_temperature is not None and original_temperature != 1.0:
            custom_processors.append(TemperatureLogitsWarper(original_temperature))
        custom_processors.append(
            DiaEOSChannelFilterLogitsProcessor(
                num_channels=len(self.config.delay_pattern),
                eos_token_id=self.config.eos_token_id,
            )
        )
        merged_processors = super()._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=encoder_input_ids,
            prefix_allowed_tokens_fn=None,
            logits_processor=custom_processors,
            device=device,
            model_kwargs=model_kwargs,
            negative_prompt_ids=negative_prompt_ids,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )
        # Custom processors we need at specific positions
        if original_guidance_scale is not None and original_guidance_scale != 1:
            cfg_processor = DiaClassifierFreeGuidanceLogitsProcessor(
                guidance_scale=original_guidance_scale,
                guidance_top_k=generation_config.top_k,
            )
            merged_processors.insert(0, cfg_processor)
        merged_processors.append(
            DiaEOSDelayPatternLogitsProcessor(
                delay_pattern=self.config.delay_pattern,
                eos_token_id=self.config.eos_token_id,
                max_generation_len=generation_config.max_length,
                device=device,
            )
        )
        # Enable temporarily disabled values back
        generation_config.guidance_scale = original_guidance_scale
        generation_config.temperature = original_temperature
        return merged_processors
    def _prepare_generation_config(
        self, generation_config: Optional[GenerationConfig], use_model_defaults: Optional[bool] = None, **kwargs: Any
    ) -> tuple[GenerationConfig, dict]:
        """Extend the base config preparation: extend max_length by the longest channel
        delay and record whether CFG is active (`self._uses_cfg`)."""
        generation_config, model_kwargs = super()._prepare_generation_config(
            generation_config, use_model_defaults, **kwargs
        )
        # We allow generation up to max length + max delay pattern
        # (will revert back to max length after generation)
        generation_config.max_length += max(self.config.delay_pattern)
        # Internal flag to indicate CFG that needs to prepare unconditioned input
        self._uses_cfg = generation_config.guidance_scale is not None and generation_config.guidance_scale != 1
        return generation_config, model_kwargs
    def _prepare_model_inputs(
        self,
        inputs: Optional[torch.Tensor] = None,
        bos_token_id: Optional[torch.Tensor] = None,
        model_kwargs: Optional[dict[str, torch.Tensor]] = None,
    ) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]:
        """Prepare encoder inputs; under CFG, append an all-zeros unconditioned copy
        along the batch dimension (attention mask is duplicated to match)."""
        inputs, input_name, model_kwargs = super()._prepare_model_inputs(
            inputs=inputs,
            bos_token_id=bos_token_id,
            model_kwargs=model_kwargs,
        )
        # If CFG is requested we fill in the unconditioned parts
        if self._uses_cfg:
            unconditioned_inputs = torch.zeros_like(inputs)
            inputs = torch.cat([inputs, unconditioned_inputs], dim=0)
            if model_kwargs.get("attention_mask", None) is not None:
                model_kwargs["attention_mask"] = model_kwargs["attention_mask"].repeat(2, 1)
        return inputs, input_name, model_kwargs
    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        model_input_name: str,
        model_kwargs: dict[str, torch.Tensor],
        decoder_start_token_id: torch.Tensor,
        device: Optional[torch.device] = None,
    ) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]:
        """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
        # 1. Check whether the user has defined `decoder_input_ids` and `decoder_attention_mask`; if not error out
        decoder_input_ids = decoder_attention_mask = None
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            decoder_input_ids = model_kwargs.pop("decoder_input_ids")
        if model_kwargs is not None and "decoder_attention_mask" in model_kwargs:
            decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
        # We allow generating without preparation (no proper delay) but discourage it
        if decoder_input_ids is None or decoder_attention_mask is None:
            logger.warning_once(
                "In order to generate with Dia, we need the processed audio input: Got `decoder_input_ids`:"
                f" {decoder_input_ids is not None} and got `decoder_attention_mask`={decoder_attention_mask is not None}."
                f" This can be achieved via the [`DiaProcessor`] but now defaulting to non-delayed generation."
            )
            num_channels = self.config.decoder_config.num_channels
            # Under CFG the batch was doubled above; the decoder works on the real batch.
            real_batch_size = batch_size // 2 if self._uses_cfg else batch_size
            if decoder_input_ids is None:
                decoder_input_ids = torch.full(
                    (real_batch_size, 1, num_channels), decoder_start_token_id, dtype=torch.long, device=device
                )
            decoder_attention_mask = torch.ones(
                size=(real_batch_size, decoder_input_ids.shape[1]), dtype=torch.long, device=device
            )
        # 2. Determine the valid input and what works as mask within the input
        delay_mask = decoder_input_ids.long()
        # Trailing pad positions (channel 0 == pad) are mask-only, not real input.
        valid_input_size = (
            decoder_input_ids.shape[1] - (decoder_input_ids[:, :, 0] == self.config.pad_token_id).sum(dim=-1).max()
        )
        decoder_input_ids = delay_mask[:, :valid_input_size].transpose(1, 2).long()
        decoder_attention_mask = decoder_attention_mask[:, :valid_input_size].long()
        # 3. Overwrite into model kwargs
        model_kwargs["decoder_attention_mask"] = decoder_attention_mask
        model_kwargs["decoder_delay_mask"] = delay_mask
        return decoder_input_ids, model_kwargs
    def prepare_inputs_for_generation(
        self,
        input_ids,
        encoder_outputs=None,  # Using this to easily get the batch size
        decoder_delay_mask=None,
        **kwargs,
    ):
        """Per-step input preparation: reshape the flat 2D ids back to (batch, seq,
        channels), apply the delay mask, trim under cache, and duplicate for CFG."""
        # Reshape decoder input_ids to 3D to be compile friendly and to fit the expected model input shape
        batch_size = encoder_outputs[0].shape[0] // 2 if self._uses_cfg else encoder_outputs[0].shape[0]
        input_ids = input_ids.reshape(batch_size, self.config.decoder_config.num_channels, -1).transpose(1, 2)
        # Base method handles most things except CFG and the delay pattern mask
        model_inputs = super().prepare_inputs_for_generation(input_ids, encoder_outputs=encoder_outputs, **kwargs)
        # Post processing for CFG and overwriting via delay pattern mask
        # 1. Delay pattern mask -- force tokens if not allowed to predict (!= pad_token in mask)
        model_inputs["decoder_input_ids"] = self.apply_delay_mask(
            input_ids, self.config.pad_token_id, decoder_delay_mask
        )
        # Depending on cache usage we need to pass all or just one
        if model_inputs.get("use_cache", False) and model_inputs["cache_position"][0] > 0:
            model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"][:, -1, :][:, None, :]
        # Be compile friendly
        model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"].contiguous()
        # 2. Apply CFG duplication if needed
        if self._uses_cfg:
            for key in ["decoder_input_ids", "decoder_attention_mask", "decoder_position_ids"]:
                if model_inputs.get(key, None) is not None:
                    # double first dimension and keep everything else the same
                    repeat_pattern = tuple([2] + [1] * (model_inputs[key].ndim - 1))
                    model_inputs[key] = model_inputs[key].repeat(*repeat_pattern)
        return model_inputs
@staticmethod
def apply_delay_mask(input_ids: torch.Tensor, pad_id: int, delay_mask: Optional[torch.Tensor]) -> torch.Tensor:
if delay_mask is None:
return input_ids
mask_len = min(input_ids.shape[1], delay_mask.shape[1])
valid_mask = delay_mask[:, :mask_len, :]
valid_input = input_ids[:, :mask_len, :]
# Overwrite the respective parts of the input
input_ids[:, :mask_len, :] = torch.where(valid_mask == pad_id, valid_input, valid_mask)
return input_ids
    def _main_generate_loop(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
        synced_gpus: Optional[bool] = None,
        assistant_model: Optional["PreTrainedModel"] = None,
        streamer: Optional["BaseStreamer"] = None,
        negative_prompt_ids: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        use_model_defaults: Optional[bool] = None,
        custom_generate: Optional[str] = None,
        **kwargs,
    ):
        """Adapted copy of `GenerationMixin.generate` up to dispatching into `_sample`.

        Only greedy search and sampling are supported; decoder ids are flattened to 2D
        (batch*channels, seq) before the inner loop, and a prefill forward pass is run
        so `_sample` can continue from its outputs.
        """
        # ********** mostly taken from main generate function up to calling the different methods (see NOTE) **********
        # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
        generation_mode_kwargs = self._extract_generation_mode_kwargs(
            custom_generate,
            kwargs,
            synced_gpus,
            assistant_model,
            streamer,
        )
        generation_config, model_kwargs = self._prepare_generation_config(
            generation_config, use_model_defaults, **kwargs
        )
        generation_mode = generation_config.get_generation_mode(assistant_model)
        if generation_mode not in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
            raise ValueError(
                "Got incompatible mode for generation, should be one of greedy or sampling. "
                "Ensure that beam search is de-activated by setting `num_beams=1`."
            )
        self._validate_model_kwargs(model_kwargs.copy())
        self._validate_generation_mode(generation_mode, generation_config, generation_mode_kwargs)
        # 2. Set generation parameters if not already defined
        if synced_gpus is None:
            synced_gpus = (is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
        # 3. Define model inputs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = inputs_tensor.shape[0]
        device = inputs_tensor.device
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=device)
        # 4. Define other model kwargs
        if "encoder_outputs" not in model_kwargs:
            # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
            model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
                inputs_tensor, model_kwargs, model_input_name, generation_config
            )
        # 5. Prepare `input_ids` which will be used for auto-regressive generation
        input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
            batch_size=batch_size,
            model_input_name=model_input_name,
            model_kwargs=model_kwargs,
            decoder_start_token_id=generation_config._decoder_start_token_tensor,
            device=inputs_tensor.device,
        )
        if generation_config.token_healing:
            input_ids = self.heal_tokens(input_ids, generation_mode_kwargs.get("tokenizer"))
        if streamer is not None:
            streamer.put(input_ids.cpu())
        # 6. Prepare `max_length` depending on other stopping criteria.
        # NOTE: incorrect `input_ids.shape[1]` previously
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )
        # If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
        # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
        # dynamically overrides this value as it can need more than the last token logits
        if self._supports_logits_to_keep() and "logits_to_keep" not in model_kwargs:
            model_kwargs["logits_to_keep"] = 1
        self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
        # 7. Prepare the cache.
        # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
        # - different models have a different cache name expected by the model (default = "past_key_values")
        # - `max_length`, prepared above, is used to determine the maximum cache length
        max_cache_length = generation_config.max_length - 1
        if (
            inputs_tensor.shape[1] != input_ids_length
            and model_input_name == "inputs_embeds"
            and not self.config.is_encoder_decoder
        ):
            max_cache_length += inputs_tensor.shape[1]
        self._prepare_cache_for_generation(
            generation_config, model_kwargs, generation_mode, batch_size, max_cache_length
        )
        # 8. prepare logits processors and stopping criteria
        prepared_logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
            device=inputs_tensor.device,
            model_kwargs=model_kwargs,
            negative_prompt_ids=negative_prompt_ids,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )
        prepared_stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config,
            stopping_criteria=stopping_criteria,
            tokenizer=generation_mode_kwargs.get("tokenizer"),
        )
        # Set model_kwargs `use_cache` so we can use it later in forward runs
        model_kwargs["use_cache"] = generation_config.use_cache
        # ******************* taken from main generate function up to calling the different methods *******************
        # Prepare inner 2D logic in generation loop
        input_ids = input_ids.reshape(-1, input_ids.shape[-1])
        model_kwargs = self._get_initial_cache_position(input_ids.shape[1], input_ids.device, model_kwargs)
        # prepare model inputs
        model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # 10. Prefill
        model_inputs.update({"output_attentions": generation_config.output_attentions})
        model_inputs.update({"output_hidden_states": generation_config.output_hidden_states})
        outputs = self(**model_inputs, return_dict=True)
        # 11. expand input_ids with `num_return_sequences` additional sequences per batch
        if generation_config.num_return_sequences > 1:
            raise ValueError("`num_return_sequences>1` is incompatible with Dia.")
        # 12. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)
        return self._sample(
            input_ids,
            logits_processor=prepared_logits_processor,
            stopping_criteria=prepared_stopping_criteria,
            generation_config=generation_config,
            prefill_outputs=outputs,
            **generation_mode_kwargs,
            **model_kwargs,
        )
@torch.no_grad()
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
use_model_defaults: Optional[bool] = None,
custom_generate: Optional[str] = None,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
# We expect the initial input ids to be the complete mask (delayed input)
delay_mask = kwargs.get("decoder_input_ids")
if delay_mask is not None:
delay_mask = delay_mask.clone()
output = self._main_generate_loop(
inputs=inputs,
generation_config=generation_config,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
synced_gpus=synced_gpus,
assistant_model=assistant_model,
streamer=streamer,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
use_model_defaults=use_model_defaults,
custom_generate=custom_generate,
**kwargs,
)
return_dict_in_generate = not isinstance(output, torch.Tensor)
if return_dict_in_generate:
output_sequences = output.sequences
else:
output_sequences = output
# Reshape from 2D (bsz * channels, seq_len) to 3D (bsz, seq_len, channels)
num_channels = self.config.decoder_config.num_channels
bsz = output_sequences.shape[0] // num_channels
output_sequences = output_sequences.reshape(bsz, num_channels, -1).transpose(1, 2)
# Apply delay mask
output_sequences = self.apply_delay_mask(output_sequences, self.config.pad_token_id, delay_mask)
if return_dict_in_generate:
output.sequences = output_sequences
else:
output = output_sequences
return output
| DiaGenerationMixin |
python | jina-ai__jina | jina/jaml/__init__.py | {
"start": 17578,
"end": 17916
} | class ____(type):
"""
Metaclass for :class:`JAMLCompatible`.
It enables any class inherit from :class:`JAMLCompatible` to auto-register itself at :class:`JAML`
"""
def __new__(cls, *args, **kwargs):
_cls = super().__new__(cls, *args, **kwargs)
JAML.register(_cls)
return _cls
| JAMLCompatibleType |
python | ray-project__ray | python/ray/tests/test_actor_retry_1.py | {
"start": 89,
"end": 400
} | class ____:
def __init__(self) -> None:
self.count = 0
def increment(self) -> int:
c = self.count
self.count += 1
return c
def get_count(self) -> int:
return self.count
# TODO: also do work for async and threaded actors
@ray.remote(max_task_retries=3)
| Counter |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/global_shuffle_test.py | {
"start": 1430,
"end": 7243
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for global shuffling of tf.data datasets."""
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
dataset_range=[1, 100],
seed=[None, 42],
use_tensor_seed=[True, False],
prefetch=[True, False])))
def testRange(
self,
dataset_range: int,
seed: Optional[int],
use_tensor_seed: bool,
prefetch: bool):
dataset = dataset_ops.Dataset.range(dataset_range)
if prefetch:
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
seed = (constant_op.constant(seed, dtype=dtypes.int64)
if seed and use_tensor_seed else seed)
dataset = global_shuffle_op._global_shuffle(dataset, seed=seed)
dataset = dataset.repeat(3)
output = self.getDatasetOutput(dataset, requires_initialization=True)
self.assertCountEqual(output, list(range(dataset_range)) * 3)
output_per_iteration = [
output[i : i + dataset_range]
for i in range(0, len(output), dataset_range)]
self.assertCountEqual(output_per_iteration[0], list(range(dataset_range)))
self.assertCountEqual(output_per_iteration[1], list(range(dataset_range)))
self.assertCountEqual(output_per_iteration[2], list(range(dataset_range)))
if dataset_range > 1:
self.assertNotEqual(output_per_iteration[0], output_per_iteration[1])
self.assertNotEqual(output_per_iteration[0], output_per_iteration[2])
self.assertNotEqual(output_per_iteration[1], output_per_iteration[2])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(dataset_range=[1, 100], seed=[None, 42])))
def testNegativeRange(self, dataset_range: int, seed: Optional[int]):
dataset = dataset_ops.Dataset.range(dataset_range, -dataset_range, -1)
dataset = global_shuffle_op._global_shuffle(dataset)
dataset = dataset.repeat(3)
output = self.getDatasetOutput(dataset, requires_initialization=True)
self.assertCountEqual(
output, list(range(dataset_range, -dataset_range, -1)) * 3)
output_per_iteration = [
output[i : i + dataset_range * 2]
for i in range(0, len(output), dataset_range * 2)]
self.assertCountEqual(output_per_iteration[0],
list(range(dataset_range, -dataset_range, -1)))
self.assertCountEqual(output_per_iteration[1],
list(range(dataset_range, -dataset_range, -1)))
self.assertCountEqual(output_per_iteration[2],
list(range(dataset_range, -dataset_range, -1)))
if dataset_range > 1:
self.assertNotEqual(output_per_iteration[0], output_per_iteration[1])
self.assertNotEqual(output_per_iteration[0], output_per_iteration[2])
self.assertNotEqual(output_per_iteration[1], output_per_iteration[2])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleRepeatEpochs(self, reshuffle: bool, seed: Optional[int]):
dataset_range = 100
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle)
dataset = dataset.repeat(2)
output = self.getDatasetOutput(dataset, requires_initialization=True)
self.assertCountEqual(output, list(range(dataset_range)) * 2)
output_per_iteration = [
output[i : i + dataset_range]
for i in range(0, len(output), dataset_range)]
if reshuffle:
self.assertNotEqual(output_per_iteration[0], output_per_iteration[1])
else:
self.assertEqual(output_per_iteration[0], output_per_iteration[1])
# Creating multiple iterators with the same seed is only supported in v2 API.
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleIterationEpochs(self, reshuffle: bool, seed: Optional[int]):
# TensorFlow unit tests set the global graph seed. We unset it here so that
# we can control determinism via the `seed` parameter.
random_seed.set_random_seed(None)
dataset_range = 100
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle)
first_epoch = self.getDatasetOutput(dataset)
second_epoch = self.getDatasetOutput(dataset)
if reshuffle:
self.assertNotEqual(first_epoch, second_epoch)
else:
self.assertEqual(first_epoch, second_epoch)
@combinations.generate(test_base.default_test_combinations())
def testEmptyDataset(self):
dataset = dataset_ops.Dataset.range(0)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"`global_shuffle` requires the input dataset to have a non-empty "
"finite cardinality."):
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testUnsupportedDataset(self):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.shuffle(buffer_size=1)
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"`global_shuffle` requires all upstream transformations be compatible "
"with random access."):
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
| GlobalShuffleTest |
python | pytorch__pytorch | torch/testing/_internal/common_subclass.py | {
"start": 282,
"end": 2230
} | class ____(torch.Tensor):
@staticmethod
def __new__(cls, *args, **kwargs):
t, kwargs = cls.get_wrapper_properties(*args, **kwargs)
if "size" not in kwargs:
size = t.size()
else:
size = kwargs["size"]
del kwargs["size"]
if "dtype" not in kwargs:
kwargs["dtype"] = t.dtype
if "layout" not in kwargs:
kwargs["layout"] = t.layout
if "device" not in kwargs:
kwargs["device"] = t.device
if "requires_grad" not in kwargs:
kwargs["requires_grad"] = False
# Ignore memory_format and pin memory for now as I don't know how to
# safely access them on a Tensor (if possible??)
wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs)
wrapper._validate_methods()
return wrapper
@classmethod
def get_wrapper_properties(cls, *args, **kwargs):
# Should return both an example Tensor and a dictionary of kwargs
# to override any of that example Tensor's properly.
# This is very similar to the `t.new_*(args)` API
raise NotImplementedError("You need to implement get_wrapper_properties")
def _validate_methods(self):
# Skip this if not in debug mode?
# Changing these on the python side is wrong as it would not be properly reflected
# on the c++ side
# This doesn't catch attributes set in the __init__
forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"]
for el in forbidden_overrides:
if getattr(self.__class__, el) is not getattr(torch.Tensor, el):
raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the "
f"property {el} but this is not allowed as such change would "
"not be reflected to c++ callers.")
| WrapperTensor |
python | huggingface__transformers | tests/models/metaclip_2/test_modeling_metaclip_2.py | {
"start": 15600,
"end": 18378
} | class ____(MetaClip2ModelTesterMixin, unittest.TestCase):
all_model_classes = (MetaClip2TextModel, MetaClip2TextModelWithProjection) if is_torch_available() else ()
model_split_percents = [0.5, 0.8, 0.9]
def setUp(self):
self.model_tester = MetaClip2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=MetaClip2TextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_projection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_projection(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="MetaClip2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
model = MetaClip2TextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
def test_model_with_projection_from_pretrained(self):
model_name = "facebook/metaclip-2-worldwide-huge-quickgelu"
model = MetaClip2TextModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "text_projection"))
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
@slow
@is_flaky()
def test_eager_matches_sdpa_inference(self, *args):
# adding only flaky decorator here and call the parent test method
return getattr(ModelTesterMixin, self._testMethodName)(self)
def test_sdpa_can_dispatch_composite_models(self):
super().test_sdpa_can_dispatch_composite_models()
def test_sdpa_can_dispatch_on_flash(self):
self.skipTest(
reason="MetaClip2TextModel has two attention masks: `causal_attention_mask` and `attention_mask`"
)
| MetaClip2TextModelTest |
python | spyder-ide__spyder | spyder/plugins/updatemanager/plugin.py | {
"start": 586,
"end": 4869
} | class ____(SpyderPluginV2):
NAME = 'update_manager'
REQUIRES = [Plugins.Preferences]
OPTIONAL = [Plugins.Application, Plugins.MainMenu, Plugins.StatusBar]
CONTAINER_CLASS = UpdateManagerContainer
CONF_SECTION = 'update_manager'
CONF_FILE = False
CAN_BE_DISABLED = False
# ---- SpyderPluginV2 API
# -------------------------------------------------------------------------
@staticmethod
def get_name():
return _('Update Manager')
@classmethod
def get_icon(cls):
return cls.create_icon('genprefs')
@staticmethod
def get_description():
return _('Manage application updates.')
# ---- Plugin initialization
def on_initialize(self):
pass
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
# Register conf page
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.Application)
def on_application_available(self):
if self.is_plugin_available(Plugins.MainMenu):
self._populate_help_menu()
@on_plugin_available(plugin=Plugins.MainMenu)
def on_main_menu_available(self):
if self.is_plugin_enabled(Plugins.Application):
if self.is_plugin_available(Plugins.Application):
self._populate_help_menu()
else:
self._populate_help_menu()
@on_plugin_available(plugin=Plugins.StatusBar)
def on_statusbar_available(self):
# Add status widget
statusbar = self.get_plugin(Plugins.StatusBar)
statusbar.add_status_widget(self.update_manager_status)
# ---- Plugin teardown
@on_plugin_teardown(plugin=Plugins.StatusBar)
def on_statusbar_teardown(self):
# Remove status widget if created
statusbar = self.get_plugin(Plugins.StatusBar)
statusbar.remove_status_widget(self.update_manager_status.ID)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.MainMenu)
def on_main_menu_teardown(self):
self._depopulate_help_menu()
def on_close(self, _unused=True):
# The container is closed directly in the plugin registry
pass
def on_mainwindow_visible(self):
"""Actions after the mainwindow in visible."""
container = self.get_container()
# Initialize status.
# Note that NO_STATUS also hides the statusbar widget.
container.update_manager_status.set_no_status()
# Check for updates on startup
if self.get_conf('check_updates_on_startup'):
container.start_check_update(startup=True)
# ---- Private API
# ------------------------------------------------------------------------
def _populate_help_menu(self):
"""Add update action and menu to the Help menu."""
mainmenu = self.get_plugin(Plugins.MainMenu)
from spyder.plugins.application.api import ApplicationActions
help_spyder_action = ApplicationActions.HelpSpyderAction
mainmenu.add_item_to_application_menu(
self.check_update_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.About,
before=help_spyder_action,
)
@property
def _window(self):
return self.main.window()
def _depopulate_help_menu(self):
"""Remove update action from the Help main menu."""
mainmenu = self.get_plugin(Plugins.MainMenu)
mainmenu.remove_item_from_application_menu(
UpdateManagerActions.SpyderCheckUpdateAction,
menu_id=ApplicationMenus.Help)
# ---- Public API
# ------------------------------------------------------------------------
@property
def check_update_action(self):
"""Check if a new version of Spyder is available."""
return self.get_container().check_update_action
@property
def update_manager_status(self):
"""Get Update manager statusbar widget"""
return self.get_container().update_manager_status
| UpdateManager |
python | pytest-dev__pytest-asyncio | pytest_asyncio/plugin.py | {
"start": 17277,
"end": 29857
} | class ____(PytestAsyncioFunction):
"""
Pytest item that is coroutine or an asynchronous generator decorated by
@hypothesis.given.
"""
def setup(self) -> None:
if not getattr(self.obj, "hypothesis", False) and getattr(
self.obj, "is_hypothesis_test", False
):
pytest.fail(
f"test function `{self!r}` is using Hypothesis, but pytest-asyncio "
"only works with Hypothesis 3.64.0 or later."
)
return super().setup()
@staticmethod
def _can_substitute(item: Function) -> bool:
func = item.obj
return (
getattr(func, "is_hypothesis_test", False) # type: ignore[return-value]
and getattr(func, "hypothesis", None)
and inspect.iscoroutinefunction(func.hypothesis.inner_test)
)
@property
def _synchronization_target_attr(self) -> tuple[object, str]:
return self.obj.hypothesis, "inner_test"
# The function name needs to start with "pytest_"
# see https://github.com/pytest-dev/pytest/issues/11307
@pytest.hookimpl(specname="pytest_pycollect_makeitem", hookwrapper=True)
def pytest_pycollect_makeitem_convert_async_functions_to_subclass(
collector: pytest.Module | pytest.Class, name: str, obj: object
) -> Generator[None, pluggy.Result, None]:
"""
Converts coroutines and async generators collected as pytest.Functions
to AsyncFunction items.
"""
hook_result = yield
try:
node_or_list_of_nodes: (
pytest.Item | pytest.Collector | list[pytest.Item | pytest.Collector] | None
) = hook_result.get_result()
except BaseException as e:
hook_result.force_exception(e)
return
if not node_or_list_of_nodes:
return
if isinstance(node_or_list_of_nodes, Sequence):
node_iterator = iter(node_or_list_of_nodes)
else:
# Treat single node as a single-element iterable
node_iterator = iter((node_or_list_of_nodes,))
updated_node_collection = []
for node in node_iterator:
updated_item = node
if isinstance(node, Function):
specialized_item_class = PytestAsyncioFunction.item_subclass_for(node)
if specialized_item_class:
if _get_asyncio_mode(
node.config
) == Mode.AUTO and not node.get_closest_marker("asyncio"):
node.add_marker("asyncio")
if node.get_closest_marker("asyncio"):
updated_item = specialized_item_class._from_function(node)
updated_node_collection.append(updated_item)
hook_result.force_result(updated_node_collection)
@contextlib.contextmanager
def _temporary_event_loop_policy(policy: AbstractEventLoopPolicy) -> Iterator[None]:
old_loop_policy = _get_event_loop_policy()
try:
old_loop = _get_event_loop_no_warn()
except RuntimeError:
old_loop = None
_set_event_loop_policy(policy)
try:
yield
finally:
_set_event_loop_policy(old_loop_policy)
_set_event_loop(old_loop)
def _get_event_loop_policy() -> AbstractEventLoopPolicy:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return asyncio.get_event_loop_policy()
def _set_event_loop_policy(policy: AbstractEventLoopPolicy) -> None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
asyncio.set_event_loop_policy(policy)
def _get_event_loop_no_warn(
policy: AbstractEventLoopPolicy | None = None,
) -> asyncio.AbstractEventLoop:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
if policy is not None:
return policy.get_event_loop()
else:
return asyncio.get_event_loop()
def _set_event_loop(loop: AbstractEventLoop | None) -> None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
asyncio.set_event_loop(loop)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem: Function) -> object | None:
"""Pytest hook called before a test case is run."""
if pyfuncitem.get_closest_marker("asyncio") is not None:
if is_async_test(pyfuncitem):
asyncio_mode = _get_asyncio_mode(pyfuncitem.config)
for fixname, fixtures in pyfuncitem._fixtureinfo.name2fixturedefs.items():
# name2fixturedefs is a dict between fixture name and a list of matching
# fixturedefs. The last entry in the list is closest and the one used.
func = fixtures[-1].func
if (
asyncio_mode == Mode.STRICT
and _is_coroutine_or_asyncgen(func)
and not _is_asyncio_fixture_function(func)
):
warnings.warn(
PytestDeprecationWarning(
f"asyncio test {pyfuncitem.name!r} requested async "
"@pytest.fixture "
f"{fixname!r} in strict mode. "
"You might want to use @pytest_asyncio.fixture or switch "
"to auto mode. "
"This will become an error in future versions of "
"pytest-asyncio."
),
stacklevel=1,
)
# no stacklevel points at the users code, so we set stacklevel=1
# so it at least indicates that it's the plugin complaining.
# Pytest gives the test file & name in the warnings summary at least
else:
pyfuncitem.warn(
pytest.PytestWarning(
f"The test {pyfuncitem} is marked with '@pytest.mark.asyncio' "
"but it is not an async function. "
"Please remove the asyncio mark. "
"If the test is not marked explicitly, "
"check for global marks applied via 'pytestmark'."
)
)
yield
return None
def _synchronize_coroutine(
func: Callable[..., CoroutineType],
runner: asyncio.Runner,
context: contextvars.Context,
):
"""
Return a sync wrapper around a coroutine executing it in the
specified runner and context.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
coro = func(*args, **kwargs)
runner.run(coro, context=context)
return inner
@pytest.hookimpl(wrapper=True)
def pytest_fixture_setup(fixturedef: FixtureDef, request) -> object | None:
asyncio_mode = _get_asyncio_mode(request.config)
if not _is_asyncio_fixture_function(fixturedef.func):
if asyncio_mode == Mode.STRICT:
# Ignore async fixtures without explicit asyncio mark in strict mode
# This applies to pytest_trio fixtures, for example
return (yield)
if not _is_coroutine_or_asyncgen(fixturedef.func):
return (yield)
default_loop_scope = request.config.getini("asyncio_default_fixture_loop_scope")
loop_scope = (
getattr(fixturedef.func, "_loop_scope", None)
or default_loop_scope
or fixturedef.scope
)
runner_fixture_id = f"_{loop_scope}_scoped_runner"
runner = request.getfixturevalue(runner_fixture_id)
synchronizer = _fixture_synchronizer(fixturedef, runner, request)
_make_asyncio_fixture_function(synchronizer, loop_scope)
with MonkeyPatch.context() as c:
c.setattr(fixturedef, "func", synchronizer)
hook_result = yield
return hook_result
_DUPLICATE_LOOP_SCOPE_DEFINITION_ERROR = """\
An asyncio pytest marker defines both "scope" and "loop_scope", \
but it should only use "loop_scope".
"""
_MARKER_SCOPE_KWARG_DEPRECATION_WARNING = """\
The "scope" keyword argument to the asyncio marker has been deprecated. \
Please use the "loop_scope" argument instead.
"""
def _get_marked_loop_scope(
asyncio_marker: Mark, default_loop_scope: _ScopeName
) -> _ScopeName:
assert asyncio_marker.name == "asyncio"
if asyncio_marker.args or (
asyncio_marker.kwargs and set(asyncio_marker.kwargs) - {"loop_scope", "scope"}
):
raise ValueError("mark.asyncio accepts only a keyword argument 'loop_scope'.")
if "scope" in asyncio_marker.kwargs:
if "loop_scope" in asyncio_marker.kwargs:
raise pytest.UsageError(_DUPLICATE_LOOP_SCOPE_DEFINITION_ERROR)
warnings.warn(PytestDeprecationWarning(_MARKER_SCOPE_KWARG_DEPRECATION_WARNING))
scope = asyncio_marker.kwargs.get("loop_scope") or asyncio_marker.kwargs.get(
"scope"
)
if scope is None:
scope = default_loop_scope
assert scope in {"function", "class", "module", "package", "session"}
return scope
def _get_default_test_loop_scope(config: Config) -> Any:
return config.getini("asyncio_default_test_loop_scope")
_RUNNER_TEARDOWN_WARNING = """\
An exception occurred during teardown of an asyncio.Runner. \
The reason is likely that you closed the underlying event loop in a test, \
which prevents the cleanup of asynchronous generators by the runner.
This warning will become an error in future versions of pytest-asyncio. \
Please ensure that your tests don't close the event loop. \
Here is the traceback of the exception triggered during teardown:
%s
"""
def _create_scoped_runner_fixture(scope: _ScopeName) -> Callable:
@pytest.fixture(
scope=scope,
name=f"_{scope}_scoped_runner",
)
def _scoped_runner(
event_loop_policy,
request: FixtureRequest,
) -> Iterator[Runner]:
new_loop_policy = event_loop_policy
debug_mode = _get_asyncio_debug(request.config)
with _temporary_event_loop_policy(new_loop_policy):
runner = Runner(debug=debug_mode).__enter__()
try:
yield runner
except Exception as e:
runner.__exit__(type(e), e, e.__traceback__)
else:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", ".*BaseEventLoop.shutdown_asyncgens.*", RuntimeWarning
)
try:
runner.__exit__(None, None, None)
except RuntimeError:
warnings.warn(
_RUNNER_TEARDOWN_WARNING % traceback.format_exc(),
RuntimeWarning,
)
return _scoped_runner
for scope in Scope:
globals()[f"_{scope.value}_scoped_runner"] = _create_scoped_runner_fixture(
scope.value
)
@pytest.fixture(scope="session", autouse=True)
def event_loop_policy() -> AbstractEventLoopPolicy:
"""Return an instance of the policy used to create asyncio event loops."""
return _get_event_loop_policy()
def is_async_test(item: Item) -> TypeIs[PytestAsyncioFunction]:
"""Returns whether a test item is a pytest-asyncio test"""
return isinstance(item, PytestAsyncioFunction)
def _unused_port(socket_type: int) -> int:
"""Find an unused localhost port from 1024-65535 and return it."""
with contextlib.closing(socket.socket(type=socket_type)) as sock:
sock.bind(("127.0.0.1", 0))
return sock.getsockname()[1]
@pytest.fixture
def unused_tcp_port() -> int:
return _unused_port(socket.SOCK_STREAM)
@pytest.fixture
def unused_udp_port() -> int:
return _unused_port(socket.SOCK_DGRAM)
@pytest.fixture(scope="session")
def unused_tcp_port_factory() -> Callable[[], int]:
"""A factory function, producing different unused TCP ports."""
produced = set()
def factory():
"""Return an unused port."""
port = _unused_port(socket.SOCK_STREAM)
while port in produced:
port = _unused_port(socket.SOCK_STREAM)
produced.add(port)
return port
return factory
@pytest.fixture(scope="session")
def unused_udp_port_factory() -> Callable[[], int]:
"""A factory function, producing different unused UDP ports."""
produced = set()
def factory():
"""Return an unused port."""
port = _unused_port(socket.SOCK_DGRAM)
while port in produced:
port = _unused_port(socket.SOCK_DGRAM)
produced.add(port)
return port
return factory
| AsyncHypothesisTest |
python | kamyu104__LeetCode-Solutions | Python/split-array-into-fibonacci-sequence.py | {
"start": 32,
"end": 1193
} | class ____(object):
def splitIntoFibonacci(self, S):
"""
:type S: str
:rtype: List[int]
"""
def startswith(S, k, x):
y = 0
for i in xrange(k, len(S)):
y = 10*y + int(S[i])
if y == x:
return i-k+1
elif y > x:
break
return 0
MAX_INT = 2**31-1
a = 0
for i in xrange(len(S)-2):
a = 10*a + int(S[i])
b = 0
for j in xrange(i+1, len(S)-1):
b = 10*b + int(S[j])
fib = [a, b]
k = j+1
while k < len(S):
if fib[-2] > MAX_INT-fib[-1]:
break
c = fib[-2]+fib[-1]
length = startswith(S, k, c)
if length == 0:
break
fib.append(c)
k += length
else:
return fib
if b == 0:
break
if a == 0:
break
return []
| Solution |
python | sphinx-doc__sphinx | sphinx/directives/patches.py | {
"start": 2800,
"end": 4424
} | class ____(SphinxDirective):
"""Parse and mark up content of a code block.
This is compatible with docutils' :rst:dir:`code` directive.
"""
optional_arguments = 1
option_spec: ClassVar[OptionSpec] = {
'class': directives.class_option,
'force': directives.flag,
'name': directives.unchanged,
'number-lines': optional_int,
}
has_content = True
def run(self) -> list[Node]:
self.assert_has_content()
self.options = _normalize_options(self.options)
code = '\n'.join(self.content)
node = nodes.literal_block(
code,
code,
classes=self.options.get('classes', []),
force='force' in self.options,
highlight_args={},
)
self.add_name(node)
set_source_info(self, node)
if self.arguments:
# highlight language specified
node['language'] = self.arguments[0]
else:
# no highlight language specified. Then this directive refers the current
# highlight setting via ``highlight`` directive or ``highlight_language``
# configuration.
node['language'] = (
self.env.current_document.highlight_language
or self.config.highlight_language
)
if 'number-lines' in self.options:
node['linenos'] = True
# if number given, treat as lineno-start.
if self.options['number-lines']:
node['highlight_args']['linenostart'] = self.options['number-lines']
return [node]
| Code |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_business_hour.py | {
"start": 41631,
"end": 58547
} | class ____:
# opening time should be affected by sign of n, not by n's value and end
opening_time_cases = [
(
[
BusinessHour(),
BusinessHour(n=2),
BusinessHour(n=4),
BusinessHour(end="10:00"),
BusinessHour(n=2, end="4:00"),
BusinessHour(n=4, end="15:00"),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9),
),
# if timestamp is on opening time, next opening time is
# as it is
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 2, 10): (
datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9),
),
# 2014-07-05 is saturday
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9),
),
datetime(2014, 7, 7, 9, 1): (
datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9),
),
},
),
(
[
BusinessHour(start="11:15"),
BusinessHour(n=2, start="11:15"),
BusinessHour(n=3, start="11:15"),
BusinessHour(start="11:15", end="10:00"),
BusinessHour(n=2, start="11:15", end="4:00"),
BusinessHour(n=3, start="11:15", end="15:00"),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15),
),
datetime(2014, 7, 2, 10): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15),
),
datetime(2014, 7, 2, 11, 15): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15),
),
datetime(2014, 7, 2, 11, 15, 1): (
datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15),
),
datetime(2014, 7, 7, 9, 1): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15),
),
},
),
(
[
BusinessHour(-1),
BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end="10:00"),
BusinessHour(n=-2, end="4:00"),
BusinessHour(n=-4, end="15:00"),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9),
),
datetime(2014, 7, 2, 10): (
datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 7, 9): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9),
),
datetime(2014, 7, 7, 9, 1): (
datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9),
),
},
),
(
[
BusinessHour(start="17:00", end="05:00"),
BusinessHour(n=3, start="17:00", end="03:00"),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 4, 17): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 7, 17, 1): (
datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17),
),
},
),
(
[
BusinessHour(-1, start="17:00", end="05:00"),
BusinessHour(n=-2, start="17:00", end="03:00"),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 2, 16, 59): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17),
),
datetime(2014, 7, 7, 18): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17),
),
},
),
(
[
BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),
BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),
BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),
BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),
BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 15),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 15),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 15),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 15),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 15),
),
datetime(2014, 7, 2, 10): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 15),
),
datetime(2014, 7, 2, 11, 15): (
datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15),
),
datetime(2014, 7, 2, 11, 15, 1): (
datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 11, 15),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 15),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 15),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 15),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 15),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 15),
),
datetime(2014, 7, 7, 9, 1): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 15),
),
datetime(2014, 7, 7, 12): (
datetime(2014, 7, 7, 15),
datetime(2014, 7, 7, 11, 15),
),
},
),
(
[
BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),
BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),
],
{
datetime(2014, 7, 1, 11): (
datetime(2014, 7, 1, 8),
datetime(2014, 7, 1, 17),
),
datetime(2014, 7, 1, 18): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 8),
),
datetime(2014, 7, 1, 23): (
datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 8),
),
datetime(2014, 7, 2, 8): (
datetime(2014, 7, 2, 8),
datetime(2014, 7, 2, 8),
),
datetime(2014, 7, 2, 9): (
datetime(2014, 7, 2, 8),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 2, 16, 59): (
datetime(2014, 7, 2, 8),
datetime(2014, 7, 2, 17),
),
datetime(2014, 7, 5, 10): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 8),
),
datetime(2014, 7, 4, 10): (
datetime(2014, 7, 4, 8),
datetime(2014, 7, 4, 17),
),
datetime(2014, 7, 4, 23): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 8),
),
datetime(2014, 7, 6, 10): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 8),
),
datetime(2014, 7, 7, 5): (
datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 8),
),
datetime(2014, 7, 7, 18): (
datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 8),
),
},
),
]
@pytest.mark.parametrize("case", opening_time_cases)
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
for dt, (exp_next, exp_prev) in cases.items():
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
| TestOpeningTimes |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 20230,
"end": 20542
} | class ____:
Alias = AliasAction
Data = DataAction
Collections = CollectionsAction
Roles = RolesAction
Cluster = ClusterAction
Nodes = NodesAction
Backups = BackupsAction
Tenants = TenantsAction
Users = UsersAction
Replicate = ReplicateAction
Groups = GroupAction
| Actions |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 16822,
"end": 16918
} | class ____(_SetOfTypes):
"""A union type that contains all types in self.type_list."""
| UnionType |
python | getsentry__sentry | tests/sentry/rules/processing/test_processor.py | {
"start": 1940,
"end": 25698
} | class ____(TestCase, PerformanceIssueTestCase):
def setUp(self) -> None:
event = self.store_event(data={}, project_id=self.project.id)
self.group_event = event.for_group(cast(Group, event.group))
Rule.objects.filter(project=self.group_event.project).delete()
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
self.rule = Rule.objects.create(
project=self.group_event.project,
data={"conditions": [EVERY_EVENT_COND_DATA], "actions": [EMAIL_ACTION_DATA]},
)
self.user_count_condition = {
"interval": "1h",
"id": "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyCondition",
"value": 100,
}
self.event_frequency_condition = {
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"interval": "1h",
"value": 1,
}
# this test relies on a few other tests passing
def test_integrated(self) -> None:
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
assert len(results) == 1
callback, futures = results[0]
assert len(futures) == 1
assert futures[0].rule == self.rule
assert futures[0].kwargs == {}
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 1
)
# should not apply twice due to default frequency
results = list(rp.apply())
assert len(results) == 0
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 1
)
# now ensure that moving the last update backwards
# in time causes the rule to trigger again
GroupRuleStatus.objects.filter(rule=self.rule).update(
last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
)
results = list(rp.apply())
assert len(results) == 1
rule_fire_histories = RuleFireHistory.objects.filter(
rule=self.rule, group=self.group_event.group
)
assert rule_fire_histories.count() == 2
for rule_fire_history in rule_fire_histories:
assert getattr(rule_fire_history, "notification_uuid", None) is not None
def test_escalating_event_condition_with_reappeared(self) -> None:
self.rule.update(
data={
"conditions": [ESCALATING_EVENT_COND_DATA],
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_reappeared=True,
has_escalated=False,
)
results = list(rp.apply())
assert len(results) == 0
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 0
)
def test_escalating_event_condition_with_escalated(self) -> None:
self.rule.update(
data={
"conditions": [ESCALATING_EVENT_COND_DATA],
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_escalated=True,
has_reappeared=False,
)
results = list(rp.apply())
assert len(results) == 1
callback, futures = results[0]
assert len(futures) == 1
assert futures[0].rule == self.rule
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 1
)
def test_escalating_event_condition_with_escalated_and_reappeared(self) -> None:
self.rule.update(
data={
"conditions": [ESCALATING_EVENT_COND_DATA],
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_reappeared=True,
has_escalated=True,
)
results = list(rp.apply())
assert len(results) == 1
callback, futures = results[0]
assert len(futures) == 1
assert futures[0].rule == self.rule
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 1
)
def test_escalating_event_condition_not_escalated_or_reappeared(self) -> None:
self.rule.update(
data={
"conditions": [ESCALATING_EVENT_COND_DATA],
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_reappeared=False,
has_escalated=False,
)
results = list(rp.apply())
assert len(results) == 0
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 0
)
def test_delayed_rule_match_any_slow_conditions(self) -> None:
"""
Test that a rule with only 'slow' conditions and action match of 'any' for a performance issue gets added to the Redis buffer and does not immediately fire when the 'fast' condition fails to pass
"""
self.rule.update(
data={
"conditions": [self.user_count_condition, self.event_frequency_condition],
"action_match": "any",
"actions": [EMAIL_ACTION_DATA],
},
)
tags = [["foo", "guux"], ["sentry:release", "releaseme"]]
contexts = {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}}
for i in range(3):
perf_event = self.create_performance_issue(
tags=tags,
fingerprint="group-5",
contexts=contexts,
)
assert isinstance(perf_event, GroupEvent)
start_timestamp = datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC)
rp = RuleProcessor(
perf_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
start_timestamp=start_timestamp,
)
results = list(rp.apply())
assert len(results) == 0
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, timezone.now().timestamp()
)
assert len(project_ids) == 1
assert project_ids[0][0] == self.project.id
rulegroup_to_events = buffer.backend.get_hash(
model=Project, field={"project_id": self.project.id}
)
assert perf_event.group is not None
assert rulegroup_to_events == {
f"{self.rule.id}:{perf_event.group.id}": json.dumps(
{
"event_id": perf_event.event_id,
"occurrence_id": perf_event.occurrence_id,
"start_timestamp": start_timestamp,
}
)
}
def test_delayed_rule_match_any_slow_fast_conditions(self) -> None:
"""
Test that a rule with a 'slow' condition, a 'fast' condition, and action match of 'any' gets added to the Redis buffer and does not immediately fire when the 'fast' condition fails to pass
"""
first_seen_condition = {
"id": "sentry.rules.conditions.reappeared_event.ReappearedEventCondition"
}
self.rule.update(
data={
"conditions": [first_seen_condition, self.event_frequency_condition],
"action_match": "any",
"actions": [EMAIL_ACTION_DATA],
},
)
start_timestamp = datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=False,
start_timestamp=start_timestamp,
)
results = list(rp.apply())
assert len(results) == 0
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, timezone.now().timestamp()
)
assert len(project_ids) == 1
assert project_ids[0][0] == self.project.id
rulegroup_to_events = buffer.backend.get_hash(
model=Project, field={"project_id": self.project.id}
)
assert rulegroup_to_events == {
f"{self.rule.id}:{self.group_event.group.id}": json.dumps(
{
"event_id": self.group_event.event_id,
"occurrence_id": None,
"start_timestamp": start_timestamp,
}
)
}
def test_delayed_rule_match_error_slow_fast_conditions(self) -> None:
"""
Test that a rule with a 'slow' condition, a 'fast' condition, and action match of 'garbage' errors and does not fire or get added to the Redis queue
"""
first_seen_condition = {
"id": "sentry.rules.conditions.reappeared_event.ReappearedEventCondition"
}
self.rule.update(
data={
"conditions": [first_seen_condition, self.event_frequency_condition],
"action_match": "garbage",
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=False,
)
results = list(rp.apply())
assert len(results) == 0
def test_rule_match_any_slow_fast_conditions_fast_passes(self) -> None:
"""
Test that a rule with both 'slow' and 'fast' conditions and action match of 'any' where a fast condition passes fires and doesn't get enqueued
"""
self.rule.update(
data={
"conditions": [EVERY_EVENT_COND_DATA, self.event_frequency_condition],
"action_match": "any",
"actions": [EMAIL_ACTION_DATA],
},
)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
assert len(results) == 1
def test_delayed_rule_match_all(self) -> None:
"""
Test that a rule with a 'slow' condition and action match of 'all' gets added to the Redis buffer and does not immediately fire
"""
self.rule.update(
data={
"conditions": [
EVERY_EVENT_COND_DATA,
{
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"interval": "1h",
"value": 1,
},
],
"action_match": "all",
"actions": [EMAIL_ACTION_DATA],
},
)
start_timestamp = datetime(2020, 9, 1, 3, 8, 24, 880386, tzinfo=UTC)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
start_timestamp=start_timestamp,
)
results = list(rp.apply())
assert len(results) == 0
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, timezone.now().timestamp()
)
assert len(project_ids) == 1
assert project_ids[0][0] == self.project.id
rulegroup_to_events = buffer.backend.get_hash(
model=Project, field={"project_id": self.project.id}
)
assert rulegroup_to_events == {
f"{self.rule.id}:{self.group_event.group.id}": json.dumps(
{
"event_id": self.group_event.event_id,
"occurrence_id": None,
"start_timestamp": start_timestamp,
}
)
}
def test_ignored_issue(self) -> None:
self.group_event.group.status = GroupStatus.IGNORED
self.group_event.group.substatus = None
self.group_event.group.save()
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
assert len(results) == 0
def test_resolved_issue(self) -> None:
self.group_event.group.status = GroupStatus.RESOLVED
self.group_event.group.substatus = None
self.group_event.group.save()
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
assert len(results) == 0
def test_disabled_rule(self) -> None:
self.rule.status = ObjectStatus.DISABLED
self.rule.save()
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
assert len(results) == 0
assert (
RuleFireHistory.objects.filter(rule=self.rule, group=self.group_event.group).count()
== 0
)
def test_muted_slack_rule(self) -> None:
"""Test that we don't sent a notification for a muted Slack rule"""
integration = install_slack(self.organization)
action_data = [
{
"channel": "#my-channel",
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"workspace": integration.id,
},
]
slack_rule = self.create_project_rule(self.project, action_data)
action_data[0].update({"channel": "#my-other-channel"})
muted_slack_rule = self.create_project_rule(self.project, action_data)
self.snooze_rule(
owner_id=self.user.id,
rule=muted_slack_rule,
)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
# this indicates that both email and slack notifs were sent, though there could be more than one of each type
assert len(results) == 2
# this checks that there was only 1 slack notification sent
slack_notifs = results[1][1]
assert len(slack_notifs) == 1
assert slack_notifs[0].rule == slack_rule
email_notifs = results[0][1]
# this checks that there was only 1 email notification sent
assert len(email_notifs) == 1
assert results[0][1][0].rule == self.rule
assert (
RuleFireHistory.objects.filter(
rule=muted_slack_rule, group=self.group_event.group
).count()
== 0
)
slack_rule_fire_history = RuleFireHistory.objects.filter(
rule=slack_rule, group=self.group_event.group
)
assert slack_rule_fire_history.count() == 1
assert getattr(slack_rule_fire_history[0], "notification_uuid", None) is not None
rule_fire_history = RuleFireHistory.objects.filter(
rule=self.rule, group=self.group_event.group
)
assert rule_fire_history.count() == 1
assert getattr(rule_fire_history[0], "notification_uuid", None) is not None
def test_muted_msteams_rule(self) -> None:
"""Test that we don't sent a notification for a muted MSTeams rule"""
tenant_id = "50cccd00-7c9c-4b32-8cda-58a084f9334a"
integration = self.create_integration(
self.organization,
tenant_id,
metadata={
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"service_url": "https://testserviceurl.com/testendpoint/",
"installation_type": "tenant",
"expires_at": 1234567890,
"tenant_id": tenant_id,
},
name="Personal Installation",
provider="msteams",
)
action_data = [
{
"channel": "secrets",
"id": "sentry.integrations.msteams.notify_action.MsTeamsNotifyServiceAction",
"team": integration.id,
},
]
msteams_rule = self.create_project_rule(self.project, action_data, [])
action_data[0].update({"channel": "#secreter-secrets"})
muted_msteams_rule = self.create_project_rule(self.project, action_data, [])
self.snooze_rule(
owner_id=self.user.id,
rule=muted_msteams_rule,
)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = list(rp.apply())
# this indicates that both email and msteams notifs were sent, though there could be more than one of each type
assert len(results) == 2
slack_notifs = results[1][1]
# this checks that there was only 1 msteams notification sent
assert len(slack_notifs) == 1
assert slack_notifs[0].rule == msteams_rule
email_notifs = results[0][1]
# this checks that there was only 1 email notification sent
assert len(email_notifs) == 1
assert results[0][1][0].rule == self.rule
assert (
RuleFireHistory.objects.filter(
rule=muted_msteams_rule, group=self.group_event.group
).count()
== 0
)
msteams_rule_fire_history = RuleFireHistory.objects.filter(
rule=msteams_rule, group=self.group_event.group
)
assert (
RuleFireHistory.objects.filter(rule=msteams_rule, group=self.group_event.group).count()
== 1
)
assert getattr(msteams_rule_fire_history[0], "notification_uuid", None) is not None
rule_fire_history = RuleFireHistory.objects.filter(
rule=self.rule, group=self.group_event.group
)
assert rule_fire_history.count() == 1
assert getattr(rule_fire_history[0], "notification_uuid", None) is not None
def run_query_test(self, rp, expected_queries):
with CaptureQueriesContext(connections[DEFAULT_DB_ALIAS]) as queries:
results = list(rp.apply())
status_queries = [
q
for q in queries.captured_queries
if "grouprulestatus" in str(q) and "UPDATE" not in str(q)
]
assert len(status_queries) == expected_queries, "\n".join(
"%d. %s" % (i, query["sql"]) for i, query in enumerate(status_queries, start=1)
)
assert len(results) == 2
def test_multiple_rules(self) -> None:
rule_2 = Rule.objects.create(
project=self.group_event.project,
data={"conditions": [EVERY_EVENT_COND_DATA], "actions": [EMAIL_ACTION_DATA]},
)
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
self.run_query_test(rp, 3)
GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
)
# GroupRuleStatus queries should be cached
self.run_query_test(rp, 0)
cache.clear()
GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
)
# GroupRuleStatus rows should be created, so we should perform two fewer queries since we
# don't need to create/fetch the rows
self.run_query_test(rp, 1)
cache.clear()
GroupRuleStatus.objects.filter(rule__in=[self.rule, rule_2]).update(
last_active=timezone.now() - timedelta(minutes=Rule.DEFAULT_FREQUENCY + 1)
)
# Test that we don't get errors if we try to create statuses that already exist due to a
# race condition
with mock.patch(
"sentry.rules.processing.processor.GroupRuleStatus"
) as mocked_GroupRuleStatus:
call_count = 0
def mock_filter(*args, **kwargs):
nonlocal call_count
if call_count == 0:
call_count += 1
# Make a query here to not throw the query counts off
return GroupRuleStatus.objects.filter(id=-1)
return GroupRuleStatus.objects.filter(*args, **kwargs)
mocked_GroupRuleStatus.objects.filter.side_effect = mock_filter
# Even though the rows already exist, we should go through the creation step and make
# the extra queries. The conflicting insert doesn't seem to be counted here since it
# creates no rows.
self.run_query_test(rp, 2)
@patch(
"sentry.constants._SENTRY_RULES",
[
"sentry.mail.actions.NotifyEmailAction",
"sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"tests.sentry.rules.processing.test_processor.MockConditionTrue",
],
)
def test_slow_conditions_evaluate_last(self) -> None:
# Make sure slow/expensive conditions are evaluated last, so that we can skip evaluating
# them if cheaper conditions satisfy the rule.
self.rule.update(
data={
"conditions": [
{"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition"},
{"id": "tests.sentry.rules.processing.test_processor.MockConditionTrue"},
],
"action_match": "any",
"actions": [EMAIL_ACTION_DATA],
},
)
with (
patch("sentry.rules.processing.processor.rules", init_registry()),
patch(
"sentry.rules.conditions.event_frequency.BaseEventFrequencyCondition.passes"
) as passes,
):
rp = RuleProcessor(
self.group_event,
is_new=True,
is_regression=True,
is_new_group_environment=True,
has_reappeared=True,
)
results = rp.apply()
assert len(results) == 1
# We should never call `passes` on the frequency condition since we should run the cheap
# mock condition first.
assert passes.call_count == 0
| RuleProcessorTest |
python | cython__cython | tests/run/py3k_super.py | {
"start": 2105,
"end": 2788
} | class ____(A):
"""
>>> obj = C()
>>> obj.method_1()
2
>>> obj.method_2()
3
>>> obj.method_3()
['__class__', 'self']
>>> obj.method_4()
['self']
>>> obj.method_5() # doctest: +ELLIPSIS
<class '...py3k_super.C'>
>>> obj.super_class() # doctest: +ELLIPSIS
<class '...py3k_super.A'>
"""
def method_1(self):
return __class__.class_method()
def method_2(self):
return __class__.static_method()
def method_3(self):
__class__
return sorted(list(locals().keys()))
def method_4(self):
return sorted(list(locals().keys()))
def method_5(self):
return __class__
| C |
python | django__django | tests/fixtures/models.py | {
"start": 3431,
"end": 3546
} | class ____(models.Manager):
def get_by_natural_key(self, key):
return self.get(key=key)
| NaturalKeyManager |
python | spack__spack | lib/spack/spack/llnl/util/lock.py | {
"start": 5932,
"end": 6389
} | class ____:
READ = 0
WRITE = 1
@staticmethod
def to_str(tid):
ret = "READ"
if tid == LockType.WRITE:
ret = "WRITE"
return ret
@staticmethod
def to_module(tid):
lock = fcntl.LOCK_SH
if tid == LockType.WRITE:
lock = fcntl.LOCK_EX
return lock
@staticmethod
def is_valid(op: int) -> bool:
return op == LockType.READ or op == LockType.WRITE
| LockType |
python | walkccc__LeetCode | solutions/1575. Count All Possible Routes/1575.py | {
"start": 0,
"end": 634
} | class ____:
def countRoutes(
self,
locations: list[int],
start: int,
finish: int,
fuel: int,
) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def dp(i: int, fuel: int) -> int:
"""
Returns the number of ways to reach the `finish` city from the i-th city
with `fuel` fuel.
"""
if fuel < 0:
return 0
res = 1 if i == finish else 0
for j in range(len(locations)):
if j == i:
continue
res += dp(j, fuel - abs(locations[i] - locations[j]))
res %= MOD
return res
return dp(start, fuel)
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_redshift_cluster.py | {
"start": 22781,
"end": 29077
} | class ____:
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.cluster_status")
@mock.patch.object(RedshiftHook, "conn")
def test_delete_cluster_with_wait_for_completion(self, mock_conn, mock_cluster_status):
mock_cluster_status.return_value = "cluster_not_found"
redshift_operator = RedshiftDeleteClusterOperator(
task_id="task_test", cluster_identifier="test_cluster", aws_conn_id="aws_conn_test"
)
redshift_operator.execute(None)
mock_conn.delete_cluster.assert_called_once_with(
ClusterIdentifier="test_cluster",
SkipFinalClusterSnapshot=True,
FinalClusterSnapshotIdentifier="",
)
@mock.patch.object(RedshiftHook, "conn")
def test_delete_cluster_without_wait_for_completion(self, mock_conn):
redshift_operator = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
aws_conn_id="aws_conn_test",
wait_for_completion=False,
)
redshift_operator.execute(None)
mock_conn.delete_cluster.assert_called_once_with(
ClusterIdentifier="test_cluster",
SkipFinalClusterSnapshot=True,
FinalClusterSnapshotIdentifier="",
)
mock_conn.cluster_status.assert_not_called()
@mock.patch.object(RedshiftHook, "delete_cluster")
@mock.patch.object(RedshiftHook, "conn")
@mock.patch("time.sleep", return_value=None)
def test_delete_cluster_multiple_attempts(self, _, mock_conn, mock_delete_cluster):
exception = boto3.client("redshift").exceptions.InvalidClusterStateFault({}, "test")
returned_exception = type(exception)
mock_conn.exceptions.InvalidClusterStateFault = returned_exception
mock_delete_cluster.side_effect = [exception, exception, True]
redshift_operator = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
aws_conn_id="aws_conn_test",
wait_for_completion=False,
)
redshift_operator.execute(None)
assert mock_delete_cluster.call_count == 3
@mock.patch.object(RedshiftHook, "delete_cluster")
@mock.patch.object(RedshiftHook, "conn")
@mock.patch("time.sleep", return_value=None)
def test_delete_cluster_multiple_attempts_fail(self, _, mock_conn, mock_delete_cluster):
exception = boto3.client("redshift").exceptions.InvalidClusterStateFault({}, "test")
returned_exception = type(exception)
mock_conn.exceptions.InvalidClusterStateFault = returned_exception
mock_delete_cluster.side_effect = exception
redshift_operator = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
aws_conn_id="aws_conn_test",
wait_for_completion=False,
)
with pytest.raises(returned_exception):
redshift_operator.execute(None)
assert mock_delete_cluster.call_count == 10
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.cluster_status")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.delete_cluster")
def test_delete_cluster_deferrable_mode(self, mock_delete_cluster, mock_cluster_status):
"""Test delete cluster operator with defer when deferrable param is true"""
mock_delete_cluster.return_value = True
mock_cluster_status.return_value = "available"
delete_cluster = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
deferrable=True,
wait_for_completion=False,
)
with pytest.raises(TaskDeferred) as exc:
delete_cluster.execute(context=None)
assert isinstance(exc.value.trigger, RedshiftDeleteClusterTrigger), (
"Trigger is not a RedshiftDeleteClusterTrigger"
)
@mock.patch("airflow.providers.amazon.aws.operators.redshift_cluster.RedshiftDeleteClusterOperator.defer")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.cluster_status")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.delete_cluster")
def test_delete_cluster_deferrable_mode_in_paused_state(
self, mock_delete_cluster, mock_cluster_status, mock_defer
):
"""Test delete cluster operator with defer when deferrable param is true"""
mock_delete_cluster.return_value = True
mock_cluster_status.return_value = "creating"
delete_cluster = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
deferrable=True,
wait_for_completion=False,
)
with pytest.raises(AirflowException):
delete_cluster.execute(context=None)
assert not mock_defer.called
def test_delete_cluster_execute_complete_success(self):
"""Asserts that logging occurs as expected"""
task = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
deferrable=True,
wait_for_completion=False,
)
with mock.patch.object(task.log, "info") as mock_log_info:
task.execute_complete(context=None, event={"status": "success", "message": "Cluster deleted"})
mock_log_info.assert_called_with("Cluster deleted successfully")
def test_delete_cluster_execute_complete_fail(self):
redshift_operator = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
deferrable=True,
wait_for_completion=False,
)
with pytest.raises(AirflowException):
redshift_operator.execute_complete(
context=None, event={"status": "error", "message": "test failure message"}
)
def test_template_fields(self):
operator = RedshiftDeleteClusterOperator(
task_id="task_test",
cluster_identifier="test_cluster",
)
validate_template_fields(operator)
| TestDeleteClusterOperator |
python | pallets__werkzeug | src/werkzeug/sansio/multipart.py | {
"start": 383,
"end": 464
} | class ____(Event):
name: str
headers: Headers
@dataclass(frozen=True)
| Field |
python | mkdocs__mkdocs | mkdocs/tests/build_tests.py | {
"start": 39434,
"end": 39936
} | class ____(markdown.preprocessors.Preprocessor):
def __init__(self, base_path: str) -> None:
self.base_path = base_path
def run(self, lines: list[str]) -> list[str]:
for i, line in enumerate(lines):
if m := re.search(r'^--8<-- "(.+)"$', line):
try:
lines[i] = Path(self.base_path, m[1]).read_text()
except OSError:
lines[i] = f"(Failed to read {m[1]!r})\n"
return lines
| _TestPreprocessor |
python | google__pytype | pytype/tools/xref/indexer.py | {
"start": 9225,
"end": 9361
} | class ____:
"""Representation of an expression function call argument."""
names: list[str]
type: Any
@dataclasses.dataclass
| ExprArg |
python | kamyu104__LeetCode-Solutions | Python/partition-array-into-disjoint-intervals.py | {
"start": 29,
"end": 408
} | class ____(object):
def partitionDisjoint(self, A):
"""
:type A: List[int]
:rtype: int
"""
B = A[:]
for i in reversed(xrange(len(A)-1)):
B[i] = min(B[i], B[i+1])
p_max = 0
for i in xrange(1, len(A)):
p_max = max(p_max, A[i-1])
if p_max <= B[i]:
return i
| Solution |
python | graphql-python__graphene | graphene/types/tests/test_scalar.py | {
"start": 4624,
"end": 6240
} | class ____:
def test_query(self):
"""
Test that a normal query works.
"""
result = schema.execute("{ optional { float(input: 20) } }")
assert not result.errors
assert result.data == {"optional": {"float": 20.0}}
result = schema.execute("{ optional { float(input: 20.2) } }")
assert not result.errors
assert result.data == {"optional": {"float": 20.2}}
def test_optional_input(self):
"""
Test that we can provide a null value to an optional input
"""
result = schema.execute("{ optional { float(input: null) } }")
assert not result.errors
assert result.data == {"optional": {"float": None}}
def test_invalid_input(self):
"""
Test that if an invalid type is provided we get an error
"""
result = schema.execute('{ optional { float(input: "20") } }')
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == 'Float cannot represent non numeric value: "20"'
)
result = schema.execute('{ optional { float(input: "a") } }')
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == 'Float cannot represent non numeric value: "a"'
)
result = schema.execute("{ optional { float(input: true) } }")
assert result.errors
assert len(result.errors) == 1
assert (
result.errors[0].message == "Float cannot represent non numeric value: true"
)
| TestFloat |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/engine.py | {
"start": 6344,
"end": 6834
} | class ____(Enum):
max_examples = "settings.max_examples={s.max_examples}"
max_iterations = (
"settings.max_examples={s.max_examples}, "
"but < 10% of examples satisfied assumptions"
)
max_shrinks = f"shrunk example {MAX_SHRINKS} times"
finished = "nothing left to do"
flaky = "test was flaky"
very_slow_shrinking = "shrinking was very slow"
def describe(self, settings: Settings) -> str:
return self.value.format(s=settings)
| ExitReason |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 8250,
"end": 11447
} | class ____: # pragma: no cover
"""
Base class that inline patterns subclass.
Inline patterns are handled by means of `Pattern` subclasses, one per regular expression.
Each pattern object uses a single regular expression and must support the following methods:
[`getCompiledRegExp`][markdown.inlinepatterns.Pattern.getCompiledRegExp] and
[`handleMatch`][markdown.inlinepatterns.Pattern.handleMatch].
All the regular expressions used by `Pattern` subclasses must capture the whole block. For this
reason, they all start with `^(.*)` and end with `(.*)!`. When passing a regular expression on
class initialization, the `^(.*)` and `(.*)!` are added automatically and the regular expression
is pre-compiled.
It is strongly suggested that the newer style [`markdown.inlinepatterns.InlineProcessor`][] that
use a more efficient and flexible search approach be used instead. However, the older style
`Pattern` remains for backward compatibility with many existing third-party extensions.
"""
ANCESTOR_EXCLUDES: Collection[str] = tuple()
"""
A collection of elements which are undesirable ancestors. The processor will be skipped if it
would cause the content to be a descendant of one of the listed tag names.
"""
compiled_re: re.Pattern[str]
md: Markdown | None
def __init__(self, pattern: str, md: Markdown | None = None):
"""
Create an instant of an inline pattern.
Arguments:
pattern: A regular expression that matches a pattern.
md: An optional pointer to the instance of `markdown.Markdown` and is available as
`self.md` on the class instance.
"""
self.pattern = pattern
self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern,
re.DOTALL | re.UNICODE)
self.md = md
def getCompiledRegExp(self) -> re.Pattern:
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m: re.Match[str]) -> etree.Element | str:
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Arguments:
m: A match object containing a match of the pattern.
Returns: An ElementTree Element object.
"""
pass # pragma: no cover
def type(self) -> str:
""" Return class name, to define pattern type """
return self.__class__.__name__
def unescape(self, text: str) -> str:
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.md.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, str):
return value
else:
# An `etree` Element - return text content only
return ''.join(value.itertext())
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
| Pattern |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/models.py | {
"start": 1530,
"end": 7506
} | class ____(ABC):
requires_metadata: bool = True
runs_on_released_connectors: bool = True
@property
@abstractmethod
def name(self) -> str:
"""The name of the QA check
Raises:
NotImplementedError: Subclasses must implement name property/attribute
Returns:
str: The name of the QA check
"""
raise NotImplementedError("Subclasses must implement name property/attribute")
@property
def required(self) -> bool:
"""Whether the QA check is required
Returns:
bool: Whether the QA check is required
"""
return True
@property
@abstractmethod
def description(self) -> str:
"""A full description of the QA check. Used for documentation purposes.
It can use markdown syntax.
Raises:
NotImplementedError: Subclasses must implement description property/attribute
Returns:
str: The description of the QA check
"""
raise NotImplementedError("Subclasses must implement description property/attribute")
@property
def applies_to_connector_languages(self) -> List[ConnectorLanguage]:
"""The connector languages that the QA check applies to
Raises:
NotImplementedError: Subclasses must implement applies_to_connector_languages property/attribute
Returns:
List[ConnectorLanguage]: The connector languages that the QA check applies to
"""
return ALL_LANGUAGES
@property
def applies_to_connector_types(self) -> List[str]:
"""The connector types that the QA check applies to
Returns:
List[str]: The connector types that the QA check applies to
"""
return ALL_TYPES
@property
def applies_to_connector_ab_internal_sl(self) -> int:
"""The connector ab_internal_s that the QA check applies to
Returns:
int: integer value for connector ab_internal_sl level
"""
return 0
@property
@abstractmethod
def category(self) -> CheckCategory:
"""The category of the QA check
Raises:
NotImplementedError: Subclasses must implement category property/attribute
Returns:
CheckCategory: The category of the QA check
"""
raise NotImplementedError("Subclasses must implement category property/attribute")
@property
def applies_to_connector_support_levels(self) -> Optional[List[str]]:
"""The connector's support levels that the QA check applies to
Returns:
List[str]: None if connector's support levels that the QA check applies to is not specified
"""
return None
@property
def applies_to_connector_cloud_usage(self) -> Optional[List[str]]:
"""The connector's cloud usage level that the QA check applies to
Returns:
List[str]: None if connector's cloud usage levels that the QA check applies to is not specified
"""
return None
def run(self, connector: Connector) -> CheckResult:
if not self.runs_on_released_connectors and connector.is_released:
return self.skip(
connector,
"Check does not apply to released connectors",
)
if not connector.metadata and self.requires_metadata:
return self.fail(
connector,
f"This checks requires metadata file to run. Please add {consts.METADATA_FILE_NAME} file to the connector code directory.",
)
if not connector.language:
return self.fail(connector, "Connector language could not be inferred")
if connector.language not in self.applies_to_connector_languages:
return self.skip(
connector,
f"Check does not apply to {connector.language.value} connectors",
)
if connector.connector_type not in self.applies_to_connector_types:
return self.skip(
connector,
f"Check does not apply to {connector.connector_type} connectors",
)
if self.applies_to_connector_support_levels and connector.support_level not in self.applies_to_connector_support_levels:
return self.skip(
connector,
f"Check does not apply to {connector.support_level} connectors",
)
if self.applies_to_connector_cloud_usage and connector.cloud_usage not in self.applies_to_connector_cloud_usage:
return self.skip(
connector,
f"Check does not apply to {connector.cloud_usage} connectors",
)
if connector.ab_internal_sl < self.applies_to_connector_ab_internal_sl:
return self.skip(
connector,
f"Check does not apply to connectors with sl < {self.applies_to_connector_ab_internal_sl}",
)
return self._run(connector)
def _run(self, connector: Connector) -> CheckResult:
raise NotImplementedError("Subclasses must implement run method")
def pass_(self, connector: Connector, message: str) -> CheckResult:
return CheckResult(connector=connector, check=self, status=CheckStatus.PASSED, message=message)
def fail(self, connector: Connector, message: str) -> CheckResult:
return CheckResult(connector=connector, check=self, status=CheckStatus.FAILED, message=message)
def skip(self, connector: Connector, reason: str) -> CheckResult:
return CheckResult(connector=connector, check=self, status=CheckStatus.SKIPPED, message=reason)
def create_check_result(self, connector: Connector, passed: bool, message: str) -> CheckResult:
status = CheckStatus.PASSED if passed else CheckStatus.FAILED
return CheckResult(check=self, connector=connector, status=status, message=message)
@dataclass
| Check |
python | kamyu104__LeetCode-Solutions | Python/maximum-ascending-subarray-sum.py | {
"start": 29,
"end": 389
} | class ____(object):
def maxAscendingSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = curr = 0
for i in xrange(len(nums)):
if not (i and nums[i-1] < nums[i]):
curr = 0
curr += nums[i]
result = max(result, curr)
return result
| Solution |
python | astropy__astropy | astropy/utils/masked/core.py | {
"start": 14087,
"end": 14672
} | class ____:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
| MaskedInfoBase |
python | python-pillow__Pillow | src/PIL/BlpImagePlugin.py | {
"start": 14293,
"end": 16533
} | class ____(ImageFile.PyEncoder):
_pushes_fd = True
def _write_palette(self) -> bytes:
data = b""
assert self.im is not None
palette = self.im.getpalette("RGBA", "RGBA")
for i in range(len(palette) // 4):
r, g, b, a = palette[i * 4 : (i + 1) * 4]
data += struct.pack("<4B", b, g, r, a)
while len(data) < 256 * 4:
data += b"\x00" * 4
return data
def encode(self, bufsize: int) -> tuple[int, int, bytes]:
palette_data = self._write_palette()
offset = 20 + 16 * 4 * 2 + len(palette_data)
data = struct.pack("<16I", offset, *((0,) * 15))
assert self.im is not None
w, h = self.im.size
data += struct.pack("<16I", w * h, *((0,) * 15))
data += palette_data
for y in range(h):
for x in range(w):
data += struct.pack("<B", self.im.getpixel((x, y)))
return len(data), 0, data
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode != "P":
msg = "Unsupported BLP image mode"
raise ValueError(msg)
magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
fp.write(magic)
assert im.palette is not None
fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression
alpha_depth = 1 if im.palette.mode == "RGBA" else 0
if magic == b"BLP1":
fp.write(struct.pack("<L", alpha_depth))
else:
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
fp.write(struct.pack("<b", alpha_depth))
fp.write(struct.pack("<b", 0)) # alpha encoding
fp.write(struct.pack("<b", 0)) # mips
fp.write(struct.pack("<II", *im.size))
if magic == b"BLP1":
fp.write(struct.pack("<i", 5))
fp.write(struct.pack("<i", 0))
ImageFile._save(im, fp, [ImageFile._Tile("BLP", (0, 0) + im.size, 0, im.mode)])
Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
Image.register_extension(BlpImageFile.format, ".blp")
Image.register_decoder("BLP1", BLP1Decoder)
Image.register_decoder("BLP2", BLP2Decoder)
Image.register_save(BlpImageFile.format, _save)
Image.register_encoder("BLP", BLPEncoder)
| BLPEncoder |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 90656,
"end": 98843
} | class ____(fixtures.DeclarativeMappedTest):
"""test for #10006"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
employee_m2m = Table(
"employee_m2m",
Base.metadata,
Column(
"left", Integer, ForeignKey("employee.id"), primary_key=True
),
Column(
"right", Integer, ForeignKey("employee.id"), primary_key=True
),
)
class Property(ComparableEntity, Base):
__tablename__ = "property"
id: Mapped[int] = mapped_column(primary_key=True)
value: Mapped[str] = mapped_column(name="value")
user_id: Mapped[int] = mapped_column(ForeignKey("employee.id"))
class Employee(ComparableEntity, Base):
__tablename__ = "employee"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
type: Mapped[str]
prop1 = relationship(Property, lazy="raise", uselist=False)
colleagues = relationship(
"Employee",
secondary=employee_m2m,
primaryjoin=lambda: Employee.id == employee_m2m.c.left,
secondaryjoin=lambda: Employee.id == employee_m2m.c.right,
lazy="raise",
collection_class=set,
)
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "employee",
}
class Manager(Employee):
if cls.inheritance_type == "joined":
__tablename__ = "manager"
id: Mapped[int] = mapped_column( # noqa: A001
ForeignKey("employee.id"), primary_key=True
)
__mapper_args__ = {"polymorphic_identity": "manager"}
class Engineer(Employee):
if cls.inheritance_type == "joined":
__tablename__ = "engineer"
id: Mapped[int] = mapped_column( # noqa: A001
ForeignKey("employee.id"), primary_key=True
)
__mapper_args__ = {"polymorphic_identity": "engineer"}
class Clerk(Employee):
if cls.inheritance_type == "joined":
__tablename__ = "clerk"
id: Mapped[int] = mapped_column( # noqa: A001
ForeignKey("employee.id"), primary_key=True
)
__mapper_args__ = {"polymorphic_identity": "clerk"}
class UnitHead(Employee):
if cls.inheritance_type == "joined":
__tablename__ = "unithead"
id: Mapped[int] = mapped_column( # noqa: A001
ForeignKey("employee.id"), primary_key=True
)
managers = relationship(
"Manager",
secondary=employee_m2m,
primaryjoin=lambda: Employee.id == employee_m2m.c.left,
secondaryjoin=lambda: (
and_(
Employee.id == employee_m2m.c.right,
Employee.type == "manager",
)
),
viewonly=True,
lazy="raise",
collection_class=set,
)
__mapper_args__ = {"polymorphic_identity": "unithead"}
@classmethod
def insert_data(cls, connection):
UnitHead, Manager, Engineer, Clerk, Property = cls.classes(
"UnitHead", "Manager", "Engineer", "Clerk", "Property"
)
with Session(connection) as sess:
unithead = UnitHead(
type="unithead",
name="unithead1",
prop1=Property(value="val unithead"),
)
manager = Manager(
type="manager",
name="manager1",
prop1=Property(value="val manager"),
)
other_manager = Manager(
type="manager",
name="manager2",
prop1=Property(value="val other manager"),
)
engineer = Engineer(
type="engineer",
name="engineer1",
prop1=Property(value="val engineer"),
)
clerk = Clerk(
type="clerk", name="clerk1", prop1=Property(value="val clerk")
)
unithead.colleagues.update([manager, other_manager])
manager.colleagues.update([engineer, clerk])
sess.add_all([unithead, manager, other_manager, engineer, clerk])
sess.commit()
@testing.variation("query_type", ["joinedload", "contains_eager"])
@testing.variation("use_criteria", [True, False])
def test_big_query(self, query_type, use_criteria):
Employee, UnitHead, Manager, Engineer, Clerk, Property = self.classes(
"Employee", "UnitHead", "Manager", "Engineer", "Clerk", "Property"
)
if query_type.contains_eager:
mgr = aliased(Manager)
clg = aliased(Employee)
clgs_prop1 = aliased(Property, name="clgs_prop1")
query = (
select(UnitHead)
.options(
contains_eager(UnitHead.managers.of_type(mgr))
.contains_eager(mgr.colleagues.of_type(clg))
.contains_eager(clg.prop1.of_type(clgs_prop1)),
)
.outerjoin(UnitHead.managers.of_type(mgr))
.outerjoin(mgr.colleagues.of_type(clg))
.outerjoin(clg.prop1.of_type(clgs_prop1))
)
if use_criteria:
ma_prop1 = aliased(Property)
uhead_prop1 = aliased(Property)
query = (
query.outerjoin(UnitHead.prop1.of_type(uhead_prop1))
.outerjoin(mgr.prop1.of_type(ma_prop1))
.where(
uhead_prop1.value == "val unithead",
ma_prop1.value == "val manager",
clgs_prop1.value == "val engineer",
)
)
elif query_type.joinedload:
if use_criteria:
query = (
select(UnitHead)
.options(
joinedload(
UnitHead.managers.and_(
Manager.prop1.has(value="val manager")
)
)
.joinedload(
Manager.colleagues.and_(
Employee.prop1.has(value="val engineer")
)
)
.joinedload(Employee.prop1),
)
.where(UnitHead.prop1.has(value="val unithead"))
)
else:
query = select(UnitHead).options(
joinedload(UnitHead.managers)
.joinedload(Manager.colleagues)
.joinedload(Employee.prop1),
)
session = fixture_session()
head = session.scalars(query).unique().one()
if use_criteria:
expected_managers = {
Manager(
name="manager1",
colleagues={Engineer(name="engineer1", prop1=Property())},
)
}
else:
expected_managers = {
Manager(
name="manager1",
colleagues={
Engineer(name="engineer1", prop1=Property()),
Clerk(name="clerk1"),
},
),
Manager(name="manager2"),
}
eq_(
head,
UnitHead(managers=expected_managers),
)
@testing.combinations(
(2,),
(3,),
id_="s",
argnames="num_levels",
)
@testing.combinations(
("with_poly_star",),
("inline",),
("selectin",),
("none",),
id_="s",
argnames="wpoly_type",
)
| MultiOfTypeContainsEagerTest |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py | {
"start": 2049,
"end": 2257
} | class ____:
""" __getnewargs_ex__ returns tuple with wrong type for both args """
def __getnewargs_ex__(self): # [invalid-getnewargs-ex-returned]
return ({'x': 'y'}, (2,))
| FifthBadGetNewArgsEx |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cache_data_api.py | {
"start": 2433,
"end": 4392
} | class ____(CachedFuncInfo[P, R]):
"""Implements the CachedFuncInfo interface for @st.cache_data."""
persist: CachePersistType
max_entries: int | None
ttl: float | timedelta | str | None
def __init__(
self,
func: Callable[P, R],
persist: CachePersistType,
max_entries: int | None,
ttl: float | timedelta | str | None,
show_spinner: bool | str,
show_time: bool = False,
hash_funcs: HashFuncsDict | None = None,
) -> None:
super().__init__(
func,
hash_funcs=hash_funcs,
show_spinner=show_spinner,
show_time=show_time,
)
self.persist = persist
self.max_entries = max_entries
self.ttl = ttl
self.validate_params()
@property
def cache_type(self) -> CacheType:
return CacheType.DATA
@property
def cached_message_replay_ctx(self) -> CachedMessageReplayContext:
return CACHE_DATA_MESSAGE_REPLAY_CTX
@property
def display_name(self) -> str:
"""A human-readable name for the cached function."""
return f"{self.func.__module__}.{self.func.__qualname__}"
def get_function_cache(self, function_key: str) -> Cache[R]:
return _data_caches.get_cache(
key=function_key,
persist=self.persist,
max_entries=self.max_entries,
ttl=self.ttl,
display_name=self.display_name,
)
def validate_params(self) -> None:
"""
Validate the params passed to @st.cache_data are compatible with cache storage.
When called, this method could log warnings if cache params are invalid
for current storage.
"""
_data_caches.validate_cache_params(
function_name=self.func.__name__,
persist=self.persist,
max_entries=self.max_entries,
ttl=self.ttl,
)
| CachedDataFuncInfo |
python | ionelmc__pytest-benchmark | tests/test_with_testcase.py | {
"start": 45,
"end": 285
} | class ____(unittest.TestCase):
@pytest.fixture(autouse=True)
def setupBenchmark(self, benchmark):
self.benchmark = benchmark
def test_foo(self):
self.benchmark(time.sleep, 0.000001)
| TerribleTerribleWayToWriteTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.